diff --git a/.github/workflows/benchmark.yaml b/.github/workflows/benchmark.yaml
index cf262a25f4..12057cd17e 100644
--- a/.github/workflows/benchmark.yaml
+++ b/.github/workflows/benchmark.yaml
@@ -30,7 +30,7 @@ concurrency:
 jobs:
   benchmark:
-    runs-on: ubuntu-large
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
        uses: actions/checkout@v4
@@ -57,10 +57,13 @@ jobs:
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
+          # Baseline = latest UPSTREAM release. Our fork doesn't tag releases,
+          # so querying ${{ github.repository }} gives 404. Comparing against
+          # upstream is also the meaningful baseline for performance work.
          if [[ -n "${{ inputs.before_image }}" ]]; then
            echo "BEFORE_IMAGE=${{ inputs.before_image }}" >> "$GITHUB_OUTPUT"
          else
-            LATEST_TAG=$(gh api repos/${{ github.repository }}/releases/latest --jq '.tag_name')
+            LATEST_TAG=$(gh api repos/kubescape/node-agent/releases/latest --jq '.tag_name')
            echo "BEFORE_IMAGE=quay.io/kubescape/node-agent:${LATEST_TAG}" >> "$GITHUB_OUTPUT"
          fi
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
new file mode 100644
index 0000000000..f647ab7ea1
--- /dev/null
+++ b/.github/workflows/build.yaml
@@ -0,0 +1,102 @@
+name: build-image
+
+on:
+  workflow_dispatch:
+    inputs:
+      IMAGE_TAG:
+        required: true
+        type: string
+        description: "Image tag for the node-agent image"
+      STORAGE_REF:
+        required: false
+        type: string
+        default: ""
+        description: "Branch/tag/commit of k8sstormcenter/storage to use (leave empty to keep go.mod default)"
+      PLATFORMS:
+        type: boolean
+        required: false
+        default: false
+        description: "Build for both amd64 and arm64"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+      packages: write
+      contents: read
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: recursive
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version: "1.25"
+
+      - name: Update storage dependency
+        if: ${{ inputs.STORAGE_REF != '' }}
+        env:
+          STORAGE_REF: ${{ inputs.STORAGE_REF }}
+          GONOSUMCHECK: "*"
+          GOFLAGS: ""
+        run: |
+          echo "Replacing github.com/kubescape/storage with github.com/k8sstormcenter/storage@${STORAGE_REF}"
+          go mod edit -replace "github.com/kubescape/storage=github.com/k8sstormcenter/storage@${STORAGE_REF}"
+          go mod tidy
+          echo "Resolved storage version:"
+          grep "k8sstormcenter/storage" go.sum | head -1
+
+      - name: Ensure ig is installed
+        run: |
+          curl -L https://github.com/inspektor-gadget/inspektor-gadget/releases/download/v0.45.0/ig_0.45.0_amd64.deb -O
+          sudo dpkg -i ig_0.45.0_amd64.deb
+
+      - name: Build gadgets
+        run: make gadgets
+
+      - name: Set up QEMU
+        if: ${{ inputs.PLATFORMS }}
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v6
+        with:
+          context: .
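+          # `platforms:` below is a GitHub-expressions ternary:
+          #   PLATFORMS=true  -> 'linux/amd64,linux/arm64' (the arm64 leg is why
+          #   the QEMU step above is conditional); PLATFORMS=false -> amd64 only.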
+          file: build/Dockerfile
+          tags: ghcr.io/${{ github.repository_owner }}/node-agent:${{ inputs.IMAGE_TAG }}
+          build-args: image_version=${{ inputs.IMAGE_TAG }}
+          platforms: ${{ inputs.PLATFORMS && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
+          push: true
+
+  trigger-component-tests:
+    needs: build
+    runs-on: ubuntu-latest
+    permissions:
+      actions: write
+    steps:
+      - name: Trigger component tests
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          STORAGE_TAG="${{ inputs.IMAGE_TAG }}"
+          NODE_AGENT_TAG="${{ inputs.IMAGE_TAG }}"
+          STORAGE_REF="${{ inputs.STORAGE_REF }}"
+          echo "Triggering component tests with STORAGE_TAG=${STORAGE_TAG} NODE_AGENT_TAG=${NODE_AGENT_TAG} STORAGE_REF=${STORAGE_REF}"
+          gh workflow run component-tests.yaml \
+            --repo "${{ github.repository }}" \
+            --ref "${{ github.ref_name }}" \
+            -f STORAGE_TAG="${STORAGE_TAG}" \
+            -f NODE_AGENT_TAG="${NODE_AGENT_TAG}" \
+            -f STORAGE_REF="${STORAGE_REF}"
diff --git a/.github/workflows/bypass.yaml b/.github/workflows/bypass.yaml
index 49b38b42c5..be81f0260b 100644
--- a/.github/workflows/bypass.yaml
+++ b/.github/workflows/bypass.yaml
@@ -20,7 +20,7 @@ jobs:
     needs: reset-run-number
     uses: ./.github/workflows/incluster-comp-pr-merged.yaml
     with:
-      IMAGE_NAME: quay.io/${{ github.repository_owner }}/node-agent
+      IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/node-agent
       IMAGE_TAG: v0.2.${{ needs.reset-run-number.outputs.run-number }}
       COMPONENT_NAME: nodeAgent
       CGO_ENABLED: 0
diff --git a/.github/workflows/component-tests.yaml b/.github/workflows/component-tests.yaml
index 6c45625f05..b3133464a4 100644
--- a/.github/workflows/component-tests.yaml
+++ b/.github/workflows/component-tests.yaml
@@ -1,24 +1,134 @@
+# =============================================================================
+# Node Agent Component Tests
+# =============================================================================
+#
+# Architecture:
+#   There are TWO independent artifacts in play:
+#
+#   1. Node-agent container image — the eBPF runtime agent deployed INTO the
+#      Kind cluster via Helm. Lives in pkg/, cmd/, Makefile, Dockerfile, etc.
+#      Changes here require an image rebuild before tests can validate them.
+#
+#   2. Component test binary — a Go test suite compiled on-the-fly from
+#      tests/component_test.go via `go test`. Runs OUTSIDE the cluster on the
+#      CI runner. It drives the cluster by creating k8s resources, exec-ing
+#      into pods, and querying Alertmanager for alerts.
+#      Changes here do NOT require a node-agent image rebuild.
+#
+# Rebuild logic (on push):
+#   - If ONLY files under tests/ or .github/ changed → skip image build,
+#     run tests immediately against the existing 'latest' image.
+#   - If ANY agent code changed (pkg/, cmd/, go.mod, Makefile, …) → rebuild
+#     the node-agent image first, then run tests against the freshly built image.
+#
+# Manual trigger (workflow_dispatch):
+#   - Use the `build_image` checkbox to force an image rebuild.
+#   - Supply NODE_AGENT_TAG / STORAGE_TAG to pin specific pre-built images.
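+#
+# Example of a manual run via the gh CLI (tag values are hypothetical — use
+# the tags your build runs actually pushed):
+#   gh workflow run component-tests.yaml \
+#     -f build_image=false \
+#     -f NODE_AGENT_TAG=test-ab12cd3-s9f8e7d6 \
+#     -f STORAGE_TAG=latest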
+# =============================================================================
+
 name: Node Agent Component Tests

 on:
-  pull_request:
-    types: [synchronize, ready_for_review, opened, reopened]
+  push:
+    branches:
+      - feat/signature-verification
+      - feat/tamperalert
+      - feat/tamper-detection
+  workflow_dispatch:
+    inputs:
+      build_image:
+        description: 'Build and push a new container image for the test'
+        type: boolean
+        required: false
+        default: false
+      STORAGE_TAG:
+        description: 'Storage image tag (must match the tag built by storage/build)'
+        type: string
+        required: true
+        default: 'latest'
+      NODE_AGENT_TAG:
+        description: 'Node-agent image tag (must match the tag built by node-agent/build)'
+        type: string
+        required: true
+        default: 'latest'
+      STORAGE_REF:
+        description: 'Commit SHA of k8sstormcenter/storage to use (leave empty to resolve current main HEAD at runtime)'
+        type: string
+        required: false
+        default: ''

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

 jobs:
+  # -------------------------------------------------------------------
+  # Detect what changed to decide whether an image rebuild is needed.
+  # On push: compare HEAD with HEAD~1.
+  # On workflow_dispatch: always outputs false (rebuild controlled by input).
+  # -------------------------------------------------------------------
+  detect-changes:
+    runs-on: ubuntu-latest
+    outputs:
+      needs_rebuild: ${{ steps.check.outputs.needs_rebuild }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 2
+
+      - name: Check for agent code changes
+        id: check
+        run: |
+          if [ "${{ github.event_name }}" != "push" ]; then
+            echo "Not a push event — rebuild decision deferred to workflow inputs"
+            echo "needs_rebuild=false" >> "$GITHUB_OUTPUT"
+            exit 0
+          fi
+
+          CHANGED=$(git diff --name-only HEAD~1 HEAD)
+          echo "=== Changed files ==="
+          echo "$CHANGED"
+          echo ""
+
+          # Agent code = anything outside tests/ and .github/
+          # These are the paths that end up in the node-agent container image.
+          AGENT_CHANGES=$(echo "$CHANGED" | grep -vE '^(tests/|\.github/)' || true)
+
+          if [ -n "$AGENT_CHANGES" ]; then
+            echo "=== Agent code changed (rebuild needed) ==="
+            echo "$AGENT_CHANGES"
+            echo "needs_rebuild=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "=== Only test/workflow files changed — no rebuild needed ==="
+            echo "needs_rebuild=false" >> "$GITHUB_OUTPUT"
+          fi
+
+  # -------------------------------------------------------------------
+  # Build and push the node-agent container image.
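+  # Classification examples (paths are hypothetical): a push touching only
+  # tests/component_test.go or .github/workflows/ skips this job; one touching
+  # pkg/, cmd/, go.mod or the Makefile triggers it.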
+  # Triggers when:
+  #   - Manual dispatch with build_image=true, OR
+  #   - Push event where agent code changed (detected above)
+  # -------------------------------------------------------------------
   build-and-push-image:
+    needs: [detect-changes]
+    if: >-
+      (github.event_name == 'workflow_dispatch' && inputs.build_image == true) ||
+      (needs.detect-changes.outputs.needs_rebuild == 'true')
     runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+      id-token: write
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
-      - name: Login to Quay.io
+      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
-          registry: quay.io/kubescape
-          username: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
-          password: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
      - name: Install IG
        run: |
          sudo apt-get update
@@ -28,23 +138,82 @@ jobs:
          echo "Installing IG version: ${IG_VERSION}"
          curl -sL https://github.com/inspektor-gadget/inspektor-gadget/releases/download/${IG_VERSION}/ig-linux-${IG_ARCH}-${IG_VERSION}.tar.gz | sudo tar -C /usr/local/bin -xzf - ig
          sudo chmod +x /usr/local/bin/ig
-      - name: Build the Image and Push to Quay.io
+
+      # Resolve the storage commit SHA once and use the same one for the
+      # image build AND the test runner (output downstream). Without this,
+      # the docker image and the test binary can compile against different
+      # storage versions when their go.mod replace directives drift.
+      - name: Resolve storage ref
+        id: resolve-storage
+        env:
+          STORAGE_REF_INPUT: ${{ inputs.STORAGE_REF }}
+        run: |
+          STORAGE_REF="${STORAGE_REF_INPUT}"
+          if [ -z "${STORAGE_REF}" ]; then
+            STORAGE_REF=$(git ls-remote https://github.com/k8sstormcenter/storage refs/heads/main | awk '{print $1}')
+            echo "Resolved k8sstormcenter/storage main to: ${STORAGE_REF}"
+          else
+            echo "Using supplied STORAGE_REF: ${STORAGE_REF}"
+          fi
+          echo "storage_ref=${STORAGE_REF}" >> "$GITHUB_OUTPUT"
+          echo "storage_short=${STORAGE_REF:0:7}" >> "$GITHUB_OUTPUT"
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.25"
+
+      - name: Pin storage version for image build
+        env:
+          STORAGE_REF: ${{ steps.resolve-storage.outputs.storage_ref }}
+          GOFLAGS: ""
+        run: |
+          echo "Replacing github.com/kubescape/storage with github.com/k8sstormcenter/storage@${STORAGE_REF}"
+          go mod edit -replace "github.com/kubescape/storage=github.com/k8sstormcenter/storage@${STORAGE_REF}"
+          go mod tidy
+          echo "Resolved storage version:"
+          grep "k8sstormcenter/storage" go.sum | head -1
+
+      - name: Build the Image and Push to GHCR
        id: build-and-push-image
        run: |
          COMMIT_HASH=$(git rev-parse --short HEAD)
-          export IMAGE_TAG=test-${COMMIT_HASH}
-          export IMAGE_REPO=quay.io/kubescape/node-agent
+          STORAGE_SHORT="${{ steps.resolve-storage.outputs.storage_short }}"
+          # Image tag encodes both node-agent and storage SHAs so the same
+          # source pair always produces the same artifact and can be cached.
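+          # e.g. COMMIT_HASH=ab12cd3 and STORAGE_SHORT=9f8e7d6 (hypothetical SHAs)
+          #   -> IMAGE_TAG=test-ab12cd3-s9f8e7d6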
+          export IMAGE_TAG=test-${COMMIT_HASH}-s${STORAGE_SHORT}
+          export IMAGE_REPO=ghcr.io/${{ github.repository_owner }}/node-agent
          echo "image_repo=${IMAGE_REPO}" >> "$GITHUB_OUTPUT"
-          export IMAGE_NAME=quay.io/kubescape/node-agent:${IMAGE_TAG}
+          export IMAGE_NAME=ghcr.io/${{ github.repository_owner }}/node-agent:${IMAGE_TAG}
          echo "image_tag=${IMAGE_TAG}" >> "$GITHUB_OUTPUT"
          make docker-build TAG=${IMAGE_TAG} IMAGE=${IMAGE_REPO} && make docker-push TAG=${IMAGE_TAG} IMAGE=${IMAGE_REPO}
    outputs:
      image_tag: ${{ steps.build-and-push-image.outputs.image_tag }}
      image_repo: ${{ steps.build-and-push-image.outputs.image_repo }}
+      storage_ref: ${{ steps.resolve-storage.outputs.storage_ref }}

+  # -------------------------------------------------------------------
+  # Component tests.
+  #
+  # These are Go tests compiled from tests/component_test.go — they are
+  # NOT part of the node-agent container image. The test binary runs on
+  # the CI runner and talks to the Kind cluster via the k8s API.
+  #
+  # Dependency logic:
+  #   - If build-and-push-image ran → waits for it, uses the freshly
+  #     built image tag.
+  #   - If build-and-push-image was skipped (tests-only change) → runs
+  #     immediately with the default 'latest' image.
+  #   - If build-and-push-image failed → tests do NOT run (no point
+  #     testing against a stale image when code changed).
+  # -------------------------------------------------------------------
   component-tests:
+    needs: [detect-changes, build-and-push-image]
+    # Run when build succeeded or was skipped; don't run if build failed.
+    if: >-
+      always() && !cancelled() &&
+      (needs.build-and-push-image.result == 'success' || needs.build-and-push-image.result == 'skipped')
     runs-on: ubuntu-latest
-    needs: build-and-push-image
     continue-on-error: true
    strategy:
      matrix:
@@ -71,7 +240,14 @@
          Test_21_AlertOnPartialThenLearnNetworkTest,
          Test_22_AlertOnPartialNetworkProfileTest,
          Test_23_RuleCooldownTest,
-          Test_24_ProcessTreeDepthTest
+          Test_24_ProcessTreeDepthTest,
+          Test_27_ApplicationProfileOpens,
+          Test_28_UserDefinedNetworkNeighborhood,
+          Test_29_SignedApplicationProfile,
+          Test_30_TamperedSignedProfiles,
+          Test_31_TamperDetectionAlert,
+          Test_32_UnexpectedProcessArguments,
+          Test_33_AnalyzeOpensWildcardAnchoring
        ]
    steps:
      - name: Checkout code
@@ -97,17 +273,39 @@ jobs:
          helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --set grafana.enabled=false --namespace monitoring --create-namespace --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false,prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false --set prometheus.prometheusSpec.maximumStartupDurationSeconds=300 --wait --timeout 5m
          # Check that the prometheus pod is running
          kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=prometheus -n monitoring --timeout=300s
+
+      # Image selection logic:
+      #   - If the build job ran and produced a tag → use it.
+      #   - Otherwise fall back to the workflow_dispatch input or 'latest'.
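+      # Spelled out (mirrors the `||` fallback chain in the run block below):
+      #   build job ran                -> <freshly built repo>:<freshly built tag>
+      #   dispatch with NODE_AGENT_TAG -> ghcr.io/k8sstormcenter/node-agent:<input tag>
+      #   push, no rebuild needed      -> ghcr.io/k8sstormcenter/node-agent:latest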
      - name: Install Node Agent Chart
        run: |
-          STORAGE_TAG=$(./tests/scripts/storage-tag.sh)
-          echo "Storage tag that will be used: ${STORAGE_TAG}"
-          helm upgrade --install kubescape ./tests/chart --set clusterName=`kubectl config current-context` --set nodeAgent.image.tag=${{ needs.build-and-push-image.outputs.image_tag }} --set nodeAgent.image.repository=${{ needs.build-and-push-image.outputs.image_repo }} --set storage.image.tag=${STORAGE_TAG} -n kubescape --create-namespace --wait --timeout 5m --debug
+          STORAGE_TAG="${{ inputs.STORAGE_TAG || 'latest' }}"
+          echo "Storage tag: ${STORAGE_TAG}"
+
+          # Prefer freshly built image; fall back to input or default.
+          IMAGE_TAG="${{ needs.build-and-push-image.outputs.image_tag || inputs.NODE_AGENT_TAG || 'latest' }}"
+          IMAGE_REPO="${{ needs.build-and-push-image.outputs.image_repo || 'ghcr.io/k8sstormcenter/node-agent' }}"
+          echo "Node Agent image: ${IMAGE_REPO}:${IMAGE_TAG}"
+
+          # Log whether we're using a freshly built image or a pre-existing one.
+          if [ -n "${{ needs.build-and-push-image.outputs.image_tag }}" ]; then
+            echo ">>> Using FRESHLY BUILT image from this workflow run"
+          else
+            echo ">>> Using PRE-EXISTING image (no agent code changes detected)"
+          fi
+
+          helm upgrade --install kubescape ./tests/chart --set clusterName=`kubectl config current-context` --set nodeAgent.image.tag=${IMAGE_TAG} --set nodeAgent.image.repository=${IMAGE_REPO} --set storage.image.tag=${STORAGE_TAG} -n kubescape --create-namespace --wait --timeout 5m --debug
          # Check that the node-agent pod is running
          kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=node-agent -n kubescape --timeout=300s
          sleep 5
      - name: Run Port Forwarding
        run: |
          ./tests/scripts/port-forward.sh
+
+      # The test binary is compiled from source here — it is NOT part of
+      # the node-agent container image. Only changes under tests/ affect
+      # it; agent code changes (pkg/, cmd/, …) require an image rebuild
+      # but do NOT change the test binary.
      - name: Set up Go
        env:
          CGO_ENABLED: 0
@@ -117,6 +315,21 @@ jobs:
      - name: Set unlimited memlock limit
        run: |
          sudo sh -c "ulimit -l unlimited"
+      - name: Update storage dependency
+        env:
+          STORAGE_REF: ${{ needs.build-and-push-image.outputs.storage_ref || inputs.STORAGE_REF }}
+          GONOSUMCHECK: "*"
+          GOFLAGS: ""
+        run: |
+          if [ -z "${STORAGE_REF}" ]; then
+            STORAGE_REF=$(git ls-remote https://github.com/k8sstormcenter/storage refs/heads/main | awk '{print $1}')
+            echo "Resolved k8sstormcenter/storage main to: ${STORAGE_REF}"
+          fi
+          echo "Replacing github.com/kubescape/storage with github.com/k8sstormcenter/storage@${STORAGE_REF}"
+          go mod edit -replace "github.com/kubescape/storage=github.com/k8sstormcenter/storage@${STORAGE_REF}"
+          go mod tidy
+          echo "Resolved storage version:"
+          grep "k8sstormcenter/storage" go.sum | head -1
      - name: Run test
        run: |
          cd tests && go test -v ./... -run ${{ matrix.test }} --timeout=20m --tags=component
@@ -128,3 +341,28 @@
          echo "-----------------------------------------"
          echo "Storage logs"
          kubectl logs $(kubectl get pods -n kubescape -o name | grep storage) -n kubescape
+
+  trigger-integration-tests:
+    needs: component-tests
+    if: >-
+      github.event_name == 'workflow_dispatch' &&
+      inputs.STORAGE_TAG != '' &&
+      inputs.NODE_AGENT_TAG != ''
+    runs-on: ubuntu-latest
+    steps:
+      - name: Trigger storage integration tests
+        env:
+          GH_TOKEN: ${{ secrets.CROSS_REPO_PAT }}
+        run: |
+          STORAGE_TAG="${{ inputs.STORAGE_TAG }}"
+          NODE_AGENT_TAG="${{ inputs.NODE_AGENT_TAG }}"
+          echo "Triggering storage integration tests"
+          echo "  node_agent_image=ghcr.io/${{ github.repository_owner }}/node-agent:${NODE_AGENT_TAG}"
+          echo "  storage_image=ghcr.io/${{ github.repository_owner }}/storage:${STORAGE_TAG}"
+          gh workflow run manual-integration-tests.yml \
+            --repo "${{ github.repository_owner }}/storage" \
+            --ref "${{ github.ref_name }}" \
+            -f branch="${{ github.ref_name }}" \
+            -f branch_helm_chart=main \
+            -f node_agent_image="ghcr.io/${{ github.repository_owner }}/node-agent:${NODE_AGENT_TAG}" \
+            -f storage_image="ghcr.io/${{ github.repository_owner }}/storage:${STORAGE_TAG}"
diff --git a/.github/workflows/incluster-comp-pr-merged.yaml b/.github/workflows/incluster-comp-pr-merged.yaml
index dface65b20..831858e430 100644
--- a/.github/workflows/incluster-comp-pr-merged.yaml
+++ b/.github/workflows/incluster-comp-pr-merged.yaml
@@ -109,12 +109,12 @@ jobs:
        id: unit-test
        run: go test -exec sudo -v ./...

-      - name: Login to Quay
+      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
-          registry: quay.io
-          username: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
-          password: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push
        uses: docker/build-push-action@v6
@@ -357,12 +357,12 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

-      - name: Login to Quay
+      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
-          registry: quay.io
-          username: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
-          password: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Docker retag
        run: |
diff --git a/.github/workflows/pr-created.yaml b/.github/workflows/pr-created.yaml
index 856fa2bc26..76ad4f3cbf 100644
--- a/.github/workflows/pr-created.yaml
+++ b/.github/workflows/pr-created.yaml
@@ -13,6 +13,10 @@ concurrency:

 jobs:
   pr-created:
+    permissions:
+      pull-requests: write
+      security-events: write
+      contents: read
    uses: ./.github/workflows/incluster-comp-pr-created.yaml
    with:
      GO_VERSION: "1.25"
diff --git a/.github/workflows/pr-merged.yaml b/.github/workflows/pr-merged.yaml
index ddeee9206c..1e98658f59 100644
--- a/.github/workflows/pr-merged.yaml
+++ b/.github/workflows/pr-merged.yaml
@@ -35,7 +35,7 @@ jobs:
      pull-requests: read
    uses: ./.github/workflows/incluster-comp-pr-merged.yaml
    with:
-      IMAGE_NAME: quay.io/${{ github.repository_owner }}/node-agent
+      IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/node-agent
      IMAGE_TAG: v0.3.${{ needs.reset-run-number.outputs.run-number }}
      COMPONENT_NAME: nodeAgent
      CGO_ENABLED: 0
diff --git a/.github/workflows/sign-object.yaml b/.github/workflows/sign-object.yaml
new file mode 100644
index 0000000000..9233c62347
--- /dev/null
+++ b/.github/workflows/sign-object.yaml
@@ -0,0 +1,67 @@
+name: Build sign-object image
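+# Rebuilds the signer CLI image when the signer or the signature packages
+# change. PR events build both platforms as a smoke test but skip the login
+# and registry push (see the `if:` and `push:` expressions below).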
+
+on:
+  push:
+    branches: [main, feat/signature-verification]
+    paths:
+      - 'cmd/sign-object/**'
+      - 'pkg/signature/**'
+      - 'pkg/signature/profiles/**'
+      - 'go.mod'
+      - 'go.sum'
+  pull_request:
+    paths:
+      - 'cmd/sign-object/**'
+      - 'pkg/signature/**'
+      - 'pkg/signature/profiles/**'
+  workflow_dispatch:
+    inputs:
+      IMAGE_TAG:
+        required: false
+        type: string
+        default: 'latest'
+        description: 'Image tag for the sign-object image'
+
+permissions:
+  packages: write
+  contents: read
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository_owner }}/sign-object
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set image tag
+        id: tag
+        run: |
+          if [ "${{ github.event_name }}" = "workflow_dispatch" ] && [ -n "${{ inputs.IMAGE_TAG }}" ]; then
+            echo "tag=${{ inputs.IMAGE_TAG }}" >> "$GITHUB_OUTPUT"
+          elif [ "${{ github.event_name }}" = "pull_request" ]; then
+            echo "tag=pr-${{ github.event.number }}" >> "$GITHUB_OUTPUT"
+          else
+            echo "tag=latest" >> "$GITHUB_OUTPUT"
+          fi
+
+      - uses: docker/setup-buildx-action@v3
+
+      - uses: docker/login-action@v3
+        if: github.event_name != 'pull_request'
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: cmd/sign-object/Dockerfile
+          tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
+          platforms: linux/amd64,linux/arm64
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          push: ${{ github.event_name != 'pull_request' }}
diff --git a/.gitignore b/.gitignore
index 135c3206a7..db15f79ba9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,5 @@ temp
 resources/ebpf/falco/*
 node-agent
 __pycache__
-tracers.tar
\ No newline at end of file
+tracers.tar
+vendor
\ No newline at end of file
diff --git a/Makefile b/Makefile
index b9687e802f..c22b9b2aa9 100644
--- a/Makefile
+++ b/Makefile
@@ -11,6 +11,10 @@ TAG?=test
 binary:
 	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BINARY_NAME) ./cmd/main.go

+.PHONY: check-legacy-packages
+check-legacy-packages:
+	go test ./tests/containerprofilecache -run TestLegacyPackagesDeleted
+
 docker-build-only:
 	docker buildx build --platform linux/amd64 -t $(IMAGE):$(TAG) -f $(DOCKERFILE_PATH) --load .
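The new check-legacy-packages target assumes a guard test under
tests/containerprofilecache that this diff does not include. A minimal sketch
of such a test — directory paths are inferred from the cmd/main.go hunks
below; the real test may differ:

	package containerprofilecache

	import (
		"os"
		"testing"
	)

	// TestLegacyPackagesDeleted fails if the package directories removed by
	// the ContainerProfileCache migration ever reappear in the tree.
	func TestLegacyPackagesDeleted(t *testing.T) {
		legacyDirs := []string{
			"../../pkg/objectcache/applicationprofilecache",
			"../../pkg/objectcache/networkneighborhoodcache",
		}
		for _, dir := range legacyDirs {
			if _, err := os.Stat(dir); !os.IsNotExist(err) {
				t.Errorf("legacy package still present: %s", dir)
			}
		}
	}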
diff --git a/cmd/main.go b/cmd/main.go
index 9fc4824bf5..b9d527b17c 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -45,10 +45,9 @@ import (
 	"github.com/kubescape/node-agent/pkg/nodeprofilemanager"
 	nodeprofilemanagerv1 "github.com/kubescape/node-agent/pkg/nodeprofilemanager/v1"
 	"github.com/kubescape/node-agent/pkg/objectcache"
-	"github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache"
+	"github.com/kubescape/node-agent/pkg/objectcache/containerprofilecache"
 	"github.com/kubescape/node-agent/pkg/objectcache/dnscache"
 	"github.com/kubescape/node-agent/pkg/objectcache/k8scache"
-	"github.com/kubescape/node-agent/pkg/objectcache/networkneighborhoodcache"
 	objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1"
 	"github.com/kubescape/node-agent/pkg/processtree"
 	containerprocesstree "github.com/kubescape/node-agent/pkg/processtree/container"
@@ -229,7 +228,7 @@ func main() {
 		ruleBindingCache = rulebindingcachev1.NewCache(cfg, k8sClient, ruleCreator)
 		rulesWatcher := ruleswatcher.NewRulesWatcher(k8sClient, ruleCreator, func() {
 			ruleBindingCache.RefreshRuleBindingsRules()
-		})
+		}, &cfg)
 		dWatcher.AddAdaptor(rulesWatcher)
 	}
@@ -297,16 +296,19 @@ func main() {
 		ruleBindingNotify = make(chan rulebinding.RuleBindingNotify, 100)
 		ruleBindingCache.AddNotifier(&ruleBindingNotify)

-		apc := applicationprofilecache.NewApplicationProfileCache(cfg, storageClient, k8sObjectCache)
-		apc.Start(ctx)
-
-		nnc := networkneighborhoodcache.NewNetworkNeighborhoodCache(cfg, storageClient, k8sObjectCache)
-		nnc.Start(ctx)
+		cpc := containerprofilecache.NewContainerProfileCache(cfg, storageClient, k8sObjectCache, prometheusExporter)
+		// Wire R1016 tamper alerts: when a user-defined AP/NN overlay is
+		// loaded but its signature no longer verifies, the CP cache emits
+		// "Signed profile tampered" through this exporter. Optional —
+		// nil-safe inside the cache.
+		cpc.SetTamperAlertExporter(exporter)
+		cpc.Start(ctx)
+		logger.L().Info("ContainerProfileCache active; legacy AP/NN caches removed")

 		dc := dnscache.NewDnsCache(dnsResolver)

 		// create object cache
-		objCache = objectcachev1.NewObjectCache(k8sObjectCache, apc, nnc, dc)
+		objCache = objectcachev1.NewObjectCache(k8sObjectCache, cpc, dc)

 		ruleCooldown := rulecooldown.NewRuleCooldown(cfg.RuleCoolDown)
@@ -328,10 +330,9 @@ func main() {

 	} else {
 		ruleManager = rulemanager.CreateRuleManagerMock()
-		apc := &objectcache.ApplicationProfileCacheMock{}
-		nnc := &objectcache.NetworkNeighborhoodCacheMock{}
+		cpc := &objectcache.ContainerProfileCacheMock{}
 		dc := &objectcache.DnsCacheMock{}
-		objCache = objectcachev1.NewObjectCache(k8sObjectCache, apc, nnc, dc)
+		objCache = objectcachev1.NewObjectCache(k8sObjectCache, cpc, dc)
 		ruleBindingNotify = make(chan rulebinding.RuleBindingNotify, 1)
 	}
diff --git a/cmd/sign-object/Dockerfile b/cmd/sign-object/Dockerfile
new file mode 100644
index 0000000000..0f4284c473
--- /dev/null
+++ b/cmd/sign-object/Dockerfile
@@ -0,0 +1,20 @@
+FROM --platform=$BUILDPLATFORM golang:1.25-trixie AS builder
+
+ENV GO111MODULE=on CGO_ENABLED=0
+WORKDIR /src
+ARG TARGETOS TARGETARCH
+
+COPY go.mod go.sum ./
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg \
+    go mod download
+
+COPY . .
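+# Source is copied after the cached `go mod download` layer above, so code
+# edits reuse the module cache; only go.mod/go.sum changes invalidate it.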
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg \
+    GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /sign-object ./cmd/sign-object
+
+FROM gcr.io/distroless/static-debian13:latest
+COPY --from=builder /sign-object /usr/local/bin/sign-object
+WORKDIR /work
+ENTRYPOINT ["sign-object"]
diff --git a/cmd/sign-object/main.go b/cmd/sign-object/main.go
new file mode 100644
index 0000000000..c803320b3c
--- /dev/null
+++ b/cmd/sign-object/main.go
@@ -0,0 +1,550 @@
+package main
+
+import (
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	k8syaml "k8s.io/apimachinery/pkg/util/yaml"
+
+	rulemanagertypesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1"
+	"github.com/kubescape/node-agent/pkg/signature"
+	"github.com/kubescape/node-agent/pkg/signature/profiles"
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+	sigsyaml "sigs.k8s.io/yaml"
+)
+
+var (
+	inputFile  string
+	outputFile string
+	keyFile    string
+	objectType string
+	useKeyless bool
+	verbose    bool
+	strict     bool
+	jsonOutput bool
+	publicOnly bool
+	command    string
+)
+
+func main() {
+	if len(os.Args) < 2 {
+		printUsage()
+		os.Exit(1)
+	}
+
+	command = os.Args[1]
+
+	argsRewritten := false
+	if command == "-h" || command == "--help" {
+		printUsage()
+		os.Exit(0)
+	}
+	if strings.HasPrefix(command, "-") {
+		command = "sign"
+		argsRewritten = true
+	}
+
+	switch command {
+	case "sign", "":
+		parseSignFlags()
+		if argsRewritten {
+			os.Args = append([]string{"sign-object"}, os.Args[1:]...)
+		}
+	case "verify":
+		parseVerifyFlags()
+		os.Args = append([]string{"sign-object verify"}, os.Args[2:]...)
+	case "generate-keypair":
+		parseGenerateFlags()
+		os.Args = append([]string{"sign-object generate-keypair"}, os.Args[2:]...)
+	case "extract-signature":
+		parseExtractFlags()
+		os.Args = append([]string{"sign-object extract-signature"}, os.Args[2:]...)
+ case "help", "--help", "-h": + printUsage() + os.Exit(0) + default: + fmt.Fprintf(os.Stderr, "Unknown command: %s\n", command) + printUsage() + os.Exit(1) + } + + if err := runCommand(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +func parseSignFlags() { + fs := flag.NewFlagSet("sign-object sign", flag.ExitOnError) + fs.StringVar(&inputFile, "file", "", "Input object YAML file (required)") + fs.StringVar(&outputFile, "output", "", "Output file for signed object (required)") + fs.StringVar(&keyFile, "key", "", "Path to private key file") + fs.StringVar(&objectType, "type", "auto", "Object type: applicationprofile, seccompprofile, networkneighborhood, rules, or auto") + fs.BoolVar(&useKeyless, "keyless", false, "Use keyless signing (OIDC)") + fs.BoolVar(&verbose, "verbose", false, "Enable verbose logging") + + offset := 2 + if len(os.Args) > 1 && strings.HasPrefix(os.Args[1], "-") { + offset = 1 + } + + if err := fs.Parse(os.Args[offset:]); err != nil { + fmt.Fprintf(os.Stderr, "Error parsing flags: %v\n", err) + os.Exit(1) + } + + if inputFile == "" { + fmt.Fprintln(os.Stderr, "Error: --file is required") + fs.PrintDefaults() + os.Exit(1) + } + + if outputFile == "" { + fmt.Fprintln(os.Stderr, "Error: --output is required") + fs.PrintDefaults() + os.Exit(1) + } + + if !useKeyless && keyFile == "" { + fmt.Fprintln(os.Stderr, "Error: either --keyless or --key must be specified") + fs.PrintDefaults() + os.Exit(1) + } +} + +func parseVerifyFlags() { + fs := flag.NewFlagSet("sign-object verify", flag.ExitOnError) + fs.StringVar(&inputFile, "file", "", "Signed object YAML file (required)") + fs.StringVar(&objectType, "type", "auto", "Object type: applicationprofile, seccompprofile, networkneighborhood, rules, or auto") + fs.BoolVar(&strict, "strict", true, "Require trusted issuer/identity") + fs.BoolVar(&verbose, "verbose", false, "Enable verbose logging") + + if err := fs.Parse(os.Args[2:]); err != nil { + fmt.Fprintf(os.Stderr, "Error parsing flags: %v\n", err) + os.Exit(1) + } + + if inputFile == "" { + fmt.Fprintln(os.Stderr, "Error: --file is required") + fs.PrintDefaults() + os.Exit(1) + } +} + +func parseGenerateFlags() { + fs := flag.NewFlagSet("sign-object generate-keypair", flag.ExitOnError) + fs.StringVar(&outputFile, "output", "", "Output PEM file") + fs.BoolVar(&publicOnly, "public-only", false, "Only output public key") + + if err := fs.Parse(os.Args[2:]); err != nil { + fmt.Fprintf(os.Stderr, "Error parsing flags: %v\n", err) + os.Exit(1) + } + + if outputFile == "" { + fmt.Fprintln(os.Stderr, "Error: --output is required") + fs.PrintDefaults() + os.Exit(1) + } +} + +func parseExtractFlags() { + fs := flag.NewFlagSet("sign-object extract-signature", flag.ExitOnError) + fs.StringVar(&inputFile, "file", "", "Signed object YAML file (required)") + fs.StringVar(&objectType, "type", "auto", "Object type: applicationprofile, seccompprofile, networkneighborhood, rules, or auto") + fs.BoolVar(&jsonOutput, "json", false, "Output as JSON") + + if err := fs.Parse(os.Args[2:]); err != nil { + fmt.Fprintf(os.Stderr, "Error parsing flags: %v\n", err) + os.Exit(1) + } + + if inputFile == "" { + fmt.Fprintln(os.Stderr, "Error: --file is required") + fs.PrintDefaults() + os.Exit(1) + } +} + +func runCommand() error { + switch command { + case "sign", "": + return runSign() + case "verify": + return runVerify() + case "generate-keypair": + return runGenerateKeyPair() + case "extract-signature": + return runExtractSignature() + default: + return 
fmt.Errorf("unknown command: %s", command) + } +} + +func runSign() error { + data, err := os.ReadFile(inputFile) + if err != nil { + return fmt.Errorf("failed to read input file: %w", err) + } + + if verbose { + fmt.Printf("Reading profile from: %s\n", inputFile) + fmt.Printf("Profile size: %d bytes\n", len(data)) + } + + profileAdapter, err := detectObjectType(objectType, data) + if err != nil { + return fmt.Errorf("failed to detect profile type: %w", err) + } + + if verbose { + fmt.Printf("Detected object type: %s\n", getObjectName(profileAdapter)) + } + + var signErr error + if useKeyless { + if verbose { + fmt.Println("Using keyless signing (OIDC)") + } + signErr = signature.SignObjectKeyless(profileAdapter) + } else { + if verbose { + fmt.Printf("Using local key from: %s\n", keyFile) + } + + keyData, err := os.ReadFile(keyFile) + if err != nil { + return fmt.Errorf("failed to read private key file: %w", err) + } + + block, _ := pem.Decode(keyData) + if block == nil { + return fmt.Errorf("failed to decode PEM block from key file") + } + + privateKey, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return fmt.Errorf("failed to parse EC private key: %w", err) + } + + signErr = signature.SignObject(profileAdapter, signature.WithPrivateKey(privateKey)) + } + + if signErr != nil { + return fmt.Errorf("failed to sign profile: %w", signErr) + } + + sig, err := signature.GetObjectSignature(profileAdapter) + if err != nil { + return fmt.Errorf("failed to get signature: %w", err) + } + + fmt.Printf("✓ Profile signed successfully\n") + fmt.Printf(" Issuer: %s\n", sig.Issuer) + fmt.Printf(" Identity: %s\n", sig.Identity) + fmt.Printf(" Timestamp: %d\n", sig.Timestamp) + + profileBytes, err := sigsyaml.Marshal(profileAdapter.GetUpdatedObject()) + if err != nil { + return fmt.Errorf("failed to marshal signed object: %w", err) + } + + if err := os.WriteFile(outputFile, profileBytes, 0644); err != nil { + return fmt.Errorf("failed to write output file: %w", err) + } + + fmt.Printf("✓ Signed profile written to: %s\n", outputFile) + return nil +} + +func runVerify() error { + data, err := os.ReadFile(inputFile) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + if verbose { + fmt.Printf("Reading profile from: %s\n", inputFile) + } + + profileAdapter, err := detectObjectType(objectType, data) + if err != nil { + return fmt.Errorf("failed to detect profile type: %w", err) + } + + sig, err := signature.GetObjectSignature(profileAdapter) + if err != nil { + return fmt.Errorf("profile is not signed: %w", err) + } + + fmt.Printf("Signature found:\n") + fmt.Printf(" Issuer: %s\n", sig.Issuer) + fmt.Printf(" Identity: %s\n", sig.Identity) + fmt.Printf(" Timestamp: %d\n", sig.Timestamp) + + var verifyErr error + if strict { + if verbose { + fmt.Println("Verifying with strict mode (keyless signatures must have issuer/identity)") + } + verifyErr = signature.VerifyObjectStrict(profileAdapter) + } else { + if verbose { + fmt.Println("Verifying in non-strict mode (allowing untrusted signatures)") + } + verifyErr = signature.VerifyObjectAllowUntrusted(profileAdapter) + } + + if verifyErr != nil { + return fmt.Errorf("signature verification failed: %w", verifyErr) + } + + fmt.Printf("✓ Signature verification successful\n") + return nil +} + +func runGenerateKeyPair() error { + adapter, err := signature.NewCosignAdapter(false) + if err != nil { + return fmt.Errorf("failed to create adapter: %w", err) + } + + pubKeyBytes, err := adapter.GetPublicKeyPEM() + if err != nil { + 
return fmt.Errorf("failed to get public key: %w", err) + } + + if publicOnly { + if err := os.WriteFile(outputFile, pubKeyBytes, 0644); err != nil { + return fmt.Errorf("failed to write public key file: %w", err) + } + + fmt.Printf("✓ Public key written to: %s\n", outputFile) + return nil + } + + privKeyBytes, err := adapter.GetPrivateKeyPEM() + if err != nil { + return fmt.Errorf("failed to get private key: %w", err) + } + + if err := os.WriteFile(outputFile, privKeyBytes, 0600); err != nil { + return fmt.Errorf("failed to write private key file: %w", err) + } + + pubKeyFile := outputFile + ".pub" + if err := os.WriteFile(pubKeyFile, pubKeyBytes, 0644); err != nil { + return fmt.Errorf("failed to write public key file: %w", err) + } + + fmt.Printf("✓ Private key written to: %s\n", outputFile) + fmt.Printf("✓ Public key written to: %s\n", pubKeyFile) + return nil +} + +func runExtractSignature() error { + data, err := os.ReadFile(inputFile) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + profileAdapter, err := detectObjectType(objectType, data) + if err != nil { + return fmt.Errorf("failed to detect profile type: %w", err) + } + + sig, err := signature.GetObjectSignature(profileAdapter) + if err != nil { + return fmt.Errorf("profile is not signed: %w", err) + } + + sigInfo := map[string]interface{}{ + "signature_size": len(sig.Signature), + "certificate_size": len(sig.Certificate), + "issuer": sig.Issuer, + "identity": sig.Identity, + "timestamp": sig.Timestamp, + "signature_base64": base64.StdEncoding.EncodeToString(sig.Signature), + "certificate_base64": base64.StdEncoding.EncodeToString(sig.Certificate), + } + + if jsonOutput { + jsonData, err := json.MarshalIndent(sigInfo, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal JSON: %w", err) + } + fmt.Println(string(jsonData)) + } else { + fmt.Println("Signature Information:") + fmt.Printf(" Issuer: %s\n", sig.Issuer) + fmt.Printf(" Identity: %s\n", sig.Identity) + fmt.Printf(" Timestamp: %d\n", sig.Timestamp) + fmt.Printf(" Signature Size: %d bytes\n", len(sig.Signature)) + fmt.Printf(" Certificate Size: %d bytes\n", len(sig.Certificate)) + + block, _ := pem.Decode(sig.Certificate) + if block != nil { + fmt.Printf(" Certificate Type: %s\n", block.Type) + } + } + + return nil +} + +func detectObjectType(objectType string, data []byte) (signature.SignableObject, error) { + var decoded map[string]interface{} + if err := k8syaml.Unmarshal(data, &decoded); err != nil { + return nil, fmt.Errorf("failed to unmarshal YAML: %w", err) + } + + kind, _ := decoded["kind"].(string) + apiVersion, _ := decoded["apiVersion"].(string) + + if verbose { + fmt.Printf("Detected API: %s, Kind: %s\n", apiVersion, kind) + } + + if objectType != "auto" { + switch strings.ToLower(objectType) { + case "applicationprofile", "application-profile", "ap": + return loadApplicationProfile(data) + case "seccompprofile", "seccomp-profile", "sp": + return loadSeccompProfile(data) + case "networkneighborhood", "network-neighborhood", "nn": + return loadNetworkNeighborhood(data) + case "rules", "rule", "r": + return loadRules(data) + default: + return nil, fmt.Errorf("unknown object type: %s", objectType) + } + } + + if strings.Contains(strings.ToLower(apiVersion), "softwarecomposition") { + switch strings.ToLower(kind) { + case "applicationprofile", "application-profile": + return loadApplicationProfile(data) + case "seccompprofile", "seccomp-profile": + return loadSeccompProfile(data) + case "networkneighborhood", 
"network-neighborhood": + return loadNetworkNeighborhood(data) + } + } + + if strings.Contains(strings.ToLower(apiVersion), "kubescape.io") && strings.ToLower(kind) == "rules" { + return loadRules(data) + } + + return nil, fmt.Errorf("unable to auto-detect object type") +} + +func loadApplicationProfile(data []byte) (signature.SignableObject, error) { + var profile v1beta1.ApplicationProfile + if err := k8syaml.Unmarshal(data, &profile); err != nil { + return nil, fmt.Errorf("failed to unmarshal ApplicationProfile: %w", err) + } + return profiles.NewApplicationProfileAdapter(&profile), nil +} + +func loadSeccompProfile(data []byte) (signature.SignableObject, error) { + var profile v1beta1.SeccompProfile + if err := k8syaml.Unmarshal(data, &profile); err != nil { + return nil, fmt.Errorf("failed to unmarshal SeccompProfile: %w", err) + } + return profiles.NewSeccompProfileAdapter(&profile), nil +} + +func loadNetworkNeighborhood(data []byte) (signature.SignableObject, error) { + var nn v1beta1.NetworkNeighborhood + if err := k8syaml.Unmarshal(data, &nn); err != nil { + return nil, fmt.Errorf("failed to unmarshal NetworkNeighborhood: %w", err) + } + return profiles.NewNetworkNeighborhoodAdapter(&nn), nil +} + +func loadRules(data []byte) (signature.SignableObject, error) { + var rules rulemanagertypesv1.Rules + if err := k8syaml.Unmarshal(data, &rules); err != nil { + return nil, fmt.Errorf("failed to unmarshal Rules: %w", err) + } + return profiles.NewRulesAdapter(&rules), nil +} + +func getObjectName(profile signature.SignableObject) string { + if _, ok := profile.(*profiles.ApplicationProfileAdapter); ok { + return "ApplicationProfile" + } + if _, ok := profile.(*profiles.SeccompProfileAdapter); ok { + return "SeccompProfile" + } + if _, ok := profile.(*profiles.NetworkNeighborhoodAdapter); ok { + return "NetworkNeighborhood" + } + if _, ok := profile.(*profiles.RulesAdapter); ok { + return "Rules" + } + return "Unknown" +} + +func printUsage() { + fmt.Println(`sign-object - Sign and verify Kubernetes security objects + +USAGE: + sign-object [flags] + +COMMANDS: + sign Sign a profile (default command) + verify Verify a signed object + generate-keypair Generate a new ECDSA key pair + extract-signature Extract signature info from a profile + help Show this help message + +SIGN FLAGS: + --file Input object YAML file (required) + --output Output file for signed object (required) + --keyless Use keyless signing (OIDC) + --key Path to private key file + --type Object type: applicationprofile, seccompprofile, networkneighborhood, rules, or auto (default: auto) + --verbose Enable verbose logging + +VERIFY FLAGS: + --file Signed object YAML file (required) + --type Object type: applicationprofile, seccompprofile, networkneighborhood, rules, or auto (default: auto) + --strict Require trusted issuer/identity (default: true) + --verbose Enable verbose logging + +GENERATE-KEYPAIR FLAGS: + --output Output PEM file for private key (required) + --public-only Only output public key (no private key) + +EXTRACT-SIGNATURE FLAGS: + --file Signed object YAML file (required) + --type Object type: applicationprofile, seccompprofile, networkneighborhood, rules, or auto (default: auto) + --json Output as JSON + +EXAMPLES: + # Sign with keyless (OIDC) + sign-object --keyless --file object.yaml --output signed-object.yaml + + # Sign with local key + sign-object --key my-key.pem --file object.yaml --output signed-object.yaml + + # Verify a signed object + sign-object verify --file signed-object.yaml + + # Generate a 
+  sign-object generate-keypair --output my-key.pem
+
+  # Generate only public key
+  sign-object generate-keypair --output my-key.pem --public-only
+
+  # Extract signature information
+  sign-object extract-signature --file signed-object.yaml
+
+For more information, see: docs/signing/README.md`)
+}
diff --git a/go.mod b/go.mod
index ae6d275e7e..37605d1948 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/Masterminds/semver/v3 v3.4.0
 	github.com/anchore/syft v1.32.0
 	github.com/aquilax/truncate v1.0.0
-	github.com/armosec/armoapi-go v0.0.694
+	github.com/armosec/armoapi-go v0.0.696
 	github.com/armosec/utils-k8s-go v0.0.35
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/cenkalti/backoff/v4 v4.3.0
@@ -22,9 +22,10 @@ require (
 	github.com/dustin/go-humanize v1.0.1
 	github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
 	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
-	github.com/go-openapi/strfmt v0.23.0
+	github.com/go-openapi/strfmt v0.26.0
+	github.com/golang-jwt/jwt/v5 v5.3.0
 	github.com/google/cel-go v0.26.1
-	github.com/google/go-containerregistry v0.20.7
+	github.com/google/go-containerregistry v0.21.2
 	github.com/google/uuid v1.6.0
 	github.com/goradd/maps v1.3.0
 	github.com/grafana/pyroscope-go v1.2.2
@@ -34,7 +35,7 @@ require (
 	github.com/joncrlsn/dque v0.0.0-20241024143830-7723fd131a64
 	github.com/kubescape/backend v0.0.39
 	github.com/kubescape/go-logger v0.0.28
-	github.com/kubescape/k8s-interface v0.0.206
+	github.com/kubescape/k8s-interface v0.0.208
 	github.com/kubescape/storage v0.0.258
 	github.com/kubescape/workerpool v0.0.0-20250526074519-0e4a4e7f44cf
 	github.com/moby/sys/mountinfo v0.7.2
@@ -47,7 +48,11 @@ require (
 	github.com/prometheus/alertmanager v0.27.0
 	github.com/prometheus/client_golang v1.23.2
 	github.com/prometheus/procfs v0.19.2
-	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
+	github.com/sigstore/cosign/v3 v3.0.5
+	github.com/sigstore/fulcio v1.8.5
+	github.com/sigstore/rekor v1.5.1
+	github.com/sigstore/sigstore v1.10.4
+	github.com/sirupsen/logrus v1.9.4
 	github.com/spf13/afero v1.15.0
 	github.com/spf13/viper v1.21.0
 	github.com/stretchr/testify v1.11.1
@@ -55,18 +60,19 @@ require (
 	go.uber.org/multierr v1.11.0
 	golang.org/x/net v0.53.0
 	golang.org/x/sys v0.43.0
+	golang.org/x/tools v0.43.0
 	gonum.org/v1/plot v0.14.0
 	google.golang.org/grpc v1.80.0
 	google.golang.org/protobuf v1.36.11
 	gopkg.in/mcuadros/go-syslog.v2 v2.3.0
 	istio.io/pkg v0.0.0-20231221211216-7635388a563e
-	k8s.io/api v0.35.0
-	k8s.io/apimachinery v0.35.0
-	k8s.io/client-go v0.35.0
+	k8s.io/api v0.35.1
+	k8s.io/apimachinery v0.35.1
+	k8s.io/client-go v0.35.1
 	k8s.io/cri-api v0.35.0
 	k8s.io/kubectl v0.34.1
 	k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2
-	modernc.org/sqlite v1.38.2
+	modernc.org/sqlite v1.46.1
 	oras.land/oras-go/v2 v2.6.0
 	sigs.k8s.io/yaml v1.6.0
 )
@@ -127,7 +133,7 @@ require (
 	github.com/armosec/gojay v1.2.17 // indirect
 	github.com/armosec/utils-go v0.0.58 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.55.7 // indirect
+	github.com/aws/aws-sdk-go v1.55.8 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.41.5 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.32.12 // indirect
@@ -156,11 +162,15 @@ require (
 	github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
-	github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
+	github.com/bmatcuk/doublestar/v4 v4.10.0 // indirect
 	github.com/bodgit/plumbing v1.3.0 // indirect
 	github.com/bodgit/sevenzip v1.6.1 // indirect
 	github.com/bodgit/windows v1.0.1 // indirect
 	github.com/briandowns/spinner v1.23.2 // indirect
+	github.com/buildkite/agent/v3 v3.115.4 // indirect
+	github.com/buildkite/go-pipeline v0.16.0 // indirect
+	github.com/buildkite/interpolate v0.1.5 // indirect
+	github.com/buildkite/roko v1.4.0 // indirect
 	github.com/campoy/embedmd v1.0.0 // indirect
 	github.com/charmbracelet/colorprofile v0.3.1 // indirect
 	github.com/charmbracelet/lipgloss v1.1.0 // indirect
@@ -181,26 +191,29 @@ require (
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/nri v0.9.0 // indirect
 	github.com/containerd/platforms v0.2.1 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.18.2 // indirect
 	github.com/containerd/ttrpc v1.2.7 // indirect
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containers/common v0.64.2 // indirect
 	github.com/coreos/go-oidc/v3 v3.17.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.6.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/deitch/magic v0.0.0-20240306090643-c67ab88f10cb // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
 	github.com/diskfs/go-diskfs v1.7.0 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/cli v29.2.0+incompatible // indirect
+	github.com/docker/cli v29.3.0+incompatible // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
 	github.com/docker/docker v28.5.2+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.9.3 // indirect
+	github.com/docker/docker-credential-helpers v0.9.5 // indirect
 	github.com/docker/go-connections v0.6.0 // indirect
 	github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
 	github.com/elliotchance/phpserialize v1.4.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/emicklei/go-restful/v3 v3.13.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
@@ -212,10 +225,11 @@ require (
 	github.com/francoispqt/gojay v1.2.13 // indirect
 	github.com/fsnotify/fsnotify v1.9.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.10 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.13 // indirect
 	github.com/gammazero/deque v1.0.0 // indirect
-	github.com/github/go-spdx/v2 v2.3.3 // indirect
+	github.com/github/go-spdx/v2 v2.4.0 // indirect
 	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
+	github.com/go-chi/chi/v5 v5.2.5 // indirect
 	github.com/go-errors/errors v1.5.1 // indirect
 	github.com/go-fonts/liberation v0.3.2 // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
@@ -226,30 +240,43 @@ require (
 	github.com/go-ldap/ldap/v3 v3.4.10 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/analysis v0.23.0 // indirect
-	github.com/go-openapi/errors v0.22.2 // indirect
-	github.com/go-openapi/jsonpointer v0.21.2 // indirect
-	github.com/go-openapi/jsonreference v0.21.0 // indirect
-	github.com/go-openapi/loads v0.22.0 // indirect
-	github.com/go-openapi/runtime v0.28.0 // indirect
-	github.com/go-openapi/spec v0.21.0 // indirect
-	github.com/go-openapi/swag v0.23.1 // indirect
-	github.com/go-openapi/validate v0.24.0 // indirect
+	github.com/go-openapi/analysis v0.24.3 // indirect
+	github.com/go-openapi/errors v0.22.7 // indirect
+	github.com/go-openapi/jsonpointer v0.22.5 // indirect
+	github.com/go-openapi/jsonreference v0.21.5 // indirect
+	github.com/go-openapi/loads v0.23.3 // indirect
+	github.com/go-openapi/runtime v0.29.3 // indirect
+	github.com/go-openapi/spec v0.22.4 // indirect
+	github.com/go-openapi/swag v0.25.5 // indirect
+	github.com/go-openapi/swag/cmdutils v0.25.5 // indirect
+	github.com/go-openapi/swag/conv v0.25.5 // indirect
+	github.com/go-openapi/swag/fileutils v0.25.5 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.5 // indirect
+	github.com/go-openapi/swag/jsonutils v0.25.5 // indirect
+	github.com/go-openapi/swag/loading v0.25.5 // indirect
+	github.com/go-openapi/swag/mangling v0.25.5 // indirect
+	github.com/go-openapi/swag/netutils v0.25.5 // indirect
+	github.com/go-openapi/swag/stringutils v0.25.5 // indirect
+	github.com/go-openapi/swag/typeutils v0.25.5 // indirect
+	github.com/go-openapi/swag/yamlutils v0.25.5 // indirect
+	github.com/go-openapi/validate v0.25.2 // indirect
 	github.com/go-pdf/fpdf v0.9.0 // indirect
 	github.com/go-restruct/restruct v1.2.0-alpha // indirect
-	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+	github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
 	github.com/goccy/go-yaml v1.18.0 // indirect
 	github.com/godbus/dbus/v5 v5.2.0 // indirect
 	github.com/gofrs/flock v0.13.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/gohugoio/hashstructure v0.5.0 // indirect
+	github.com/gohugoio/hashstructure v0.6.0 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
 	github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
 	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/golang/snappy v1.0.0 // indirect
 	github.com/google/btree v1.1.3 // indirect
+	github.com/google/certificate-transparency-go v1.3.2 // indirect
 	github.com/google/gnostic-models v0.7.0 // indirect
 	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-querystring v1.2.0 // indirect
 	github.com/google/licensecheck v0.3.1 // indirect
 	github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
 	github.com/google/s2a-go v0.1.9 // indirect
@@ -265,26 +292,28 @@ require (
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-getter v1.8.6 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
 	github.com/hashicorp/go-version v1.8.0 // indirect
 	github.com/hashicorp/hcl/v2 v2.24.0 // indirect
 	github.com/huandu/xstrings v1.5.0 // indirect
 	github.com/iancoleman/strcase v0.3.0 // indirect
+	github.com/in-toto/attestation v1.1.2 // indirect
 	github.com/in-toto/in-toto-golang v0.9.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/josharian/intern v1.0.0 // indirect
+	github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect
 	github.com/josharian/native v1.1.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kastenhq/goversion v0.0.0-20230811215019-93b2f8823953 // indirect
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/klauspost/compress v1.18.5 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
+	github.com/letsencrypt/boulder v0.20251110.0 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
 	github.com/mackerelio/go-osstat v0.2.5 // indirect
-	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.21 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
@@ -297,7 +326,6 @@ require (
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/locker v1.0.1 // indirect
@@ -314,16 +342,18 @@ require (
 	github.com/muesli/termenv v0.16.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/ncruces/go-strftime v0.1.9 // indirect
+	github.com/ncruces/go-strftime v1.0.0 // indirect
 	github.com/ncw/directio v1.0.5 // indirect
 	github.com/nix-community/go-nix v0.0.0-20250101154619-4bdde671e0a1 // indirect
 	github.com/notaryproject/notation-core-go v1.3.0 // indirect
 	github.com/notaryproject/notation-go v1.3.2 // indirect
 	github.com/notaryproject/notation-plugin-framework-go v1.0.0 // indirect
 	github.com/notaryproject/tspclient-go v1.0.0 // indirect
+	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
 	github.com/nwaples/rardecode v1.1.3 // indirect
 	github.com/nwaples/rardecode/v2 v2.2.0 // indirect
-	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/oklog/ulid/v2 v2.1.1 // indirect
+	github.com/oleiade/reflections v1.1.0 // indirect
 	github.com/olekukonko/errors v1.1.0 // indirect
 	github.com/olekukonko/ll v0.0.9 // indirect
 	github.com/olekukonko/tablewriter v1.0.9 // indirect
@@ -331,23 +361,24 @@ require (
 	github.com/opcoder0/capabilities v0.0.0-20221222060822-17fd73bffd2a // indirect
 	github.com/opencontainers/runtime-spec v1.2.1 // indirect
 	github.com/opencontainers/selinux v1.13.1 // indirect
-	github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
 	github.com/packetcap/go-pcap v0.0.0-20250723190045-d00b185f30b7 // indirect
 	github.com/pborman/indent v1.2.1 // indirect
+	github.com/pborman/uuid v1.2.1 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 // indirect
 	github.com/pierrec/lz4/v4 v4.1.22 // indirect
 	github.com/pjbgf/sha1cd v0.4.0 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pkg/profile v1.7.0 // indirect
 	github.com/pkg/xattr v0.4.12 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/common v0.67.4 // indirect
-	github.com/puzpuzpuz/xsync/v2 v2.4.1 // indirect
+	github.com/prometheus/common v0.67.5 // indirect
+	github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/rust-secure-code/go-rustaudit v0.0.0-20250226111315-e20ec32e963c // indirect
@@ -356,14 +387,17 @@ require (
 	github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
 	github.com/sasha-s/go-deadlock v0.3.5 // indirect
 	github.com/sassoftware/go-rpmutils v0.4.0 // indirect
+	github.com/sassoftware/relic v7.2.1+incompatible // indirect
 	github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e // indirect
 	github.com/seccomp/libseccomp-golang v0.11.0 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect
 	github.com/sergi/go-diff v1.4.0 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/sigstore/protobuf-specs v0.5.0 // indirect
-	github.com/sigstore/sigstore v1.10.4 // indirect
+	github.com/sigstore/rekor-tiles/v2 v2.2.0 // indirect
+	github.com/sigstore/sigstore-go v1.1.4 // indirect
+	github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect
 	github.com/skeema/knownhosts v1.3.1 // indirect
 	github.com/sorairolake/lzip-go v0.3.8 // indirect
 	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
@@ -379,7 +413,13 @@ require (
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/sylabs/sif/v2 v2.22.0 // indirect
 	github.com/sylabs/squashfs v1.0.6 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
 	github.com/therootcompany/xz v1.0.1 // indirect
+	github.com/theupdateframework/go-tuf v0.7.0 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.4.1 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
+	github.com/transparency-dev/merkle v0.0.2 // indirect
 	github.com/ulikunitz/xz v0.5.15 // indirect
 	github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2 // indirect
 	github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2 // indirect
@@ -391,7 +431,7 @@ require (
 	github.com/vishvananda/netlink v1.3.1 // indirect
 	github.com/vishvananda/netns v0.0.5 // indirect
 	github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 // indirect
-	github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 // indirect
+	github.com/wagoodman/go-progress v0.0.0-20260303201901-10176f79b2c0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
@@ -399,7 +439,6 @@ require (
 	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
 	github.com/yl2chen/cidranger v1.0.2 // indirect
 	github.com/zclconf/go-cty v1.16.3 // indirect
-	go.mongodb.org/mongo-driver v1.17.6 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
 	go.opentelemetry.io/contrib/bridges/otelslog v0.18.0 // indirect
@@ -427,7 +466,7 @@ require (
 	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
v0.0.0-20230225012048-214862532bf5 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.50.0 // indirect - golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect + golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/image v0.38.0 // indirect golang.org/x/mod v0.34.0 // indirect golang.org/x/oauth2 v0.36.0 // indirect @@ -435,7 +474,6 @@ require ( golang.org/x/term v0.42.0 // indirect golang.org/x/text v0.36.0 // indirect golang.org/x/time v0.15.0 // indirect - golang.org/x/tools v0.43.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/api v0.271.0 // indirect google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 // indirect @@ -453,7 +491,7 @@ require ( k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect k8s.io/kubelet v0.35.0 // indirect - modernc.org/libc v1.66.3 // indirect + modernc.org/libc v1.67.6 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect sigs.k8s.io/controller-runtime v0.21.0 // indirect @@ -468,3 +506,5 @@ require ( replace github.com/inspektor-gadget/inspektor-gadget => github.com/matthyx/inspektor-gadget v0.0.0-20260421100818-fd383d3d7db4 replace github.com/cilium/ebpf => github.com/matthyx/ebpf v0.0.0-20260421101317-8a32d06def6c + +replace github.com/kubescape/storage => github.com/k8sstormcenter/storage v0.0.240-0.20260503184242-43795bb4f0b6 diff --git a/go.sum b/go.sum index d18386e6c7..0d4fa4b531 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,8 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1 cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.25.0 h1:gVqvGGUmz0nYCmtoxWmdc1wli2L1apgP8U4fghPGSbQ= +cloud.google.com/go/kms v1.25.0/go.mod h1:XIdHkzfj0bUO3E+LvwPg+oc7s58/Ns8Nd8Sdtljihbk= cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= @@ -78,6 +80,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo= git.sr.ht/~sbinet/cmpimg v0.1.0/go.mod h1:FU12psLbF4TfNXkKH2ZZQ29crIqoiqTZmeQ7dkp/pxE= @@ -87,11 +91,26 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= 
 github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa h1:x6kFzdPgBoLbyoNkA/jny0ENpoEz4wqY8lPTQL2DPkg=
 github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20250520111509-a70c2aa677fa/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/Azure/go-ntlmssp v0.1.1 h1:l+FM/EEMb0U9QZE7mKNEDw5Mu3mFiaa2GKOoTSsNDPw=
 github.com/Azure/go-ntlmssp v0.1.1/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
@@ -146,6 +165,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
+github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
 github.com/anchore/archiver/v3 v3.5.3-0.20241210171143-5b1d8d1c7c51 h1:yhk+P8lF3ZiROjmaVRao9WGTRo4b/wYjoKEiAHWrKwc=
@@ -205,8 +226,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/armosec/armoapi-go v0.0.694 h1:LDScWAzikv7mJdDhO+VM0DfNoMhQbhA6do6LWXHRIQs=
-github.com/armosec/armoapi-go v0.0.694/go.mod h1:9jAH0g8ZsryhiBDd/aNMX4+n10bGwTx/doWCyyjSxts=
+github.com/armosec/armoapi-go v0.0.696 h1:+0Ll7y4oWNaKEO47qbGDFIQLxkSJeKYzylS0FwI84XE=
+github.com/armosec/armoapi-go v0.0.696/go.mod h1:9jAH0g8ZsryhiBDd/aNMX4+n10bGwTx/doWCyyjSxts=
 github.com/armosec/gojay v1.2.17 h1:VSkLBQzD1c2V+FMtlGFKqWXNsdNvIKygTKJI9ysY8eM=
 github.com/armosec/gojay v1.2.17/go.mod h1:vuvX3DlY0nbVrJ0qCklSS733AWMoQboq3cFyuQW9ybc=
 github.com/armosec/utils-go v0.0.58 h1:g9RnRkxZAmzTfPe2ruMo2OXSYLwVSegQSkSavOfmaIE=
@@ -217,8 +238,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
 github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
-github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
+github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
 github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY=
 github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 h1:eBMB84YGghSocM7PsjmmPffTa+1FBUeNvGvFou6V/4o=
@@ -247,6 +268,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3x
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 h1:DKibav4XF66XSeaXcrn9GlWGHos6D/vJ4r7jsK7z5CE=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.5/go.mod h1:1SdcmEGUEQE1mrU2sIgeHtcMSxHuybhPvuEPANzIDfI=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 h1:HwxWTbTrIHm5qY+CAEur0s/figc3qwvLWsNkF4RPToo=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM=
 github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow=
@@ -278,8 +301,8 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
-github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=
+github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
 github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
 github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
 github.com/bodgit/sevenzip v1.6.1 h1:kikg2pUMYC9ljU7W9SaqHXhym5HyKm8/M/jd31fYan4=
@@ -292,6 +315,14 @@ github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1l
 github.com/briandowns/spinner v1.23.2 h1:Zc6ecUnI+YzLmJniCfDNaMbW0Wid1d5+qcTq4L2FW8w=
 github.com/briandowns/spinner v1.23.2/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buildkite/agent/v3 v3.115.4 h1:oxuLAjwHADBlTZuTrTb0JPt0FBcbGo55ZqDHPJ0jn+E=
+github.com/buildkite/agent/v3 v3.115.4/go.mod h1:LKY99ujcnFwX8ihEXuMLuPIy3SPL2unKWGJ/DRLICr0=
+github.com/buildkite/go-pipeline v0.16.0 h1:wEgWUMRAgSg1ZnWOoA3AovtYYdTvN0dLY1zwUWmPP+4=
+github.com/buildkite/go-pipeline v0.16.0/go.mod h1:VE37qY3X5pmAKKUMoDZvPsHOQuyakB9cmXj9Qn6QasA=
+github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU=
+github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc=
+github.com/buildkite/roko v1.4.0 h1:DxixoCdpNqxu4/1lXrXbfsKbJSd7r1qoxtef/TT2J80=
+github.com/buildkite/roko v1.4.0/go.mod h1:0vbODqUFEcVf4v2xVXRfZZRsqJVsCCHTG/TBRByGK4E=
 github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
 github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
@@ -373,8 +404,8 @@ github.com/containerd/nri v0.9.0 h1:jribDJs/oQ95vLO4Yn19HKFYriZGWKiG6nKWjl9Y/x4=
 github.com/containerd/nri v0.9.0/go.mod h1:sDRoMy5U4YolsWthg7TjTffAwPb6LEr//83O+D3xVU4=
 github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
 github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
+github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw=
+github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY=
 github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ=
 github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
 github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
@@ -395,8 +426,12 @@ github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
 github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
 github.com/crewjam/rfc5424 v0.1.0 h1:MSeXJm22oKovLzWj44AHwaItjIMUMugYGkEzfa831H8=
 github.com/crewjam/rfc5424 v0.1.0/go.mod h1:RCi9M3xHVOeerf6ULZzqv2xOGRO/zYaVUeRyPnBW3gQ=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is=
 github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
+github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
+github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -408,6 +443,11 @@ github.com/deitch/magic v0.0.0-20240306090643-c67ab88f10cb/go.mod h1:B3tI9iGHi4i
 github.com/dghubble/trie v0.1.0 h1:kJnjBLFFElBwS60N4tkPvnLhnpcDxbBjIulgI8CpNGM=
 github.com/dghubble/trie v0.1.0/go.mod h1:sOmnzfBNH7H92ow2292dDFWNsVQuh/izuD7otCYb1ak=
 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
 github.com/diskfs/go-diskfs v1.7.0 h1:vonWmt5CMowXwUc79jWyGrf2DIMeoOjkLlMnQYGVOs8=
 github.com/diskfs/go-diskfs v1.7.0/go.mod h1:LhQyXqOugWFRahYUSw47NyZJPezFzB9UELwhpszLP/k=
 github.com/distribution/distribution v2.8.2+incompatible h1:k9+4DKdOG+quPFZXT/mUsiQrGu9vYCp+dXpuPkuqhk8=
@@ -416,14 +456,14 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
 github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVfN4qNl9XRM=
-github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v29.3.0+incompatible h1:z3iWveU7h19Pqx7alZES8j+IeFQZ1lhTwb2F+V9SVvk=
+github.com/docker/cli v29.3.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
 github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
-github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
+github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY=
+github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
 github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
 github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
 github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 h1:EHZfspsnLAz8Hzccd67D5abwLiqoqym2jz/jOS39mCk=
@@ -444,8 +484,8 @@ github.com/elliotchance/phpserialize v1.4.0 h1:cAp/9+KSnEbUC8oYCE32n2n84BeW8HOY3
 github.com/elliotchance/phpserialize v1.4.0/go.mod h1:gt7XX9+ETUcLXbtTKEuyrqW3lcLUAeS/AnGZ2e49TZs=
 github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
 github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
-github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
-github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
+github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -494,18 +534,20 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
 github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
 github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
 github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
-github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
-github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
+github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
+github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
 github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
 github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/github/go-spdx/v2 v2.3.3 h1:QI7evnHWEfWkT54eJwkoV/f3a0xD3gLlnVmT5wQG6LE=
-github.com/github/go-spdx/v2 v2.3.3/go.mod h1:2ZxKsOhvBp+OYBDlsGnUMcchLeo2mrpEBn2L1C+U3IQ=
+github.com/github/go-spdx/v2 v2.4.0 h1:+4IwVwJJbm3rzvrQ6P1nI9BDMcy3la4RchRy5uehV/M=
+github.com/github/go-spdx/v2 v2.4.0/go.mod h1:/5rwgS0txhGtRdUZwc02bTglzg6HK3FfuEbECKlK2Sg=
 github.com/glebarez/go-sqlite v1.20.3 h1:89BkqGOXR9oRmG58ZrzgoY/Fhy5x0M+/WV48U5zVrZ4=
 github.com/glebarez/go-sqlite v1.20.3/go.mod h1:u3N6D/wftiAzIOJtZl6BmedqxmmkDfH3q+ihjqxC9u0=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
@@ -513,6 +555,8 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
 github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
 github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
 github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
+github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
 github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
@@ -550,40 +594,74 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
-github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
-github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg=
-github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
-github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA=
-github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
-github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
-github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
-github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
-github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
-github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
-github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
-github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
-github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
-github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
-github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-openapi/analysis v0.24.3 h1:a1hrvMr8X0Xt69KP5uVTu5jH62DscmDifrLzNglAayk=
+github.com/go-openapi/analysis v0.24.3/go.mod h1:Nc+dWJ/FxZbhSow5Yh3ozg5CLJioB+XXT6MdLvJUsUw=
+github.com/go-openapi/errors v0.22.7 h1:JLFBGC0Apwdzw3484MmBqspjPbwa2SHvpDm0u5aGhUA=
+github.com/go-openapi/errors v0.22.7/go.mod h1://QW6SD9OsWtH6gHllUCddOXDL0tk0ZGNYHwsw4sW3w=
+github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA=
+github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0=
+github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE=
+github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw=
+github.com/go-openapi/loads v0.23.3 h1:g5Xap1JfwKkUnZdn+S0L3SzBDpcTIYzZ5Qaag0YDkKQ=
+github.com/go-openapi/loads v0.23.3/go.mod h1:NOH07zLajXo8y55hom0omlHWDVVvCwBM/S+csCK8LqA=
+github.com/go-openapi/runtime v0.29.3 h1:h5twGaEqxtQg40ePiYm9vFFH1q06Czd7Ot6ufdK0w/Y=
+github.com/go-openapi/runtime v0.29.3/go.mod h1:8A1W0/L5eyNJvKciqZtvIVQvYO66NlB7INMSZ9bw/oI=
+github.com/go-openapi/spec v0.22.4 h1:4pxGjipMKu0FzFiu/DPwN3CTBRlVM2yLf/YTWorYfDQ=
+github.com/go-openapi/spec v0.22.4/go.mod h1:WQ6Ai0VPWMZgMT4XySjlRIE6GP1bGQOtEThn3gcWLtQ=
+github.com/go-openapi/strfmt v0.26.0 h1:SDdQLyOEqu8W96rO1FRG1fuCtVyzmukky0zcD6gMGLU=
+github.com/go-openapi/strfmt v0.26.0/go.mod h1:Zslk5VZPOISLwmWTMBIS7oiVFem1o1EI6zULY8Uer7Y=
+github.com/go-openapi/swag v0.25.5 h1:pNkwbUEeGwMtcgxDr+2GBPAk4kT+kJ+AaB+TMKAg+TU=
+github.com/go-openapi/swag v0.25.5/go.mod h1:B3RT6l8q7X803JRxa2e59tHOiZlX1t8viplOcs9CwTA=
+github.com/go-openapi/swag/cmdutils v0.25.5 h1:yh5hHrpgsw4NwM9KAEtaDTXILYzdXh/I8Whhx9hKj7c=
+github.com/go-openapi/swag/cmdutils v0.25.5/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.5 h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g=
+github.com/go-openapi/swag/conv v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k=
+github.com/go-openapi/swag/fileutils v0.25.5 h1:B6JTdOcs2c0dBIs9HnkyTW+5gC+8NIhVBUwERkFhMWk=
+github.com/go-openapi/swag/fileutils v0.25.5/go.mod h1:V3cT9UdMQIaH4WiTrUc9EPtVA4txS0TOmRURmhGF4kc=
+github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo=
+github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU=
+github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo=
+github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5 h1:SX6sE4FrGb4sEnnxbFL/25yZBb5Hcg1inLeErd86Y1U=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5/go.mod h1:/2KvOTrKWjVA5Xli3DZWdMCZDzz3uV/T7bXwrKWPquo=
+github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU=
+github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g=
+github.com/go-openapi/swag/mangling v0.25.5 h1:hyrnvbQRS7vKePQPHHDso+k6CGn5ZBs5232UqWZmJZw=
+github.com/go-openapi/swag/mangling v0.25.5/go.mod h1:6hadXM/o312N/h98RwByLg088U61TPGiltQn71Iw0NY=
+github.com/go-openapi/swag/netutils v0.25.5 h1:LZq2Xc2QI8+7838elRAaPCeqJnHODfSyOa7ZGfxDKlU=
+github.com/go-openapi/swag/netutils v0.25.5/go.mod h1:lHbtmj4m57APG/8H7ZcMMSWzNqIQcu0RFiXrPUara14=
+github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M=
+github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII=
+github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E=
+github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc=
+github.com/go-openapi/swag/yamlutils v0.25.5 h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ=
+github.com/go-openapi/swag/yamlutils v0.25.5/go.mod h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ=
+github.com/go-openapi/testify/enable/yaml/v2 v2.4.1 h1:NZOrZmIb6PTv5LTFxr5/mKV/FjbUzGE7E6gLz7vFoOQ=
+github.com/go-openapi/testify/enable/yaml/v2 v2.4.1/go.mod h1:r7dwsujEHawapMsxA69i+XMGZrQ5tRauhLAjV/sxg3Q=
+github.com/go-openapi/testify/v2 v2.4.1 h1:zB34HDKj4tHwyUQHrUkpV0Q0iXQ6dUCOQtIqn8hE6Iw=
+github.com/go-openapi/testify/v2 v2.4.1/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+github.com/go-openapi/validate v0.25.2 h1:12NsfLAwGegqbGWr2CnvT65X/Q2USJipmJ9b7xDJZz0=
+github.com/go-openapi/validate v0.25.2/go.mod h1:Pgl1LpPPGFnZ+ys4/hTlDiRYQdI1ocKypgE+8Q8BLfY=
 github.com/go-pdf/fpdf v0.9.0 h1:PPvSaUuo1iMi9KkaAn90NuKi+P4gwMedWPHhj8YlJQw=
 github.com/go-pdf/fpdf v0.9.0/go.mod h1:oO8N111TkmKb9D7VvWGLvLJlaZUQVPM+6V42pp3iV4Y=
 github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s=
 github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI=
 github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc=
 github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
 github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
+github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
 github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
@@ -598,10 +676,12 @@ github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/gohugoio/hashstructure v0.5.0 h1:G2fjSBU36RdwEJBWJ+919ERvOVqAg9tfcYp47K9swqg=
-github.com/gohugoio/hashstructure v0.5.0/go.mod h1:Ser0TniXuu/eauYmrwM4o64EBvySxNzITEOLlm4igec=
+github.com/gohugoio/hashstructure v0.6.0 h1:7wMB/2CfXoThFYhdWRGv3u3rUM761Cq29CxUW+NltUg=
+github.com/gohugoio/hashstructure v0.6.0/go.mod h1:lapVLk9XidheHG1IQ4ZSbyYrXcaILU1ZEP/+vno5rBQ=
 github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
 github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -641,6 +721,7 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
 github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -649,6 +730,8 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
 github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ=
 github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
+github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
+github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
 github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
 github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -669,11 +752,15 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
-github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
+github.com/google/go-containerregistry v0.21.2 h1:vYaMU4nU55JJGFC9JR/s8NZcTjbE9DBBbvusTW9NeS0=
+github.com/google/go-containerregistry v0.21.2/go.mod h1:ctO5aCaewH4AK1AumSF5DPW+0+R+d2FmylMJdp5G7p0=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
+github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/licensecheck v0.3.1 h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs=
 github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
@@ -694,6 +781,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -704,6 +792,9 @@ github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxV
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
 github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
+github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -738,6 +829,8 @@ github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZ
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
@@ -757,6 +850,8 @@ github.com/hashicorp/go-getter v1.8.6 h1:9sQboWULaydVphxc4S64oAI4YqpuCk7nPmvbk13
 github.com/hashicorp/go-getter v1.8.6/go.mod h1:nVH12eOV2P58dIiL3rsU6Fh3wLeJEKBOJzhMmzlSWoo=
 github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@@ -765,8 +860,17 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@@ -781,6 +885,8 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
 github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
 github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -790,6 +896,11 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
 github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
 github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
+github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
 github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
 github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@@ -801,11 +912,21 @@ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:
 github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
 github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90 h1:xrtfZokN++5kencK33hn2Kx3Uj8tGnjMEhdt6FMvHD0=
 github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90/go.mod h1:LEzdaZarZ5aqROlLIwJ4P7h3+4o71008fSy6wpaEB+s=
+github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
+github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
 github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
 github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
+github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@@ -820,16 +941,21 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
 github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
 github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
 github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
+github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
 github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
 github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
 github.com/joncrlsn/dque v0.0.0-20241024143830-7723fd131a64 h1:fmH2K7R8pZJ0wVvJyGFmDnECuAE3NLjfAoJkN9mtfc8=
 github.com/joncrlsn/dque v0.0.0-20241024143830-7723fd131a64/go.mod h1:dNKs71rs2VJGBAmttu7fouEsRQlRjxy0p1Sx+T5wbpY=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
@@ -855,6 +981,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/k8sstormcenter/storage v0.0.240-0.20260503184242-43795bb4f0b6 h1:pzIvtCkXBC6t4v7EIIekbltfBnWfvWKjB6ZsgdhkWr0=
+github.com/k8sstormcenter/storage v0.0.240-0.20260503184242-43795bb4f0b6/go.mod h1:amdg/Qok9bqPzs1vZH5FW9/3MbCawc5wVsz9u3uIfu4=
 github.com/kastenhq/goversion v0.0.0-20230811215019-93b2f8823953 h1:WdAeg/imY2JFPc/9CST4bZ80nNJbiBFCAdSZCSgrS5Y=
 github.com/kastenhq/goversion v0.0.0-20230811215019-93b2f8823953/go.mod h1:6o+UrvuZWc4UTyBhQf0LGjW9Ld7qJxLz/OqvSOWWlEc=
 github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
@@ -885,15 +1013,15 @@ github.com/kubescape/backend v0.0.39 h1:B1QRfKCSFlzuE+jWOnk/l7EpH71/Q3n14KKq0QSn
 github.com/kubescape/backend v0.0.39/go.mod h1:cMEGP8cXUZgY89YU4GRBGIla9HZW7grZsUtlCwvZgAE=
 github.com/kubescape/go-logger v0.0.28 h1:xulKTp9kOg3rD98sopFELQ6yZCHQoQXMDzteoSHDFKI=
 github.com/kubescape/go-logger v0.0.28/go.mod h1:YZHFjwGCDar1hP9OyBLE46oR7a0Y/Z/0FperDo8+9D0=
-github.com/kubescape/k8s-interface v0.0.206 h1:qaYu4mlLmSBePanSGq+DBCssh4O785TAT0lQGNGWyGw=
-github.com/kubescape/k8s-interface v0.0.206/go.mod h1:WNYUG93aZ5kDmuaRKFLtVhp18Yc6EfaHdD1gLYtVTN4=
-github.com/kubescape/storage v0.0.258 h1:0mL0z3dAmtP1qup7VgoEgwLgbBSROu5oOusBAPeMmus=
-github.com/kubescape/storage v0.0.258/go.mod h1:VHs+xQzvZKE2lJDN8rR1sFmTa43N6XJAcatZ249gviU=
+github.com/kubescape/k8s-interface v0.0.208 h1:vmZ2FVAQRsz3XRKNG/6wJAYvZJ12RtMoDTLVxFEktms=
+github.com/kubescape/k8s-interface v0.0.208/go.mod h1:WNYUG93aZ5kDmuaRKFLtVhp18Yc6EfaHdD1gLYtVTN4=
 github.com/kubescape/workerpool v0.0.0-20250526074519-0e4a4e7f44cf h1:hI0jVwrB6fT4GJWvuUjzObfci1CUknrZdRHfnRVtKM0=
 github.com/kubescape/workerpool v0.0.0-20250526074519-0e4a4e7f44cf/go.mod h1:Il5baM40PV9cTt4OGdLMeTRRAai3TMfvImu31itIeCM=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
+github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
+github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
@@ -909,8 +1037,6 @@ github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8S
 github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
-github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/matthyx/ebpf v0.0.0-20260421101317-8a32d06def6c h1:ZCCeIMu86h4NhF0UfSm9Kdy1AHVWPogk86MdQD6OvPM=
 github.com/matthyx/ebpf v0.0.0-20260421101317-8a32d06def6c/go.mod h1:pzLjFymM+uZPLk/IXZUL63xdx5VXEo+enTzxkZXdycw=
 github.com/matthyx/inspektor-gadget v0.0.0-20260421100818-fd383d3d7db4 h1:+10X5NKBH8AOfLSqKqet2pyMvduv4gHImvYHVohyB/I=
@@ -978,8 +1104,8 @@ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTS
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -1029,8 +1155,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
-github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
+github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
 github.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4=
 github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk=
 github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
@@ -1046,16 +1174,24 @@ github.com/notaryproject/notation-plugin-framework-go v1.0.0 h1:6Qzr7DGXoCgXEQN+
 github.com/notaryproject/notation-plugin-framework-go v1.0.0/go.mod h1:RqWSrTOtEASCrGOEffq0n8pSg2KOgKYiWqFWczRSics=
 github.com/notaryproject/tspclient-go v1.0.0 h1:AwQ4x0gX8IHnyiZB1tggpn5NFqHpTEm1SDX8YNv4Dg4=
 github.com/notaryproject/tspclient-go v1.0.0/go.mod h1:LGyA/6Kwd2FlM0uk8Vc5il3j0CddbWSHBj/4kxQDbjs=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
 github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE=
 github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
 github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc=
 github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
 github.com/nwaples/rardecode/v2 v2.2.0 h1:4ufPGHiNe1rYJxYfehALLjup4Ls3ck42CWwjKiOqu0A=
 github.com/nwaples/rardecode/v2 v2.2.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
-github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
+github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
 github.com/oleiade/lane/v2 v2.0.0 h1:XW/ex/Inr+bPkLd3O240xrFOhUkTd4Wy176+Gv0E3Qw=
 github.com/oleiade/lane/v2 v2.0.0/go.mod h1:i5FBPFAYSWCgLh58UkUGCChjcCzef/MI7PlQm2TKCeg=
+github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo=
+github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA=
 github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM=
 github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
 github.com/olekukonko/ll v0.0.9 h1:Y+1YqDfVkqMWuEQMclsF9HUR5+a82+dxJuL1HHSRpxI=
@@ -1064,9 +1200,18 @@ github.com/olekukonko/tablewriter v1.0.9 h1:XGwRsYLC2bY7bNd93Dk51bcPZksWZmLYuaTH
 github.com/olekukonko/tablewriter v1.0.9/go.mod h1:5c+EBPeSqvXnLLgkm9isDdzR3wjfBkHR9Nhfp3NWrzo=
 github.com/olvrng/ujson v1.1.0 h1:8xVUzVlqwdMVWh5d1UHBtLQ1D50nxoPuPEq9Wozs8oA=
 github.com/olvrng/ujson v1.1.0/go.mod h1:Mz4G3RODTUfbkKyvi0lgmPx/7vd3Saksk+1jgk8s9xo=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
 github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
 github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
 github.com/opcoder0/capabilities v0.0.0-20221222060822-17fd73bffd2a h1:sbMMqulR2c6d2aeqOg5kzWv87unK0O4V78Dl1+YG4ys=
@@ -1083,8 +1228,6 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 h1:
 github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2/go.mod h1:MXdPzqAA8pHC58USHqNCSjyLnRQ6D+NjbpP+02Z1U/0=
 github.com/opencontainers/selinux v1.13.1 h1:A8nNeceYngH9Ow++M+VVEwJVpdFmrlxsN22F+ISDCJE=
 github.com/opencontainers/selinux v1.13.1/go.mod h1:S10WXZ/osk2kWOYKy1x2f/eXF5ZHJoUs8UU/2caNRbg=
-github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
-github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
 github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
 github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
 github.com/packetcap/go-pcap v0.0.0-20250723190045-d00b185f30b7 h1:MfXxQU9tEe3zmyLVVwE8gJwQVtsG2aqzBkFNz0N6eAo=
@@ -1093,8 +1236,11 @@ github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZ
 github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
 github.com/pborman/indent v1.2.1 h1:lFiviAbISHv3Rf0jcuh489bi06hj98JsVMtIDZQb9yM=
 github.com/pborman/indent v1.2.1/go.mod h1:FitS+t35kIYtB5xWTZAPhnmrxcciEEOdbyrrpz5K6Vw=
+github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
+github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
@@ -1111,6 +1257,8 @@ github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU
 github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY=
 github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1145,16 +1293,16 @@ github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvM
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
-github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
+github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
 github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
-github.com/puzpuzpuz/xsync/v2 v2.4.1 h1:aGdE1C/HaR/QC6YAFdtZXi60Df8/qBIrs8PKrzkItcM=
-github.com/puzpuzpuz/xsync/v2 v2.4.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
+github.com/puzpuzpuz/xsync/v2 v2.5.1 h1:mVGYAvzDSu52+zaGyNjC+24Xw2bQi3kTr4QJ6N9pIIU=
+github.com/puzpuzpuz/xsync/v2 v2.5.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -1172,6 +1320,8 @@ github.com/rust-secure-code/go-rustaudit v0.0.0-20250226111315-e20ec32e963c h1:8
 github.com/rust-secure-code/go-rustaudit v0.0.0-20250226111315-e20ec32e963c/go.mod h1:kwM/7r/rVluTE8qJbHAffduuqmSv4knVQT2IajGvSiA=
 github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
 github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20240229202106-bca3154da60a h1:4VFls9SuqkqeioVevnaeTXrYKQ7JiEsxqKHfxp+/ovA=
 github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20240229202106-bca3154da60a/go.mod h1:2zG1g57bc+D6FpNc68gsRXJgkidteqTMhWiiUP3m8UE=
 github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
@@ -1187,6 +1337,10 @@ github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb
 github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
 github.com/sassoftware/go-rpmutils v0.4.0 h1:ojND82NYBxgwrV+mX1CWsd5QJvvEZTKddtCdFLPWhpg=
 github.com/sassoftware/go-rpmutils v0.4.0/go.mod h1:3goNWi7PGAT3/dlql2lv3+MSN5jNYPjT5mVcQcIsYzI=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4=
+github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
 github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e h1:7q6NSFZDeGfvvtIRwBrU/aegEYJYmvev0cHAwo17zZQ=
 github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@@ -1194,8 +1348,8 @@ github.com/sebdah/goldie/v2 v2.7.1 h1:PkBHymaYdtvEkZV7TmyqKxdmn5/Vcj+8TpATWZjnG5
 github.com/sebdah/goldie/v2 v2.7.1/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
 github.com/seccomp/libseccomp-golang v0.11.0 h1:SDkcBRqGLP+sezmMACkxO1EfgbghxIxnRKfd6mHUEis=
 github.com/seccomp/libseccomp-golang v0.11.0/go.mod h1:5m1Lk8E9OwgZTTVz4bBOer7JuazaBa+xTkM895tDiWc=
-github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g=
-github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
+github.com/secure-systems-lab/go-securesystemslib v0.10.0 h1:l+H5ErcW0PAehBNrBxoGv1jjNpGYdZ9RcheFkB2WI14=
+github.com/secure-systems-lab/go-securesystemslib v0.10.0/go.mod h1:MRKONWmRoFzPNQ9USRF9i1mc7MvAVvF1LlW8X5VWDvk=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -1227,15 +1381,35 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l
 github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
 github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sigstore/cosign/v3 v3.0.5
h1:c1zPqjU+H4wmirgysC+AkWMg7a7fykyOYF/m+F1150I= +github.com/sigstore/cosign/v3 v3.0.5/go.mod h1:ble1vMvJagCFyTIDkibCq6MIHiWDw00JNYl0f9rB4T4= +github.com/sigstore/fulcio v1.8.5 h1:HYTD1/L5wlBp8JxsWxUf8hmfaNBBF/x3r3p5l6tZwbA= +github.com/sigstore/fulcio v1.8.5/go.mod h1:tSLYK3JsKvJpDW1BsIsVHZgHj+f8TjXARzqIUWSsSPQ= github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.5.1 h1:Ca1egHRWRuDvXV4tZu9aXEXc3Gej9FG+HKeapV9OAMQ= +github.com/sigstore/rekor v1.5.1/go.mod h1:gTLDuZuo3SyQCuZvKqwRPA79Qo/2rw39/WtLP/rZjUQ= +github.com/sigstore/rekor-tiles/v2 v2.2.0 h1:QwJNwxT+k5A3id+Hrg+8vYcNsTaB0Sj51xjfW2rKyAs= +github.com/sigstore/rekor-tiles/v2 v2.2.0/go.mod h1:/WNRYctHKdxcjgXydYwO5OclW72Zqh6fNHSyGE8zQOE= github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE= github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI= +github.com/sigstore/sigstore-go v1.1.4 h1:wTTsgCHOfqiEzVyBYA6mDczGtBkN7cM8mPpjJj5QvMg= +github.com/sigstore/sigstore-go v1.1.4/go.mod h1:2U/mQOT9cjjxrtIUeKDVhL+sHBKsnWddn8URlswdBsg= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.4 h1:VZ+L6SKVWbLPHznIF0tBuO7qKMFdJiJMVwFKu9DlY5o= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.4/go.mod h1:Rstj47WpJym25il8j4jTL0BfikzP/9AhVD+DsBcYzZc= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.4 h1:G7yOv8bxk3zIEEZyVCixPxtePIAm+t3ZWSaKRPzVw+o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.4/go.mod h1:hxJelB/bRItMYOzi6qD9xEKjse2QZcikh4TbysfdDHc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.4 h1:Qxt6dE4IwhJ6gIXmg2q4S/SeqEDSZ29nmfsv7Zb6LL4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.4/go.mod h1:hJVeNOwarqfyALjOwsf0OR8YA/A96NABucEaQumPr30= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.4 h1:KVavYMPfSf5NryOl6VrZ9nRG3fXOOJOPp7Czk/YCPkM= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.4/go.mod h1:J7CA1AaBkyK8dYq6EdQANhj+8oEcsA7PrIp088qgPiY= +github.com/sigstore/timestamp-authority/v2 v2.0.4 h1:65IBa4LUeFWDQu9hiTt5lBpi/F5jonJWZtH6VLn4InU= +github.com/sigstore/timestamp-authority/v2 v2.0.4/go.mod h1:EXJLiMDBqRPlzC02hPiFSiYTCqSuUpU68a4vr0DFePM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/sorairolake/lzip-go v0.3.8 h1:j5Q2313INdTA80ureWYRhX+1K78mUXfMoPZCw/ivWik= @@ -1285,6 +1459,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -1300,11 +1475,31 @@ github.com/sylabs/sif/v2 v2.22.0 h1:Y+xXufp4RdgZe02SR3nWEg7S6q4tPWN237WHYzkDSKA= github.com/sylabs/sif/v2 v2.22.0/go.mod h1:W1XhWTmG1KcG7j5a3KSYdMcUIFvbs240w/MMVW627hs= github.com/sylabs/squashfs v1.0.6 h1:PvJcDzxr+vIm2kH56mEMbaOzvGu79gK7P7IX+R7BDZI= github.com/sylabs/squashfs v1.0.6/go.mod h1:DlDeUawVXLWAsSRa085Eo0ZenGzAB32JdAUFaB0LZfE= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/terminalstatic/go-xsd-validate v0.1.6 h1:TenYeQ3eY631qNi1/cTmLH/s2slHPRKTTHT+XSHkepo= github.com/terminalstatic/go-xsd-validate v0.1.6/go.mod h1:18lsvYFofBflqCrvo1umpABZ99+GneNTw2kEEc8UPJw= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.4.1 h1:K6ewW064rKZCPkRo1W/CTbTtm/+IB4+coG1iNURAGCw= +github.com/theupdateframework/go-tuf/v2 v2.4.1/go.mod h1:Nex2enPVYDFCklrnbTzl3OVwD7fgIAj0J5++z/rvCj8= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.4.0 h1:j+S+WKBQ5ya26A5EM/uXoVe+a2IaPQN8KgBJZ22cJ+4= +github.com/tink-crypto/tink-go-hcvault/v2 v2.4.0/go.mod h1:OCKJIujnTzDq7f+73NhVs99oA2c1TR6nsOpuasYM6Yo= +github.com/tink-crypto/tink-go/v2 v2.6.0 h1:+KHNBHhWH33Vn+igZWcsgdEPUxKwBMEe0QC60t388v4= +github.com/tink-crypto/tink-go/v2 v2.6.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ulikunitz/xz v0.5.8/go.mod 
h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= @@ -1331,8 +1526,8 @@ github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zd github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 h1:jIVmlAFIqV3d+DOxazTR9v+zgj8+VYuQBzPgBZvWBHA= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651/go.mod h1:b26F2tHLqaoRQf8DywqzVaV1MQ9yvjb0OMcNl7Nxu20= -github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 h1:0KGbf+0SMg+UFy4e1A/CPVvXn21f1qtWdeJwxZFoQG8= -github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA= +github.com/wagoodman/go-progress v0.0.0-20260303201901-10176f79b2c0 h1:EHsPe0Q0ANoLOZff1dBLAyeWLTA4sbPTpGI+2zb0FnM= +github.com/wagoodman/go-progress v0.0.0-20260303201901-10176f79b2c0/go.mod h1:g/D9uEUFp5YLyciwCpVsSOZOm56hfv4rzGJod6MlqIM= github.com/weaveworks/procspy v0.0.0-20150706124340-cb970aa190c3 h1:UC4iN/yCDCObTBhKzo34/R2U6qptTPmqbzG6UiQVMUQ= github.com/weaveworks/procspy v0.0.0-20150706124340-cb970aa190c3/go.mod h1:cJTfuBcxkdbj8Mabk4PPdaf0AXv9TYEJmkFxKcWxYY4= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -1355,12 +1550,24 @@ github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZ github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yl2chen/cidranger v1.0.2 h1:lbOWZVCG1tCRX4u24kuM1Tb4nHqWkDxwLdoS+SevawU= github.com/yl2chen/cidranger v1.0.2/go.mod h1:9U1yz7WPYDwf0vpNWFaeRh0bjwz5RVgRy/9UEQfHl0g= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk= github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 
h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= @@ -1368,8 +1575,6 @@ github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmB go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1425,6 +1630,8 @@ go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLh go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= +go.step.sm/crypto v0.76.2 h1:JJ/yMcs/rmcCAwlo+afrHjq74XBFRTJw5B2y4Q4Z4c4= +go.step.sm/crypto v0.76.2/go.mod h1:m6KlB/HzIuGFep0UWI5e0SYi38UxpoKeCg6qUaHV6/Q= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -1484,8 +1691,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4= -golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.38.0 h1:5l+q+Y9JDC7mBOMjo4/aPhMDcxEptsX+Tt3GgRQRPuE= @@ -1562,6 +1769,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1578,6 +1786,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1585,6 +1794,8 @@ golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -1670,11 +1881,14 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1702,6 +1916,7 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1734,6 +1949,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1858,6 +2074,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -1883,6 +2100,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= @@ -2066,11 +2284,14 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= 
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.3.0 h1:kcsiS+WsTKyIEPABJBJtoG0KkOS6yzvJ+/eZlhD79kk= gopkg.in/mcuadros/go-syslog.v2 v2.3.0/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2079,6 +2300,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2099,18 +2321,18 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= istio.io/pkg v0.0.0-20231221211216-7635388a563e h1:ZlLVbKDlCzfP0MPbWc6VRcY23d9NdjLxwpPQpDrh3Gc= istio.io/pkg v0.0.0-20231221211216-7635388a563e/go.mod h1:fvmqEdHhZjYYwf6dSiIwvwc7db54kMWVTfsb91KmhzY= -k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= -k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= +k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= -k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= -k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= +k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= -k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= -k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= +k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= k8s.io/component-base v0.35.0 
h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= k8s.io/cri-api v0.35.0 h1:fxLSKyJHqbyCSUsg1rW4DRpmjSEM/elZ1GXzYTSLoDQ= @@ -2125,18 +2347,20 @@ k8s.io/kubelet v0.35.0 h1:8cgJHCBCKLYuuQ7/Pxb/qWbJfX1LXIw7790ce9xHq7c= k8s.io/kubelet v0.35.0/go.mod h1:ciRzAXn7C4z5iB7FhG1L2CGPPXLTVCABDlbXt/Zz8YA= k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= -modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM= -modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= -modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU= -modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE= -modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM= -modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= +modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= +modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE= +modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY= modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= -modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ= -modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8= +modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= +modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= @@ -2145,8 +2369,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= -modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek= -modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E= +modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU= +modernc.org/sqlite v1.46.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= @@ -2173,6 +2397,8 @@ sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099Yo sigs.k8s.io/yaml v1.4.0/go.mod 
h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= zombiezen.com/go/sqlite v1.4.0 h1:N1s3RIljwtp4541Y8rM880qgGIgq3fTD2yks1xftnKU= diff --git a/pkg/config/config.go b/pkg/config/config.go index eb410ef7d2..dbc55b080f 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -76,6 +76,7 @@ type Config struct { EnableRuntimeDetection bool `mapstructure:"runtimeDetectionEnabled"` EnableSbomGeneration bool `mapstructure:"sbomGenerationEnabled"` EnableSeccomp bool `mapstructure:"seccompServiceEnabled"` + EnableSignatureVerification bool `mapstructure:"enableSignatureVerification"` HostMonitoringEnabled bool `mapstructure:"hostMonitoringEnabled"` StandaloneMonitoringEnabled bool `mapstructure:"standaloneMonitoringEnabled"` SeccompProfileBackend string `mapstructure:"seccompProfileBackend"` @@ -106,6 +107,7 @@ type Config struct { ProcfsPidScanInterval time.Duration `mapstructure:"procfsPidScanInterval"` ProcfsScanInterval time.Duration `mapstructure:"procfsScanInterval"` ProfilesCacheRefreshRate time.Duration `mapstructure:"profilesCacheRefreshRate"` + StorageRPCBudget time.Duration `mapstructure:"storageRPCBudget"` RuleCoolDown rulecooldown.RuleCooldownConfig `mapstructure:"ruleCooldown"` TestMode bool `mapstructure:"testMode"` UpdateDataPeriod time.Duration `mapstructure:"updateDataPeriod"` @@ -197,6 +199,7 @@ func LoadConfigOptional(path string, errNotFound bool) (Config, error) { viper.SetDefault("celConfigCache::maxSize", 100000) viper.SetDefault("celConfigCache::ttl", 1*time.Minute) viper.SetDefault("ignoreRuleBindings", false) + viper.SetDefault("enableSignatureVerification", false) viper.SetDefault("eventDedup::enabled", true) viper.SetDefault("eventDedup::slotsExponent", 18) @@ -231,6 +234,7 @@ func LoadConfigOptional(path string, errNotFound bool) (Config, error) { viper.SetDefault("hostSensorInterval", 5*time.Minute) viper.AutomaticEnv() + _ = viper.BindEnv("enableSignatureVerification", "ENABLE_SIGNATURE_VERIFICATION") if err := viper.ReadInConfig(); err != nil { var notFound viper.ConfigFileNotFoundError diff --git a/pkg/containerprofilemanager/v1/lifecycle.go b/pkg/containerprofilemanager/v1/lifecycle.go index 8e40fd8702..dc9b8ac45a 100644 --- a/pkg/containerprofilemanager/v1/lifecycle.go +++ b/pkg/containerprofilemanager/v1/lifecycle.go @@ -93,14 +93,17 @@ func (cpm *ContainerProfileManager) addContainer(container *containercollection. return fmt.Errorf("failed to get shared data for container %s: %w", containerID, err) } - // Check if the container should use a user-defined profile + // Check if the container should use a user-defined profile. + // When both an ApplicationProfile and a NetworkNeighborhood are + // user-provided, skip ALL recording — there is nothing to learn. 
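+	// The ready channel is still closed below before the entry is removed,
+	// so callers waiting on container readiness are released even though
+	// nothing is recorded for this container.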
 if sharedData.UserDefinedProfile != "" {
 	logger.L().Debug("ignoring container with a user-defined profile",
 		helpers.String("containerID", containerID),
 		helpers.String("containerName", container.Runtime.ContainerName),
 		helpers.String("podName", container.K8s.PodName),
 		helpers.String("namespace", container.K8s.Namespace),
-		helpers.String("userDefinedProfile", sharedData.UserDefinedProfile))
+		helpers.String("userDefinedProfile", sharedData.UserDefinedProfile),
+		helpers.String("userDefinedNetwork", sharedData.UserDefinedNetwork))
 	// Close ready channel before removing entry
 	if entry, exists := cpm.getContainerEntry(containerID); exists {
 		entry.readyOnce.Do(func() {
@@ -159,6 +162,7 @@ func (cpm *ContainerProfileManager) addContainer(container *containercollection.
 	// Setup monitoring timer
 	sniffingTime := cpm.calculateSniffingTime(container)
+	sharedData.LearningPeriod = sniffingTime
 	timer := time.AfterFunc(sniffingTime, func() {
 		cpm.handleContainerMaxTime(container)
 	})
diff --git a/pkg/containerwatcher/v2/container_watcher_collection.go b/pkg/containerwatcher/v2/container_watcher_collection.go
index 834ecb4125..b919084aac 100644
--- a/pkg/containerwatcher/v2/container_watcher_collection.go
+++ b/pkg/containerwatcher/v2/container_watcher_collection.go
@@ -60,8 +60,7 @@ func (cw *ContainerWatcher) StartContainerCollection(ctx context.Context) error
 		cw.containerCallbackAsync,
 		cw.containerProcessTree.ContainerCallback,
 		cw.containerProfileManager.ContainerCallback,
-		cw.objectCache.ApplicationProfileCache().ContainerCallback,
-		cw.objectCache.NetworkNeighborhoodCache().ContainerCallback,
+		cw.objectCache.ContainerProfileCache().ContainerCallback,
 		cw.malwareManager.ContainerCallback,
 		cw.ruleManager.ContainerCallback,
 		cw.sbomManager.ContainerCallback,
diff --git a/pkg/ebpf/gadgets/randomx/program.bpf.c b/pkg/ebpf/gadgets/randomx/program.bpf.c
index ed9cd5812b..46e7425e29 100644
--- a/pkg/ebpf/gadgets/randomx/program.bpf.c
+++ b/pkg/ebpf/gadgets/randomx/program.bpf.c
@@ -166,4 +166,176 @@ int tracepoint__x86_fpu_regs_deactivated(struct trace_event_raw_x86_fpu *ctx)
 char LICENSE[] SEC("license") = "GPL";
-#endif // defined(__TARGET_ARCH_x86)
\ No newline at end of file
+#endif // defined(__TARGET_ARCH_x86)
+
+/* // Kernel types definitions
+#include <vmlinux.h>
+
+// eBPF helpers signatures
+// Check https://man7.org/linux/man-pages/man7/bpf-helpers.7.html to learn
+// more about different available helpers
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+// Inspektor Gadget buffer
+#include <gadget/buffer.h>
+// Helpers to handle common data
+#include <gadget/common.h>
+// Inspektor Gadget macros
+#include <gadget/macros.h>
+// Inspektor Gadget filtering
+#include <gadget/filter.h>
+// Inspektor Gadget types
+#include <gadget/types.h>
+// Inspektor Gadget mntns
+#include <gadget/mntns.h>
+
+#include "program.h"
+#include "upper_layer.h"
+#include "exe_path.h"
+
+#if defined(__TARGET_ARCH_x86)
+
+#define TARGET_RANDOMX_EVENTS_COUNT 5
+// 5 seconds in nanoseconds
+#define MAX_NS_BETWEEN_EVENTS 5000000000ULL
+
+// This struct will hold the state for each mount namespace
+struct mntns_cache {
+	u64 timestamp;
+	u64 events_count;
+	bool alerted;
+};
+
+// A map to store the cache per mntns_id.
+// key: mntns_id (u64), value: struct mntns_cache
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__uint(max_entries, 1024);
+	__type(key, u64);
+	__type(value, struct mntns_cache);
+} mntns_event_count SEC(".maps");
+
+// events is the name of the buffer map and 1024 * 256 (256KB) is its size.
+GADGET_TRACER_MAP(events, 1024 * 256);
+
+// Define a tracer
+GADGET_TRACER(randomx, events, event);
+
+// Utilize the kernel version provided by libbpf.
(kconfig must be present). +extern int LINUX_KERNEL_VERSION __kconfig; + +#if LINUX_KERNEL_VERSION <= KERNEL_VERSION(5, 15, 0) +struct old_fpu { + unsigned int last_cpu; + unsigned char initialized; + long: 24; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union fpregs_state state; +}; +#endif + +SEC("tracepoint/x86_fpu/x86_fpu_regs_deactivated") +int tracepoint__x86_fpu_regs_deactivated(struct trace_event_raw_x86_fpu *ctx) +{ + if (gadget_should_discard_data_current()) { + return 0; + } + + u64 mntns_id; + mntns_id = gadget_get_current_mntns_id(); + struct mntns_cache *cache; + cache = bpf_map_lookup_elem(&mntns_event_count, &mntns_id); + + u64 now = bpf_ktime_get_ns(); + + if (!cache) { + // First event for this mntns. Create a new entry. + struct mntns_cache new_cache = {}; + new_cache.timestamp = now; + new_cache.events_count = 1; + new_cache.alerted = false; + bpf_map_update_elem(&mntns_event_count, &mntns_id, &new_cache, BPF_ANY); + return 0; // Don't send an event yet + } + + // If we have already sent an alert for this mntns, do nothing. + if (cache->alerted) { + return 0; + } + + // Check if the last event was too long ago and reset if necessary. + if (now - cache->timestamp > MAX_NS_BETWEEN_EVENTS) { + cache->timestamp = now; + cache->events_count = 1; + bpf_map_update_elem(&mntns_event_count, &mntns_id, cache, BPF_ANY); + return 0; // Don't send an event yet + } + + // Increment the count. Using bpf_map_update_elem is not atomic, but for + // this use case (a single CPU tracepoint), it's safe. + cache->events_count++; + cache->timestamp = now; // Update timestamp with the latest event + + // Check if we have seen enough events + if (cache->events_count <= TARGET_RANDOMX_EVENTS_COUNT) { + // Not enough events yet, just update the map and exit. + bpf_map_update_elem(&mntns_event_count, &mntns_id, cache, BPF_ANY); + return 0; + } + + // --- Threshold has been reached! --- + // We only reach this point ONCE per mntns. + + // Mark as alerted to prevent sending more events for this mntns. + cache->alerted = true; + bpf_map_update_elem(&mntns_event_count, &mntns_id, cache, BPF_ANY); + + struct event *event; + event = gadget_reserve_buf(&events, sizeof(*event)); + if (!event) { + return 0; + } + + // Populate the event with data. This code is the same as before. 
+	gadget_process_populate(&event->proc);
+
+	void *fpu = BPF_CORE_READ(ctx, fpu);
+	if (fpu == NULL) {
+		gadget_discard_buf(event);
+		return 0;
+	}
+
+	u32 mxcsr;
+	if (LINUX_KERNEL_VERSION <= KERNEL_VERSION(5, 15, 0)) {
+		bpf_probe_read_kernel(&mxcsr, sizeof(mxcsr), &((struct old_fpu*)fpu)->state.xsave.i387.mxcsr);
+	} else {
+		mxcsr = BPF_CORE_READ((struct fpu*)fpu, fpstate, regs.xsave.i387.mxcsr);
+	}
+
+	// MXCSR bits 13-14 hold the SSE rounding-control field (0 = round to
+	// nearest, the hardware default). RandomX switches the rounding mode
+	// while hashing, so a non-default value is the signal we key on.
+	int fpcr = (mxcsr & 0x6000) >> 13;
+	if (fpcr != 0) {
+		event->upper_layer = has_upper_layer();
+		read_exe_path(event->exepath, sizeof(event->exepath));
+
+		event->timestamp_raw = bpf_ktime_get_boot_ns();
+
+		gadget_submit_buf(ctx, &events, event, sizeof(*event));
+	} else {
+		gadget_discard_buf(event);
+	}
+
+	return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
+
+#endif // defined(__TARGET_ARCH_x86)
+
+ */
\ No newline at end of file
diff --git a/pkg/hostsensormanager/sensor_kubelet.go b/pkg/hostsensormanager/sensor_kubelet.go
index 0950f5e1fc..dafb165773 100644
--- a/pkg/hostsensormanager/sensor_kubelet.go
+++ b/pkg/hostsensormanager/sensor_kubelet.go
@@ -4,8 +4,10 @@ import (
 	"context"
 	"fmt"
+	logger "github.com/kubescape/go-logger"
 	"github.com/kubescape/go-logger/helpers"
 	"github.com/kubescape/k8s-interface/hostsensor"
+	"sigs.k8s.io/yaml"
 )
 
 const (
@@ -25,6 +27,32 @@ var kubeletKubeConfigDefaultPathList = []string{
 	"/var/lib/kubelet/kubeconfig",
 }
 
+var kubeletServiceFilePaths = []string{
+	"/etc/systemd/system/kubelet.service",
+	"/usr/lib/systemd/system/kubelet.service",
+	"/lib/systemd/system/kubelet.service",
+}
+
+const kubeletServiceDropInDir = "/etc/systemd/system/kubelet.service.d"
+
+// kubeletConfigYAML is a minimal subset of KubeletConfiguration for CA file extraction.
+type kubeletConfigYAML struct {
+	Authentication struct {
+		X509 struct {
+			ClientCAFile string `json:"clientCAFile"`
+		} `json:"x509"`
+	} `json:"authentication"`
+}
+
+// extractClientCAFromKubeletConfig parses kubelet config YAML and returns the clientCAFile path.
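+// For illustration (hypothetical file contents), a kubelet config with:
+//
+//	authentication:
+//	  x509:
+//	    clientCAFile: /etc/kubernetes/pki/ca.crt
+//
+// yields "/etc/kubernetes/pki/ca.crt"; the empty string is returned when the
+// field is absent.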
+func extractClientCAFromKubeletConfig(content []byte) (string, error) { + var cfg kubeletConfigYAML + if err := yaml.Unmarshal(content, &cfg); err != nil { + return "", fmt.Errorf("failed to parse kubelet config: %w", err) + } + return cfg.Authentication.X509.ClientCAFile, nil +} + // KubeletInfoSensor implements the Sensor interface for kubelet info data type KubeletInfoSensor struct { nodeName string @@ -73,12 +101,31 @@ func (s *KubeletInfoSensor) Sense() (interface{}, error) { ret.KubeConfigFile = makeContaineredFileInfoFromListVerbose(ctx, kubeletProcess, kubeletKubeConfigDefaultPathList, true, helpers.String("in", "SenseKubeletInfo")) } - // Client CA + // Client CA: check cmdLine first, then fall back to kubelet config YAML if caFilePath, ok := kubeletProcess.GetArg(kubeletClientCAArgName); ok { ret.ClientCAFile = makeContaineredFileInfoVerbose(ctx, kubeletProcess, caFilePath, false, helpers.String("in", "SenseKubeletInfo")) + } else if ret.ConfigFile != nil && len(ret.ConfigFile.Content) > 0 { + if caFilePath, err := extractClientCAFromKubeletConfig(ret.ConfigFile.Content); err != nil { + logger.L().Debug("failed to extract clientCAFile from kubelet config", helpers.String("in", "SenseKubeletInfo"), helpers.Error(err)) + } else if caFilePath != "" { + ret.ClientCAFile = makeContaineredFileInfoVerbose(ctx, kubeletProcess, caFilePath, false, helpers.String("in", "SenseKubeletInfo")) + } } ret.CmdLine = kubeletProcess.RawCmd() + // Service files: main unit file and drop-in directory + for _, svcPath := range kubeletServiceFilePaths { + if fi := makeHostFileInfoVerbose(ctx, svcPath, false); fi != nil { + ret.ServiceFiles = append(ret.ServiceFiles, *fi) + break + } + } + if dropIns, err := makeHostDirFilesInfoVerbose(ctx, kubeletServiceDropInDir, false, 0); err == nil { + for _, fi := range dropIns { + ret.ServiceFiles = append(ret.ServiceFiles, *fi) + } + } + return &ret, nil } diff --git a/pkg/metricsmanager/metrics_manager_interface.go b/pkg/metricsmanager/metrics_manager_interface.go index 1542c13006..e6c20b62c2 100644 --- a/pkg/metricsmanager/metrics_manager_interface.go +++ b/pkg/metricsmanager/metrics_manager_interface.go @@ -20,4 +20,9 @@ type MetricsManager interface { ReportContainerStart() ReportContainerStop() ReportDedupEvent(eventType utils.EventType, duplicate bool) + ReportContainerProfileLegacyLoad(kind, completeness string) + SetContainerProfileCacheEntries(kind string, count float64) + ReportContainerProfileCacheHit(hit bool) + ReportContainerProfileReconcilerDuration(phase string, duration time.Duration) + ReportContainerProfileReconcilerEviction(reason string) } diff --git a/pkg/metricsmanager/metrics_manager_mock.go b/pkg/metricsmanager/metrics_manager_mock.go index 74424e07b1..70f118da8e 100644 --- a/pkg/metricsmanager/metrics_manager_mock.go +++ b/pkg/metricsmanager/metrics_manager_mock.go @@ -66,4 +66,9 @@ func (m *MetricsMock) ReportContainerStart() {} func (m *MetricsMock) ReportContainerStop() {} -func (m *MetricsMock) ReportDedupEvent(eventType utils.EventType, duplicate bool) {} +func (m *MetricsMock) ReportDedupEvent(eventType utils.EventType, duplicate bool) {} +func (m *MetricsMock) ReportContainerProfileLegacyLoad(_, _ string) {} +func (m *MetricsMock) SetContainerProfileCacheEntries(_ string, _ float64) {} +func (m *MetricsMock) ReportContainerProfileCacheHit(_ bool) {} +func (m *MetricsMock) ReportContainerProfileReconcilerDuration(_ string, _ time.Duration) {} +func (m *MetricsMock) ReportContainerProfileReconcilerEviction(_ string) {} diff --git 
a/pkg/metricsmanager/metrics_manager_noop.go b/pkg/metricsmanager/metrics_manager_noop.go index c797f348a1..092b5a5e46 100644 --- a/pkg/metricsmanager/metrics_manager_noop.go +++ b/pkg/metricsmanager/metrics_manager_noop.go @@ -22,3 +22,8 @@ func (m *MetricsNoop) ReportRuleEvaluationTime(_ string, _ utils.EventType, _ ti func (m *MetricsNoop) ReportContainerStart() {} func (m *MetricsNoop) ReportContainerStop() {} func (m *MetricsNoop) ReportDedupEvent(_ utils.EventType, _ bool) {} +func (m *MetricsNoop) ReportContainerProfileLegacyLoad(_, _ string) {} +func (m *MetricsNoop) SetContainerProfileCacheEntries(_ string, _ float64) {} +func (m *MetricsNoop) ReportContainerProfileCacheHit(_ bool) {} +func (m *MetricsNoop) ReportContainerProfileReconcilerDuration(_ string, _ time.Duration) {} +func (m *MetricsNoop) ReportContainerProfileReconcilerEviction(_ string) {} diff --git a/pkg/metricsmanager/prometheus/prometheus.go b/pkg/metricsmanager/prometheus/prometheus.go index 30211664e6..d729924ab5 100644 --- a/pkg/metricsmanager/prometheus/prometheus.go +++ b/pkg/metricsmanager/prometheus/prometheus.go @@ -63,6 +63,13 @@ type PrometheusMetric struct { // Dedup metrics dedupEventCounter *prometheus.CounterVec + // ContainerProfile cache metrics + cpCacheLegacyLoadsCounter *prometheus.CounterVec + cpCacheEntriesGauge *prometheus.GaugeVec + cpCacheHitCounter *prometheus.CounterVec + cpReconcilerDurationHistogram *prometheus.HistogramVec + cpReconcilerEvictionsCounter *prometheus.CounterVec + // Cache to avoid allocating Labels maps on every call ruleCounterCache map[string]prometheus.Counter rulePrefilteredCounterCache map[string]prometheus.Counter @@ -215,6 +222,29 @@ func NewPrometheusMetric() *PrometheusMetric { Help: "Total number of events processed by the dedup layer", }, []string{eventTypeLabel, "result"}), + // ContainerProfile cache metrics + cpCacheLegacyLoadsCounter: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "node_agent_user_profile_legacy_loads_total", + Help: "Number of times a user-authored legacy ApplicationProfile or NetworkNeighborhood was loaded into the ContainerProfileCache; will be removed in a future release.", + }, []string{"kind", "completeness"}), + cpCacheEntriesGauge: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "node_agent_containerprofile_cache_entries", + Help: "Current number of cached ContainerProfile entries per kind.", + }, []string{"kind"}), + cpCacheHitCounter: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "node_agent_containerprofile_cache_hit_total", + Help: "Total number of ContainerProfile cache lookups by result.", + }, []string{"result"}), + cpReconcilerDurationHistogram: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "node_agent_containerprofile_reconciler_duration_seconds", + Help: "Duration of ContainerProfile reconciler phases in seconds.", + Buckets: prometheus.DefBuckets, + }, []string{"phase"}), + cpReconcilerEvictionsCounter: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "node_agent_containerprofile_reconciler_evictions_total", + Help: "Total number of ContainerProfile cache evictions by reason.", + }, []string{"reason"}), + // Initialize counter caches ruleCounterCache: make(map[string]prometheus.Counter), rulePrefilteredCounterCache: make(map[string]prometheus.Counter), @@ -256,6 +286,11 @@ func (p *PrometheusMetric) Destroy() { prometheus.Unregister(p.containerStartCounter) prometheus.Unregister(p.containerStopCounter) prometheus.Unregister(p.dedupEventCounter) + 
prometheus.Unregister(p.cpCacheLegacyLoadsCounter) + prometheus.Unregister(p.cpCacheEntriesGauge) + prometheus.Unregister(p.cpCacheHitCounter) + prometheus.Unregister(p.cpReconcilerDurationHistogram) + prometheus.Unregister(p.cpReconcilerEvictionsCounter) // Unregister program ID metrics prometheus.Unregister(p.programRuntimeGauge) prometheus.Unregister(p.programRunCountGauge) @@ -432,3 +467,27 @@ func (p *PrometheusMetric) ReportDedupEvent(eventType utils.EventType, duplicate } p.dedupEventCounter.WithLabelValues(string(eventType), result).Inc() } + +func (p *PrometheusMetric) ReportContainerProfileLegacyLoad(kind, completeness string) { + p.cpCacheLegacyLoadsCounter.WithLabelValues(kind, completeness).Inc() +} + +func (p *PrometheusMetric) SetContainerProfileCacheEntries(kind string, count float64) { + p.cpCacheEntriesGauge.WithLabelValues(kind).Set(count) +} + +func (p *PrometheusMetric) ReportContainerProfileCacheHit(hit bool) { + result := "hit" + if !hit { + result = "miss" + } + p.cpCacheHitCounter.WithLabelValues(result).Inc() +} + +func (p *PrometheusMetric) ReportContainerProfileReconcilerDuration(phase string, duration time.Duration) { + p.cpReconcilerDurationHistogram.WithLabelValues(phase).Observe(duration.Seconds()) +} + +func (p *PrometheusMetric) ReportContainerProfileReconcilerEviction(reason string) { + p.cpReconcilerEvictionsCounter.WithLabelValues(reason).Inc() +} diff --git a/pkg/objectcache/applicationprofilecache/applicationprofilecache.go b/pkg/objectcache/applicationprofilecache/applicationprofilecache.go deleted file mode 100644 index adb0fea10c..0000000000 --- a/pkg/objectcache/applicationprofilecache/applicationprofilecache.go +++ /dev/null @@ -1,766 +0,0 @@ -package applicationprofilecache - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff/v5" - mapset "github.com/deckarep/golang-set/v2" - "github.com/goradd/maps" - containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" - "github.com/kubescape/node-agent/pkg/config" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache/callstackcache" - "github.com/kubescape/node-agent/pkg/resourcelocks" - "github.com/kubescape/node-agent/pkg/storage" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -// ContainerInfo holds container metadata we need for application profile mapping -type ContainerInfo struct { - ContainerID string - WorkloadID string - InstanceTemplateHash string - Namespace string - Name string - SeenContainerFromTheStart bool // True if container was seen from the start - UserDefinedProfile string -} - -// ContainerCallStackIndex maintains call stack search trees for a container -type ContainerCallStackIndex struct { - searchTree *callstackcache.CallStackSearchTree -} - -type ApplicationProfileCacheImpl struct { - cfg config.Config - workloadIDToProfile maps.SafeMap[string, *v1beta1.ApplicationProfile] - workloadIDToProfileState maps.SafeMap[string, *objectcache.ProfileState] // Tracks profile state even if not in cache - containerIDToInfo maps.SafeMap[string, *ContainerInfo] - profileToUserManagedIdentifier maps.SafeMap[string, string] // profileName -> user-managed profile unique identifier (This is used to prevent merging the same 
user-managed profile multiple times) - containerToCallStackIndex maps.SafeMap[string, *ContainerCallStackIndex] - storageClient storage.ProfileClient - k8sObjectCache objectcache.K8sObjectCache - updateInterval time.Duration - updateInProgress bool // Flag to track if update is in progress - updateMutex sync.Mutex // Mutex to protect the flag - containerLocks *resourcelocks.ResourceLocks // Locks for each container to prevent concurrent modifications -} - -// NewApplicationProfileCache creates a new application profile cache with periodic updates -func NewApplicationProfileCache(cfg config.Config, storageClient storage.ProfileClient, k8sObjectCache objectcache.K8sObjectCache) *ApplicationProfileCacheImpl { - updateInterval := utils.AddJitter(cfg.ProfilesCacheRefreshRate, 10) // Add 10% jitter to avoid high load on the storage - - apc := &ApplicationProfileCacheImpl{ - cfg: cfg, - workloadIDToProfile: maps.SafeMap[string, *v1beta1.ApplicationProfile]{}, - workloadIDToProfileState: maps.SafeMap[string, *objectcache.ProfileState]{}, - containerIDToInfo: maps.SafeMap[string, *ContainerInfo]{}, - profileToUserManagedIdentifier: maps.SafeMap[string, string]{}, - containerToCallStackIndex: maps.SafeMap[string, *ContainerCallStackIndex]{}, - storageClient: storageClient, - k8sObjectCache: k8sObjectCache, - updateInterval: updateInterval, - containerLocks: resourcelocks.New(), - } - - return apc -} - -// Start begins the periodic update process -func (apc *ApplicationProfileCacheImpl) Start(ctx context.Context) { - go apc.periodicUpdate(ctx) -} - -// periodicUpdate periodically fetches and updates application profiles from storage -func (apc *ApplicationProfileCacheImpl) periodicUpdate(ctx context.Context) { - ticker := time.NewTicker(apc.updateInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - // Check if an update is already in progress - apc.updateMutex.Lock() - if apc.updateInProgress { - // Skip this update cycle - logger.L().Debug("skipping profile update: previous update still in progress") - apc.updateMutex.Unlock() - continue - } - - // Set the flag and release the lock before the potentially long-running call - apc.updateInProgress = true - apc.updateMutex.Unlock() - - // Run the update directly - apc.updateAllProfiles(ctx) - - // Mark the update as complete - apc.updateMutex.Lock() - apc.updateInProgress = false - apc.updateMutex.Unlock() - - case <-ctx.Done(): - logger.L().Info("ApplicationProfileCache periodic update stopped") - return - } - } -} - -// updateAllProfiles fetches all application profiles from storage and updates the cache -func (apc *ApplicationProfileCacheImpl) updateAllProfiles(ctx context.Context) { - // Get unique namespaces from container info - namespaces := apc.getNamespaces() - if len(namespaces) == 0 { - logger.L().Debug("no namespaces found in cache, skipping profile update") - return - } - - // Iterate over each namespace - for _, namespace := range namespaces { - // Get container IDs for this namespace - containerIDs := apc.getContainerIDsForNamespace(namespace) - if len(containerIDs) == 0 { - logger.L().Debug("no containers found for namespace, skipping", - helpers.String("namespace", namespace)) - continue - } - - // Get profiles list for this namespace - var profileList *v1beta1.ApplicationProfileList - continueToken := "" - for { - list, err := apc.storageClient.ListApplicationProfiles(namespace, int64(50), continueToken) - if err != nil { - logger.L().Error("failed to list application profiles", - helpers.String("namespace", 
namespace), - helpers.Error(err)) - break - } - - if profileList == nil { - profileList = list - } else { - profileList.Items = append(profileList.Items, list.Items...) - } - - continueToken = list.Continue - if continueToken == "" { - break - } - } - - if profileList == nil { - continue - } - - // Process each profile - for _, profile := range profileList.Items { - // Handle user-managed profiles - if isUserManagedProfile(&profile) { - apc.handleUserManagedProfile(&profile) - continue - } - - // Get the workload ID from profile - workloadID := apc.wlidKey(profile.Annotations[helpersv1.WlidMetadataKey], profile.Labels[helpersv1.TemplateHashKey]) - if workloadID == "" { - continue // this is the case for user-defined profiles - } - - // Update profile state regardless of whether we'll update the full profile - profileState := &objectcache.ProfileState{ - Completion: profile.Annotations[helpersv1.CompletionMetadataKey], - Status: profile.Annotations[helpersv1.StatusMetadataKey], - Name: profile.Name, - Error: nil, - } - apc.workloadIDToProfileState.Set(workloadID, profileState) - - // Only consider completed profiles - if profile.Annotations[helpersv1.StatusMetadataKey] != helpersv1.Completed { - continue - } - - // Check if this workload ID is used by any container in this namespace - workloadIDInUse := false - hasNewContainer := false // Track if any container using this workload was seen from start - for _, containerID := range containerIDs { - if containerInfo, exists := apc.containerIDToInfo.Load(containerID); exists && - containerInfo.WorkloadID == workloadID && - containerInfo.InstanceTemplateHash == profile.Labels[helpersv1.TemplateHashKey] { - workloadIDInUse = true - // If any container was seen from start, mark it - if containerInfo.SeenContainerFromTheStart { - hasNewContainer = true - } - } - } - - if !workloadIDInUse { - continue - } - - // If we have a "new" container (seen from start) and the profile is partial, - // skip it - we don't want to use partial profiles for containers we're tracking from the start - if hasNewContainer && profile.Annotations[helpersv1.CompletionMetadataKey] == helpersv1.Partial { - logger.L().Debug("updateAllProfiles: skipping partial profile for new container", - helpers.String("profileName", profile.Name), - helpers.String("workloadID", workloadID)) - continue - } - - // Update the profile in the cache - if existingProfile, exists := apc.workloadIDToProfile.Load(workloadID); exists { - // If the profile already exists and it's complete/completed, continue to the next one - if existingProfile.Annotations[helpersv1.CompletionMetadataKey] == helpersv1.Full { - continue - } - - // If the new profile is not complete and we already have a completed/partial one, skip it - if profile.Annotations[helpersv1.CompletionMetadataKey] != helpersv1.Full { - continue - } - } - - // Fetch the profile from storage - fullProfile, err := apc.storageClient.GetApplicationProfile(namespace, profile.Name) - if err != nil { - logger.L().Error("failed to get application profile", - helpers.String("workloadID", workloadID), - helpers.String("namespace", namespace), - helpers.String("profileName", profile.Name), - helpers.Error(err)) - // Update the profile state to indicate an error - profileState.Error = err - apc.workloadIDToProfileState.Set(workloadID, profileState) - continue - } - - apc.workloadIDToProfile.Set(workloadID, fullProfile) - logger.L().Debug("application profile downloaded, starting anomaly detection", - helpers.String("workloadID", workloadID), - 
helpers.String("namespace", namespace), - helpers.String("status", profile.Annotations[helpersv1.StatusMetadataKey]), - helpers.String("completion", profile.Annotations[helpersv1.CompletionMetadataKey])) - - // Update call stack search trees for containers using this workload ID - for _, containerID := range containerIDs { - if containerInfo, exists := apc.containerIDToInfo.Load(containerID); exists && - containerInfo.WorkloadID == workloadID && - containerInfo.InstanceTemplateHash == profile.Labels[helpersv1.TemplateHashKey] { - // Create or update call stack search tree if not exists - apc.indexContainerCallStacks(containerID, containerInfo.Name, fullProfile) - } - } - } - // Continue to next namespace - } -} - -// handleUserManagedProfile handles user-managed profiles -func (apc *ApplicationProfileCacheImpl) handleUserManagedProfile(profile *v1beta1.ApplicationProfile) { - normalizedProfileName := strings.TrimPrefix(profile.Name, helpersv1.UserApplicationProfilePrefix) - userManagedProfileUniqueIdentifier := profile.ResourceVersion + string(profile.UID) - - // Create a unique tracking key for this user profile - profileKey := apc.profileKey(profile.Namespace, normalizedProfileName) - - // Check if we've already processed this exact version of the user-managed profile - if storedIdentifier, exists := apc.profileToUserManagedIdentifier.Load(profileKey); exists && - storedIdentifier == userManagedProfileUniqueIdentifier { - return - } - - // Find and collect the profile to merge - var toMerge struct { - wlid string - profile *v1beta1.ApplicationProfile - } - - apc.workloadIDToProfile.Range(func(wlid string, originalProfile *v1beta1.ApplicationProfile) bool { - if originalProfile.Name == normalizedProfileName && originalProfile.Namespace == profile.Namespace { - toMerge.wlid = wlid - toMerge.profile = originalProfile - logger.L().Debug("found matching profile for user-managed profile", - helpers.String("workloadID", wlid), - helpers.String("namespace", originalProfile.Namespace), - helpers.String("profileName", originalProfile.Name)) - // Stop iteration - return false - } - return true - }) - - // If we didn't find a matching profile, skip merging - if toMerge.profile == nil { - return - } - - // Fetch the full user profile - fullUserProfile, err := apc.storageClient.GetApplicationProfile(profile.Namespace, profile.Name) - if err != nil { - logger.L().Error("failed to get user-managed profile", - helpers.String("namespace", profile.Namespace), - helpers.String("profileName", profile.Name), - helpers.Error(err)) - return - } - - // Merge the user-managed profile with the normal profile - - // First, pull the original profile from the storage - originalProfile, err := apc.storageClient.GetApplicationProfile(toMerge.profile.Namespace, toMerge.profile.Name) - if err != nil { - logger.L().Error("failed to get original profile", - helpers.String("namespace", toMerge.profile.Namespace), - helpers.String("profileName", toMerge.profile.Name), - helpers.Error(err)) - return - } - // Merge the profiles - mergedProfile := apc.performMerge(originalProfile, fullUserProfile) - // Update the cache with the merged profile - apc.workloadIDToProfile.Set(toMerge.wlid, mergedProfile) - // Update profile state for the merged profile - profileState := &objectcache.ProfileState{ - Completion: mergedProfile.Annotations[helpersv1.CompletionMetadataKey], - Status: mergedProfile.Annotations[helpersv1.StatusMetadataKey], - Name: mergedProfile.Name, - Error: nil, - } - apc.workloadIDToProfileState.Set(toMerge.wlid, 
profileState) - - logger.L().Debug("merged user-managed profile with normal profile", - helpers.String("workloadID", toMerge.wlid), - helpers.String("namespace", profile.Namespace), - helpers.String("profileName", profile.Name)) - - // We need to index the call stacks for the merged profile here, but currently we don't support that. - - // Record that we've processed this version of the profile - apc.profileToUserManagedIdentifier.Set(profileKey, userManagedProfileUniqueIdentifier) -} - -// indexContainerCallStacks builds the search index for a container's call stacks and removes them from the profile -func (apc *ApplicationProfileCacheImpl) indexContainerCallStacks(containerID, containerName string, appProfile *v1beta1.ApplicationProfile) { - if appProfile == nil { - logger.L().Warning("ApplicationProfileCacheImpl - application profile is nil", - helpers.String("containerID", containerID), - helpers.String("containerName", containerName)) - return - } - - // Create a new call stack search tree - callStackSearchTree := callstackcache.NewCallStackSearchTree() - apc.containerToCallStackIndex.Set(containerID, &ContainerCallStackIndex{ - searchTree: callStackSearchTree, - }) - - // Iterate over the containers in the application profile - // Find the container in the profile and index its call stacks - for _, c := range appProfile.Spec.Containers { - if c.Name == containerName { - // Index all call stacks - for _, stack := range c.IdentifiedCallStacks { - callStackSearchTree.AddCallStack(stack) - } - - // Clear the call stacks to free memory - c.IdentifiedCallStacks = nil - break - } - } - - // Also check init containers - for _, c := range appProfile.Spec.InitContainers { - if c.Name == containerName { - for _, stack := range c.IdentifiedCallStacks { - callStackSearchTree.AddCallStack(stack) - } - - // Clear the call stacks to free memory - c.IdentifiedCallStacks = nil - break - } - } - - // And ephemeral containers - for _, c := range appProfile.Spec.EphemeralContainers { - if c.Name == containerName { - for _, stack := range c.IdentifiedCallStacks { - callStackSearchTree.AddCallStack(stack) - } - - // Clear the call stacks to free memory - c.IdentifiedCallStacks = nil - break - } - } -} - -// ContainerCallback handles container lifecycle events -func (apc *ApplicationProfileCacheImpl) ContainerCallback(notif containercollection.PubSubEvent) { - isHost := utils.IsHostContainer(notif.Container) - namespace := notif.Container.K8s.Namespace - if isHost { - namespace = "host" - } - switch notif.Type { - case containercollection.EventTypeAddContainer: - if !isHost && apc.cfg.IgnoreContainer(namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { - return - } - container := notif.Container - if isHost { - containerCopy := *notif.Container - containerCopy.K8s.Namespace = namespace - container = &containerCopy - } - go apc.addContainerWithTimeout(container) - case containercollection.EventTypeRemoveContainer: - if !isHost && apc.cfg.IgnoreContainer(namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { - return - } - go apc.deleteContainer(notif.Container.Runtime.ContainerID) - } -} - -// addContainerWithTimeout handles adding a container with a timeout to prevent hanging -func (apc *ApplicationProfileCacheImpl) addContainerWithTimeout(container *containercollection.Container) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - - done := make(chan error, 1) - go func() { - done <- apc.addContainer(container, ctx) - }() - - 
select { - case err := <-done: - if err != nil { - logger.L().Error("failed to add container to the cache", helpers.Error(err)) - } - case <-ctx.Done(): - logger.L().Error("timeout while adding container to the cache", - helpers.String("containerID", container.Runtime.ContainerID), - helpers.String("containerName", container.Runtime.ContainerName), - helpers.String("podName", container.K8s.PodName), - helpers.String("namespace", container.K8s.Namespace)) - } -} - -// addContainer adds a container to the cache -func (apc *ApplicationProfileCacheImpl) addContainer(container *containercollection.Container, ctx context.Context) error { - containerID := container.Runtime.ContainerID - - return apc.containerLocks.WithLockAndError(containerID, func() error { - // Get workload ID from shared data - sharedData, err := apc.waitForSharedContainerData(containerID, ctx) - if err != nil { - logger.L().Error("failed to get shared data for container", - helpers.String("containerID", containerID), - helpers.Error(err)) - return err - } - - workloadID := apc.wlidKey(sharedData.Wlid, sharedData.InstanceID.GetTemplateHash()) - if workloadID == "" { - logger.L().Debug("empty workloadID for container", helpers.String("containerID", containerID)) - return nil - } - - // If container restarts and profile is partial, delete it from cache - // This ensures we don't alert on activity we didn't see after restart - if existingProfile, exists := apc.workloadIDToProfile.Load(workloadID); exists && !sharedData.PreRunningContainer { - if existingProfile != nil && existingProfile.Annotations != nil { - completion := existingProfile.Annotations[helpersv1.CompletionMetadataKey] - if completion == helpersv1.Partial { - logger.L().Debug("deleting partial profile on container restart", - helpers.String("containerID", containerID), - helpers.String("workloadID", workloadID), - helpers.String("namespace", container.K8s.Namespace)) - - // Delete the profile from cache - profileKey := apc.profileKey(existingProfile.Namespace, existingProfile.Name) - apc.profileToUserManagedIdentifier.Delete(profileKey) - apc.workloadIDToProfile.Delete(workloadID) - - // Also delete call stack indices for all containers using this workload ID - // (including the current container if it exists from a previous run) - apc.containerToCallStackIndex.Delete(containerID) - apc.containerIDToInfo.Range(func(cID string, info *ContainerInfo) bool { - if info.WorkloadID == workloadID { - apc.containerToCallStackIndex.Delete(cID) - } - return true - }) - } - } - } else { - apc.workloadIDToProfileState.Set(workloadID, nil) - } - - // Create container info - // Mark container as "seen from start" if it is not pre-running - containerInfo := &ContainerInfo{ - ContainerID: containerID, - WorkloadID: workloadID, - InstanceTemplateHash: sharedData.InstanceID.GetTemplateHash(), - Namespace: container.K8s.Namespace, - Name: container.Runtime.ContainerName, - SeenContainerFromTheStart: !sharedData.PreRunningContainer, - } - - // Check for user-defined profile - if userDefinedProfile, ok := container.K8s.PodLabels[helpersv1.UserDefinedProfileMetadataKey]; ok { - if userDefinedProfile != "" { - // Set the user-defined profile in container info - containerInfo.UserDefinedProfile = userDefinedProfile - // Fetch the profile from storage - // TODO should we cache user-defined profiles separately? 
- it could allow deduplication - fullProfile, err := apc.storageClient.GetApplicationProfile(container.K8s.Namespace, userDefinedProfile) - if err != nil { - logger.L().Error("failed to get user-defined profile", - helpers.String("containerID", containerID), - helpers.String("workloadID", workloadID), - helpers.String("namespace", container.K8s.Namespace), - helpers.String("profileName", userDefinedProfile), - helpers.Error(err)) - // Update the profile state to indicate an error - profileState := &objectcache.ProfileState{ - Error: err, - } - apc.workloadIDToProfileState.Set(workloadID, profileState) - return nil - } - // Update the profile in the cache - apc.workloadIDToProfile.Set(workloadID, fullProfile) - logger.L().Debug("user-defined application profile downloaded, starting anomaly detection", - helpers.String("containerID", containerID), - helpers.String("workloadID", workloadID), - helpers.String("namespace", container.K8s.Namespace), - helpers.String("profileName", userDefinedProfile)) - } - } - - // Add to container info map - apc.containerIDToInfo.Set(containerID, containerInfo) - - logger.L().Debug("container added to cache", - helpers.String("containerID", containerID), - helpers.String("workloadID", workloadID), - helpers.String("namespace", container.K8s.Namespace)) - - return nil - }) -} - -// deleteContainer deletes a container from the cache -func (apc *ApplicationProfileCacheImpl) deleteContainer(containerID string) { - apc.containerLocks.WithLock(containerID, func() { - // Get container info - containerInfo, exists := apc.containerIDToInfo.Load(containerID) - if !exists { - logger.L().Debug("containerID not found in cache", helpers.String("containerID", containerID)) - return - } - - // Clean up container info and call stack index - apc.containerIDToInfo.Delete(containerID) - apc.containerToCallStackIndex.Delete(containerID) - - // Check if any other container is using the same workload ID - workloadStillInUse := false - apc.containerIDToInfo.Range(func(_ string, info *ContainerInfo) bool { - if info.WorkloadID == containerInfo.WorkloadID { - workloadStillInUse = true - return false // Stop iteration - } - return true // Continue iteration - }) - - // If no other container is using the same workload ID, delete it from the cache - if !workloadStillInUse { - if profile, exists := apc.workloadIDToProfile.Load(containerInfo.WorkloadID); exists { - // Remove the profile from the cache - profileKey := apc.profileKey(profile.Namespace, profile.Name) - apc.profileToUserManagedIdentifier.Delete(profileKey) - } - apc.workloadIDToProfileState.Delete(containerInfo.WorkloadID) - apc.workloadIDToProfile.Delete(containerInfo.WorkloadID) - logger.L().Debug("deleted workloadID from cache", helpers.String("workloadID", containerInfo.WorkloadID)) - } - }) - - // Clean up the lock when done - call this outside the WithLock closure - apc.containerLocks.ReleaseLock(containerID) -} - -// waitForSharedContainerData waits for shared container data to be available -func (apc *ApplicationProfileCacheImpl) waitForSharedContainerData(containerID string, ctx context.Context) (*objectcache.WatchedContainerData, error) { - return backoff.Retry(ctx, func() (*objectcache.WatchedContainerData, error) { - if sharedData := apc.k8sObjectCache.GetSharedContainerData(containerID); sharedData != nil { - return sharedData, nil - } - return nil, fmt.Errorf("container %s not found in shared data", containerID) - }, backoff.WithBackOff(backoff.NewExponentialBackOff())) -} - -func (apc 
*ApplicationProfileCacheImpl) profileKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - -func (apc *ApplicationProfileCacheImpl) wlidKey(wlid, templateHash string) string { - return fmt.Sprintf("%s/%s", wlid, templateHash) -} - -func (apc *ApplicationProfileCacheImpl) performMerge(normalProfile, userManagedProfile *v1beta1.ApplicationProfile) *v1beta1.ApplicationProfile { - mergedProfile := normalProfile.DeepCopy() - - // Merge spec - mergedProfile.Spec.Containers = apc.mergeContainers(mergedProfile.Spec.Containers, userManagedProfile.Spec.Containers) - mergedProfile.Spec.InitContainers = apc.mergeContainers(mergedProfile.Spec.InitContainers, userManagedProfile.Spec.InitContainers) - mergedProfile.Spec.EphemeralContainers = apc.mergeContainers(mergedProfile.Spec.EphemeralContainers, userManagedProfile.Spec.EphemeralContainers) - - return mergedProfile -} - -func (apc *ApplicationProfileCacheImpl) mergeContainers(normalContainers, userManagedContainers []v1beta1.ApplicationProfileContainer) []v1beta1.ApplicationProfileContainer { - if len(userManagedContainers) != len(normalContainers) { - // If the number of containers don't match, we can't merge - logger.L().Warning("ApplicationProfileCacheImpl - failed to merge user-managed profile with base profile", - helpers.Int("normalContainers len", len(normalContainers)), - helpers.Int("userManagedContainers len", len(userManagedContainers)), - helpers.String("reason", "number of containers don't match")) - return normalContainers - } - - // Assuming the normalContainers are already in the correct Pod order - // We'll merge user containers at their corresponding positions - for i := range normalContainers { - for _, userContainer := range userManagedContainers { - if normalContainers[i].Name == userContainer.Name { - apc.mergeContainer(&normalContainers[i], &userContainer) - break - } - } - } - return normalContainers -} - -func (apc *ApplicationProfileCacheImpl) mergeContainer(normalContainer, userContainer *v1beta1.ApplicationProfileContainer) { - normalContainer.Capabilities = append(normalContainer.Capabilities, userContainer.Capabilities...) - normalContainer.Execs = append(normalContainer.Execs, userContainer.Execs...) - normalContainer.Opens = append(normalContainer.Opens, userContainer.Opens...) - normalContainer.Syscalls = append(normalContainer.Syscalls, userContainer.Syscalls...) - normalContainer.Endpoints = append(normalContainer.Endpoints, userContainer.Endpoints...) 
- for k, v := range userContainer.PolicyByRuleId { - if existingPolicy, exists := normalContainer.PolicyByRuleId[k]; exists { - normalContainer.PolicyByRuleId[k] = utils.MergePolicies(existingPolicy, v) - } else { - normalContainer.PolicyByRuleId[k] = v - } - } -} - -func isUserManagedProfile(appProfile *v1beta1.ApplicationProfile) bool { - return appProfile.Annotations != nil && - appProfile.Annotations[helpersv1.ManagedByMetadataKey] == helpersv1.ManagedByUserValue && - strings.HasPrefix(appProfile.GetName(), helpersv1.UserApplicationProfilePrefix) -} - -// GetApplicationProfile gets the application profile for a container -func (apc *ApplicationProfileCacheImpl) GetApplicationProfile(containerID string) *v1beta1.ApplicationProfile { - // Get container info - if containerInfo, exists := apc.containerIDToInfo.Load(containerID); exists { - workloadID := containerInfo.WorkloadID - if workloadID == "" { - return nil - } - - // Try to get profile from cache - if profile, exists := apc.workloadIDToProfile.Load(workloadID); exists { - if profile != nil { - return profile - } - } - } - - return nil -} - -// GetApplicationProfileState gets the profile state for a container -func (apc *ApplicationProfileCacheImpl) GetApplicationProfileState(containerID string) *objectcache.ProfileState { - // Get container info - containerInfo, exists := apc.containerIDToInfo.Load(containerID) - if !exists { - return &objectcache.ProfileState{ - Error: fmt.Errorf("container %s not found in cache", containerID), - } - } - - workloadID := containerInfo.WorkloadID - if workloadID == "" { - return &objectcache.ProfileState{ - Error: fmt.Errorf("no workload ID for container %s", containerID), - } - } - - // Try to get profile state from cache - if profileState, exists := apc.workloadIDToProfileState.Load(workloadID); exists { - if profileState != nil { - return profileState - } else { - return &objectcache.ProfileState{ - Error: fmt.Errorf("profile state not available - shouldn't happen"), - } - } - } - - return &objectcache.ProfileState{ - Error: fmt.Errorf("profile state not found for workload ID %s", workloadID), - } -} - -// GetCallStackSearchTree gets the call stack index for a container -func (apc *ApplicationProfileCacheImpl) GetCallStackSearchTree(containerID string) *callstackcache.CallStackSearchTree { - if index, exist := apc.containerToCallStackIndex.Load(containerID); exist { - return index.searchTree - } - - return nil -} - -// getNamespaces retrieves all unique namespaces from the container info cache -func (apc *ApplicationProfileCacheImpl) getNamespaces() []string { - namespaceSet := mapset.NewSet[string]() - apc.containerIDToInfo.Range(func(_ string, info *ContainerInfo) bool { - namespaceSet.Add(info.Namespace) - return true - }) - return namespaceSet.ToSlice() -} - -// getContainerIDsForNamespace retrieves all container IDs for a given namespace -func (apc *ApplicationProfileCacheImpl) getContainerIDsForNamespace(namespace string) []string { - containerIDs := []string{} - apc.containerIDToInfo.Range(func(containerID string, info *ContainerInfo) bool { - if info.Namespace == namespace { - containerIDs = append(containerIDs, containerID) - } - return true - }) - return containerIDs -} - -// Ensure ApplicationProfileCacheImpl implements the ApplicationProfileCache interface -var _ objectcache.ApplicationProfileCache = (*ApplicationProfileCacheImpl)(nil) diff --git a/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go 
b/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go deleted file mode 100644 index 7ce56181c7..0000000000 --- a/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package applicationprofilecache - -import ( - "context" - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/config" - "github.com/kubescape/node-agent/pkg/storage" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// SpyProfileClient for testing pagination -type SpyProfileClient struct { - storage.ProfileClient - Profiles []v1beta1.ApplicationProfile - CallCount int -} - -func (m *SpyProfileClient) ListApplicationProfiles(namespace string, limit int64, cont string) (*v1beta1.ApplicationProfileList, error) { - m.CallCount++ - start := 0 - if cont != "" { - fmt.Sscanf(cont, "%d", &start) - } - - end := start + int(limit) - nextCont := "" - if end < len(m.Profiles) { - nextCont = fmt.Sprintf("%d", end) - } else { - end = len(m.Profiles) - } - - return &v1beta1.ApplicationProfileList{ - ListMeta: metav1.ListMeta{ - Continue: nextCont, - }, - Items: m.Profiles[start:end], - }, nil -} - -func (m *SpyProfileClient) GetApplicationProfile(namespace, name string) (*v1beta1.ApplicationProfile, error) { - // Return empty profile to avoid errors in update loop - return &v1beta1.ApplicationProfile{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: map[string]string{ - "kubescape.io/completion": "complete", - "kubescape.io/status": "completed", - }, - }, - }, nil -} - -func TestPagination(t *testing.T) { - totalProfiles := 120 - profiles := make([]v1beta1.ApplicationProfile, totalProfiles) - for i := 0; i < totalProfiles; i++ { - profiles[i] = v1beta1.ApplicationProfile{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("profile-%d", i), - Namespace: "default", - Annotations: map[string]string{ - "kubescape.io/completion": "complete", - "kubescape.io/status": "completed", - }, - Labels: map[string]string{ - "kubescape.io/wlid-template-hash": "hash", - }, - }, - } - } - - spy := &SpyProfileClient{Profiles: profiles} - - // mock k8s object cache is irrelevant since we inject container info directly - cache := NewApplicationProfileCache(config.Config{}, spy, nil) - - // Inject a container so that "default" namespace is processed. - // The WorkloadID needs to match something if we want deeper logic to run, - // but for pagination of ListApplicationProfiles, we just need to get past `getContainerIDsForNamespace` check. - // AND we need to simulate at least one container to trigger the list call. - cache.containerIDToInfo.Set("test-container", &ContainerInfo{ - Namespace: "default", - WorkloadID: "wlid", - }) - - // Call the private method - cache.updateAllProfiles(context.Background()) - - // We expect 3 calls: - // 1. 0-50, returns continue="50" - // 2. 50-100, returns continue="100" - // 3. 
100-120, returns continue="" - // (Implementation loop checks continueToken == "") - - if spy.CallCount != 3 { - t.Errorf("Expected 3 calls to ListApplicationProfiles, got %d", spy.CallCount) - } -} diff --git a/pkg/objectcache/applicationprofilecache_interface.go b/pkg/objectcache/applicationprofilecache_interface.go deleted file mode 100644 index 780efa23b4..0000000000 --- a/pkg/objectcache/applicationprofilecache_interface.go +++ /dev/null @@ -1,34 +0,0 @@ -package objectcache - -import ( - containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - "github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache/callstackcache" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -type ApplicationProfileCache interface { - GetApplicationProfile(containerID string) *v1beta1.ApplicationProfile - GetApplicationProfileState(containerID string) *ProfileState - GetCallStackSearchTree(containerID string) *callstackcache.CallStackSearchTree - ContainerCallback(notif containercollection.PubSubEvent) -} - -var _ ApplicationProfileCache = (*ApplicationProfileCacheMock)(nil) - -type ApplicationProfileCacheMock struct { -} - -func (ap *ApplicationProfileCacheMock) GetApplicationProfile(_ string) *v1beta1.ApplicationProfile { - return nil -} - -func (ap *ApplicationProfileCacheMock) GetCallStackSearchTree(_ string) *callstackcache.CallStackSearchTree { - return nil -} - -func (ap *ApplicationProfileCacheMock) ContainerCallback(_ containercollection.PubSubEvent) { -} - -func (ap *ApplicationProfileCacheMock) GetApplicationProfileState(_ string) *ProfileState { - return nil -} diff --git a/pkg/objectcache/applicationprofilecache/callstackcache/callstackcache.go b/pkg/objectcache/callstackcache/callstackcache.go similarity index 100% rename from pkg/objectcache/applicationprofilecache/callstackcache/callstackcache.go rename to pkg/objectcache/callstackcache/callstackcache.go diff --git a/pkg/objectcache/applicationprofilecache/callstackcache/callstackcache_test.go b/pkg/objectcache/callstackcache/callstackcache_test.go similarity index 100% rename from pkg/objectcache/applicationprofilecache/callstackcache/callstackcache_test.go rename to pkg/objectcache/callstackcache/callstackcache_test.go diff --git a/pkg/objectcache/containerprofilecache/containerprofilecache.go b/pkg/objectcache/containerprofilecache/containerprofilecache.go new file mode 100644 index 0000000000..c4788a0bbb --- /dev/null +++ b/pkg/objectcache/containerprofilecache/containerprofilecache.go @@ -0,0 +1,633 @@ +// Package containerprofilecache provides a unified, container-keyed cache for ContainerProfile objects. 
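+// It supersedes the per-kind legacy caches (such as the applicationprofilecache
+// package deleted in this change) with a single cache keyed by containerID.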
+package containerprofilecache + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/cenkalti/backoff/v5" + "github.com/goradd/maps" + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/exporters" + "github.com/kubescape/node-agent/pkg/metricsmanager" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/objectcache/callstackcache" + "github.com/kubescape/node-agent/pkg/resourcelocks" + "github.com/kubescape/node-agent/pkg/storage" + "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// defaultReconcileInterval is the fallback refresh cadence when +// config.ProfilesCacheRefreshRate is zero. +// defaultStorageRPCBudget is the per-call timeout applied by refreshRPC when +// config.StorageRPCBudget is zero. +const ( + defaultReconcileInterval = 30 * time.Second + defaultStorageRPCBudget = 5 * time.Second +) + +// namespacedName is a minimal identifier for a legacy user-authored CRD +// (ApplicationProfile / NetworkNeighborhood) overlaid on a ContainerProfile. +type namespacedName struct { + Namespace string + Name string +} + +// CachedContainerProfile is the per-container cache entry. One entry per live +// containerID, populated on ContainerCallback (Add) and removed on Remove. +// +// Profile may be the raw storage-fetched pointer (Shared=true, fast path) or +// a DeepCopy with user-authored AP/NN overlays merged in (Shared=false). +// entry.Profile is read-only once stored; storage.ProfileClient returns +// fresh-decoded objects per call (thin wrapper over client-go typed client) +// so shared aliasing is safe. +type CachedContainerProfile struct { + Profile *v1beta1.ContainerProfile + State *objectcache.ProfileState + CallStackTree *callstackcache.CallStackSearchTree + + ContainerName string + PodName string + Namespace string + PodUID string + WorkloadID string + + // UserAPRef / UserNNRef are set when the entry was built with a legacy + // user-authored AP/NN overlay. Used by the reconciler to re-fetch on + // refresh and to key deprecation warnings. + UserAPRef *namespacedName + UserNNRef *namespacedName + + // CPName is the storage name of the ContainerProfile. Populated at + // addContainer time so the reconciler can re-fetch without re-querying + // shared data (which may have been evicted from K8sObjectCache by then). + CPName string + + // WorkloadName is the per-workload slug used to fetch the workload-level + // ApplicationProfile / NetworkNeighborhood (primary data source while the + // storage-side consolidated CP isn't publicly queryable) and, with the + // "ug-" prefix, the user-managed AP/NN. Populated at addContainer time. 
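+ // (Both lookups reuse this one slug; only the "ug-" prefix differs
+ // between the workload-level and the user-managed fetch.)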
+ WorkloadName string + + Shared bool // true iff Profile is the shared storage-fetched pointer (read-only) + RV string // ContainerProfile resourceVersion at last load + UserManagedAPRV string // user-managed AP (ug-) RV at last projection, "" if absent + UserManagedNNRV string // user-managed NN (ug-) RV at last projection, "" if absent + UserAPRV string // user-AP (label-referenced) resourceVersion at last projection, "" if no overlay + UserNNRV string // user-NN (label-referenced) resourceVersion at last projection, "" if no overlay +} + +// pendingContainer captures the minimum state needed to retry the initial +// ContainerProfile GET when the CP is not yet in storage at addContainer time. +// The reconciler iterates pending each tick, re-issues the GET, and promotes +// the entry to `entries` on success. Component-tests regression (PR #788) +// showed the legacy periodic-scan path was load-bearing; this is its +// equivalent in the point-lookup model. +type pendingContainer struct { + container *containercollection.Container + sharedData *objectcache.WatchedContainerData + cpName string + workloadName string +} + +// ContainerProfileCacheImpl is the unified container-keyed cache for ContainerProfile objects. +type ContainerProfileCacheImpl struct { + cfg config.Config + entries maps.SafeMap[string, *CachedContainerProfile] + pending maps.SafeMap[string, *pendingContainer] + containerLocks *resourcelocks.ResourceLocks + storageClient storage.ProfileClient + k8sObjectCache objectcache.K8sObjectCache + metricsManager metricsmanager.MetricsManager + + // tamperAlertExporter receives R1016 "Signed profile tampered" alerts + // when a user-supplied AP/NN overlay fails signature verification. Set + // after construction via SetTamperAlertExporter; nil disables alerting. + tamperAlertExporter exporters.Exporter + + reconcileEvery time.Duration + rpcBudget time.Duration + refreshInProgress atomic.Bool + + // deprecationDedup tracks (kind|ns/name@rv) keys to emit one WARN log + // per legacy CRD resource-version across the process lifetime. + deprecationDedup sync.Map +} + +// NewContainerProfileCache creates a new ContainerProfileCacheImpl. +// metricsManager may be nil; internally we substitute a no-op so call sites +// don't need nil checks. +func NewContainerProfileCache(cfg config.Config, storageClient storage.ProfileClient, k8sObjectCache objectcache.K8sObjectCache, metricsManager metricsmanager.MetricsManager) *ContainerProfileCacheImpl { + reconcileEvery := utils.AddJitter(cfg.ProfilesCacheRefreshRate, 10) + if cfg.ProfilesCacheRefreshRate <= 0 { + reconcileEvery = defaultReconcileInterval + } + if metricsManager == nil { + metricsManager = metricsmanager.NewMetricsNoop() + } + rpcBudget := cfg.StorageRPCBudget + if rpcBudget <= 0 { + rpcBudget = defaultStorageRPCBudget + } + return &ContainerProfileCacheImpl{ + cfg: cfg, + containerLocks: resourcelocks.New(), + storageClient: storageClient, + k8sObjectCache: k8sObjectCache, + metricsManager: metricsManager, + reconcileEvery: reconcileEvery, + rpcBudget: rpcBudget, + } +} + +// refreshRPC calls fn with a context bounded by c.rpcBudget, enforcing a +// per-call SLO so a slow API server cannot stall a full reconciler burst. +func (c *ContainerProfileCacheImpl) refreshRPC(ctx context.Context, fn func(context.Context) error) error { + rpcCtx, cancel := context.WithTimeout(ctx, c.rpcBudget) + defer cancel() + return fn(rpcCtx) +} + +// Start begins the periodic reconciler goroutine. 
The loop evicts entries +// whose container is no longer Running and refreshes live entries' base CP + +// user AP/NN overlays. See reconciler.go for the tick loop and RPC-cost +// characterization. +func (c *ContainerProfileCacheImpl) Start(ctx context.Context) { + go c.tickLoop(ctx) +} + +// ContainerCallback handles container lifecycle events (add/remove). Mirrors +// the shape used by the legacy caches. +func (c *ContainerProfileCacheImpl) ContainerCallback(notif containercollection.PubSubEvent) { + isHost := utils.IsHostContainer(notif.Container) + namespace := notif.Container.K8s.Namespace + if isHost { + namespace = "host" + } + switch notif.Type { + case containercollection.EventTypeAddContainer: + if !isHost && c.cfg.IgnoreContainer(namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { + return + } + container := notif.Container + if isHost { + containerCopy := *notif.Container + containerCopy.K8s.Namespace = namespace + container = &containerCopy + } + go c.addContainerWithTimeout(container) + case containercollection.EventTypeRemoveContainer: + // Skip the ignore check on Remove: a container added before its pod + // labels matched the ignore filter would otherwise leak in the cache. + // The reconciler eviction path is the safety net, but a Remove event + // should always clean up regardless of current label state. + go c.deleteContainer(notif.Container.Runtime.ContainerID) + } +} + +// addContainerWithTimeout runs addContainer with a 10-minute cap to prevent +// a stuck storage client from wedging the callback goroutine. +func (c *ContainerProfileCacheImpl) addContainerWithTimeout(container *containercollection.Container) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + done := make(chan error, 1) + go func() { + done <- c.addContainer(container, ctx) + }() + + select { + case err := <-done: + if err != nil { + logger.L().Error("failed to add container to the container-profile cache", helpers.Error(err)) + } + case <-ctx.Done(): + logger.L().Error("timeout while adding container to the container-profile cache", + helpers.String("containerID", container.Runtime.ContainerID), + helpers.String("containerName", container.Runtime.ContainerName), + helpers.String("podName", container.K8s.PodName), + helpers.String("namespace", container.K8s.Namespace)) + } +} + +// addContainer builds and stores a cache entry for the container: fetches +// the ContainerProfile from storage, optionally fetches user-authored AP/NN +// CRDs, projects them onto a DeepCopy (or fast-paths via shared pointer), and +// builds the call-stack search tree. +func (c *ContainerProfileCacheImpl) addContainer(container *containercollection.Container, ctx context.Context) error { + containerID := container.Runtime.ContainerID + + return c.containerLocks.WithLockAndError(containerID, func() error { + sharedData, err := c.waitForSharedContainerData(containerID, ctx) + if err != nil { + logger.L().Error("failed to get shared data for container", + helpers.String("containerID", containerID), + helpers.Error(err)) + return err + } + + // Names we need: + // cpName = per-container stable slug, for the consolidated CP. + // Kept for forward-compat; current storage does not + // publish a queryable consolidated CP at this name, + // so we treat a 404 as "not yet". + // workloadName = per-workload stable slug, where the server-side + // aggregation publishes the ApplicationProfile and + // NetworkNeighborhood CRs. 
Legacy caches read these + // directly; the new cache does the same while the + // server-side consolidated-CP plumbing matures. + cpName, err := sharedData.InstanceID.GetSlug(false) + if err != nil { + logger.L().Error("failed to compute container profile slug", + helpers.String("containerID", containerID), + helpers.Error(err)) + return err + } + workloadName, err := sharedData.InstanceID.GetSlug(true) + if err != nil { + logger.L().Error("failed to compute workload profile slug", + helpers.String("containerID", containerID), + helpers.Error(err)) + return err + } + + if populated := c.tryPopulateEntry(ctx, containerID, container, sharedData, cpName, workloadName); !populated { + // No profile data available yet (neither consolidated CP nor + // workload AP/NN have landed in storage). Record a pending entry; + // the reconciler will retry each tick until data shows up or the + // container stops. This preserves the legacy periodic-scan + // recovery that kicked in when profiles were created after + // container-start. + c.pending.Set(containerID, &pendingContainer{ + container: container, + sharedData: sharedData, + cpName: cpName, + workloadName: workloadName, + }) + c.metricsManager.SetContainerProfileCacheEntries("pending", float64(c.pending.Len())) + } + return nil + }) +} + +// tryPopulateEntry issues the CP GET (plus any user-AP/NN overlay) and +// installs the cache entry on success. Returns true iff an entry was +// installed. Must be called while holding containerLocks.WithLock(id). +func (c *ContainerProfileCacheImpl) tryPopulateEntry( + ctx context.Context, + containerID string, + container *containercollection.Container, + sharedData *objectcache.WatchedContainerData, + cpName, workloadName string, +) bool { + ns := container.K8s.Namespace + + // Fetch consolidated ContainerProfile. The storage server aggregates the + // per-tick time-series CPs (written by containerprofilemanager at names + // ending in a random UUID suffix) into a consolidated CP at the stable + // name returned by GetSlug(false). Until that aggregation runs the Get + // returns 404 — we record pending and the reconciler retries on each + // tick. + var ( + cp *v1beta1.ContainerProfile + cpErr error + ) + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + cp, cpErr = c.storageClient.GetContainerProfile(rctx, ns, cpName) + return cpErr + }) + if cpErr != nil { + logger.L().Debug("ContainerProfile not yet available", + helpers.String("containerID", containerID), + helpers.String("namespace", ns), + helpers.String("name", cpName), + helpers.Error(cpErr)) + cp = nil + } + + // Fetch user-managed AP / NN published at "ug-". Legacy + // caches auto-detected these via the `kubescape.io/managed-by: User` + // annotation and merged them on top of the base profile; we read them + // directly by their well-known name instead, avoiding a List and an + // annotation filter. Both are optional: nil on 404. 
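+ // (Lookup names below are helpersv1.UserApplicationProfilePrefix+workloadName
+ // and helpersv1.UserNetworkNeighborhoodPrefix+workloadName, i.e. the
+ // "ug-<workloadName>" convention.)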
+ var userManagedAP *v1beta1.ApplicationProfile + var userManagedNN *v1beta1.NetworkNeighborhood + if workloadName != "" { + ugName := helpersv1.UserApplicationProfilePrefix + workloadName + var ugAPErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userManagedAP, ugAPErr = c.storageClient.GetApplicationProfile(rctx, ns, ugName) + return ugAPErr + }) + if ugAPErr != nil { + logger.L().Debug("user-managed ApplicationProfile not available", + helpers.String("containerID", containerID), + helpers.String("namespace", ns), + helpers.String("name", ugName), + helpers.Error(ugAPErr)) + userManagedAP = nil + } + ugNNName := helpersv1.UserNetworkNeighborhoodPrefix + workloadName + var ugNNErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userManagedNN, ugNNErr = c.storageClient.GetNetworkNeighborhood(rctx, ns, ugNNName) + return ugNNErr + }) + if ugNNErr != nil { + logger.L().Debug("user-managed NetworkNeighborhood not available", + helpers.String("containerID", containerID), + helpers.String("namespace", ns), + helpers.String("name", ugNNName), + helpers.Error(ugNNErr)) + userManagedNN = nil + } + } + + // Fix (reviewer #3): if the consolidated CP is still Partial and this + // container is not PreRunning (i.e. we saw it start fresh after the + // agent was already up), the partial view belongs to a PREVIOUS container + // incarnation. Legacy caches explicitly deleted such partials on restart + // so rule evaluation fell through to "no profile" until a new Full + // profile arrived. Mirror that: keep pending, retry each tick. + if !sharedData.PreRunningContainer { + if cp != nil && cp.Annotations[helpersv1.CompletionMetadataKey] == helpersv1.Partial { + cp = nil + } + } + + // Fetch user-authored legacy CRDs when the pod carries the + // UserDefinedProfileMetadataKey label. Fix (reviewer #2): fetch + // independently of the base-CP result, so a container that only has a + // user-defined profile still gets a cache entry. Recording the refs is + // gated on successful fetch here (otherwise the projection has no data + // to merge); the reconciler's refresh path re-fetches on each tick so + // transient failures are recovered. + var userAP *v1beta1.ApplicationProfile + var userNN *v1beta1.NetworkNeighborhood + overlayName, hasOverlay := container.K8s.PodLabels[helpersv1.UserDefinedProfileMetadataKey] + if hasOverlay && overlayName != "" { + var userAPErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userAP, userAPErr = c.storageClient.GetApplicationProfile(rctx, ns, overlayName) + return userAPErr + }) + if userAPErr != nil { + logger.L().Debug("user-defined ApplicationProfile not available", + helpers.String("containerID", containerID), + helpers.String("namespace", ns), + helpers.String("name", overlayName), + helpers.Error(userAPErr)) + userAP = nil + } + // Re-verify the user-supplied AP signature on every load. Emits + // R1016 if the profile is signed but tampered. Does not gate + // loading unless cfg.EnableSignatureVerification is true. 
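+ // (When EnableSignatureVerification is false, the verify helpers are
+ // expected to report the tamper alert but still return true, so the
+ // overlay below is kept.)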
+ if userAP != nil && !c.verifyUserApplicationProfile(userAP, containerID) { + userAP = nil + } + var userNNErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userNN, userNNErr = c.storageClient.GetNetworkNeighborhood(rctx, ns, overlayName) + return userNNErr + }) + if userNNErr != nil { + logger.L().Debug("user-defined NetworkNeighborhood not available", + helpers.String("containerID", containerID), + helpers.String("namespace", ns), + helpers.String("name", overlayName), + helpers.Error(userNNErr)) + userNN = nil + } + // Same tamper-check on the NN side. + if userNN != nil && !c.verifyUserNetworkNeighborhood(userNN, containerID) { + userNN = nil + } + } + + // Need SOMETHING to cache. If we have nothing, stay pending and retry. + if cp == nil && userManagedAP == nil && userManagedNN == nil && userAP == nil && userNN == nil { + return false + } + + // When no consolidated CP is available, synthesize an empty CP named + // after the workload so downstream state display is sensible. Projection + // below merges user-managed + user-defined overlay onto this base. + if cp == nil { + syntheticName := workloadName + if syntheticName == "" { + syntheticName = overlayName + } + cp = &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: syntheticName, + Namespace: ns, + Annotations: map[string]string{ + helpersv1.CompletionMetadataKey: helpersv1.Full, + helpersv1.StatusMetadataKey: helpersv1.Completed, + }, + }, + } + } + + pod := c.k8sObjectCache.GetPod(container.K8s.Namespace, container.K8s.PodName) + if pod == nil { + logger.L().Debug("pod not found in k8s cache; skipping pod-aware merge checks", + helpers.String("containerID", containerID), + helpers.String("namespace", container.K8s.Namespace), + helpers.String("podName", container.K8s.PodName)) + } + + // User-managed projection pass (published at the + // "ug-" well-known name). Legacy caches auto-merged these + // in handleUserManagedProfile after detecting the managed-by annotation; + // here we always union in whatever's published at the convention name. + // This is what Test_12_MergingProfilesTest / Test_13_MergingNetworkNeighborhoodTest + // exercise: rules must alert on events absent from the merged base+user-managed + // profile. + userManagedApplied := userManagedAP != nil || userManagedNN != nil + if userManagedApplied { + projected, warnings := projectUserProfiles(cp, userManagedAP, userManagedNN, pod, container.Runtime.ContainerName) + cp = projected + c.emitOverlayMetrics(userManagedAP, userManagedNN, warnings) + } + + entry := c.buildEntry(cp, userAP, userNN, pod, container, sharedData, userManagedApplied) + // Override CPName with the real consolidated-CP slug. buildEntry sets + // CPName from cp.Name, but when cp was synthesized above (no consolidated + // CP in storage yet), cp.Name is the workloadName/overlayName — NOT the + // GetSlug(false) name refreshOneEntry must GET. Without this override, + // refresh queries the synthetic name, always 404s, and the fast-skip + // keeps the synthetic entry forever (stored RV is "" == absent-match). + entry.CPName = cpName + // Fill in user-managed bookkeeping so refreshOneEntry can re-fetch these + // sources on every tick. WorkloadName is the "ug-" lookup prefix. 
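+ // (Lookup name on refresh = helpersv1.UserApplicationProfilePrefix +
+ // WorkloadName, matching the fetch in tryPopulateEntry above.)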
+ entry.WorkloadName = workloadName + if userManagedAP != nil { + entry.UserManagedAPRV = userManagedAP.ResourceVersion + } + if userManagedNN != nil { + entry.UserManagedNNRV = userManagedNN.ResourceVersion + } + + // Fix (reviewer #2): when the overlay label is set, record UserAPRef / + // UserNNRef even if the initial fetch failed. The refresh loop uses + // these refs to re-fetch on every tick; without them, a transient 404 + // at add time would permanently lose the overlay. + if hasOverlay && overlayName != "" { + if entry.UserAPRef == nil { + entry.UserAPRef = &namespacedName{Namespace: ns, Name: overlayName} + } + if entry.UserNNRef == nil { + entry.UserNNRef = &namespacedName{Namespace: ns, Name: overlayName} + } + } + + c.entries.Set(containerID, entry) + c.pending.Delete(containerID) + c.metricsManager.SetContainerProfileCacheEntries("container", float64(c.entries.Len())) + c.metricsManager.SetContainerProfileCacheEntries("pending", float64(c.pending.Len())) + + logger.L().Debug("ContainerProfileCache - container added", + helpers.String("containerID", containerID), + helpers.String("namespace", container.K8s.Namespace), + helpers.String("podName", container.K8s.PodName), + helpers.String("cpName", cpName), + helpers.String("shared", fmt.Sprintf("%v", entry.Shared))) + return true +} + +// buildEntry constructs a CachedContainerProfile, choosing the fast-path +// (shared pointer, no user overlay) or projection path (DeepCopy + merge). +func (c *ContainerProfileCacheImpl) buildEntry( + cp *v1beta1.ContainerProfile, + userAP *v1beta1.ApplicationProfile, + userNN *v1beta1.NetworkNeighborhood, + pod *corev1.Pod, + container *containercollection.Container, + sharedData *objectcache.WatchedContainerData, + userManagedApplied bool, +) *CachedContainerProfile { + entry := &CachedContainerProfile{ + ContainerName: container.Runtime.ContainerName, + PodName: container.K8s.PodName, + Namespace: container.K8s.Namespace, + WorkloadID: sharedData.Wlid + "/" + sharedData.InstanceID.GetTemplateHash(), + CPName: cp.Name, + RV: cp.ResourceVersion, + } + if pod != nil { + entry.PodUID = string(pod.UID) + } + + if userAP == nil && userNN == nil && !userManagedApplied { + // Fast path: share the storage-fetched pointer. Profile is the raw + // storage object — callers must not mutate it. + entry.Profile = cp + entry.Shared = true + } else { + projected, warnings := projectUserProfiles(cp, userAP, userNN, pod, container.Runtime.ContainerName) + entry.Profile = projected + entry.Shared = false + + if userAP != nil { + entry.UserAPRef = &namespacedName{Namespace: userAP.Namespace, Name: userAP.Name} + entry.UserAPRV = userAP.ResourceVersion + } + if userNN != nil { + entry.UserNNRef = &namespacedName{Namespace: userNN.Namespace, Name: userNN.Name} + entry.UserNNRV = userNN.ResourceVersion + } + + c.emitOverlayMetrics(userAP, userNN, warnings) + } + + // Build call-stack search tree from entry.Profile.Spec.IdentifiedCallStacks. + // Shared path: do not mutate the storage-fetched pointer; call stacks + // stay in the profile but are never read through Profile (only through + // CallStackTree). + tree := callstackcache.NewCallStackSearchTree() + for _, stack := range entry.Profile.Spec.IdentifiedCallStacks { + tree.AddCallStack(stack) + } + entry.CallStackTree = tree + + // ProfileState from CP annotations (Completion/Status) + Name. 
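+ // (Error stays nil on this path; GetContainerProfileState synthesizes an
+ // error state only when no entry exists at all.)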
+ entry.State = &objectcache.ProfileState{ + Completion: cp.Annotations[helpersv1.CompletionMetadataKey], + Status: cp.Annotations[helpersv1.StatusMetadataKey], + Name: cp.Name, + } + + return entry +} + +// deleteContainer removes a container entry. The per-container lock entry is +// intentionally NOT released: Phase-4 review flagged a race where a concurrent +// addContainer can hold a reference to the old mutex while a subsequent +// GetLock creates a new one, breaking mutual exclusion. Memory cost is bounded +// by the node's container-ID churn (live containers + recently-deleted), so +// keeping stale lock entries is cheaper than getting the atomic-release right. +func (c *ContainerProfileCacheImpl) deleteContainer(id string) { + c.containerLocks.WithLock(id, func() { + c.entries.Delete(id) + c.pending.Delete(id) + }) + c.metricsManager.SetContainerProfileCacheEntries("container", float64(c.entries.Len())) + c.metricsManager.SetContainerProfileCacheEntries("pending", float64(c.pending.Len())) +} + +// GetContainerProfile returns the cached ContainerProfile pointer for a +// container, or nil if there is no entry. Reports a cache-hit metric. +func (c *ContainerProfileCacheImpl) GetContainerProfile(containerID string) *v1beta1.ContainerProfile { + if entry, ok := c.entries.Load(containerID); ok && entry != nil && entry.Profile != nil { + c.metricsManager.ReportContainerProfileCacheHit(true) + return entry.Profile + } + c.metricsManager.ReportContainerProfileCacheHit(false) + return nil +} + +// GetContainerProfileState returns the cached ProfileState for a container +// (completion/status/name). Returns a synthetic error state when the entry +// is missing. +func (c *ContainerProfileCacheImpl) GetContainerProfileState(containerID string) *objectcache.ProfileState { + if entry, ok := c.entries.Load(containerID); ok && entry != nil && entry.State != nil { + return entry.State + } + return &objectcache.ProfileState{ + Error: fmt.Errorf("container %s not found in container-profile cache", containerID), + } +} + +// GetCallStackSearchTree returns the cached call-stack index for a container, +// or nil if there is no entry or no tree. +func (c *ContainerProfileCacheImpl) GetCallStackSearchTree(containerID string) *callstackcache.CallStackSearchTree { + if entry, ok := c.entries.Load(containerID); ok && entry != nil { + return entry.CallStackTree + } + return nil +} + +// waitForSharedContainerData blocks until K8sObjectCache has shared data for +// the container (populated by containerwatcher) or ctx expires. +func (c *ContainerProfileCacheImpl) waitForSharedContainerData(containerID string, ctx context.Context) (*objectcache.WatchedContainerData, error) { + return backoff.Retry(ctx, func() (*objectcache.WatchedContainerData, error) { + if sharedData := c.k8sObjectCache.GetSharedContainerData(containerID); sharedData != nil { + return sharedData, nil + } + return nil, fmt.Errorf("container %s not found in shared data", containerID) + }, backoff.WithBackOff(backoff.NewExponentialBackOff())) +} + +// Ensure ContainerProfileCacheImpl implements the ContainerProfileCache interface. 
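+// (Compile-time assertion: the build fails here if the type stops
+// satisfying objectcache.ContainerProfileCache.)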
+var _ objectcache.ContainerProfileCache = (*ContainerProfileCacheImpl)(nil) diff --git a/pkg/objectcache/containerprofilecache/containerprofilecache_test.go b/pkg/objectcache/containerprofilecache/containerprofilecache_test.go new file mode 100644 index 0000000000..1cf039391d --- /dev/null +++ b/pkg/objectcache/containerprofilecache/containerprofilecache_test.go @@ -0,0 +1,331 @@ +package containerprofilecache + +import ( + "context" + "errors" + "testing" + "time" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + instanceidhandlerV1 "github.com/kubescape/k8s-interface/instanceidhandler/v1" + helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/storage" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// fakeProfileClient is a minimal storage.ProfileClient stub for tests. It +// always returns the same CP pointer (so the fast-path can be asserted via +// pointer equality). +type fakeProfileClient struct { + cp *v1beta1.ContainerProfile + ap *v1beta1.ApplicationProfile // returned for Get by ap.Name match (or any if overlayOnly is empty) + nn *v1beta1.NetworkNeighborhood + cpErr error + apErr error + nnErr error + + // userManagedAP / userManagedNN, when non-nil, are returned for any + // GetApplicationProfile / GetNetworkNeighborhood whose name starts with + // the "ug-" prefix (the convention used by legacy user-managed profiles). + // This lets tests exercise the user-managed merge path added for + // Test_12_MergingProfilesTest / Test_13_MergingNetworkNeighborhoodTest + // without fighting the overlayOnly restriction. + userManagedAP *v1beta1.ApplicationProfile + userManagedNN *v1beta1.NetworkNeighborhood + + // overlayOnly, if non-empty, restricts ap/nn returns to only the given + // name; other names return (nil, nil). Tests that mix workload-AP/NN + // with overlay-AP/NN use this to keep the fixture scoped. 
+ overlayOnly string + + getCPCalls int +} + +var _ storage.ProfileClient = (*fakeProfileClient)(nil) + +func (f *fakeProfileClient) GetApplicationProfile(_ context.Context, _, name string) (*v1beta1.ApplicationProfile, error) { + if len(name) >= 3 && name[:3] == helpersv1.UserApplicationProfilePrefix { + return f.userManagedAP, nil + } + if f.overlayOnly != "" && name != f.overlayOnly { + return nil, nil + } + return f.ap, f.apErr +} +func (f *fakeProfileClient) GetNetworkNeighborhood(_ context.Context, _, name string) (*v1beta1.NetworkNeighborhood, error) { + if len(name) >= 3 && name[:3] == helpersv1.UserNetworkNeighborhoodPrefix { + return f.userManagedNN, nil + } + if f.overlayOnly != "" && name != f.overlayOnly { + return nil, nil + } + return f.nn, f.nnErr +} +func (f *fakeProfileClient) GetContainerProfile(_ context.Context, _, _ string) (*v1beta1.ContainerProfile, error) { + f.getCPCalls++ + return f.cp, f.cpErr +} +func (f *fakeProfileClient) ListApplicationProfiles(_ context.Context, _ string, _ int64, _ string) (*v1beta1.ApplicationProfileList, error) { + return &v1beta1.ApplicationProfileList{}, nil +} +func (f *fakeProfileClient) ListNetworkNeighborhoods(_ context.Context, _ string, _ int64, _ string) (*v1beta1.NetworkNeighborhoodList, error) { + return &v1beta1.NetworkNeighborhoodList{}, nil +} + +// newTestCache returns a cache wired with an in-memory K8sObjectCacheMock. +func newTestCache(t *testing.T, client storage.ProfileClient) (*ContainerProfileCacheImpl, *objectcache.K8sObjectCacheMock) { + t.Helper() + k8s := &objectcache.K8sObjectCacheMock{} + cfg := config.Config{ProfilesCacheRefreshRate: 30 * time.Second} + return NewContainerProfileCache(cfg, client, k8s, nil), k8s +} + +// primeSharedData stashes a WatchedContainerData so waitForSharedContainerData +// resolves instantly. It builds a real InstanceID from a pod because the cache +// code calls .GetOneTimeSlug and .GetTemplateHash on it. +func primeSharedData(t *testing.T, k8s *objectcache.K8sObjectCacheMock, containerID, wlid string) { + t.Helper() + ids, err := instanceidhandlerV1.GenerateInstanceIDFromPod(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-abc", Namespace: "default"}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "nginx", Image: "nginx:1.25"}}, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{Name: "nginx", ImageID: "sha256:deadbeef"}}, + }, + }) + require.NoError(t, err) + require.NotEmpty(t, ids) + k8s.SetSharedContainerData(containerID, &objectcache.WatchedContainerData{ + InstanceID: ids[0], + Wlid: wlid, + }) +} + +// eventContainer returns a minimal *containercollection.Container. +func eventContainer(id string) *containercollection.Container { + return &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{BasicRuntimeMetadata: eventtypes.BasicRuntimeMetadata{ + ContainerID: id, + ContainerName: "nginx", + ContainerPID: 42, + }}, + K8s: containercollection.K8sMetadata{BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + Namespace: "default", + PodName: "nginx-abc", + }}, + } +} + +// TestSharedFastPath_NoOverlay verifies that two separate add calls for the +// same CP yield entries that share the very same *ContainerProfile pointer. 
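+// Pointer identity (assert.Same) is the strictest check here: it proves the
+// fast path performed no DeepCopy, which is what keeps memory flat when many
+// containers resolve to the same CP.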
+func TestSharedFastPath_NoOverlay(t *testing.T) {
+	cp := &v1beta1.ContainerProfile{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "cp-shared",
+			Namespace: "default",
+			ResourceVersion: "7",
+			Annotations: map[string]string{
+				helpersv1.CompletionMetadataKey: helpersv1.Full,
+				helpersv1.StatusMetadataKey: helpersv1.Completed,
+			},
+		},
+		Spec: v1beta1.ContainerProfileSpec{
+			Capabilities: []string{"NET_ADMIN"},
+		},
+	}
+	client := &fakeProfileClient{cp: cp}
+	c, k8s := newTestCache(t, client)
+
+	ids := []string{"container-id-A", "container-id-B"}
+	for _, id := range ids {
+		primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx")
+		require.NoError(t, c.addContainer(eventContainer(id), context.Background()))
+	}
+
+	entryA, okA := c.entries.Load(ids[0])
+	entryB, okB := c.entries.Load(ids[1])
+	require.True(t, okA)
+	require.True(t, okB)
+	assert.True(t, entryA.Shared, "fast path must mark entry Shared=true")
+	assert.True(t, entryB.Shared, "fast path must mark entry Shared=true")
+	assert.Same(t, entryA.Profile, entryB.Profile, "both entries must share the same storage-fetched pointer")
+	assert.Same(t, cp, entryA.Profile, "fast path must not DeepCopy")
+}
+
+// TestOverlayPath_DeepCopies verifies that when userAP is present we build a
+// distinct DeepCopy (pointer inequality with the storage-fetched cp) and mark
+// Shared=false.
+func TestOverlayPath_DeepCopies(t *testing.T) {
+	cp := &v1beta1.ContainerProfile{
+		ObjectMeta: metav1.ObjectMeta{Name: "cp-1", Namespace: "default", ResourceVersion: "1"},
+		Spec: v1beta1.ContainerProfileSpec{Capabilities: []string{"SYS_PTRACE"}},
+	}
+	userAP := &v1beta1.ApplicationProfile{
+		ObjectMeta: metav1.ObjectMeta{Name: "override", Namespace: "default", ResourceVersion: "u1"},
+		Spec: v1beta1.ApplicationProfileSpec{
+			Containers: []v1beta1.ApplicationProfileContainer{{
+				Name: "nginx",
+				Capabilities: []string{"NET_BIND_SERVICE"},
+			}},
+		},
+	}
+	client := &fakeProfileClient{cp: cp, ap: userAP, overlayOnly: "override"}
+	c, k8s := newTestCache(t, client)
+
+	id := "container-overlay"
+	primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx")
+
+	ev := eventContainer(id)
+	ev.K8s.PodLabels = map[string]string{helpersv1.UserDefinedProfileMetadataKey: "override"}
+	require.NoError(t, c.addContainer(ev, context.Background()))
+
+	entry, ok := c.entries.Load(id)
+	require.True(t, ok)
+	assert.False(t, entry.Shared, "overlay path must mark Shared=false")
+	assert.NotSame(t, cp, entry.Profile, "overlay path must DeepCopy, not share")
+	// Merged caps: base + user
+	assert.ElementsMatch(t, []string{"SYS_PTRACE", "NET_BIND_SERVICE"}, entry.Profile.Spec.Capabilities)
+	require.NotNil(t, entry.UserAPRef)
+	assert.Equal(t, "override", entry.UserAPRef.Name)
+	assert.Equal(t, "u1", entry.UserAPRV)
+}
+
+// TestDeleteContainer_LockAndCleanup verifies that deleteContainer removes
+// the entry while intentionally retaining the per-container lock (see the
+// deleteContainer doc comment for the Phase-4 rationale).
+func TestDeleteContainer_LockAndCleanup(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp-delete", Namespace: "default", ResourceVersion: "1"}, + } + client := &fakeProfileClient{cp: cp} + c, k8s := newTestCache(t, client) + + id := "container-delete" + primeSharedData(t, k8s, id, "wlid://x") + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + require.True(t, c.containerLocks.HasLock(id), "lock should exist after add") + require.NotNil(t, c.GetContainerProfile(id)) + + c.deleteContainer(id) + assert.Nil(t, c.GetContainerProfile(id), "entry must be gone after delete") + // Phase-4 review fix: deleteContainer intentionally does NOT release the + // lock to avoid a race where a concurrent addContainer could hold a + // reference to a mutex that another caller re-creates after Delete. + // Memory cost is bounded by live+recently-deleted container IDs. + assert.True(t, c.containerLocks.HasLock(id), "lock is retained by design after delete") +} + +// TestContainerCallback_IgnoredContainer verifies IgnoreContainer short-circuits +// before any storage call is issued. +func TestContainerCallback_IgnoredContainer(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}} + client := &fakeProfileClient{cp: cp} + c, _ := newTestCache(t, client) + c.cfg.ExcludeNamespaces = []string{"kube-system"} + + ev := containercollection.PubSubEvent{ + Type: containercollection.EventTypeAddContainer, + Container: &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{BasicRuntimeMetadata: eventtypes.BasicRuntimeMetadata{ + ContainerID: "ignored", ContainerPID: 42, ContainerName: "c", + }}, + K8s: containercollection.K8sMetadata{BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + Namespace: "kube-system", PodName: "p", + }}, + }, + } + c.ContainerCallback(ev) + // Allow any mistakenly-spawned goroutine a brief window — none should run. + time.Sleep(20 * time.Millisecond) + assert.Equal(t, 0, client.getCPCalls, "IgnoreContainer must short-circuit before any storage call") +} + +// TestContainerCallback_HostContainer verifies that host containers do NOT +// trigger IgnoreContainer even when their namespace is in ExcludeNamespaces +// (host events carry namespace="host" after override, not the original one). +func TestContainerCallback_HostContainer(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "host", ResourceVersion: "1"}} + client := &fakeProfileClient{cp: cp} + c, _ := newTestCache(t, client) + // Even with every namespace excluded, host containers bypass the check. + c.cfg.ExcludeNamespaces = []string{"default", "host"} + + hostContainer := &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{BasicRuntimeMetadata: eventtypes.BasicRuntimeMetadata{ + ContainerID: "host-c", ContainerPID: 1, ContainerName: "host", + }}, + K8s: containercollection.K8sMetadata{BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + Namespace: "default", PodName: "", + }}, + } + c.ContainerCallback(containercollection.PubSubEvent{Type: containercollection.EventTypeAddContainer, Container: hostContainer}) + // The callback dispatches a goroutine that will stall on backoff (no + // shared data is primed) — we only assert the callback returns without + // panic and did not short-circuit on IgnoreContainer. 
We cannot assert + // storage was called without racing the backoff; just confirm no panic. + time.Sleep(20 * time.Millisecond) +} + +// TestCallStackIndexBuiltFromProfile verifies that the call-stack tree is +// populated from CP.Spec.IdentifiedCallStacks and retrievable via +// GetCallStackSearchTree. +func TestCallStackIndexBuiltFromProfile(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp-stack", Namespace: "default", ResourceVersion: "1"}, + Spec: v1beta1.ContainerProfileSpec{ + IdentifiedCallStacks: []v1beta1.IdentifiedCallStack{ + { + CallID: "r1", + CallStack: v1beta1.CallStack{Root: v1beta1.CallStackNode{ + Frame: v1beta1.StackFrame{FileID: "f1", Lineno: "10"}, + Children: []v1beta1.CallStackNode{ + {Frame: v1beta1.StackFrame{FileID: "f2", Lineno: "20"}}, + }, + }}, + }, + }, + }, + } + client := &fakeProfileClient{cp: cp} + c, k8s := newTestCache(t, client) + + id := "c-stack" + primeSharedData(t, k8s, id, "wlid://x") + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + + tree := c.GetCallStackSearchTree(id) + require.NotNil(t, tree) + require.NotNil(t, tree.PathsByCallID) + _, hasCallID := tree.PathsByCallID["r1"] + assert.True(t, hasCallID, "call-stack tree must contain CallID 'r1' from CP") +} + +// TestGetContainerProfile_Miss sanity-checks the nil path returns nil and a +// synthetic error ProfileState (no panic). +func TestGetContainerProfile_Miss(t *testing.T) { + c, _ := newTestCache(t, &fakeProfileClient{}) + assert.Nil(t, c.GetContainerProfile("nope")) + state := c.GetContainerProfileState("nope") + require.NotNil(t, state) + require.Error(t, state.Error) +} + +// TestStorageError_NoEntry ensures storage errors don't panic and don't +// populate a cache entry. +func TestStorageError_NoEntry(t *testing.T) { + client := &fakeProfileClient{cpErr: errors.New("kaboom")} + c, k8s := newTestCache(t, client) + id := "c-err" + primeSharedData(t, k8s, id, "wlid://x") + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + _, ok := c.entries.Load(id) + assert.False(t, ok, "storage error must not create a cache entry") +} diff --git a/pkg/objectcache/containerprofilecache/export_test.go b/pkg/objectcache/containerprofilecache/export_test.go new file mode 100644 index 0000000000..c5277665c0 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/export_test.go @@ -0,0 +1,50 @@ +package containerprofilecache + +// export_test.go exposes internal symbols to the containerprofilecache_test +// package (the *_test.go files in this directory). Compiled only during +// `go test`; never included in the production binary. + +import "context" + +func (c *ContainerProfileCacheImpl) ReconcileOnce(ctx context.Context) { + c.reconcileOnce(ctx) +} + +func (c *ContainerProfileCacheImpl) SeedEntryForTest(containerID string, entry *CachedContainerProfile) { + c.entries.Set(containerID, entry) +} + +func (c *ContainerProfileCacheImpl) RefreshAllEntriesForTest(ctx context.Context) { + c.refreshAllEntries(ctx) +} + +// WarmContainerLocksForTest acquires and immediately releases each container +// lock, initialising the internal SafeMap before the concurrent phase to avoid +// the goradd/maps nil-check-before-lock initialisation race (SafeMap v1.3.0). 
+func (c *ContainerProfileCacheImpl) WarmContainerLocksForTest(ids []string) { + for _, id := range ids { + c.containerLocks.WithLock(id, func() {}) + } +} + +// WarmPendingForTest initialises the pending SafeMap via a Set+Delete cycle +// for each id, preventing the goradd/maps nil-check-before-lock race in +// SafeMap.Len / SafeMap.Delete during concurrent test phases. +func (c *ContainerProfileCacheImpl) WarmPendingForTest(ids []string) { + for _, id := range ids { + c.pending.Set(id, nil) + c.pending.Delete(id) + } +} + +// SeedEntryWithOverlayForTest seeds an entry with user AP and NN overlay refs. +// Pass empty strings to leave a ref nil. +func (c *ContainerProfileCacheImpl) SeedEntryWithOverlayForTest(containerID string, entry *CachedContainerProfile, apNS, apName, nnNS, nnName string) { + if apName != "" { + entry.UserAPRef = &namespacedName{Namespace: apNS, Name: apName} + } + if nnName != "" { + entry.UserNNRef = &namespacedName{Namespace: nnNS, Name: nnName} + } + c.entries.Set(containerID, entry) +} diff --git a/pkg/objectcache/containerprofilecache/init_eviction_test.go b/pkg/objectcache/containerprofilecache/init_eviction_test.go new file mode 100644 index 0000000000..b7f3535603 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/init_eviction_test.go @@ -0,0 +1,154 @@ +package containerprofilecache_test + +import ( + "context" + "testing" + "time" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + cpc "github.com/kubescape/node-agent/pkg/objectcache/containerprofilecache" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +// newCPCForEvictionTest wires up a ContainerProfileCacheImpl with the provided +// storage and k8s stubs for eviction testing. Start is NOT called so the +// reconciler goroutine never runs — tests drive ReconcileOnce directly. +func newCPCForEvictionTest(storage *stubStorage, k8s *stubK8sCache) *cpc.ContainerProfileCacheImpl { + cfg := config.Config{ProfilesCacheRefreshRate: 30 * time.Second} + return cpc.NewContainerProfileCache(cfg, storage, k8s, nil) +} + +// seedEntry builds and seeds a minimal CachedContainerProfile into the cache +// using the exported SeedEntryForTest hook. +func seedEntry(cache *cpc.ContainerProfileCacheImpl, containerID string, cp *v1beta1.ContainerProfile, containerName, podName, namespace, podUID string) { + entry := &cpc.CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: containerName, + PodName: podName, + Namespace: namespace, + PodUID: podUID, + CPName: cp.Name, + RV: cp.ResourceVersion, + Shared: true, + } + cache.SeedEntryForTest(containerID, entry) +} + +// TestInitContainerEvictionViaRemoveEvent — T2a. +// +// Pod has 1 init container (initID) + 1 regular container (regID), both seeded +// into the cache. Fire EventTypeRemoveContainer for the init container via +// ContainerCallback. Assert that the init entry is evicted and the regular +// entry is untouched. 
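+// Both entries are seeded Shared=true against the same CP object, mirroring
+// the production fast path.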
+func TestInitContainerEvictionViaRemoveEvent(t *testing.T) {
+	const (
+		namespace = "default"
+		podName = "testpod"
+		initID = "init-container-id"
+		regID = "regular-container-id"
+		initName = "init-container"
+		regularName = "regular"
+		podUID = "pod-uid-t2a"
+	)
+
+	cp := &v1beta1.ContainerProfile{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "cp-test",
+			Namespace: namespace,
+			ResourceVersion: "1",
+		},
+	}
+	store := newFakeStorage(cp)
+	k8s := newFakeK8sCache()
+	cache := newCPCForEvictionTest(store, k8s)
+
+	// Seed both containers directly — no goroutines, no races.
+	seedEntry(cache, initID, cp, initName, podName, namespace, podUID)
+	seedEntry(cache, regID, cp, regularName, podName, namespace, podUID)
+
+	assert.NotNil(t, cache.GetContainerProfile(initID), "init container must be cached before eviction")
+	assert.NotNil(t, cache.GetContainerProfile(regID), "regular container must be cached before eviction")
+
+	// Fire remove event for init container only. deleteContainer runs in a
+	// goroutine; wait for it to complete.
+	cache.ContainerCallback(containercollection.PubSubEvent{
+		Type: containercollection.EventTypeRemoveContainer,
+		Container: makeTestContainer(initID, podName, namespace, initName),
+	})
+
+	// The deleteContainer goroutine is very fast: just map deletes under the
+	// per-container lock (the lock itself is retained by design).
+	assert.Eventually(t, func() bool {
+		return cache.GetContainerProfile(initID) == nil
+	}, 3*time.Second, 10*time.Millisecond, "init container entry must be evicted after RemoveContainer event")
+
+	// Regular container must survive.
+	assert.NotNil(t, cache.GetContainerProfile(regID), "regular container entry must remain after init eviction")
+}
+
+// TestMissedRemoveEventEvictedByReconciler — T2b.
+//
+// Init container entry is seeded directly. Pod status is then flipped so the
+// init container is no longer Running (simulating it finishing without a remove
+// event). ReconcileOnce must evict the stale entry.
+func TestMissedRemoveEventEvictedByReconciler(t *testing.T) {
+	const (
+		namespace = "default"
+		podName = "testpod-reconcile"
+		initID = "init-container-reconcile"
+		initName = "init-container"
+		podUID = "pod-uid-reconcile"
+	)
+
+	cp := &v1beta1.ContainerProfile{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "cp-reconcile",
+			Namespace: namespace,
+			ResourceVersion: "1",
+		},
+	}
+	store := newFakeStorage(cp)
+	k8s := newFakeK8sCache()
+
+	// Start: pod shows init container Running.
+	runningPod := makeTestPod(podName, namespace, podUID,
+		nil,
+		[]corev1.ContainerStatus{{
+			Name: initName,
+			ContainerID: "containerd://" + initID,
+			State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}},
+		}},
+	)
+	k8s.setPod(namespace, podName, runningPod)
+
+	cache := newCPCForEvictionTest(store, k8s)
+
+	// Seed init container entry directly.
+	seedEntry(cache, initID, cp, initName, podName, namespace, podUID)
+	assert.NotNil(t, cache.GetContainerProfile(initID), "init container must be seeded before reconciler test")
+
+	// Simulate init container finishing: flip status to Terminated, no remove event.
+	terminatedPod := makeTestPod(podName, namespace, podUID,
+		nil,
+		[]corev1.ContainerStatus{{
+			Name: initName,
+			ContainerID: "containerd://" + initID,
+			State: corev1.ContainerState{
+				Terminated: &corev1.ContainerStateTerminated{ExitCode: 0},
+			},
+		}},
+	)
+	k8s.setPod(namespace, podName, terminatedPod)
+
+	// Drive the reconciler directly — no tick loop running, no goroutines.
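+	// (ReconcileOnce is the export_test.go wrapper around the unexported
+	// reconcileOnce.)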
+ cache.ReconcileOnce(context.Background()) + + assert.Nil(t, cache.GetContainerProfile(initID), + "reconciler must evict init container entry when pod status shows Terminated") +} diff --git a/pkg/objectcache/containerprofilecache/integration_helpers_test.go b/pkg/objectcache/containerprofilecache/integration_helpers_test.go new file mode 100644 index 0000000000..4965f0c732 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/integration_helpers_test.go @@ -0,0 +1,143 @@ +// Integration/acceptance tests for the ContainerProfile cache unification +// (plan v2 §2.7 + §2.8 step 9). Shared test helpers for this package. +package containerprofilecache_test + +import ( + "context" + "sync" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/storage" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// makeTestContainer builds a minimal *containercollection.Container for use +// in ContainerCallback events. +func makeTestContainer(id, podName, namespace, containerName string) *containercollection.Container { + return &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{ + BasicRuntimeMetadata: eventtypes.BasicRuntimeMetadata{ + ContainerID: id, + ContainerName: containerName, + ContainerPID: 42, + }, + }, + K8s: containercollection.K8sMetadata{ + BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + Namespace: namespace, + PodName: podName, + }, + }, + } +} + +// makeTestPod builds a *corev1.Pod with the provided container statuses. +func makeTestPod(name, namespace, uid string, containerStatuses []corev1.ContainerStatus, initStatuses []corev1.ContainerStatus) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID(uid), + }, + Status: corev1.PodStatus{ + ContainerStatuses: containerStatuses, + InitContainerStatuses: initStatuses, + }, + } +} + +// stubStorage is a minimal storage.ProfileClient stub with settable responses. 
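+// Every getter takes the read lock, so the lock-stress test can hit it from
+// many goroutines concurrently.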
+type stubStorage struct { + mu sync.RWMutex + cp *v1beta1.ContainerProfile + ap *v1beta1.ApplicationProfile + nn *v1beta1.NetworkNeighborhood +} + +var _ storage.ProfileClient = (*stubStorage)(nil) + +func newFakeStorage(cp *v1beta1.ContainerProfile) *stubStorage { + return &stubStorage{cp: cp} +} + +func (s *stubStorage) GetContainerProfile(_ context.Context, _, _ string) (*v1beta1.ContainerProfile, error) { + s.mu.RLock() + defer s.mu.RUnlock() + return s.cp, nil +} + +func (s *stubStorage) GetApplicationProfile(_ context.Context, _, _ string) (*v1beta1.ApplicationProfile, error) { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ap, nil +} + +func (s *stubStorage) GetNetworkNeighborhood(_ context.Context, _, _ string) (*v1beta1.NetworkNeighborhood, error) { + s.mu.RLock() + defer s.mu.RUnlock() + return s.nn, nil +} + +func (s *stubStorage) ListApplicationProfiles(_ context.Context, _ string, _ int64, _ string) (*v1beta1.ApplicationProfileList, error) { + return &v1beta1.ApplicationProfileList{}, nil +} + +func (s *stubStorage) ListNetworkNeighborhoods(_ context.Context, _ string, _ int64, _ string) (*v1beta1.NetworkNeighborhoodList, error) { + return &v1beta1.NetworkNeighborhoodList{}, nil +} + +// stubK8sCache is a controllable K8sObjectCache stub. +type stubK8sCache struct { + mu sync.RWMutex + pods map[string]*corev1.Pod + data map[string]*objectcache.WatchedContainerData +} + +var _ objectcache.K8sObjectCache = (*stubK8sCache)(nil) + +func newFakeK8sCache() *stubK8sCache { + return &stubK8sCache{ + pods: make(map[string]*corev1.Pod), + data: make(map[string]*objectcache.WatchedContainerData), + } +} + +func (k *stubK8sCache) setPod(namespace, podName string, pod *corev1.Pod) { + k.mu.Lock() + defer k.mu.Unlock() + k.pods[namespace+"/"+podName] = pod +} + +func (k *stubK8sCache) GetPod(namespace, podName string) *corev1.Pod { + k.mu.RLock() + defer k.mu.RUnlock() + return k.pods[namespace+"/"+podName] +} + +func (k *stubK8sCache) GetPodSpec(_, _ string) *corev1.PodSpec { return nil } +func (k *stubK8sCache) GetPodStatus(_, _ string) *corev1.PodStatus { return nil } +func (k *stubK8sCache) GetApiServerIpAddress() string { return "" } +func (k *stubK8sCache) GetPods() []*corev1.Pod { return nil } + +func (k *stubK8sCache) SetSharedContainerData(id string, d *objectcache.WatchedContainerData) { + k.mu.Lock() + defer k.mu.Unlock() + k.data[id] = d +} + +func (k *stubK8sCache) GetSharedContainerData(id string) *objectcache.WatchedContainerData { + k.mu.RLock() + defer k.mu.RUnlock() + return k.data[id] +} + +func (k *stubK8sCache) DeleteSharedContainerData(id string) { + k.mu.Lock() + defer k.mu.Unlock() + delete(k.data, id) +} diff --git a/pkg/objectcache/containerprofilecache/lock_stress_test.go b/pkg/objectcache/containerprofilecache/lock_stress_test.go new file mode 100644 index 0000000000..d690b94cf7 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/lock_stress_test.go @@ -0,0 +1,200 @@ +package containerprofilecache_test + +import ( + "context" + "math/rand" + "runtime" + "sync" + "testing" + "time" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + cpc "github.com/kubescape/node-agent/pkg/objectcache/containerprofilecache" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + instanceidhandlerV1 
"github.com/kubescape/k8s-interface/instanceidhandler/v1" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +// TestLockStressAddEvictInterleaved — T7. +// +// 100 goroutines, each running 50 iterations of random seed/delete for a pool +// of 10 container IDs. Uses SeedEntryForTest + deleteContainer (via +// EventTypeRemoveContainer → deleteContainer path) to test the cache's +// per-container locking under concurrent interleaved add/evict. +// +// NOTE on race detector: goradd/maps v1.3.0 has a pre-existing data race in +// SafeMap.Load / SafeMap.Len (nil-check outside the read-lock vs Set +// initialization write). This race is present in pkg/resourcelocks own tests +// (TestConcurrentMultipleContainers fails with -race even before this commit). +// To avoid triggering that upstream race, all SafeMap instances are +// pre-warmed (via SeedEntryForTest) before the concurrent phase starts. +func TestLockStressAddEvictInterleaved(t *testing.T) { + const ( + namespace = "default" + podName = "stress-pod" + podUID = "stress-pod-uid" + numWorkers = 100 + numIters = 50 + poolSize = 10 + wlid = "wlid://cluster-test/namespace-default/deployment-stress" + ) + + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-stress", + Namespace: namespace, + ResourceVersion: "1", + }, + } + store := newFakeStorage(cp) + k8s := newFakeK8sCache() + + // Prime shared data for each container in the pool so that the internal + // waitForSharedContainerData path resolves if needed. + containerIDs := make([]string, poolSize) + for i := 0; i < poolSize; i++ { + id := "stress-container-" + itoa3(i) + containerIDs[i] = id + primeSharedDataForStress(t, k8s, id, podName, namespace, "container-"+itoa3(i), wlid) + } + + cfg := config.Config{ProfilesCacheRefreshRate: 30 * time.Second} + // Start is NOT called — no background reconciler goroutine runs. + cache := cpc.NewContainerProfileCache(cfg, store, k8s, nil) + + // Pre-warm all internal SafeMap instances before the concurrent phase to + // avoid triggering the goradd/maps nil-check-before-lock initialization + // race (pre-existing upstream bug in SafeMap.Load / SafeMap.Len). + // WarmContainerLocksForTest pre-initialises the containerLocks SafeMap; + // SeedEntryForTest pre-initialises the entries SafeMap; + // WarmPendingForTest pre-initialises the pending SafeMap (touched by + // deleteContainer via ContainerCallback(EventTypeRemoveContainer)). + cache.WarmContainerLocksForTest(containerIDs) + cache.WarmPendingForTest(containerIDs) + for _, id := range containerIDs { + cache.SeedEntryForTest(id, &cpc.CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "container", + PodName: podName, + Namespace: namespace, + PodUID: podUID, + CPName: cp.Name, + RV: cp.ResourceVersion, + Shared: true, + }) + } + + baseline := runtime.NumGoroutine() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + var wg sync.WaitGroup + wg.Add(numWorkers) + for w := 0; w < numWorkers; w++ { + go func(worker int) { + defer wg.Done() + r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(worker))) + for iter := 0; iter < numIters; iter++ { + if ctx.Err() != nil { + return + } + id := containerIDs[r.Intn(poolSize)] + if r.Intn(2) == 0 { + // Add path: seed entry directly (no goroutine spawn, + // no backoff, no storage RPC — pure lock stress). 
+ cache.SeedEntryForTest(id, &cpc.CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "container", + PodName: podName, + Namespace: namespace, + PodUID: podUID, + CPName: cp.Name, + RV: cp.ResourceVersion, + Shared: true, + }) + } else { + // Evict path: use the production remove-event path so + // deleteContainer and per-container locking are exercised. + cache.ContainerCallback(containercollection.PubSubEvent{ + Type: containercollection.EventTypeRemoveContainer, + Container: makeTestContainer(id, podName, namespace, "container"), + }) + } + time.Sleep(time.Millisecond * time.Duration(r.Intn(2))) + } + }(w) + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + // all goroutines finished within budget + case <-ctx.Done(): + t.Fatal("TestLockStressAddEvictInterleaved timed out after 5s") + } + + // ContainerCallback(EventTypeRemoveContainer) spawns go deleteContainer(...) + // asynchronously, so those goroutines may still be running immediately after + // wg.Wait(). Poll briefly until they drain before asserting goroutine count. + drainDeadline := time.Now().Add(200 * time.Millisecond) + for runtime.NumGoroutine() > baseline+10 && time.Now().Before(drainDeadline) { + runtime.Gosched() + time.Sleep(5 * time.Millisecond) + } + runtime.GC() + assert.LessOrEqual(t, runtime.NumGoroutine(), baseline+10, + "goroutine count should stay near baseline (no leaked goroutines)") + + // Implicit: if any goroutine panicked the test would have already failed. + assert.True(t, true, "no panic occurred") +} + +// primeSharedDataForStress primes shared data for a container used in the +// stress test. +func primeSharedDataForStress(t *testing.T, k8s *stubK8sCache, containerID, podName, namespace, containerName, wlid string) { + t.Helper() + ids, err := instanceidhandlerV1.GenerateInstanceIDFromPod(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: containerName, Image: "nginx:1.25"}}, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{Name: containerName, ImageID: "sha256:deadbeef"}}, + }, + }) + require.NoError(t, err) + require.NotEmpty(t, ids) + k8s.SetSharedContainerData(containerID, &objectcache.WatchedContainerData{ + InstanceID: ids[0], + Wlid: wlid, + }) +} + +// itoa3 converts a small non-negative int to a string without strconv. +func itoa3(i int) string { + if i == 0 { + return "0" + } + buf := [10]byte{} + pos := len(buf) + for i > 0 { + pos-- + buf[pos] = byte('0' + i%10) + i /= 10 + } + return string(buf[pos:]) +} diff --git a/pkg/objectcache/containerprofilecache/metrics.go b/pkg/objectcache/containerprofilecache/metrics.go new file mode 100644 index 0000000000..3a3a48cee7 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/metrics.go @@ -0,0 +1,66 @@ +package containerprofilecache + +import ( + "fmt" + + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +// Kind labels for ReportContainerProfileLegacyLoad and related metrics. +const ( + kindApplication = "application" + kindNetwork = "network" + + completenessFull = "full" + completenessPartial = "partial" +) + +// reportDeprecationWarn emits a one-shot WARN log for a user-authored legacy +// CRD (ApplicationProfile or NetworkNeighborhood) that was merged into the +// ContainerProfile. 
Dedup key is (kind, namespace, name, resourceVersion) so a +// single RV only logs once per process lifetime, even across many containers. +func (c *ContainerProfileCacheImpl) reportDeprecationWarn(kind, namespace, name, rv string, reason string) { + key := fmt.Sprintf("%s|%s/%s@%s", kind, namespace, name, rv) + if _, already := c.deprecationDedup.LoadOrStore(key, struct{}{}); already { + return + } + logger.L().Warning("ContainerProfileCache - user-authored legacy profile merged (deprecated)", + helpers.String("kind", kind), + helpers.String("namespace", namespace), + helpers.String("name", name), + helpers.String("resourceVersion", rv), + helpers.String("reason", reason)) +} + +// emitOverlayMetrics fires the per-kind completeness metric + deprecation WARN +// once per (kind, namespace, name, rv). Shared by addContainer's buildEntry +// and the reconciler's rebuildEntry so the two stay in lockstep. +func (c *ContainerProfileCacheImpl) emitOverlayMetrics( + userAP *v1beta1.ApplicationProfile, + userNN *v1beta1.NetworkNeighborhood, + warnings []partialProfileWarning, +) { + partialByKind := map[string]struct{}{} + for _, w := range warnings { + partialByKind[w.Kind] = struct{}{} + c.metricsManager.ReportContainerProfileLegacyLoad(w.Kind, completenessPartial) + c.reportDeprecationWarn(w.Kind, w.Namespace, w.Name, w.ResourceVersion, + fmt.Sprintf("pod has containers missing from user CRD: %v", w.MissingContainers)) + } + if userAP != nil { + if _, partial := partialByKind[kindApplication]; !partial { + c.metricsManager.ReportContainerProfileLegacyLoad(kindApplication, completenessFull) + } + c.reportDeprecationWarn(kindApplication, userAP.Namespace, userAP.Name, userAP.ResourceVersion, + "user-authored ApplicationProfile merged into ContainerProfile") + } + if userNN != nil { + if _, partial := partialByKind[kindNetwork]; !partial { + c.metricsManager.ReportContainerProfileLegacyLoad(kindNetwork, completenessFull) + } + c.reportDeprecationWarn(kindNetwork, userNN.Namespace, userNN.Name, userNN.ResourceVersion, + "user-authored NetworkNeighborhood merged into ContainerProfile") + } +} diff --git a/pkg/objectcache/containerprofilecache/packages_deleted_test.go b/pkg/objectcache/containerprofilecache/packages_deleted_test.go new file mode 100644 index 0000000000..3396e56d4c --- /dev/null +++ b/pkg/objectcache/containerprofilecache/packages_deleted_test.go @@ -0,0 +1,73 @@ +package containerprofilecache_test + +import ( + "strings" + "testing" + + "golang.org/x/tools/go/packages" +) + +// TestLegacyPackagesDeleted — T5. +// +// Walks the full dependency graph of ./... and asserts that neither of the +// deleted legacy cache packages appears as a reachable import path. Any +// surviving importer is listed in the failure message. +func TestLegacyPackagesDeleted(t *testing.T) { + const ( + legacyAP = "github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache" + legacyNN = "github.com/kubescape/node-agent/pkg/objectcache/networkneighborhoodcache" + ) + + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps, + // Load from the module root so that ./... expands correctly. + Dir: "../../..", + } + + pkgs, err := packages.Load(cfg, "./...") + if err != nil { + t.Fatalf("packages.Load failed: %v", err) + } + + // Collect errors from the package loader (missing modules, parse errors, …). 
+ var loadErrs []string + packages.Visit(pkgs, nil, func(p *packages.Package) { + for _, e := range p.Errors { + loadErrs = append(loadErrs, e.Msg) + } + }) + if len(loadErrs) > 0 { + // Non-fatal: the loader often emits spurious CGO / build-tag errors on + // CI. We only fail if we can't inspect any packages at all. + t.Logf("packages.Load reported %d non-fatal errors (first: %s)", len(loadErrs), loadErrs[0]) + } + + if len(pkgs) == 0 { + t.Fatal("packages.Load returned no packages — cannot verify legacy-path absence") + } + + // Build import-path → importing package map for the two legacy paths. + importers := map[string][]string{ + legacyAP: {}, + legacyNN: {}, + } + + packages.Visit(pkgs, func(p *packages.Package) bool { + for importPath := range p.Imports { + if importPath == legacyAP { + importers[legacyAP] = append(importers[legacyAP], p.PkgPath) + } + if importPath == legacyNN { + importers[legacyNN] = append(importers[legacyNN], p.PkgPath) + } + } + return true + }, nil) + + for legacy, importerList := range importers { + if len(importerList) > 0 { + t.Errorf("legacy package %q is still imported by:\n %s", + legacy, strings.Join(importerList, "\n ")) + } + } +} diff --git a/pkg/objectcache/containerprofilecache/projection.go b/pkg/objectcache/containerprofilecache/projection.go new file mode 100644 index 0000000000..1ff1bd1032 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/projection.go @@ -0,0 +1,339 @@ +package containerprofilecache + +import ( + "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// partialProfileWarning describes a user-authored legacy CRD that couldn't be +// fully merged into the ContainerProfile (e.g. the user CRD is missing entries +// for containers that exist in the pod spec). Emitted by the cache at merge +// time for deprecation observability. +type partialProfileWarning struct { + Kind string // "application" | "network" + Namespace string + Name string + ResourceVersion string + MissingContainers []string +} + +// projectUserProfiles overlays a user-authored ApplicationProfile and/or +// NetworkNeighborhood onto a base ContainerProfile for a single container. +// Returns a DeepCopy of the base with user fields merged in and a list of +// partial-merge warnings when the user CRD doesn't cover every container in +// the pod spec. +// +// cp MUST be non-nil. Either (or both) of userAP / userNN may be nil; nil +// user inputs contribute no merge but also no warning. pod may be nil, in +// which case the missing-container check is skipped (but the name-based +// per-container merge still runs). 
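+//
+// Illustrative sketch (hypothetical values, mirroring projection_test.go):
+//
+//	// cp allows SYS_PTRACE; userAP's "nginx" entry adds NET_BIND_SERVICE.
+//	merged, warns := projectUserProfiles(cp, userAP, nil, pod, "nginx")
+//	// merged.Spec.Capabilities is the union of both; cp itself is unchanged;
+//	// warns is empty as long as userAP covers every container in the pod.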
+func projectUserProfiles( + cp *v1beta1.ContainerProfile, + userAP *v1beta1.ApplicationProfile, + userNN *v1beta1.NetworkNeighborhood, + pod *corev1.Pod, + containerName string, +) (projected *v1beta1.ContainerProfile, warnings []partialProfileWarning) { + projected = cp.DeepCopy() + + if userAP != nil { + if missing := mergeApplicationProfile(projected, userAP, pod, containerName); len(missing) > 0 { + warnings = append(warnings, partialProfileWarning{ + Kind: kindApplication, + Namespace: userAP.Namespace, + Name: userAP.Name, + ResourceVersion: userAP.ResourceVersion, + MissingContainers: missing, + }) + } + } + + if userNN != nil { + if missing := mergeNetworkNeighborhood(projected, userNN, pod, containerName); len(missing) > 0 { + warnings = append(warnings, partialProfileWarning{ + Kind: kindNetwork, + Namespace: userNN.Namespace, + Name: userNN.Name, + ResourceVersion: userNN.ResourceVersion, + MissingContainers: missing, + }) + } + } + + return projected, warnings +} + +// mergeApplicationProfile finds the container entry in userAP matching +// containerName (across Spec.Containers / InitContainers / EphemeralContainers) +// and merges its fields into projected.Spec. Returns the list of pod-spec +// container names that are not present anywhere in userAP.Spec. +// +// ported from pkg/objectcache/applicationprofilecache/applicationprofilecache.go:660-673 +// (mergeContainer), applied here to a single-container ContainerProfile +// instead of a full ApplicationProfile. +func mergeApplicationProfile(projected *v1beta1.ContainerProfile, userAP *v1beta1.ApplicationProfile, pod *corev1.Pod, containerName string) []string { + // Defensive copy: slices inside matched (e.g. Execs[i].Args, Opens[i].Flags, + // Endpoints[i].Methods) would otherwise alias the caller's CRD object and + // could change if the CRD is refreshed concurrently. + userAP = userAP.DeepCopy() + if matched := findUserAPContainer(userAP, containerName); matched != nil { + projected.Spec.Capabilities = append(projected.Spec.Capabilities, matched.Capabilities...) + projected.Spec.Execs = append(projected.Spec.Execs, matched.Execs...) + projected.Spec.Opens = append(projected.Spec.Opens, matched.Opens...) + projected.Spec.Syscalls = append(projected.Spec.Syscalls, matched.Syscalls...) + projected.Spec.Endpoints = append(projected.Spec.Endpoints, matched.Endpoints...) + if projected.Spec.PolicyByRuleId == nil && len(matched.PolicyByRuleId) > 0 { + projected.Spec.PolicyByRuleId = make(map[string]v1beta1.RulePolicy, len(matched.PolicyByRuleId)) + } + for k, v := range matched.PolicyByRuleId { + if existing, ok := projected.Spec.PolicyByRuleId[k]; ok { + projected.Spec.PolicyByRuleId[k] = utils.MergePolicies(existing, v) + } else { + projected.Spec.PolicyByRuleId[k] = v + } + } + } + + return missingPodContainers(pod, userAPNames(userAP)) +} + +// mergeNetworkNeighborhood finds the container entry in userNN matching +// containerName and merges its Ingress/Egress into projected.Spec, then +// overlays the user CRD's pod LabelSelector onto projected's embedded +// LabelSelector. Returns missing-from-userNN pod container names. +// +// ported from pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go:560-636 +// (performMerge, mergeContainer, mergeNetworkNeighbors) applied to a single +// container's rules on a ContainerProfile. 
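+//
+// For example (hypothetical data): if projected already carries a neighbor
+// with Identifier "ing-1" and the matching user container lists "ing-1" and
+// "ing-2", then "ing-1" is field-merged in place (DNSNames union, ports
+// overlaid) and "ing-2" is appended as a new neighbor.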
+func mergeNetworkNeighborhood(projected *v1beta1.ContainerProfile, userNN *v1beta1.NetworkNeighborhood, pod *corev1.Pod, containerName string) []string { + // Defensive copy: neighbor slices (DNSNames, Ports, MatchExpressions) and + // LabelSelector.MatchExpressions would otherwise alias the caller's CRD. + userNN = userNN.DeepCopy() + if matched := findUserNNContainer(userNN, containerName); matched != nil { + projected.Spec.Ingress = mergeNetworkNeighbors(projected.Spec.Ingress, matched.Ingress) + projected.Spec.Egress = mergeNetworkNeighbors(projected.Spec.Egress, matched.Egress) + } + + // Merge LabelSelector (ContainerProfileSpec embeds metav1.LabelSelector). + if userNN.Spec.LabelSelector.MatchLabels != nil { + if projected.Spec.LabelSelector.MatchLabels == nil { + projected.Spec.LabelSelector.MatchLabels = make(map[string]string) + } + for k, v := range userNN.Spec.LabelSelector.MatchLabels { + projected.Spec.LabelSelector.MatchLabels[k] = v + } + } + projected.Spec.LabelSelector.MatchExpressions = append( + projected.Spec.LabelSelector.MatchExpressions, + userNN.Spec.LabelSelector.MatchExpressions..., + ) + + return missingPodContainers(pod, userNNNames(userNN)) +} + +func findUserAPContainer(userAP *v1beta1.ApplicationProfile, containerName string) *v1beta1.ApplicationProfileContainer { + if userAP == nil { + return nil + } + for i := range userAP.Spec.Containers { + if userAP.Spec.Containers[i].Name == containerName { + return &userAP.Spec.Containers[i] + } + } + for i := range userAP.Spec.InitContainers { + if userAP.Spec.InitContainers[i].Name == containerName { + return &userAP.Spec.InitContainers[i] + } + } + for i := range userAP.Spec.EphemeralContainers { + if userAP.Spec.EphemeralContainers[i].Name == containerName { + return &userAP.Spec.EphemeralContainers[i] + } + } + return nil +} + +func findUserNNContainer(userNN *v1beta1.NetworkNeighborhood, containerName string) *v1beta1.NetworkNeighborhoodContainer { + if userNN == nil { + return nil + } + for i := range userNN.Spec.Containers { + if userNN.Spec.Containers[i].Name == containerName { + return &userNN.Spec.Containers[i] + } + } + for i := range userNN.Spec.InitContainers { + if userNN.Spec.InitContainers[i].Name == containerName { + return &userNN.Spec.InitContainers[i] + } + } + for i := range userNN.Spec.EphemeralContainers { + if userNN.Spec.EphemeralContainers[i].Name == containerName { + return &userNN.Spec.EphemeralContainers[i] + } + } + return nil +} + +func userAPNames(userAP *v1beta1.ApplicationProfile) map[string]struct{} { + names := map[string]struct{}{} + if userAP == nil { + return names + } + for _, c := range userAP.Spec.Containers { + names[c.Name] = struct{}{} + } + for _, c := range userAP.Spec.InitContainers { + names[c.Name] = struct{}{} + } + for _, c := range userAP.Spec.EphemeralContainers { + names[c.Name] = struct{}{} + } + return names +} + +func userNNNames(userNN *v1beta1.NetworkNeighborhood) map[string]struct{} { + names := map[string]struct{}{} + if userNN == nil { + return names + } + for _, c := range userNN.Spec.Containers { + names[c.Name] = struct{}{} + } + for _, c := range userNN.Spec.InitContainers { + names[c.Name] = struct{}{} + } + for _, c := range userNN.Spec.EphemeralContainers { + names[c.Name] = struct{}{} + } + return names +} + +// missingPodContainers returns the set of pod-spec container names that are +// not present in the given set. If pod is nil, returns nil (check skipped). 
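+// Example: a pod with containers {nginx, sidecar} checked against a user CRD
+// covering only {nginx} yields ["sidecar"], which callers surface as a
+// partial-merge warning.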
+func missingPodContainers(pod *corev1.Pod, have map[string]struct{}) []string { + if pod == nil { + return nil + } + var missing []string + for _, c := range pod.Spec.Containers { + if _, ok := have[c.Name]; !ok { + missing = append(missing, c.Name) + } + } + for _, c := range pod.Spec.InitContainers { + if _, ok := have[c.Name]; !ok { + missing = append(missing, c.Name) + } + } + for _, c := range pod.Spec.EphemeralContainers { + if _, ok := have[c.Name]; !ok { + missing = append(missing, c.Name) + } + } + return missing +} + +// mergeNetworkNeighbors merges user neighbors into a normal-neighbor list, +// keyed by Identifier. ported from +// pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go:617-636. +func mergeNetworkNeighbors(normalNeighbors, userNeighbors []v1beta1.NetworkNeighbor) []v1beta1.NetworkNeighbor { + neighborMap := make(map[string]int, len(normalNeighbors)) + for i, neighbor := range normalNeighbors { + neighborMap[neighbor.Identifier] = i + } + for _, userNeighbor := range userNeighbors { + if idx, exists := neighborMap[userNeighbor.Identifier]; exists { + normalNeighbors[idx] = mergeNetworkNeighbor(normalNeighbors[idx], userNeighbor) + } else { + normalNeighbors = append(normalNeighbors, userNeighbor) + } + } + return normalNeighbors +} + +// mergeNetworkNeighbor merges a user-managed neighbor into an existing one. +// ported from +// pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go:638-706. +func mergeNetworkNeighbor(normal, user v1beta1.NetworkNeighbor) v1beta1.NetworkNeighbor { + merged := normal.DeepCopy() + + dnsNamesSet := make(map[string]struct{}) + for _, dns := range normal.DNSNames { + dnsNamesSet[dns] = struct{}{} + } + for _, dns := range user.DNSNames { + dnsNamesSet[dns] = struct{}{} + } + merged.DNSNames = make([]string, 0, len(dnsNamesSet)) + for dns := range dnsNamesSet { + merged.DNSNames = append(merged.DNSNames, dns) + } + + merged.Ports = mergeNetworkPorts(merged.Ports, user.Ports) + + if user.PodSelector != nil { + if merged.PodSelector == nil { + merged.PodSelector = &metav1.LabelSelector{} + } + if user.PodSelector.MatchLabels != nil { + if merged.PodSelector.MatchLabels == nil { + merged.PodSelector.MatchLabels = make(map[string]string) + } + for k, v := range user.PodSelector.MatchLabels { + merged.PodSelector.MatchLabels[k] = v + } + } + merged.PodSelector.MatchExpressions = append( + merged.PodSelector.MatchExpressions, + user.PodSelector.MatchExpressions..., + ) + } + + if user.NamespaceSelector != nil { + if merged.NamespaceSelector == nil { + merged.NamespaceSelector = &metav1.LabelSelector{} + } + if user.NamespaceSelector.MatchLabels != nil { + if merged.NamespaceSelector.MatchLabels == nil { + merged.NamespaceSelector.MatchLabels = make(map[string]string) + } + for k, v := range user.NamespaceSelector.MatchLabels { + merged.NamespaceSelector.MatchLabels[k] = v + } + } + merged.NamespaceSelector.MatchExpressions = append( + merged.NamespaceSelector.MatchExpressions, + user.NamespaceSelector.MatchExpressions..., + ) + } + + if user.IPAddress != "" { + merged.IPAddress = user.IPAddress + } + if user.Type != "" { + merged.Type = user.Type + } + + return *merged +} + +// mergeNetworkPorts merges user ports into a normal-ports list, keyed by Name. +// ported from +// pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go:708-727. 
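+// Note: unlike mergeNetworkNeighbor, a user port with the same Name replaces
+// the normal port wholesale; ports are not field-merged.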
+func mergeNetworkPorts(normalPorts, userPorts []v1beta1.NetworkPort) []v1beta1.NetworkPort { + portMap := make(map[string]int, len(normalPorts)) + for i, port := range normalPorts { + portMap[port.Name] = i + } + for _, userPort := range userPorts { + if idx, exists := portMap[userPort.Name]; exists { + normalPorts[idx] = userPort + } else { + normalPorts = append(normalPorts, userPort) + } + } + return normalPorts +} diff --git a/pkg/objectcache/containerprofilecache/projection_test.go b/pkg/objectcache/containerprofilecache/projection_test.go new file mode 100644 index 0000000000..85b106ee01 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/projection_test.go @@ -0,0 +1,222 @@ +package containerprofilecache + +import ( + "testing" + + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func baseCP() *v1beta1.ContainerProfile { + return &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}, + Spec: v1beta1.ContainerProfileSpec{ + Capabilities: []string{"SYS_PTRACE"}, + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/ls", Args: []string{"-la"}}, + }, + PolicyByRuleId: map[string]v1beta1.RulePolicy{ + "R0901": {AllowedProcesses: []string{"ls"}}, + }, + Ingress: []v1beta1.NetworkNeighbor{ + {Identifier: "ing-1", DNSNames: []string{"a.svc.local"}}, + }, + }, + } +} + +func podWith(containers ...string) *corev1.Pod { + var cs []corev1.Container + for _, n := range containers { + cs = append(cs, corev1.Container{Name: n}) + } + return &corev1.Pod{Spec: corev1.PodSpec{Containers: cs}} +} + +// TestProjection_UserAPOnly_Match verifies the happy-path merge of a matching +// user AP container: capabilities / execs / policies merged, no warnings. +func TestProjection_UserAPOnly_Match(t *testing.T) { + cp := baseCP() + userAP := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "ua", Namespace: "default", ResourceVersion: "u1"}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Capabilities: []string{"NET_BIND_SERVICE"}, + Execs: []v1beta1.ExecCalls{{Path: "/bin/cat"}}, + PolicyByRuleId: map[string]v1beta1.RulePolicy{ + "R0901": {AllowedProcesses: []string{"cat"}}, + "R0902": {AllowedProcesses: []string{"echo"}}, + }, + }}, + }, + } + pod := podWith("nginx") + + projected, warnings := projectUserProfiles(cp, userAP, nil, pod, "nginx") + require.NotNil(t, projected) + assert.Empty(t, warnings) + assert.NotSame(t, cp, projected, "projected must be a distinct DeepCopy") + assert.ElementsMatch(t, []string{"SYS_PTRACE", "NET_BIND_SERVICE"}, projected.Spec.Capabilities) + assert.Len(t, projected.Spec.Execs, 2) + // R0901 merged, R0902 added + assert.Contains(t, projected.Spec.PolicyByRuleId, "R0901") + assert.Contains(t, projected.Spec.PolicyByRuleId, "R0902") +} + +// TestProjection_UserNNOnly_Match verifies merge of matching NN container: +// ingress merged by Identifier, LabelSelector MatchLabels overlaid. 
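+// (DNSNames come out of a set union with no guaranteed order, hence the
+// ElementsMatch assertion below.)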
+func TestProjection_UserNNOnly_Match(t *testing.T) { + cp := baseCP() + cp.Spec.LabelSelector = metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}} + userNN := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{Name: "un", Namespace: "default", ResourceVersion: "n1"}, + Spec: v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"env": "prod"}, + }, + Containers: []v1beta1.NetworkNeighborhoodContainer{{ + Name: "nginx", + Ingress: []v1beta1.NetworkNeighbor{ + {Identifier: "ing-1", DNSNames: []string{"b.svc.local"}}, + {Identifier: "ing-2", DNSNames: []string{"c.svc.local"}}, + }, + }}, + }, + } + pod := podWith("nginx") + + projected, warnings := projectUserProfiles(cp, nil, userNN, pod, "nginx") + require.NotNil(t, projected) + assert.Empty(t, warnings) + require.Len(t, projected.Spec.Ingress, 2) + // ing-1 merged (DNSNames union) + var merged v1beta1.NetworkNeighbor + for _, ing := range projected.Spec.Ingress { + if ing.Identifier == "ing-1" { + merged = ing + break + } + } + assert.ElementsMatch(t, []string{"a.svc.local", "b.svc.local"}, merged.DNSNames) + // LabelSelector overlaid + assert.Equal(t, "nginx", projected.Spec.LabelSelector.MatchLabels["app"]) + assert.Equal(t, "prod", projected.Spec.LabelSelector.MatchLabels["env"]) +} + +// TestProjection_Both verifies both AP and NN can overlay in a single call. +func TestProjection_Both(t *testing.T) { + cp := baseCP() + userAP := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "ua", Namespace: "default", ResourceVersion: "u1"}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Capabilities: []string{"NET_ADMIN"}, + }}, + }, + } + userNN := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{Name: "un", Namespace: "default", ResourceVersion: "n1"}, + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{{ + Name: "nginx", + Ingress: []v1beta1.NetworkNeighbor{{Identifier: "ing-new"}}, + }}, + }, + } + pod := podWith("nginx") + + projected, warnings := projectUserProfiles(cp, userAP, userNN, pod, "nginx") + require.NotNil(t, projected) + assert.Empty(t, warnings) + assert.Contains(t, projected.Spec.Capabilities, "NET_ADMIN") + // Original ing-1 plus appended ing-new + assert.Len(t, projected.Spec.Ingress, 2) +} + +// TestProjection_UserAP_NonMatchingContainer verifies that when the user CRD +// doesn't include the target container name, no merge happens — but missing +// pod containers still produce a warning. 
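+// This is the "no matching container" case: nothing is merged, yet the
+// coverage warning still fires. The partial-coverage case (merge plus
+// warning) follows in the next test.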
+func TestProjection_UserAP_NonMatchingContainer(t *testing.T) {
+	cp := baseCP()
+	userAP := &v1beta1.ApplicationProfile{
+		ObjectMeta: metav1.ObjectMeta{Name: "ua", Namespace: "default", ResourceVersion: "u1"},
+		Spec: v1beta1.ApplicationProfileSpec{
+			Containers: []v1beta1.ApplicationProfileContainer{{
+				Name:         "other", // not "nginx"
+				Capabilities: []string{"NET_BIND_SERVICE"},
+			}},
+		},
+	}
+	pod := podWith("nginx", "sidecar")
+
+	projected, warnings := projectUserProfiles(cp, userAP, nil, pod, "nginx")
+	require.NotNil(t, projected)
+	// No merge because no container matched "nginx"
+	assert.ElementsMatch(t, []string{"SYS_PTRACE"}, projected.Spec.Capabilities)
+	require.Len(t, warnings, 1)
+	assert.Equal(t, kindApplication, warnings[0].Kind)
+	assert.ElementsMatch(t, []string{"nginx", "sidecar"}, warnings[0].MissingContainers)
+}
+
+// TestProjection_UserAP_PartialContainers verifies that when the user AP has
+// one container but the pod has two, we emit a partial warning naming the
+// missing pod container.
+func TestProjection_UserAP_PartialContainers(t *testing.T) {
+	cp := baseCP()
+	userAP := &v1beta1.ApplicationProfile{
+		ObjectMeta: metav1.ObjectMeta{Name: "ua", Namespace: "default", ResourceVersion: "u1"},
+		Spec: v1beta1.ApplicationProfileSpec{
+			Containers: []v1beta1.ApplicationProfileContainer{{
+				Name:         "nginx",
+				Capabilities: []string{"NET_BIND_SERVICE"},
+			}},
+		},
+	}
+	pod := podWith("nginx", "sidecar")
+
+	projected, warnings := projectUserProfiles(cp, userAP, nil, pod, "nginx")
+	require.NotNil(t, projected)
+	// Target container merged.
+	assert.Contains(t, projected.Spec.Capabilities, "NET_BIND_SERVICE")
+	require.Len(t, warnings, 1)
+	assert.Equal(t, kindApplication, warnings[0].Kind)
+	assert.Equal(t, []string{"sidecar"}, warnings[0].MissingContainers)
+}
+
+// TestProjection_NoUserCRDs verifies projection with neither user CRD returns
+// a DeepCopy (distinct pointer) and no warnings.
+func TestProjection_NoUserCRDs(t *testing.T) {
+	cp := baseCP()
+	pod := podWith("nginx")
+
+	projected, warnings := projectUserProfiles(cp, nil, nil, pod, "nginx")
+	require.NotNil(t, projected)
+	assert.Empty(t, warnings)
+	assert.NotSame(t, cp, projected)
+	assert.Equal(t, cp.Spec.Capabilities, projected.Spec.Capabilities)
+}
+
+// TestProjection_NilPod verifies the merge still runs when pod is nil; the
+// missing-container check is skipped (no warning emitted for partial).
+func TestProjection_NilPod(t *testing.T) {
+	cp := baseCP()
+	userAP := &v1beta1.ApplicationProfile{
+		ObjectMeta: metav1.ObjectMeta{Name: "ua", Namespace: "default", ResourceVersion: "u1"},
+		Spec: v1beta1.ApplicationProfileSpec{
+			Containers: []v1beta1.ApplicationProfileContainer{{
+				Name:         "nginx",
+				Capabilities: []string{"NET_BIND_SERVICE"},
+			}},
+		},
+	}
+
+	projected, warnings := projectUserProfiles(cp, userAP, nil, nil, "nginx")
+	require.NotNil(t, projected)
+	assert.Empty(t, warnings)
+	assert.Contains(t, projected.Spec.Capabilities, "NET_BIND_SERVICE")
+}
diff --git a/pkg/objectcache/containerprofilecache/reconciler.go b/pkg/objectcache/containerprofilecache/reconciler.go
new file mode 100644
index 0000000000..29c0307af3
--- /dev/null
+++ b/pkg/objectcache/containerprofilecache/reconciler.go
@@ -0,0 +1,565 @@
+// Package containerprofilecache — reconciler.go
+//
+// The reconciler is the safety-net eviction path AND the freshness refresh
+// loop. Each tick it:
+//  1. reconcileOnce: evicts cache entries whose container has clearly
+//     terminated. Entries whose pod is missing from the k8s cache, or whose
+//     container is merely Waiting, are retained (see reconcileOnce).
+//  2. refreshAllEntries (single-flight via atomic flag): re-fetches the
+//     consolidated CP, the workload-level AP+NN, the user-managed
+//     "ug-" AP+NN, and any label-referenced user AP/NN overlay,
+//     then rebuilds the projection iff any resourceVersion changed. Fast-skip
+//     when every RV matches what's already cached.
+//
+// RPC cost @ 300 containers / 30s cadence steady-state: up to 7 gets per
+// entry per tick (CP + 3×AP + 3×NN). At 300 entries that's 70 RPC/s in the
+// worst case, dropping close to 0 once fast-skip catches on. Most entries
+// carry only workload-level AP+NN, so the common case is 3 RPC/tick per
+// entry = 30 RPC/s.
+package containerprofilecache
+
+import (
+	"context"
+	"time"
+
+	"github.com/kubescape/go-logger"
+	"github.com/kubescape/go-logger/helpers"
+	helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
+	"github.com/kubescape/node-agent/pkg/objectcache"
+	"github.com/kubescape/node-agent/pkg/objectcache/callstackcache"
+	"github.com/kubescape/node-agent/pkg/utils"
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// tickLoop drives the reconciler. Evict runs synchronously on the tick;
+// refresh runs on a single-flight goroutine guarded by refreshInProgress so a
+// slow refresh never stacks.
+func (c *ContainerProfileCacheImpl) tickLoop(ctx context.Context) {
+	if c.reconcileEvery == 0 {
+		c.reconcileEvery = defaultReconcileInterval
+	}
+	logger.L().Info("ContainerProfileCache reconciler started",
+		helpers.String("interval", c.reconcileEvery.String()))
+	ticker := time.NewTicker(c.reconcileEvery)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			logger.L().Info("ContainerProfileCache reconciler stopped")
+			return
+		case <-ticker.C:
+			start := time.Now()
+			entriesBefore := c.entries.Len()
+			pendingBefore := c.pending.Len()
+			c.reconcileOnce(ctx)
+			c.retryPendingEntries(ctx)
+			// Emit the debug breadcrumb only when something actually moved:
+			// entries delta != 0 OR pending delta != 0. Keeping the log gated
+			// avoids flooding the journal with identical zero-delta ticks while
+			// still leaving the observability hook for the test-regression
+			// investigations that motivated the log.
+			entriesAfter := c.entries.Len()
+			pendingAfter := c.pending.Len()
+			if entriesBefore != entriesAfter || pendingBefore != pendingAfter {
+				logger.L().Debug("ContainerProfileCache reconciler tick",
+					helpers.Int("entries_before", entriesBefore),
+					helpers.Int("entries_after", entriesAfter),
+					helpers.Int("pending_before", pendingBefore),
+					helpers.Int("pending_after", pendingAfter))
+			}
+			c.metricsManager.ReportContainerProfileReconcilerDuration("evict", time.Since(start))
+			if c.refreshInProgress.CompareAndSwap(false, true) {
+				go func() {
+					defer c.refreshInProgress.Store(false)
+					c.refreshAllEntries(ctx)
+				}()
+			}
+		}
+	}
+}
+
+// reconcileOnce evicts cache entries whose container has terminated (see
+// isContainerTerminated); pod-absent and Waiting containers are retained.
+// Unexported, but driven directly by package tests.
+func (c *ContainerProfileCacheImpl) reconcileOnce(ctx context.Context) {
+	var toEvict []string
+	c.entries.Range(func(id string, e *CachedContainerProfile) bool {
+		if ctx.Err() != nil { // delta #3: honor cancellation mid-range
+			return false
+		}
+		pod := c.k8sObjectCache.GetPod(e.Namespace, e.PodName)
+		if pod == nil {
+			// Pod not yet in k8s cache (or briefly absent during watch
+			// resync). Do NOT evict — the pod cache routinely lags the
+			// ContainerCallback Add events by tens of seconds on busy nodes,
+			// and evicting here would churn every entry every tick until the
+			// cache catches up. Cleanup for terminated containers flows
+			// through deleteContainer on EventTypeRemoveContainer.
+			return true
+		}
+		// Only evict when the pod IS in cache AND the container has clearly
+		// exited (Terminated state). "Not yet Running" (Waiting state) is
+		// NOT a reason to evict — init containers and pre-running containers
+		// legitimately pass through Waiting before transitioning to Running.
+		if isContainerTerminated(pod, e, id) {
+			toEvict = append(toEvict, id)
+		}
+		return true
+	})
+	for _, id := range toEvict {
+		c.containerLocks.WithLock(id, func() {
+			c.entries.Delete(id)
+		})
+		// See deleteContainer comment on why we don't ReleaseLock here.
+		c.metricsManager.ReportContainerProfileReconcilerEviction("pod_stopped")
+	}
+
+	// NOTE: we intentionally do NOT GC pending entries based on pod state.
+	// A previous version dropped pending entries when GetPod returned nil or
+	// the container wasn't yet Running — but the k8s pod cache and container
+	// statuses lag the containerwatcher Add event by tens of seconds on busy
+	// nodes, so the GC dropped every pending entry before retries had a
+	// chance to succeed. Cleanup for terminated containers flows through
+	// deleteContainer (EventTypeRemoveContainer) which clears both entries
+	// and pending atomically. Memory growth from stuck-pending entries is
+	// bounded by the node's container churn.
+
+	c.metricsManager.SetContainerProfileCacheEntries("total", float64(c.entries.Len()))
+	c.metricsManager.SetContainerProfileCacheEntries("pending", float64(c.pending.Len()))
+}
+
+// isContainerTerminated reports whether the container identified by `id` (the
+// cache key, a trimmed containerID) or by (e.ContainerName, e.PodUID) has a
+// Terminated state in the pod's container/initContainer/ephemeralContainer
+// statuses. This is stricter than "not Running": a container in Waiting
+// state is NOT considered terminated. Used by reconcileOnce as the eviction
+// signal.
+//
+// Pre-running init containers can appear with an empty ContainerID in the
+// status (kubelet hasn't published it yet). In that case we fall back to
+// matching on (Name, PodUID) so we don't prematurely evict the entry the
+// instant it's populated.
+func isContainerTerminated(pod *corev1.Pod, e *CachedContainerProfile, id string) bool {
+	statuses := make([]corev1.ContainerStatus, 0,
+		len(pod.Status.ContainerStatuses)+
+			len(pod.Status.InitContainerStatuses)+
+			len(pod.Status.EphemeralContainerStatuses))
+	statuses = append(statuses, pod.Status.ContainerStatuses...)
+	statuses = append(statuses, pod.Status.InitContainerStatuses...)
+	statuses = append(statuses, pod.Status.EphemeralContainerStatuses...)
+	for _, s := range statuses {
+		if s.ContainerID == "" {
+			if s.Name == e.ContainerName && string(pod.UID) == e.PodUID {
+				return s.State.Terminated != nil
+			}
+			continue
+		}
+		if utils.TrimRuntimePrefix(s.ContainerID) == id {
+			return s.State.Terminated != nil
+		}
+	}
+	// Container not found in any status list. If no statuses have been
+	// published yet (kubelet lag on a brand-new pod), do NOT evict — the
+	// empty list is indistinguishable from a fully-reaped container otherwise.
+	if len(statuses) == 0 {
+		return false
+	}
+	// Statuses were published but this container is absent: it was reaped.
+	return true
+}
+
+// isContainerRunning reports whether the container identified by `id` (the
+// cache key, a trimmed containerID) or by (e.ContainerName, e.PodUID) is in
+// State=Running in the pod's container/initContainer/ephemeralContainer
+// statuses. It uses the same empty-ContainerID fallback as
+// isContainerTerminated above.
+func isContainerRunning(pod *corev1.Pod, e *CachedContainerProfile, id string) bool {
+	statuses := make([]corev1.ContainerStatus, 0,
+		len(pod.Status.ContainerStatuses)+
+			len(pod.Status.InitContainerStatuses)+
+			len(pod.Status.EphemeralContainerStatuses))
+	statuses = append(statuses, pod.Status.ContainerStatuses...)
+	statuses = append(statuses, pod.Status.InitContainerStatuses...)
+	statuses = append(statuses, pod.Status.EphemeralContainerStatuses...)
+	for _, s := range statuses {
+		if s.ContainerID == "" {
+			// pre-running init container: match by (Name, PodUID)
+			if s.Name == e.ContainerName && string(pod.UID) == e.PodUID {
+				return s.State.Running != nil
+			}
+			continue
+		}
+		if utils.TrimRuntimePrefix(s.ContainerID) == id {
+			return s.State.Running != nil
+		}
+	}
+	return false
+}
+
+// refreshAllEntries re-fetches CP + user AP/NN for each cache entry and
+// updates the projection if any ResourceVersion changed. Fast-skip when RV,
+// UserManagedAPRV, UserManagedNNRV, UserAPRV and UserNNRV all match
+// (delta #4). Exposed for tests.
+func (c *ContainerProfileCacheImpl) refreshAllEntries(ctx context.Context) {
+	start := time.Now()
+	defer func() {
+		c.metricsManager.ReportContainerProfileReconcilerDuration("refresh", time.Since(start))
+	}()
+	// Snapshot first to avoid holding SafeMap's RLock while refreshOneEntry
+	// writes back via Set (which needs the write lock).
+	type snapshot struct {
+		id string
+		e  *CachedContainerProfile
+	}
+	var work []snapshot
+	c.entries.Range(func(id string, e *CachedContainerProfile) bool {
+		if ctx.Err() != nil { // delta #3
+			return false
+		}
+		work = append(work, snapshot{id: id, e: e})
+		return true
+	})
+	for _, w := range work {
+		if ctx.Err() != nil {
+			return
+		}
+		c.containerLocks.WithLock(w.id, func() {
+			c.refreshOneEntry(ctx, w.id, w.e)
+		})
+	}
+}
+
+// refreshOneEntry refreshes a single cache entry under the per-container lock.
+// Re-fetches ALL sources the entry was originally built from (consolidated CP,
+// workload-level AP/NN, user-managed AP/NN at "ug-", and any
+// label-referenced user AP/NN overlay) and rebuilds the projection if ANY
+// ResourceVersion changed. Keeping the existing entry on fetch errors is fine:
+// the next tick will retry.
+//
+// Rebuild on refresh applies the same projection ladder as tryPopulateEntry:
+//
+//	base CP → workload AP+NN → user-managed (ug-) AP+NN → user overlay AP+NN.
+//
+// We intentionally DO NOT re-apply the partial-on-non-PreRunning gate here:
+// any entry that survived addContainer already passed that gate (or was
+// PreRunning), so refresh can accept partial profiles freely. (Fix B for
+// Test_17 / Test_19: the workload AP/NN must be re-fetched each tick so a
+// "ready" -> "completed" transition propagates to ProfileState.Status, which
+// in turn promotes fail_on_profile from false to true.)
+func (c *ContainerProfileCacheImpl) refreshOneEntry(ctx context.Context, id string, e *CachedContainerProfile) {
+	// Resurrection guard (reviewer #1): refreshAllEntries snapshots entries
+	// without holding containerLocks, so a concurrent deleteContainer /
+	// reconcile-evict may have removed the entry between snapshot and lock
+	// acquisition. 
If so, bail; otherwise the rebuild's c.entries.Set would + // resurrect a dead container. + if _, still := c.entries.Load(id); !still { + return + } + + ns := e.Namespace + + // Re-fetch all sources. CP fetch errors (including 404) are treated as + // "not available right now" — mirroring tryPopulateEntry's behavior. We + // leave cp=nil and rely on the RV-match fast-skip below to preserve the + // existing entry when nothing has changed. This is what lets refresh + // pick up workload-level AP/NN transitions ("ready" -> "completed") even + // while the storage-side consolidated CP remains unpublished. + var cp *v1beta1.ContainerProfile + var cpErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + cp, cpErr = c.storageClient.GetContainerProfile(rctx, ns, e.CPName) + return cpErr + }) + if cpErr != nil { + // If the previous entry was built off a real CP (non-empty RV), a + // CP fetch error on this tick is transient — keep the entry as-is. + // If the entry never had a CP (RV == "", pure workload/user-managed + // build), treat the error as 404 and let workload/user-managed + // re-fetches drive any refresh. + if e.RV != "" { + logger.L().Debug("refreshOneEntry: CP fetch failed; keeping cached entry", + helpers.String("containerID", id), + helpers.String("cpName", e.CPName), + helpers.Error(cpErr)) + return + } + logger.L().Debug("refreshOneEntry: CP fetch failed (no prior CP); treating as not-available", + helpers.String("containerID", id), + helpers.String("cpName", e.CPName), + helpers.Error(cpErr)) + cp = nil + } + var userManagedAP *v1beta1.ApplicationProfile + var userManagedNN *v1beta1.NetworkNeighborhood + if e.WorkloadName != "" { + ugAPName := helpersv1.UserApplicationProfilePrefix + e.WorkloadName + var userManagedAPErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userManagedAP, userManagedAPErr = c.storageClient.GetApplicationProfile(rctx, ns, ugAPName) + return userManagedAPErr + }) + if userManagedAPErr != nil && e.UserManagedAPRV != "" { + logger.L().Debug("refreshOneEntry: user-managed AP fetch failed; keeping cached entry", + helpers.String("containerID", id), + helpers.String("name", ugAPName), + helpers.Error(userManagedAPErr)) + return + } + if userManagedAPErr != nil { + userManagedAP = nil // k8s client returns non-nil zero-value on 404; treat as absent + } + ugNNName := helpersv1.UserNetworkNeighborhoodPrefix + e.WorkloadName + var userManagedNNErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userManagedNN, userManagedNNErr = c.storageClient.GetNetworkNeighborhood(rctx, ns, ugNNName) + return userManagedNNErr + }) + if userManagedNNErr != nil && e.UserManagedNNRV != "" { + logger.L().Debug("refreshOneEntry: user-managed NN fetch failed; keeping cached entry", + helpers.String("containerID", id), + helpers.String("name", ugNNName), + helpers.Error(userManagedNNErr)) + return + } + if userManagedNNErr != nil { + userManagedNN = nil + } + } + var userAP *v1beta1.ApplicationProfile + var userNN *v1beta1.NetworkNeighborhood + if e.UserAPRef != nil { + var userAPErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userAP, userAPErr = c.storageClient.GetApplicationProfile(rctx, e.UserAPRef.Namespace, e.UserAPRef.Name) + return userAPErr + }) + if userAPErr != nil && e.UserAPRV != "" { + logger.L().Debug("refreshOneEntry: user-defined AP fetch failed; keeping cached entry", + helpers.String("containerID", id), + helpers.String("name", e.UserAPRef.Name), + helpers.Error(userAPErr)) + return + } + 
if userAPErr != nil { + userAP = nil + } + } + if e.UserNNRef != nil { + var userNNErr error + _ = c.refreshRPC(ctx, func(rctx context.Context) error { + userNN, userNNErr = c.storageClient.GetNetworkNeighborhood(rctx, e.UserNNRef.Namespace, e.UserNNRef.Name) + return userNNErr + }) + if userNNErr != nil && e.UserNNRV != "" { + logger.L().Debug("refreshOneEntry: user-defined NN fetch failed; keeping cached entry", + helpers.String("containerID", id), + helpers.String("name", e.UserNNRef.Name), + helpers.Error(userNNErr)) + return + } + if userNNErr != nil { + userNN = nil + } + } + + // Fast-skip when nothing changed. We match "absent" (nil) with empty RV: + // this avoids spurious rebuilds when an optional source is still missing, + // as long as it was also missing at the last build. + if rvsMatchCP(cp, e.RV) && + rvsMatchAP(userManagedAP, e.UserManagedAPRV) && + rvsMatchNN(userManagedNN, e.UserManagedNNRV) && + rvsMatchAP(userAP, e.UserAPRV) && + rvsMatchNN(userNN, e.UserNNRV) { + return + } + + c.rebuildEntryFromSources(id, e, cp, userManagedAP, userManagedNN, userAP, userNN) +} + +// rvsMatchCP, rvsMatchAP, rvsMatchNN return true when either (a) the object is +// absent and the stored RV is empty, or (b) the object is present and its RV +// matches the stored RV. This lets fast-skip treat "still missing" as a match. +func rvsMatchCP(obj *v1beta1.ContainerProfile, rv string) bool { + if obj == nil { + return rv == "" + } + return obj.ResourceVersion == rv +} +func rvsMatchAP(obj *v1beta1.ApplicationProfile, rv string) bool { + if obj == nil { + return rv == "" + } + return obj.ResourceVersion == rv +} +func rvsMatchNN(obj *v1beta1.NetworkNeighborhood, rv string) bool { + if obj == nil { + return rv == "" + } + return obj.ResourceVersion == rv +} + +// rebuildEntryFromSources constructs a fresh CachedContainerProfile from the +// given sources and stores it under `id`. Applies the projection ladder from +// tryPopulateEntry: base CP (or synthesized) → user-managed (ug-) AP+NN → +// label-referenced user overlay AP+NN. +// +// Called by the reconciler when any input ResourceVersion has changed. +func (c *ContainerProfileCacheImpl) rebuildEntryFromSources( + id string, + prev *CachedContainerProfile, + cp *v1beta1.ContainerProfile, + userManagedAP *v1beta1.ApplicationProfile, + userManagedNN *v1beta1.NetworkNeighborhood, + userAP *v1beta1.ApplicationProfile, + userNN *v1beta1.NetworkNeighborhood, +) { + pod := c.k8sObjectCache.GetPod(prev.Namespace, prev.PodName) + + // Backfill PodUID when the entry was originally added before the pod + // appeared in the k8s cache. An empty PodUID on a pre-running init + // container (where the pod-status ContainerID is also empty) makes + // isContainerTerminated's (Name, PodUID) fallback match zero and treat + // the entry as terminated on the next eviction pass. Healing it here + // lets the next reconcileOnce correctly classify the container. + podUID := prev.PodUID + if podUID == "" && pod != nil { + podUID = string(pod.UID) + } + + // When the consolidated CP is absent but we still have user-managed / + // user-defined overlays to project, synthesize an empty base so + // downstream state display is sensible. 
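+	// (The Full/Completed annotations below flow into the ProfileState built
+	// at the end of this function, so an overlay-only entry reads as a
+	// settled profile. That is an inference from how State is derived here,
+	// not a storage-side contract.)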
+ effectiveCP := cp + if effectiveCP == nil { + syntheticName := prev.WorkloadName + if syntheticName == "" { + syntheticName = prev.CPName + } + effectiveCP = &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: syntheticName, + Namespace: prev.Namespace, + Annotations: map[string]string{ + helpersv1.CompletionMetadataKey: helpersv1.Full, + helpersv1.StatusMetadataKey: helpersv1.Completed, + }, + }, + } + } + + projected := effectiveCP + // Ladder pass #1: user-managed "ug-" AP + NN. + if userManagedAP != nil || userManagedNN != nil { + p, warnings := projectUserProfiles(projected, userManagedAP, userManagedNN, pod, prev.ContainerName) + projected = p + c.emitOverlayMetrics(userManagedAP, userManagedNN, warnings) + } + // Ladder pass #2: label-referenced user overlay AP + NN. + shared := userAP == nil && userNN == nil && + userManagedAP == nil && userManagedNN == nil && + cp != nil + var userWarnings []partialProfileWarning + if userAP != nil || userNN != nil { + p, w := projectUserProfiles(projected, userAP, userNN, pod, prev.ContainerName) + projected = p + userWarnings = w + } + c.emitOverlayMetrics(userAP, userNN, userWarnings) + + // Rebuild the call-stack search tree from the projected profile. + tree := callstackcache.NewCallStackSearchTree() + for _, stack := range projected.Spec.IdentifiedCallStacks { + tree.AddCallStack(stack) + } + + newEntry := &CachedContainerProfile{ + Profile: projected, + State: &objectcache.ProfileState{Completion: effectiveCP.Annotations[helpersv1.CompletionMetadataKey], Status: effectiveCP.Annotations[helpersv1.StatusMetadataKey], Name: effectiveCP.Name}, + CallStackTree: tree, + ContainerName: prev.ContainerName, + PodName: prev.PodName, + Namespace: prev.Namespace, + PodUID: podUID, + WorkloadID: prev.WorkloadID, + CPName: prev.CPName, + WorkloadName: prev.WorkloadName, + Shared: shared, + RV: rvOfCP(cp), + UserManagedAPRV: rvOfAP(userManagedAP), + UserManagedNNRV: rvOfNN(userManagedNN), + UserAPRV: rvOfAP(userAP), + UserNNRV: rvOfNN(userNN), + } + if userAP != nil { + newEntry.UserAPRef = &namespacedName{Namespace: userAP.Namespace, Name: userAP.Name} + } else if prev.UserAPRef != nil { + // Preserve the ref so subsequent ticks still know to re-fetch the + // overlay (e.g. transient fetch error during this tick). + newEntry.UserAPRef = prev.UserAPRef + } + if userNN != nil { + newEntry.UserNNRef = &namespacedName{Namespace: userNN.Namespace, Name: userNN.Name} + } else if prev.UserNNRef != nil { + newEntry.UserNNRef = prev.UserNNRef + } + + c.entries.Set(id, newEntry) +} + +// rvOfCP / rvOfAP / rvOfNN return the object's ResourceVersion or "" when nil. +// Separate typed versions avoid the Go nil-interface trap where a typed-nil +// pointer wrapped in an interface is not == nil. +func rvOfCP(o *v1beta1.ContainerProfile) string { + if o == nil { + return "" + } + return o.ResourceVersion +} +func rvOfAP(o *v1beta1.ApplicationProfile) string { + if o == nil { + return "" + } + return o.ResourceVersion +} +func rvOfNN(o *v1beta1.NetworkNeighborhood) string { + if o == nil { + return "" + } + return o.ResourceVersion +} + +// retryPendingEntries re-issues GetContainerProfile for every containerID that +// was seen on ContainerCallback(Add) but whose CP was not yet in storage. On +// success the entry is promoted into the main cache and removed from pending. +// Exposed for tests. +// +// This preserves the legacy-cache behavior where the periodic "ListProfiles" +// tick recovered containers whose CP showed up after container-start. 
Without +// this retry, a container whose CP is created asynchronously (the normal +// path, since containerprofilemanager creates the CP after observing behavior) +// would never enter the cache. See component-test regression analysis at +// .omc/plans/containerprofile-cache-component-test-findings.md. +func (c *ContainerProfileCacheImpl) retryPendingEntries(ctx context.Context) { + type snap struct { + id string + p *pendingContainer + } + var work []snap + c.pending.Range(func(id string, p *pendingContainer) bool { + if ctx.Err() != nil { + return false + } + work = append(work, snap{id: id, p: p}) + return true + }) + for _, w := range work { + if ctx.Err() != nil { + return + } + c.containerLocks.WithLock(w.id, func() { + // Double-check pending still contains this id (could have been + // promoted or dropped by a concurrent path). + if _, still := c.pending.Load(w.id); !still { + return + } + c.tryPopulateEntry(ctx, w.id, w.p.container, w.p.sharedData, w.p.cpName, w.p.workloadName) + }) + } +} diff --git a/pkg/objectcache/containerprofilecache/reconciler_test.go b/pkg/objectcache/containerprofilecache/reconciler_test.go new file mode 100644 index 0000000000..0bdf92f180 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/reconciler_test.go @@ -0,0 +1,1199 @@ +package containerprofilecache + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/metricsmanager" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/storage" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// controllableK8sCache is a K8sObjectCache stub whose GetPod can be scripted +// per (namespace, podName) and whose invocation count is observable for the +// cancellation test. The unexported methods required by the interface are +// implemented as no-ops. 
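+//
+// Typical wiring in the tests below — both knobs are optional:
+//
+//	k8s := newControllableK8sCache()
+//	k8s.setPod("default", "nginx-abc", pod)                        // static mapping
+//	k8s.podHook = func(ns, name string) *corev1.Pod { return nil } // dynamic override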
+type controllableK8sCache struct { + pods map[string]*corev1.Pod + podHook func(namespace, podName string) *corev1.Pod // optional override + calls atomic.Int64 +} + +var _ objectcache.K8sObjectCache = (*controllableK8sCache)(nil) + +func newControllableK8sCache() *controllableK8sCache { + return &controllableK8sCache{pods: map[string]*corev1.Pod{}} +} + +func (k *controllableK8sCache) setPod(namespace, podName string, pod *corev1.Pod) { + k.pods[namespace+"/"+podName] = pod +} + +func (k *controllableK8sCache) GetPod(namespace, podName string) *corev1.Pod { + k.calls.Add(1) + if k.podHook != nil { + return k.podHook(namespace, podName) + } + if p, ok := k.pods[namespace+"/"+podName]; ok { + return p + } + return nil +} +func (k *controllableK8sCache) GetPodSpec(_, _ string) *corev1.PodSpec { return nil } +func (k *controllableK8sCache) GetPodStatus(_, _ string) *corev1.PodStatus { return nil } +func (k *controllableK8sCache) GetApiServerIpAddress() string { return "" } +func (k *controllableK8sCache) GetPods() []*corev1.Pod { return nil } +func (k *controllableK8sCache) SetSharedContainerData(_ string, _ *objectcache.WatchedContainerData) { +} +func (k *controllableK8sCache) GetSharedContainerData(_ string) *objectcache.WatchedContainerData { + return nil +} +func (k *controllableK8sCache) DeleteSharedContainerData(_ string) {} + +// countingProfileClient tracks per-method RPC counts so tests can assert +// fast-skip behavior. +type countingProfileClient struct { + cp *v1beta1.ContainerProfile + ap *v1beta1.ApplicationProfile + nn *v1beta1.NetworkNeighborhood + + cpCalls atomic.Int64 + apCalls atomic.Int64 + nnCalls atomic.Int64 +} + +var _ storage.ProfileClient = (*countingProfileClient)(nil) + +func (f *countingProfileClient) GetContainerProfile(_ context.Context, _, _ string) (*v1beta1.ContainerProfile, error) { + f.cpCalls.Add(1) + return f.cp, nil +} +func (f *countingProfileClient) GetApplicationProfile(_ context.Context, _, _ string) (*v1beta1.ApplicationProfile, error) { + f.apCalls.Add(1) + return f.ap, nil +} +func (f *countingProfileClient) GetNetworkNeighborhood(_ context.Context, _, _ string) (*v1beta1.NetworkNeighborhood, error) { + f.nnCalls.Add(1) + return f.nn, nil +} +func (f *countingProfileClient) ListApplicationProfiles(_ context.Context, _ string, _ int64, _ string) (*v1beta1.ApplicationProfileList, error) { + return &v1beta1.ApplicationProfileList{}, nil +} +func (f *countingProfileClient) ListNetworkNeighborhoods(_ context.Context, _ string, _ int64, _ string) (*v1beta1.NetworkNeighborhoodList, error) { + return &v1beta1.NetworkNeighborhoodList{}, nil +} + +// countingMetrics tallies ReportContainerProfileLegacyLoad calls so the T8 +// end-to-end test can assert the overlay refresh re-emits the full-load signal. 
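+//
+// Tests read the tallies back through the mutex-guarded accessors, e.g.:
+//
+//	assert.Equal(t, 1, metrics.eviction("pod_stopped"))
+//	assert.GreaterOrEqual(t, metrics.legacyLoad(kindApplication, completenessFull), 1)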
+type countingMetrics struct {
+	metricsmanager.MetricsMock
+	mu            sync.Mutex
+	legacyLoads   map[string]int // key = kind+"|"+completeness
+	evictions     map[string]int
+	entriesByKind map[string]float64
+}
+
+func newCountingMetrics() *countingMetrics {
+	return &countingMetrics{
+		legacyLoads:   map[string]int{},
+		evictions:     map[string]int{},
+		entriesByKind: map[string]float64{},
+	}
+}
+func (m *countingMetrics) ReportContainerProfileLegacyLoad(kind, completeness string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.legacyLoads[kind+"|"+completeness]++
+}
+func (m *countingMetrics) ReportContainerProfileReconcilerEviction(reason string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.evictions[reason]++
+}
+func (m *countingMetrics) SetContainerProfileCacheEntries(kind string, count float64) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.entriesByKind[kind] = count
+}
+func (m *countingMetrics) legacyLoad(kind, completeness string) int {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.legacyLoads[kind+"|"+completeness]
+}
+func (m *countingMetrics) eviction(reason string) int {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.evictions[reason]
+}
+
+// newReconcilerCache returns a cache wired with a controllable k8s cache and
+// a counting profile client. Tests drive reconcileOnce / refreshAllEntries
+// directly.
+func newReconcilerCache(t *testing.T, client storage.ProfileClient, k8s objectcache.K8sObjectCache, metrics metricsmanager.MetricsManager) *ContainerProfileCacheImpl {
+	t.Helper()
+	cfg := config.Config{ProfilesCacheRefreshRate: 30 * time.Second}
+	return NewContainerProfileCache(cfg, client, k8s, metrics)
+}
+
+// newEntry makes a CachedContainerProfile for tests without going through
+// addContainer (which requires priming shared data + instance-id machinery).
+func newEntry(cp *v1beta1.ContainerProfile, containerName, podName, namespace, podUID string) *CachedContainerProfile {
+	return &CachedContainerProfile{
+		Profile:       cp,
+		State:         &objectcache.ProfileState{Name: cp.Name},
+		ContainerName: containerName,
+		PodName:       podName,
+		Namespace:     namespace,
+		PodUID:        podUID,
+		CPName:        cp.Name,
+		RV:            cp.ResourceVersion,
+		Shared:        true,
+	}
+}
+
+// TestReconcilerKeepsEntryWhenPodMissing — entry whose pod returns nil is
+// retained (not evicted). The k8s pod cache routinely lags container events
+// on busy nodes; evicting on "pod not found" churned every entry per tick.
+// Cleanup for terminated containers flows through deleteContainer.
+func TestReconcilerKeepsEntryWhenPodMissing(t *testing.T) {
+	cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}}
+	client := &countingProfileClient{cp: cp}
+	k8s := newControllableK8sCache() // GetPod returns nil for everything
+	metrics := newCountingMetrics()
+	c := newReconcilerCache(t, client, k8s, metrics)
+
+	id := "c1"
+	c.entries.Set(id, newEntry(cp, "nginx", "nginx-abc", "default", "uid-1"))
+
+	c.reconcileOnce(context.Background())
+
+	assert.NotNil(t, c.GetContainerProfile(id), "entry must be retained when pod is missing from cache")
+	assert.Equal(t, 0, metrics.eviction("pod_stopped"), "no eviction when pod is absent")
+}
+
+// TestReconcilerEvictsTerminatedContainer — entry whose container has
+// clearly transitioned to Terminated state IS evicted.
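+// The pod status deliberately carries a "containerd://" prefix so the
+// eviction path also exercises the runtime-prefix trimming inside
+// isContainerTerminated.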
+func TestReconcilerEvictsTerminatedContainer(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}} + client := &countingProfileClient{cp: cp} + k8s := newControllableK8sCache() + id := "terminated123" + k8s.setPod("default", "nginx-abc", &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-abc", Namespace: "default", UID: types.UID("uid-1")}, + Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{{ + Name: "nginx", + ContainerID: "containerd://" + id, + State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}}, + }}}, + }) + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + c.entries.Set(id, newEntry(cp, "nginx", "nginx-abc", "default", "uid-1")) + + c.reconcileOnce(context.Background()) + + assert.Nil(t, c.GetContainerProfile(id), "terminated container entry must be evicted") + assert.Equal(t, 1, metrics.eviction("pod_stopped"), "should report one eviction") +} + +// TestReconcilerKeepsWaitingContainer — entry whose container is in Waiting +// state (e.g. newly-started or pre-running init container with empty ID) +// must NOT be evicted. +func TestReconcilerKeepsWaitingContainer(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}} + client := &countingProfileClient{cp: cp} + k8s := newControllableK8sCache() + id := "waitingabc" + k8s.setPod("default", "nginx-abc", &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-abc", Namespace: "default", UID: types.UID("uid-1")}, + Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{{ + Name: "nginx", + ContainerID: "containerd://" + id, + State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerCreating"}}, + }}}, + }) + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + c.entries.Set(id, newEntry(cp, "nginx", "nginx-abc", "default", "uid-1")) + + c.reconcileOnce(context.Background()) + + assert.NotNil(t, c.GetContainerProfile(id), "waiting container entry must be retained") + assert.Equal(t, 0, metrics.eviction("pod_stopped"), "no eviction for Waiting state") +} + +// TestReconcilerKeepsRunningContainer — entry is kept when pod has a Running +// container status matching `id`. +func TestReconcilerKeepsRunningContainer(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}} + client := &countingProfileClient{cp: cp} + k8s := newControllableK8sCache() + id := "abc123" + k8s.setPod("default", "nginx-abc", &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-abc", Namespace: "default", UID: types.UID("uid-1")}, + Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{{ + Name: "nginx", + ContainerID: "containerd://" + id, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }}}, + }) + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + c.entries.Set(id, newEntry(cp, "nginx", "nginx-abc", "default", "uid-1")) + + c.reconcileOnce(context.Background()) + + assert.NotNil(t, c.GetContainerProfile(id), "running container entry must remain") + assert.Equal(t, 0, metrics.eviction("pod_stopped"), "should not evict a running entry") +} + +// TestIsContainerRunning_PreRunningInitWithEmptyContainerID — T2c from the +// plan risks. 
Pre-running init container publishes an empty ContainerID, so +// we fall back to (Name, PodUID) matching. +func TestIsContainerRunning_PreRunningInitWithEmptyContainerID(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{UID: types.UID("pod-uid-123")}, + Status: corev1.PodStatus{InitContainerStatuses: []corev1.ContainerStatus{{ + Name: "init-1", + ContainerID: "", // not published yet + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }}}, + } + entry := &CachedContainerProfile{ContainerName: "init-1", PodUID: "pod-uid-123"} + assert.True(t, isContainerRunning(pod, entry, "init-cid"), + "pre-running init container with empty ContainerID must match on (Name, PodUID)") +} + +// TestIsContainerRunning_ContainerIDMatchTakesPriority — the containerd:// etc +// prefix is stripped before comparing against the cache key. +func TestIsContainerRunning_ContainerIDMatchTakesPriority(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{UID: types.UID("pod-uid-123")}, + Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{{ + Name: "nginx", + ContainerID: "docker://abc", + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }}}, + } + entry := &CachedContainerProfile{ContainerName: "nginx", PodUID: "pod-uid-123"} + assert.True(t, isContainerRunning(pod, entry, "abc"), "docker:// prefix should be stripped") + assert.False(t, isContainerRunning(pod, entry, "zzz"), "id mismatch should return false") +} + +// TestIsContainerRunning_NotRunning — container exists but is Terminated. +func TestIsContainerRunning_NotRunning(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{UID: types.UID("pod-uid-123")}, + Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{{ + Name: "nginx", + ContainerID: "containerd://abc", + State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}}, + }}}, + } + entry := &CachedContainerProfile{ContainerName: "nginx", PodUID: "pod-uid-123"} + assert.False(t, isContainerRunning(pod, entry, "abc")) +} + +// TestReconcilerExitsOnCtxCancel — R2 from plan risks, delta #3. Cancelling +// ctx mid-Range stops iteration early. +func TestReconcilerExitsOnCtxCancel(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "1"}} + client := &countingProfileClient{cp: cp} + k8s := newControllableK8sCache() + ctx, cancel := context.WithCancel(context.Background()) + // Hook: cancel ctx on the 3rd GetPod call, return nil to drive the + // Range's continuation. After cancel(), ctx.Err() is set and subsequent + // Range iterations should short-circuit. + var visits atomic.Int64 + k8s.podHook = func(_, _ string) *corev1.Pod { + visits.Add(1) + if visits.Load() == 3 { + cancel() + } + return nil + } + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + + // Populate 100 entries. + for i := 0; i < 100; i++ { + id := "c-" + itoa(i) + c.entries.Set(id, newEntry(cp, "nginx", "pod-"+itoa(i), "default", "uid-"+itoa(i))) + } + + c.reconcileOnce(ctx) + + got := visits.Load() + assert.Less(t, got, int64(100), "ctx cancel should short-circuit the Range well before 100 iterations") + assert.GreaterOrEqual(t, got, int64(3), "should observe at least the iterations up to cancel") + // We do NOT assert a specific eviction count: entries visited before the + // cancel were appended to toEvict and DO get evicted. 
The invariant under + // test is only that iteration stopped early. +} + +// TestRefreshFastSkipWhenAllRVsMatch — delta #4. When CP RV and both overlay +// RVs match the cached values, refreshOneEntry returns without rebuilding. +func TestRefreshFastSkipWhenAllRVsMatch(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "100"}} + ap := &v1beta1.ApplicationProfile{ObjectMeta: metav1.ObjectMeta{Name: "override", Namespace: "default", ResourceVersion: "50"}} + nn := &v1beta1.NetworkNeighborhood{ObjectMeta: metav1.ObjectMeta{Name: "override", Namespace: "default", ResourceVersion: "60"}} + client := &countingProfileClient{cp: cp, ap: ap, nn: nn} + k8s := newControllableK8sCache() + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + + id := "c1" + entry := &CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "nginx", + PodName: "nginx-abc", + Namespace: "default", + PodUID: "uid-1", + CPName: "cp", + UserAPRef: &namespacedName{Namespace: "default", Name: "override"}, + UserNNRef: &namespacedName{Namespace: "default", Name: "override"}, + Shared: false, + RV: "100", + UserAPRV: "50", + UserNNRV: "60", + } + c.entries.Set(id, entry) + beforeProfilePtr := entry.Profile + + c.refreshAllEntries(context.Background()) + + // Fetched CP once + overlays once each to check RVs; then fast-skipped. + assert.Equal(t, int64(1), client.cpCalls.Load(), "CP should be fetched once") + assert.Equal(t, int64(1), client.apCalls.Load(), "AP should be fetched once for RV check") + assert.Equal(t, int64(1), client.nnCalls.Load(), "NN should be fetched once for RV check") + + stored, ok := c.entries.Load(id) + require.True(t, ok) + // Same pointer: the entry was NOT rebuilt. + assert.Same(t, entry, stored, "entry must not be replaced on fast-skip") + assert.Same(t, beforeProfilePtr, stored.Profile, "Profile pointer must not change on fast-skip") + // No legacy-load metric emitted on fast-skip. + assert.Equal(t, 0, metrics.legacyLoad(kindApplication, completenessFull)) + assert.Equal(t, 0, metrics.legacyLoad(kindNetwork, completenessFull)) +} + +// TestRefreshRebuildsOnUserAPChange — entry has stale UserAPRV; refresh sees +// a newer AP RV and rebuilds. 
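+// The rebuild is observed as pointer replacement plus the bumped RV (the
+// exact assertions used below):
+//
+//	stored, _ := c.entries.Load(id)
+//	assert.NotSame(t, entry, stored)
+//	assert.Equal(t, "51", stored.UserAPRV)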
+func TestRefreshRebuildsOnUserAPChange(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "100"}, + Spec: v1beta1.ContainerProfileSpec{Capabilities: []string{"SYS_PTRACE"}}, + } + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "override", Namespace: "default", ResourceVersion: "51"}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Capabilities: []string{"NET_BIND_SERVICE"}, + }}, + }, + } + client := &countingProfileClient{cp: cp, ap: ap} + k8s := newControllableK8sCache() + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + + id := "c1" + entry := &CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "nginx", + PodName: "nginx-abc", + Namespace: "default", + PodUID: "uid-1", + CPName: "cp", + UserAPRef: &namespacedName{Namespace: "default", Name: "override"}, + Shared: false, + RV: "100", + UserAPRV: "50", // stale: storage now returns 51 + } + c.entries.Set(id, entry) + + c.refreshAllEntries(context.Background()) + + stored, ok := c.entries.Load(id) + require.True(t, ok) + assert.NotSame(t, entry, stored, "entry must be replaced when user-AP RV changes") + assert.Equal(t, "51", stored.UserAPRV, "new UserAPRV must be recorded") + assert.ElementsMatch(t, []string{"SYS_PTRACE", "NET_BIND_SERVICE"}, stored.Profile.Spec.Capabilities, + "rebuilt projection must include merged overlay capabilities") +} + +// TestRefreshRebuildsOnCPChange — CP RV changed; entry rebuilds with fresh CP. +func TestRefreshRebuildsOnCPChange(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "101"}, + Spec: v1beta1.ContainerProfileSpec{Capabilities: []string{"SYS_ADMIN"}}, + } + client := &countingProfileClient{cp: cp} + k8s := newControllableK8sCache() + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + + oldCP := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "100"}, + } + id := "c1" + entry := newEntry(oldCP, "nginx", "nginx-abc", "default", "uid-1") + c.entries.Set(id, entry) + + c.refreshAllEntries(context.Background()) + + stored, ok := c.entries.Load(id) + require.True(t, ok) + assert.Equal(t, "101", stored.RV, "RV must update to the fresh CP's version") + assert.Same(t, cp, stored.Profile, "shared fast-path: fresh CP pointer stored directly") +} + +// TestT8_EndToEndRefreshUpdatesProjection — delta #5. Mutate the user-AP in +// the stubbed storage so its RV + execs change; assert the cached projection +// reflects the new execs AND that the legacy-load metric was re-emitted. 
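+// Condensed flow of the body below: seed a projection from (CP rv=100,
+// user-AP rv=50) → flip the stub to AP rv=51 with a new exec →
+// refreshAllEntries → assert /bin/new is projected, /bin/old is gone, and
+// the full-load metric fired again.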
+func TestT8_EndToEndRefreshUpdatesProjection(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "100"}, + Spec: v1beta1.ContainerProfileSpec{ + Execs: []v1beta1.ExecCalls{{Path: "/bin/base", Args: []string{"a"}}}, + }, + } + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "override", Namespace: "default", ResourceVersion: "50"}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Execs: []v1beta1.ExecCalls{{Path: "/bin/old", Args: []string{"x"}}}, + }}, + }, + } + client := &countingProfileClient{cp: cp, ap: ap} + k8s := newControllableK8sCache() + metrics := newCountingMetrics() + c := newReconcilerCache(t, client, k8s, metrics) + + // Initial entry built from base CP + overlay: use addContainer's private + // buildEntry logic via projectUserProfiles directly, then seed. + initialProjected, _ := projectUserProfiles(cp, ap, nil, nil, "nginx") + id := "c1" + entry := &CachedContainerProfile{ + Profile: initialProjected, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "nginx", + PodName: "nginx-abc", + Namespace: "default", + PodUID: "uid-1", + CPName: "cp", + UserAPRef: &namespacedName{Namespace: "default", Name: "override"}, + Shared: false, + RV: "100", + UserAPRV: "50", + } + c.entries.Set(id, entry) + + // Mutate storage: new AP RV + new execs. + client.ap = &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "override", Namespace: "default", ResourceVersion: "51"}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Execs: []v1beta1.ExecCalls{{Path: "/bin/new", Args: []string{"y"}}}, + }}, + }, + } + + c.refreshAllEntries(context.Background()) + + stored, ok := c.entries.Load(id) + require.True(t, ok) + assert.Equal(t, "51", stored.UserAPRV, "refresh must record the new user-AP RV") + + // The projection must include the new exec (merged on top of the base CP's exec). + var paths []string + for _, e := range stored.Profile.Spec.Execs { + paths = append(paths, e.Path) + } + assert.Contains(t, paths, "/bin/base", "base CP exec must be preserved") + assert.Contains(t, paths, "/bin/new", "new user-AP exec must be projected into the cache") + assert.NotContains(t, paths, "/bin/old", "stale user-AP exec must NOT be in the projection") + + assert.GreaterOrEqual(t, metrics.legacyLoad(kindApplication, completenessFull), 1, + "refresh with user-AP overlay must emit full-load metric") +} + +// TestRefreshNoEntryWhenCPGetFails — storage error on CP keeps the existing +// entry unchanged (no deletion). +func TestRefreshNoEntryWhenCPGetFails(t *testing.T) { + cp := &v1beta1.ContainerProfile{ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "100"}} + failing := &failingProfileClient{cpErr: assertErr{}} + k8s := newControllableK8sCache() + metrics := newCountingMetrics() + c := newReconcilerCache(t, failing, k8s, metrics) + + id := "c1" + entry := newEntry(cp, "nginx", "nginx-abc", "default", "uid-1") + c.entries.Set(id, entry) + + c.refreshAllEntries(context.Background()) + + stored, ok := c.entries.Load(id) + require.True(t, ok, "CP fetch error must not delete the entry") + assert.Same(t, entry, stored, "entry pointer must not change when CP fetch fails") +} + +// TestRefreshPreservesEntryOnTransientOverlayError — overlay fetch errors must +// not strip overlay data from the cache. 
If a user-managed or user-defined +// AP/NN GET returns an error while the entry already has a non-empty cached RV +// for that overlay, refreshOneEntry must keep the old entry unchanged (same +// pointer) rather than rebuilding without the overlay and clearing its RV. +// Regression test for the refreshRPC timeout → silent nil → spurious rebuild path. +func TestRefreshPreservesEntryOnTransientOverlayError(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp", Namespace: "default", ResourceVersion: "100"}, + Spec: v1beta1.ContainerProfileSpec{Capabilities: []string{"SYS_PTRACE"}}, + } + + type overlayFields struct { + workloadName string + userManagedAPRV string + userManagedNNRV string + userAPRef *namespacedName + userAPRV string + userNNRef *namespacedName + userNNRV string + } + tests := []struct { + name string + apErr bool + nnErr bool + overlay overlayFields + }{ + { + name: "user-managed AP timeout preserves entry", + apErr: true, + overlay: overlayFields{ + workloadName: "nginx", + userManagedAPRV: "9", + }, + }, + { + name: "user-managed NN timeout preserves entry", + nnErr: true, + overlay: overlayFields{ + workloadName: "nginx", + userManagedNNRV: "7", + }, + }, + { + name: "user-defined AP timeout preserves entry", + apErr: true, + overlay: overlayFields{ + userAPRef: &namespacedName{Namespace: "default", Name: "override"}, + userAPRV: "50", + }, + }, + { + name: "user-defined NN timeout preserves entry", + nnErr: true, + overlay: overlayFields{ + userNNRef: &namespacedName{Namespace: "default", Name: "override"}, + userNNRV: "60", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + apErr := error(nil) + if tc.apErr { + apErr = assertErr{} + } + nnErr := error(nil) + if tc.nnErr { + nnErr = assertErr{} + } + client := &overlayErrorClient{cp: cp, apErr: apErr, nnErr: nnErr} + k8s := newControllableK8sCache() + c := newReconcilerCache(t, client, k8s, nil) + + id := "c1" + entry := &CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "nginx", + PodName: "nginx-abc", + Namespace: "default", + PodUID: "uid-1", + CPName: "cp", + RV: "100", + WorkloadName: tc.overlay.workloadName, + UserManagedAPRV: tc.overlay.userManagedAPRV, + UserManagedNNRV: tc.overlay.userManagedNNRV, + UserAPRef: tc.overlay.userAPRef, + UserAPRV: tc.overlay.userAPRV, + UserNNRef: tc.overlay.userNNRef, + UserNNRV: tc.overlay.userNNRV, + Shared: false, + } + c.entries.Set(id, entry) + + c.refreshAllEntries(context.Background()) + + stored, ok := c.entries.Load(id) + require.True(t, ok, "overlay error must not delete the entry") + assert.Same(t, entry, stored, "entry pointer must not change when overlay fetch fails transiently") + // Overlay RVs must be unchanged (not cleared to ""). + assert.Equal(t, tc.overlay.userManagedAPRV, stored.UserManagedAPRV) + assert.Equal(t, tc.overlay.userManagedNNRV, stored.UserManagedNNRV) + assert.Equal(t, tc.overlay.userAPRV, stored.UserAPRV) + assert.Equal(t, tc.overlay.userNNRV, stored.UserNNRV) + }) + } +} + +// overlayErrorClient returns a valid CP but fails AP/NN calls with the +// configured errors. Used to test overlay error-preservation logic. 
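+//
+// Construction sketch (as used in the table-driven test above):
+//
+//	client := &overlayErrorClient{cp: cp, apErr: assertErr{}} // CP GETs succeed, AP GETs fail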
+type overlayErrorClient struct {
+	cp    *v1beta1.ContainerProfile
+	apErr error
+	nnErr error
+}
+
+var _ storage.ProfileClient = (*overlayErrorClient)(nil)
+
+func (o *overlayErrorClient) GetContainerProfile(_ context.Context, _, _ string) (*v1beta1.ContainerProfile, error) {
+	return o.cp, nil
+}
+func (o *overlayErrorClient) GetApplicationProfile(_ context.Context, _, _ string) (*v1beta1.ApplicationProfile, error) {
+	return nil, o.apErr
+}
+func (o *overlayErrorClient) GetNetworkNeighborhood(_ context.Context, _, _ string) (*v1beta1.NetworkNeighborhood, error) {
+	return nil, o.nnErr
+}
+func (o *overlayErrorClient) ListApplicationProfiles(_ context.Context, _ string, _ int64, _ string) (*v1beta1.ApplicationProfileList, error) {
+	return &v1beta1.ApplicationProfileList{}, nil
+}
+func (o *overlayErrorClient) ListNetworkNeighborhoods(_ context.Context, _ string, _ int64, _ string) (*v1beta1.NetworkNeighborhoodList, error) {
+	return &v1beta1.NetworkNeighborhoodList{}, nil
+}
+
+// --- helpers ---
+
+// itoa is a local int-to-string so tests don't pull in strconv just for one
+// call site.
+func itoa(i int) string {
+	if i == 0 {
+		return "0"
+	}
+	neg := i < 0
+	if neg {
+		i = -i
+	}
+	buf := [20]byte{}
+	pos := len(buf)
+	for i > 0 {
+		pos--
+		buf[pos] = byte('0' + i%10)
+		i /= 10
+	}
+	if neg {
+		pos--
+		buf[pos] = '-'
+	}
+	return string(buf[pos:])
+}
+
+// assertErr is a trivial error sentinel used in a few negative tests.
+type assertErr struct{}
+
+func (assertErr) Error() string { return "synthetic error" }
+
+// failingProfileClient always returns cpErr from GetContainerProfile.
+type failingProfileClient struct {
+	cpErr error
+}
+
+var _ storage.ProfileClient = (*failingProfileClient)(nil)
+
+func (f *failingProfileClient) GetContainerProfile(_ context.Context, _, _ string) (*v1beta1.ContainerProfile, error) {
+	return nil, f.cpErr
+}
+func (f *failingProfileClient) GetApplicationProfile(_ context.Context, _, _ string) (*v1beta1.ApplicationProfile, error) {
+	return nil, nil
+}
+func (f *failingProfileClient) GetNetworkNeighborhood(_ context.Context, _, _ string) (*v1beta1.NetworkNeighborhood, error) {
+	return nil, nil
+}
+func (f *failingProfileClient) ListApplicationProfiles(_ context.Context, _ string, _ int64, _ string) (*v1beta1.ApplicationProfileList, error) {
+	return &v1beta1.ApplicationProfileList{}, nil
+}
+func (f *failingProfileClient) ListNetworkNeighborhoods(_ context.Context, _ string, _ int64, _ string) (*v1beta1.NetworkNeighborhoodList, error) {
+	return &v1beta1.NetworkNeighborhoodList{}, nil
+}
+
+// Keep helpersv1 referenced at file scope so the import stays valid even if
+// the tests below that dereference its constants are edited away.
+var _ = helpersv1.CompletionMetadataKey
+
+// TestRefreshHonorsContextCancellationMidRPC verifies that a context
+// cancellation while refreshOneEntry is blocked in GetContainerProfile
+// causes the refresh to return within the rpcBudget, not hang for the
+// full reconciler timeout.
+func TestRefreshHonorsContextCancellationMidRPC(t *testing.T) {
+	// Buffered so the signal is stored even if the test's <-blocked read is
+	// slightly delayed — prevents a lossy non-blocking send from dropping it.
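+	// Handshake with blockingProfileClient (defined below): the client sends
+	// on `blocked` when the RPC starts, then parks on `unblock` or ctx.Done().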
+ blocked := make(chan struct{}, 1) + unblock := make(chan struct{}) + blocking := &blockingProfileClient{ + blocked: blocked, + unblock: unblock, + } + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp-1", Namespace: "default", ResourceVersion: "42"}, + } + // Seed an existing entry so refreshOneEntry attempts a CP re-fetch. + k8s := newControllableK8sCache() + cfg := config.Config{ + ProfilesCacheRefreshRate: 30 * time.Second, + StorageRPCBudget: 100 * time.Millisecond, + } + cache := NewContainerProfileCache(cfg, blocking, k8s, nil) + cache.SeedEntryForTest("id1", &CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "c1", + PodName: "pod1", + Namespace: "default", + PodUID: "uid1", + CPName: "cp-1", + RV: "old-rv", // differs from cp.RV so fast-skip is skipped + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan struct{}) + go func() { + defer close(done) + cache.refreshAllEntries(ctx) + }() + + // Wait for the RPC to block, then cancel the context. + <-blocked + cancel() + + // The refresh must return within 2s of cancellation (well above the + // 100ms rpcBudget; the generous budget accommodates loaded CI runners). + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatal("refreshAllEntries did not return after context cancellation") + } + close(unblock) +} + +// blockingProfileClient blocks GetContainerProfile until unblocked. +type blockingProfileClient struct { + blocked chan struct{} + unblock chan struct{} +} + +var _ storage.ProfileClient = (*blockingProfileClient)(nil) + +func (b *blockingProfileClient) GetContainerProfile(ctx context.Context, _, _ string) (*v1beta1.ContainerProfile, error) { + b.blocked <- struct{}{} // buffered(1): stored if reader hasn't arrived yet + select { + case <-b.unblock: + return nil, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} +func (b *blockingProfileClient) GetApplicationProfile(_ context.Context, _, _ string) (*v1beta1.ApplicationProfile, error) { + return nil, nil +} +func (b *blockingProfileClient) GetNetworkNeighborhood(_ context.Context, _, _ string) (*v1beta1.NetworkNeighborhood, error) { + return nil, nil +} +func (b *blockingProfileClient) ListApplicationProfiles(_ context.Context, _ string, _ int64, _ string) (*v1beta1.ApplicationProfileList, error) { + return &v1beta1.ApplicationProfileList{}, nil +} +func (b *blockingProfileClient) ListNetworkNeighborhoods(_ context.Context, _ string, _ int64, _ string) (*v1beta1.NetworkNeighborhoodList, error) { + return &v1beta1.NetworkNeighborhoodList{}, nil +} + +// TestRetryPendingEntries_CPCreatedAfterAdd exercises the bug that slipped +// through PR #788 component tests: at EventTypeAddContainer the CP may not +// yet be in storage (it is created asynchronously by containerprofilemanager +// after observing the container). The new cache must retry per reconciler +// tick; otherwise the container is permanently absent from the cache and +// rule evaluation short-circuits as "no profile". +func TestRetryPendingEntries_CPCreatedAfterAdd(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-pending", + Namespace: "default", + ResourceVersion: "1", + }, + } + + // Start with storage returning 404 for the initial GET. 
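+	// fakeProfileClient lives elsewhere in this test package; this test
+	// assumes it keeps returning cpErr until the field is cleared and counts
+	// CP GETs in getCPCalls.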
+ client := &fakeProfileClient{cp: nil, cpErr: assertErrNotFound("cp-pending")} + c, k8s := newTestCache(t, client) + + id := "container-pending" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + + // addContainer: sees 404 -> pending bookkeeping, not an entry. + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + assert.Nil(t, c.GetContainerProfile(id), "no entry before CP exists in storage") + assert.Equal(t, 1, c.pending.Len(), "container recorded as pending") + + // Storage creates the CP asynchronously (60s after start in real runs). + client.cp = cp + client.cpErr = nil + + // Simulate one reconciler tick. retryPendingEntries iterates pending and + // promotes on successful GET. + c.retryPendingEntries(context.Background()) + + assert.NotNil(t, c.GetContainerProfile(id), "entry promoted after CP appears") + assert.Equal(t, 0, c.pending.Len(), "pending drained on successful promotion") + // Exactly two GETs: one from addContainer (404), one from retry (200). + assert.Equal(t, 2, client.getCPCalls, "retry should only re-GET once per tick") +} + +// TestPendingEntriesAreNotGCedBeforeRetry verifies we no longer drop pending +// entries from reconcileOnce. The component-tests regression (CI run +// 24781030436 on ce329196) showed the k8s pod cache and container statuses +// lag the containerwatcher Add event by tens of seconds on busy nodes, so a +// pod-state-driven GC dropped every pending entry before retries had a +// chance to succeed. Cleanup now flows exclusively through deleteContainer. +func TestPendingEntriesAreNotGCedBeforeRetry(t *testing.T) { + client := &fakeProfileClient{cp: nil, cpErr: assertErrNotFound("cp-missing")} + c, k8s := newTestCache(t, client) + _ = k8s + + id := "container-pending" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + require.Equal(t, 1, c.pending.Len()) + + // Several reconciler passes with nil-returning GetPod must leave the + // pending entry in place so retry has a chance to succeed once profile + // data shows up in storage. + for range 3 { + c.reconcileOnce(context.Background()) + } + assert.Equal(t, 1, c.pending.Len(), "pending entry retained across reconcile ticks") + + // Only deleteContainer clears pending. + c.deleteContainer(id) + assert.Equal(t, 0, c.pending.Len(), "deleteContainer clears pending") +} + +// assertErrNotFound is a minimal non-nil error for GET failures in tests. +// Using a sentinel keeps the test readable without pulling in apierrors. +func assertErrNotFound(name string) error { + return &testNotFoundErr{name: name} +} + +type testNotFoundErr struct{ name string } + +func (e *testNotFoundErr) Error() string { return "container profile " + e.name + ": not found" } + +// TestPartialCP_NonPreRunning_StaysPending verifies that a CP marked partial +// is NOT cached when the container is not PreRunning (i.e. started after the +// agent was up). Legacy caches explicitly deleted partials on restart; we +// mirror that by staying pending until the CP becomes Full. 
+func TestPartialCP_NonPreRunning_StaysPending(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-partial", + Namespace: "default", + ResourceVersion: "1", + Annotations: map[string]string{ + helpersv1.CompletionMetadataKey: helpersv1.Partial, + helpersv1.StatusMetadataKey: helpersv1.Completed, + }, + }, + } + client := &fakeProfileClient{cp: cp} + c, k8s := newTestCache(t, client) + + id := "container-partial-restart" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + // sharedData.PreRunningContainer is false by default → this simulates a + // fresh container start observed by a running agent. + + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + assert.Nil(t, c.GetContainerProfile(id), "partial CP must not populate cache on fresh container") + assert.Equal(t, 1, c.pending.Len(), "partial-on-restart stays pending") + + // Simulate the CP becoming Full (new agent-side aggregation round). + cp.Annotations[helpersv1.CompletionMetadataKey] = helpersv1.Full + cp.ResourceVersion = "2" + c.retryPendingEntries(context.Background()) + + assert.NotNil(t, c.GetContainerProfile(id), "Full CP promotes pending entry") + assert.Equal(t, 0, c.pending.Len(), "pending drained on Full") +} + +// TestPartialCP_PreRunning_Accepted verifies the inverse: when the agent +// restarts (all containers become PreRunning), we accept even a partial CP so +// rule evaluation can still alert on out-of-profile behavior (Test_19 +// semantics). +func TestPartialCP_PreRunning_Accepted(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-partial-prerunning", + Namespace: "default", + ResourceVersion: "1", + Annotations: map[string]string{ + helpersv1.CompletionMetadataKey: helpersv1.Partial, + helpersv1.StatusMetadataKey: helpersv1.Completed, + }, + }, + } + client := &fakeProfileClient{cp: cp} + c, k8s := newTestCache(t, client) + + id := "container-partial-prerunning" + // Mark PreRunning so the partial is accepted. + primePreRunningSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + assert.NotNil(t, c.GetContainerProfile(id), "partial CP accepted for PreRunning container") + assert.Equal(t, 0, c.pending.Len(), "not pending when accepted") +} + +// TestOverlayLabel_TransientFetchFailure_RefsRetained verifies that when +// UserDefinedProfileMetadataKey is set but the user-AP/NN fetch fails, the +// entry still records UserAPRef / UserNNRef so the refresh loop can re-fetch +// on subsequent ticks instead of permanently dropping the overlay. +func TestOverlayLabel_TransientFetchFailure_RefsRetained(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp-with-overlay", Namespace: "default", ResourceVersion: "1"}, + } + // Overlay fetch returns an error; the base CP is fine. + client := &fakeProfileClient{cp: cp, apErr: assertErrNotFound("override"), nnErr: assertErrNotFound("override")} + c, k8s := newTestCache(t, client) + + id := "container-transient-overlay" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + + // Build the container with the overlay label set. 
+ ct := eventContainer(id) + ct.K8s.PodLabels = map[string]string{helpersv1.UserDefinedProfileMetadataKey: "override"} + + require.NoError(t, c.addContainer(ct, context.Background())) + + entry, ok := c.entries.Load(id) + require.True(t, ok, "entry stored with base CP even if overlay fetch failed") + require.NotNil(t, entry.UserAPRef, "UserAPRef retained for refresh retry") + require.NotNil(t, entry.UserNNRef, "UserNNRef retained for refresh retry") + assert.Equal(t, "override", entry.UserAPRef.Name) + assert.Equal(t, "override", entry.UserNNRef.Name) +} + +// TestRefreshDoesNotResurrectDeletedEntry verifies the Phase-4 reviewer race: +// refreshAllEntries snapshots entries without a lock; if deleteContainer +// removes the entry before refreshOneEntry takes the lock, the refresh must +// NOT re-insert it. +func TestRefreshDoesNotResurrectDeletedEntry(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "cp-resurrect", Namespace: "default", ResourceVersion: "1"}, + } + client := &fakeProfileClient{cp: cp} + c, k8s := newTestCache(t, client) + + id := "container-resurrect" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + require.NotNil(t, c.GetContainerProfile(id)) + + // Simulate the race: snapshot the entry, delete, then call refreshOneEntry. + entry, ok := c.entries.Load(id) + require.True(t, ok) + c.deleteContainer(id) + require.Nil(t, c.GetContainerProfile(id), "entry gone after delete") + + // Refresh for the deleted id must bail instead of resurrecting. + c.containerLocks.WithLock(id, func() { + c.refreshOneEntry(context.Background(), id, entry) + }) + + assert.Nil(t, c.GetContainerProfile(id), "refresh must not resurrect deleted entry") +} + +// TestUserDefinedProfileOnly_NoBaseCP verifies that a container with only a +// user-defined AP/NN (no base CP yet) still gets a cache entry, mirroring the +// legacy behavior where user-defined profiles were stored directly. +func TestUserDefinedProfileOnly_NoBaseCP(t *testing.T) { + userAP := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "user-override", Namespace: "default", ResourceVersion: "10"}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + {Name: "nginx", Capabilities: []string{"CAP_NET_ADMIN"}}, + }, + }, + } + // Base CP fetch fails (404); only the overlay exists. + client := &fakeProfileClient{cp: nil, cpErr: assertErrNotFound("no-base"), ap: userAP} + c, k8s := newTestCache(t, client) + + id := "container-user-only" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + ct := eventContainer(id) + ct.K8s.PodLabels = map[string]string{helpersv1.UserDefinedProfileMetadataKey: "user-override"} + + require.NoError(t, c.addContainer(ct, context.Background())) + + cached := c.GetContainerProfile(id) + require.NotNil(t, cached, "entry populated from user-AP even without base CP") + // The synthesized CP + projection should carry the user AP's capabilities. + assert.Contains(t, cached.Spec.Capabilities, "CAP_NET_ADMIN") +} + +// primePreRunningSharedData is a variant of primeSharedData that sets the +// PreRunningContainer flag. 
+func primePreRunningSharedData(t *testing.T, k8s *objectcache.K8sObjectCacheMock, containerID, wlid string) {
+ t.Helper()
+ primeSharedData(t, k8s, containerID, wlid)
+ existing := k8s.GetSharedContainerData(containerID)
+ require.NotNil(t, existing)
+ existing.PreRunningContainer = true
+ k8s.SetSharedContainerData(containerID, existing)
+}
+
+// TestRefreshUpdatesCPStatus exercises the refresh path: at addContainer
+// time the consolidated CP may still be in Status="ready"; the cache must
+// re-fetch it on each tick so a later "ready" -> "completed" transition
+// propagates to the cached ProfileState, which in turn flips fail_on_profile
+// from false to true (Test_17 / Test_19 semantics).
+func TestRefreshUpdatesCPStatus(t *testing.T) {
+ cp := &v1beta1.ContainerProfile{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cp-ready",
+ Namespace: "default",
+ ResourceVersion: "1",
+ Annotations: map[string]string{
+ helpersv1.CompletionMetadataKey: helpersv1.Full,
+ helpersv1.StatusMetadataKey: helpersv1.Learning, // "ready"
+ },
+ },
+ }
+ client := &fakeProfileClient{cp: cp}
+ c, k8s := newTestCache(t, client)
+
+ id := "container-cp-ready"
+ primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx")
+ require.NoError(t, c.addContainer(eventContainer(id), context.Background()))
+
+ entry, ok := c.entries.Load(id)
+ require.True(t, ok, "entry populated from CP")
+ require.NotNil(t, entry.State)
+ assert.Equal(t, helpersv1.Learning, entry.State.Status,
+ "Status reflects the CP at add time (ready / learning)")
+
+ // Storage transitions CP to Status=completed.
+ client.cp = &v1beta1.ContainerProfile{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cp-ready",
+ Namespace: "default",
+ ResourceVersion: "2",
+ Annotations: map[string]string{
+ helpersv1.CompletionMetadataKey: helpersv1.Full,
+ helpersv1.StatusMetadataKey: helpersv1.Completed,
+ },
+ },
+ }
+
+ c.refreshAllEntries(context.Background())
+
+ stored, ok := c.entries.Load(id)
+ require.True(t, ok)
+ require.NotNil(t, stored.State)
+ assert.Equal(t, helpersv1.Completed, stored.State.Status,
+ "refresh propagates CP Status=completed into ProfileState")
+ assert.Equal(t, "2", stored.RV, "refresh records the new CP RV")
+}
+
+// TestUserManagedProfileMerged exercises the user-managed merge path
+// (Test_12_MergingProfilesTest / Test_13_MergingNetworkNeighborhoodTest):
+// a user-managed AP published at "ug-<name>" is merged on top of
+// the base CP. Events NOT in the union of base + user-managed should
+// produce alerts; events present in either source should not.
+func TestUserManagedProfileMerged(t *testing.T) {
+ // Base CP has exec "/bin/X"; user-managed AP adds "/bin/Y".
+ cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-base", + Namespace: "default", + ResourceVersion: "1", + Annotations: map[string]string{ + helpersv1.CompletionMetadataKey: helpersv1.Full, + helpersv1.StatusMetadataKey: helpersv1.Completed, + }, + }, + Spec: v1beta1.ContainerProfileSpec{ + Execs: []v1beta1.ExecCalls{{Path: "/bin/X"}}, + }, + } + userManagedAP := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ug-nginx", + Namespace: "default", + ResourceVersion: "9", + Annotations: map[string]string{ + helpersv1.CompletionMetadataKey: helpersv1.Full, + helpersv1.StatusMetadataKey: helpersv1.Completed, + }, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Execs: []v1beta1.ExecCalls{{Path: "/bin/Y"}}, + }}, + }, + } + client := &fakeProfileClient{ + cp: cp, + userManagedAP: userManagedAP, + } + c, k8s := newTestCache(t, client) + + id := "container-user-managed" + primeSharedData(t, k8s, id, "wlid://cluster-a/namespace-default/deployment-nginx") + require.NoError(t, c.addContainer(eventContainer(id), context.Background())) + + cached := c.GetContainerProfile(id) + require.NotNil(t, cached, "entry populated") + var paths []string + for _, e := range cached.Spec.Execs { + paths = append(paths, e.Path) + } + assert.Contains(t, paths, "/bin/X", "base workload AP exec must be present") + assert.Contains(t, paths, "/bin/Y", "user-managed (ug-) AP exec must be merged in") + + // Verify the RV was captured so a later user-managed update would trigger + // a refresh rebuild. + entry, ok := c.entries.Load(id) + require.True(t, ok) + assert.Equal(t, "9", entry.UserManagedAPRV, "UserManagedAPRV recorded at add time") +} diff --git a/pkg/objectcache/containerprofilecache/shared_pointer_race_test.go b/pkg/objectcache/containerprofilecache/shared_pointer_race_test.go new file mode 100644 index 0000000000..5fe4dffa60 --- /dev/null +++ b/pkg/objectcache/containerprofilecache/shared_pointer_race_test.go @@ -0,0 +1,210 @@ +package containerprofilecache_test + +// TestSharedPointerReadersDoNotCorruptCache — PR 3 Part A. +// +// Validates that concurrent readers and a concurrent reconciler-refresh do not +// produce data races on the shared *v1beta1.ContainerProfile pointer returned +// by GetContainerProfile. +// +// Design: +// - Seed a cache entry backed by cpV1 (RV="1"). Storage serves cpV2 (RV="2") +// so every RefreshAllEntriesForTest call triggers a rebuild (atomic pointer +// swap on the entries map, no in-place mutation of the old slice). +// - 50 reader goroutines call GetContainerProfile in a tight loop and iterate +// the returned Spec.Execs, Spec.Opens, Spec.Capabilities slices READ-ONLY. +// - 1 writer goroutine alternates: RefreshAllEntriesForTest (triggers rebuild) +// then SeedEntryForTest (resets RV to "1" so the next refresh rebuilds again). +// - Run for 500ms under -race. The race detector will surface any unprotected +// concurrent read/write pair. If none fires, the shared-pointer fast-path is +// demonstrably safe for read-only consumers. +// +// NOTE: deliberately-mutating consumer (anti-pattern) is NOT tested here because +// it is expected to trigger the race detector and would make CI non-deterministic. +// That pattern is covered by the code-review gate enforced by ReadOnlyCP (Part B). 
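+//
+// For reference, the consumer contract this test enforces, as a hedged sketch
+// (GetContainerProfile is the real API; the snippet itself is illustrative):
+//
+//	cp := cache.GetContainerProfile(id) // shared pointer: treat as immutable
+//	for _, e := range cp.Spec.Execs {   // OK: read-only traversal
+//		_ = e.Path
+//	}
+//	// cp.Spec.Execs = append(...)      // NOT OK: races with the refresh loop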
+ +import ( + "context" + "runtime" + "sync" + "testing" + "time" + + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + cpc "github.com/kubescape/node-agent/pkg/objectcache/containerprofilecache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSharedPointerReadersDoNotCorruptCache(t *testing.T) { + const ( + id = "race-container" + numReaders = 50 + testDuration = 500 * time.Millisecond + rpcBudgetMs = 100 * time.Millisecond + ) + + // cpV1 — what is seeded initially (RV="1") + cpV1 := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-race", + Namespace: "default", + ResourceVersion: "1", + }, + Spec: v1beta1.ContainerProfileSpec{ + Execs: []v1beta1.ExecCalls{{Path: "/bin/sh", Args: []string{"a", "b", "c"}}}, + Opens: []v1beta1.OpenCalls{{Path: "/etc/passwd", Flags: []string{"O_RDONLY"}}}, + Capabilities: []string{"CAP_NET_ADMIN", "CAP_SYS_PTRACE"}, + }, + } + + // cpV2 — what storage returns after a refresh (RV="2"); the reconciler will + // create a brand-new entry pointing to cpV2 (never mutating cpV1). + cpV2 := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-race", + Namespace: "default", + ResourceVersion: "2", + }, + Spec: v1beta1.ContainerProfileSpec{ + Execs: []v1beta1.ExecCalls{{Path: "/bin/bash", Args: []string{"x", "y"}}}, + Opens: []v1beta1.OpenCalls{{Path: "/etc/shadow", Flags: []string{"O_WRONLY"}}}, + Capabilities: []string{"CAP_CHOWN"}, + }, + } + + store := newFakeStorage(cpV2) // storage always returns cpV2 + k8s := newFakeK8sCache() + + cfg := config.Config{ + ProfilesCacheRefreshRate: 30 * time.Second, + StorageRPCBudget: rpcBudgetMs, + } + cache := cpc.NewContainerProfileCache(cfg, store, k8s, nil) + + seedV1 := func() { + cache.SeedEntryForTest(id, &cpc.CachedContainerProfile{ + Profile: cpV1, + State: &objectcache.ProfileState{Name: "cp-race"}, + ContainerName: "container", + PodName: "pod-race", + Namespace: "default", + PodUID: "uid-race", + CPName: "cp-race", + RV: "1", // stale — guarantees refresh rebuilds on each tick + Shared: true, + }) + } + + // Pre-warm SafeMap so concurrent Load never hits the nil-check-before-lock + // initialization race present in goradd/maps v1.3.0 (pre-existing upstream bug). + seedV1() + + require.NotNil(t, cache.GetContainerProfile(id), "pre-condition: entry present before test") + + ctx, cancel := context.WithTimeout(context.Background(), testDuration) + defer cancel() + + var wg sync.WaitGroup + + // 50 reader goroutines — read-only traversal of the returned profile. + wg.Add(numReaders) + for i := 0; i < numReaders; i++ { + go func() { + defer wg.Done() + for ctx.Err() == nil { + cp := cache.GetContainerProfile(id) + if cp == nil { + runtime.Gosched() + continue + } + // Read-only: iterate slices without writing. + for _, e := range cp.Spec.Execs { + _ = e.Path + _ = len(e.Args) + } + for _, o := range cp.Spec.Opens { + _ = o.Path + _ = len(o.Flags) + } + _ = len(cp.Spec.Capabilities) + _ = cp.ResourceVersion + runtime.Gosched() + } + }() + } + + // 1 writer goroutine: alternate refresh (rebuilds entry → cpV2) and reset + // (reseeds entry → cpV1) to keep the refresh loop active across the window. 
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for ctx.Err() == nil {
+ cache.RefreshAllEntriesForTest(ctx)
+ // Reset to cpV1 so the next refresh sees a stale RV and rebuilds again.
+ seedV1()
+ }
+ }()
+
+ wg.Wait()
+
+ // If the race detector fired, the test is already marked as failed. We add
+ // an explicit liveness assertion to guard against a scenario where the entry
+ // gets permanently nil-ed out by a refresh bug.
+ finalCP := cache.GetContainerProfile(id)
+ // Entry may legitimately be nil if the last operation was a refresh that
+ // returned cpV2 and a subsequent seedV1 lost the race; what we must NOT see
+ // is a panic above or a non-nil entry with a nil Profile.
+ if finalCP != nil {
+ assert.NotEmpty(t, finalCP.ResourceVersion, "final cached entry must have a non-empty RV")
+ }
+}
+
+// TestSharedPointerFastPathPreservesPointerIdentity verifies that when the
+// reconciler rebuilds an entry from a storage pointer with no overlay, the
+// new entry's Profile points directly to the storage object (Shared=true,
+// no DeepCopy). This is the memory property that Part A is guarding — if it
+// regresses to DeepCopy-on-every-refresh the T3 memory budget is blown.
+func TestSharedPointerFastPathPreservesPointerIdentity(t *testing.T) {
+ cpInStorage := &v1beta1.ContainerProfile{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cp-identity",
+ Namespace: "default",
+ ResourceVersion: "99",
+ },
+ Spec: v1beta1.ContainerProfileSpec{
+ Capabilities: []string{"CAP_NET_RAW"},
+ },
+ }
+
+ store := newFakeStorage(cpInStorage)
+ k8s := newFakeK8sCache()
+ cfg := config.Config{
+ ProfilesCacheRefreshRate: 30 * time.Second,
+ StorageRPCBudget: 100 * time.Millisecond,
+ }
+ cache := cpc.NewContainerProfileCache(cfg, store, k8s, nil)
+
+ // Seed with a stale RV so the refresh rebuilds.
+ cache.SeedEntryForTest("id-identity", &cpc.CachedContainerProfile{
+ Profile: cpInStorage,
+ State: &objectcache.ProfileState{Name: "cp-identity"},
+ ContainerName: "container",
+ PodName: "pod-identity",
+ Namespace: "default",
+ PodUID: "uid-identity",
+ CPName: "cp-identity",
+ RV: "old",
+ Shared: true,
+ })
+
+ cache.RefreshAllEntriesForTest(context.Background())
+
+ got := cache.GetContainerProfile("id-identity")
+ require.NotNil(t, got, "entry must be present after refresh")
+ assert.Same(t, cpInStorage, got,
+ "shared fast-path: refresh must store the storage pointer directly (no DeepCopy)")
+ assert.Equal(t, "99", got.ResourceVersion, "RV must match the storage object")
+}
diff --git a/pkg/objectcache/containerprofilecache/t8_overlay_refresh_test.go b/pkg/objectcache/containerprofilecache/t8_overlay_refresh_test.go
new file mode 100644
index 0000000000..ea67a5d172
--- /dev/null
+++ b/pkg/objectcache/containerprofilecache/t8_overlay_refresh_test.go
@@ -0,0 +1,110 @@
+package containerprofilecache_test
+
+// TestT8_EndToEndRefreshUpdatesProjection mirrors the same-named unit test from
+// reconciler_test.go using only the public / test-helper API so it can live at
+// the integration test level (tests/containerprofilecache/).
+//
+// Scenario: an entry backed by CP (RV=100) + user-AP overlay (RV=50) is seeded
+// via SeedEntryWithOverlayForTest. Storage is mutated to serve a new AP
+// (RV=51, different execs). A single RefreshAllEntriesForTest call must rebuild
+// the projection so the cached execs reflect the new AP, not the stale one.
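+//
+// The rebuild trigger reduces to a ResourceVersion comparison, roughly (hedged
+// sketch; the reconciler owns the exact logic and the projection helper named
+// below is illustrative, not a real function):
+//
+//	if entry.UserAPRV != latestUserAP.ResourceVersion {
+//		entry.Profile = project(baseCP, latestUserAP) // re-project base + overlay
+//	}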
+ +import ( + "context" + "testing" + "time" + + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + cpc "github.com/kubescape/node-agent/pkg/objectcache/containerprofilecache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestT8_EndToEndRefreshUpdatesProjection(t *testing.T) { + cp := &v1beta1.ContainerProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp", + Namespace: "default", + ResourceVersion: "100", + }, + Spec: v1beta1.ContainerProfileSpec{ + Execs: []v1beta1.ExecCalls{{Path: "/bin/base", Args: []string{"a"}}}, + }, + } + apV1 := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override", + Namespace: "default", + ResourceVersion: "50", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Execs: []v1beta1.ExecCalls{{Path: "/bin/old", Args: []string{"x"}}}, + }}, + }, + } + apV2 := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "override", + Namespace: "default", + ResourceVersion: "51", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{{ + Name: "nginx", + Execs: []v1beta1.ExecCalls{{Path: "/bin/new", Args: []string{"y"}}}, + }}, + }, + } + + store := newFakeStorage(cp) + store.mu.Lock() + store.ap = apV1 + store.mu.Unlock() + + k8s := newFakeK8sCache() + cfg := config.Config{ + ProfilesCacheRefreshRate: 30 * time.Second, + StorageRPCBudget: 500 * time.Millisecond, + } + cache := cpc.NewContainerProfileCache(cfg, store, k8s, nil) + + const id = "c1" + // Seed a projected entry with a stale UserAPRV so refresh sees the RV change. + // The Profile here is just the base CP; the reconciler will re-project on refresh. + cache.SeedEntryWithOverlayForTest(id, &cpc.CachedContainerProfile{ + Profile: cp, + State: &objectcache.ProfileState{Name: cp.Name}, + ContainerName: "nginx", + PodName: "nginx-abc", + Namespace: "default", + PodUID: "uid-1", + CPName: "cp", + RV: "100", + UserAPRV: "50", // stale — triggers rebuild when storage returns RV=51 + Shared: false, + }, "default", "override", "", "") + + // Advance storage to apV2 (RV=51). The reconciler will see the RV mismatch + // and rebuild the projection from cp + apV2. + store.mu.Lock() + store.ap = apV2 + store.mu.Unlock() + + cache.RefreshAllEntriesForTest(context.Background()) + + stored := cache.GetContainerProfile(id) + require.NotNil(t, stored, "entry must remain after refresh") + + var paths []string + for _, e := range stored.Spec.Execs { + paths = append(paths, e.Path) + } + assert.Contains(t, paths, "/bin/base", "base CP exec must be preserved after overlay refresh") + assert.Contains(t, paths, "/bin/new", "new user-AP exec must appear in the rebuilt projection") + assert.NotContains(t, paths, "/bin/old", "stale user-AP exec must NOT survive the rebuild") +} diff --git a/pkg/objectcache/containerprofilecache/tamper_alert.go b/pkg/objectcache/containerprofilecache/tamper_alert.go new file mode 100644 index 0000000000..6caf7c764a --- /dev/null +++ b/pkg/objectcache/containerprofilecache/tamper_alert.go @@ -0,0 +1,155 @@ +// Tamper detection for user-supplied profile overlays loaded into the +// ContainerProfileCache. 
+// +// When a user references a signed ApplicationProfile or NetworkNeighborhood +// via the `kubescape.io/user-defined-profile` pod label, this code path +// re-verifies the signature on every cache load and emits an R1016 +// "Signed profile tampered" alert via the rule-alert exporter when the +// signature is present but no longer valid. +// +// This is the new home of the legacy applicationprofilecache's tamper +// detection (originally introduced in fork commit c2d681e0 — "Feat/ +// tamperalert"). Upstream PR #788 deleted the legacy cache; this re-wires +// the same behavior onto containerprofilecache without changing the alert +// shape so existing component tests (Test_31_TamperDetectionAlert) keep +// working. +package containerprofilecache + +import ( + "fmt" + "strings" + + "github.com/armosec/armoapi-go/armotypes" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/node-agent/pkg/exporters" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + "github.com/kubescape/node-agent/pkg/signature" + "github.com/kubescape/node-agent/pkg/signature/profiles" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +// SetTamperAlertExporter wires the rule-alert exporter used to emit R1016. +// Optional — when nil, signature verification still runs (and is logged) +// but no alert is emitted. Production wiring lives in cmd/main.go after the +// alert exporter is constructed. +func (c *ContainerProfileCacheImpl) SetTamperAlertExporter(e exporters.Exporter) { + c.tamperAlertExporter = e +} + +// verifyUserApplicationProfile re-verifies the signature of a user-supplied +// ApplicationProfile overlay and emits R1016 if the signature is present +// but no longer valid (i.e. the profile was tampered after signing). +// +// Returns true iff the profile is acceptable for further use: +// - profile is signed and verifies → true +// - profile is not signed → true (signing is opt-in; the empty-signature +// case is handled by the caller's normal not-signed flow) +// - profile is signed but verification fails → false (and R1016 emitted) +// +// The boolean lets the caller decide whether to project the overlay into +// the cache. Today we always proceed (the legacy semantics don't actually +// gate loading on verification unless EnableSignatureVerification is true), +// but having the return value keeps the door open for stricter modes. +func (c *ContainerProfileCacheImpl) verifyUserApplicationProfile(profile *v1beta1.ApplicationProfile, containerID string) bool { + if profile == nil { + return true + } + adapter := profiles.NewApplicationProfileAdapter(profile) + if !signature.IsSigned(adapter) { + return true + } + // AllowUntrusted: accept self-signed/local-CA signatures as long as the + // signature itself verifies against the cert in the annotations. We only + // want to flag actual tampering, not the absence of a Sigstore Fulcio + // trust chain. Matches `cmd/sign-object`'s default verifier. 
+ if err := signature.VerifyObjectAllowUntrusted(adapter); err != nil {
+ logger.L().Warning("user-defined ApplicationProfile signature verification failed (tamper detected)",
+ helpers.String("profile", profile.Name),
+ helpers.String("namespace", profile.Namespace),
+ helpers.String("containerID", containerID),
+ helpers.Error(err))
+ c.emitTamperAlert(profile.Name, profile.Namespace, containerID, "ApplicationProfile", err)
+ return !c.cfg.EnableSignatureVerification
+ }
+ return true
+}
+
+// verifyUserNetworkNeighborhood is the NN-side counterpart to
+// verifyUserApplicationProfile. Same contract, different object kind in
+// the alert description.
+func (c *ContainerProfileCacheImpl) verifyUserNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood, containerID string) bool {
+ if nn == nil {
+ return true
+ }
+ adapter := profiles.NewNetworkNeighborhoodAdapter(nn)
+ if !signature.IsSigned(adapter) {
+ return true
+ }
+ if err := signature.VerifyObjectAllowUntrusted(adapter); err != nil {
+ logger.L().Warning("user-defined NetworkNeighborhood signature verification failed (tamper detected)",
+ helpers.String("profile", nn.Name),
+ helpers.String("namespace", nn.Namespace),
+ helpers.String("containerID", containerID),
+ helpers.Error(err))
+ c.emitTamperAlert(nn.Name, nn.Namespace, containerID, "NetworkNeighborhood", err)
+ return !c.cfg.EnableSignatureVerification
+ }
+ return true
+}
+
+// emitTamperAlert sends a single R1016 "Signed profile tampered" alert
+// through the rule-alert exporter. No-op when the exporter is unset.
+//
+// Alert shape mirrors the legacy applicationprofilecache.emitTamperAlert
+// (fork commit c2d681e0) so dashboards and component tests keep matching.
+func (c *ContainerProfileCacheImpl) emitTamperAlert(profileName, namespace, containerID, objectKind string, verifyErr error) {
+ if c.tamperAlertExporter == nil {
+ return
+ }
+
+ ruleFailure := &types.GenericRuleFailure{
+ BaseRuntimeAlert: armotypes.BaseRuntimeAlert{
+ AlertName: "Signed profile tampered",
+ InfectedPID: 1,
+ Severity: 10,
+ FixSuggestions: "Investigate who modified the " + objectKind + " '" + profileName + "' in namespace '" + namespace + "'. Re-sign the profile after verifying its contents.",
+ },
+ AlertType: armotypes.AlertTypeRule,
+ RuntimeProcessDetails: armotypes.ProcessTree{
+ ProcessTree: armotypes.Process{
+ PID: 1,
+ Comm: "node-agent",
+ },
+ },
+ RuleAlert: armotypes.RuleAlert{
+ RuleDescription: fmt.Sprintf("Signed %s '%s' in namespace '%s' has been tampered with: %v",
+ objectKind, profileName, namespace, verifyErr),
+ },
+ RuntimeAlertK8sDetails: armotypes.RuntimeAlertK8sDetails{
+ Namespace: namespace,
+ },
+ RuleID: "R1016",
+ }
+
+ // Best-effort workload identifier. The legacy cache used a wlid string;
+ // this cache is keyed on containerID, so we just stash that as the
+ // workload reference. Downstream consumers (Alertmanager, exporter
+ // pipelines) don't structurally depend on the wlid prefix.
+ ruleFailure.SetWorkloadDetails(extractWlidFromContainerID(containerID))
+
+ c.tamperAlertExporter.SendRuleAlert(ruleFailure)
+}
+
+// extractWlidFromContainerID derives an opaque workload reference from the
+// containerID, trimming everything after the last "/" (or returning the ID
+// as-is when it contains no slash). The legacy cache had a richer
+// "wlid://cluster-<cluster>/namespace-<namespace>/<kind>-<name>" string
+// available; the new cache is keyed on containerID, so callers that consume
+// wlid get an opaque identifier here. Retained as a separate function so the
+// alert path can be upgraded to a proper wlid lookup later without touching
+// emitTamperAlert.
+func extractWlidFromContainerID(containerID string) string { + if idx := strings.LastIndex(containerID, "/"); idx > 0 { + return containerID[:idx] + } + return containerID +} diff --git a/pkg/objectcache/containerprofilecache_interface.go b/pkg/objectcache/containerprofilecache_interface.go new file mode 100644 index 0000000000..fcf73ab9e9 --- /dev/null +++ b/pkg/objectcache/containerprofilecache_interface.go @@ -0,0 +1,41 @@ +// Package objectcache defines interfaces for the node-agent object cache layer. +package objectcache + +import ( + "context" + "errors" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/node-agent/pkg/objectcache/callstackcache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +type ContainerProfileCache interface { + GetContainerProfile(containerID string) *v1beta1.ContainerProfile + GetContainerProfileState(containerID string) *ProfileState + GetCallStackSearchTree(containerID string) *callstackcache.CallStackSearchTree + ContainerCallback(notif containercollection.PubSubEvent) + Start(ctx context.Context) +} + +var _ ContainerProfileCache = (*ContainerProfileCacheMock)(nil) + +type ContainerProfileCacheMock struct{} + +func (cp *ContainerProfileCacheMock) GetContainerProfile(_ string) *v1beta1.ContainerProfile { + return nil +} + +func (cp *ContainerProfileCacheMock) GetContainerProfileState(_ string) *ProfileState { + return &ProfileState{Error: errors.New("mock: profile not found")} +} + +func (cp *ContainerProfileCacheMock) GetCallStackSearchTree(_ string) *callstackcache.CallStackSearchTree { + return nil +} + +func (cp *ContainerProfileCacheMock) ContainerCallback(_ containercollection.PubSubEvent) { +} + +func (cp *ContainerProfileCacheMock) Start(_ context.Context) { +} diff --git a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go deleted file mode 100644 index 050600f6f2..0000000000 --- a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go +++ /dev/null @@ -1,758 +0,0 @@ -package networkneighborhoodcache - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff/v5" - mapset "github.com/deckarep/golang-set/v2" - "github.com/goradd/maps" - containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" - "github.com/kubescape/node-agent/pkg/config" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/resourcelocks" - "github.com/kubescape/node-agent/pkg/storage" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ContainerInfo holds container metadata we need for network neighborhood mapping -type ContainerInfo struct { - ContainerID string - WorkloadID string - InstanceTemplateHash string - Namespace string - SeenContainerFromTheStart bool // True if container was seen from the start -} - -// NetworkNeighborhoodCacheImpl implements the NetworkNeighborhoodCache interface -type NetworkNeighborhoodCacheImpl struct { - cfg config.Config - workloadIDToNetworkNeighborhood maps.SafeMap[string, *v1beta1.NetworkNeighborhood] - workloadIDToProfileState maps.SafeMap[string, 
*objectcache.ProfileState] // Tracks profile state even if not in cache - containerIDToInfo maps.SafeMap[string, *ContainerInfo] - networkNeighborhoodToUserManagedIdentifier maps.SafeMap[string, string] // networkNeighborhoodName -> user-managed profile unique identifier - storageClient storage.ProfileClient - k8sObjectCache objectcache.K8sObjectCache - updateInterval time.Duration - updateInProgress bool // Flag to track if update is in progress - updateMutex sync.Mutex // Mutex to protect the flag - containerLocks *resourcelocks.ResourceLocks // Locks for each container to prevent concurrent modifications -} - -// NewNetworkNeighborhoodCache creates a new network neighborhood cache with periodic updates -func NewNetworkNeighborhoodCache(cfg config.Config, storageClient storage.ProfileClient, k8sObjectCache objectcache.K8sObjectCache) *NetworkNeighborhoodCacheImpl { - updateInterval := utils.AddJitter(cfg.ProfilesCacheRefreshRate, 10) // Add 10% jitter to avoid high load on the storage - - nnc := &NetworkNeighborhoodCacheImpl{ - cfg: cfg, - workloadIDToNetworkNeighborhood: maps.SafeMap[string, *v1beta1.NetworkNeighborhood]{}, - workloadIDToProfileState: maps.SafeMap[string, *objectcache.ProfileState]{}, - containerIDToInfo: maps.SafeMap[string, *ContainerInfo]{}, - networkNeighborhoodToUserManagedIdentifier: maps.SafeMap[string, string]{}, - storageClient: storageClient, - k8sObjectCache: k8sObjectCache, - updateInterval: updateInterval, - containerLocks: resourcelocks.New(), - } - - return nnc -} - -// Start begins the periodic update process -func (nnc *NetworkNeighborhoodCacheImpl) Start(ctx context.Context) { - go nnc.periodicUpdate(ctx) -} - -// periodicUpdate periodically fetches and updates network neighborhoods from storage -func (nnc *NetworkNeighborhoodCacheImpl) periodicUpdate(ctx context.Context) { - ticker := time.NewTicker(nnc.updateInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - // Check if an update is already in progress - nnc.updateMutex.Lock() - if nnc.updateInProgress { - // Skip this update cycle - logger.L().Debug("skipping profile update: previous update still in progress") - nnc.updateMutex.Unlock() - continue - } - - // Set the flag and release the lock before the potentially long-running call - nnc.updateInProgress = true - nnc.updateMutex.Unlock() - - // Run the update directly - nnc.updateAllNetworkNeighborhoods(ctx) - - // Mark the update as complete - nnc.updateMutex.Lock() - nnc.updateInProgress = false - nnc.updateMutex.Unlock() - - case <-ctx.Done(): - logger.L().Info("NetworkNeighborhoodsCache periodic update stopped") - return - } - } -} - -// updateAllNetworkNeighborhoods fetches all network neighborhoods from storage and updates the cache -func (nnc *NetworkNeighborhoodCacheImpl) updateAllNetworkNeighborhoods(ctx context.Context) { - // Get unique namespaces from container info - namespaces := nnc.getNamespaces() - if len(namespaces) == 0 { - logger.L().Debug("no namespaces found in cache, skipping network neighborhood update") - return - } - - // Iterate over each namespace - for _, namespace := range namespaces { - // Get container IDs for this namespace - containerIDs := nnc.getContainerIDsForNamespace(namespace) - if len(containerIDs) == 0 { - logger.L().Debug("no containers found for namespace, skipping", - helpers.String("namespace", namespace)) - continue - } - - // Get network neighborhoods list for this namespace - var nnList *v1beta1.NetworkNeighborhoodList - continueToken := "" - for { - list, err := 
nnc.storageClient.ListNetworkNeighborhoods(namespace, int64(50), continueToken) - if err != nil { - logger.L().Error("failed to list network neighborhoods", - helpers.String("namespace", namespace), - helpers.Error(err)) - break - } - - if nnList == nil { - nnList = list - } else { - nnList.Items = append(nnList.Items, list.Items...) - } - - continueToken = list.Continue - if continueToken == "" { - break - } - } - - if nnList == nil { - continue - } - - // Process each network neighborhood - for _, nn := range nnList.Items { - // Handle user-managed network neighborhoods - if isUserManagedNN(&nn) { - nnc.handleUserManagedNetworkNeighborhood(&nn) - continue - } - - // Get the workload ID from network neighborhood - workloadID := nnc.wlidKey( - nn.Annotations[helpersv1.WlidMetadataKey], - nn.Labels[helpersv1.TemplateHashKey], - ) - if workloadID == "" { - continue - } - - // Update profile state regardless of whether we'll update the full profile - profileState := &objectcache.ProfileState{ - Completion: nn.Annotations[helpersv1.CompletionMetadataKey], - Status: nn.Annotations[helpersv1.StatusMetadataKey], - Name: nn.Name, - Error: nil, - } - nnc.workloadIDToProfileState.Set(workloadID, profileState) - - // Only consider completed network neighborhoods - if nn.Annotations[helpersv1.StatusMetadataKey] != helpersv1.Completed { - continue - } - - // Check if this workload ID is used by any container in this namespace - workloadIDInUse := false - hasNewContainer := false // Track if any container using this workload was seen from start - for _, containerID := range containerIDs { - if containerInfo, exists := nnc.containerIDToInfo.Load(containerID); exists && - containerInfo.WorkloadID == workloadID && - containerInfo.InstanceTemplateHash == nn.Labels[helpersv1.TemplateHashKey] { - workloadIDInUse = true - // If any container was seen from start, mark it - if containerInfo.SeenContainerFromTheStart { - hasNewContainer = true - } - } - } - - if !workloadIDInUse { - continue - } - - // If we have a "new" container (seen from start) and the network neighborhood is partial, - // skip it - we don't want to use partial profiles for containers we're tracking from the start - if hasNewContainer && nn.Annotations[helpersv1.CompletionMetadataKey] == helpersv1.Partial { - logger.L().Debug("skipping partial network neighborhood for container seen from start", - helpers.String("workloadID", workloadID), - helpers.String("namespace", namespace)) - continue - } - - // Update the network neighborhood in the cache - if existingNN, exists := nnc.workloadIDToNetworkNeighborhood.Load(workloadID); exists { - // If the network neighborhood already exists and it's complete/completed, continue to the next one - if existingNN.Annotations[helpersv1.CompletionMetadataKey] == helpersv1.Full { - continue - } - - // If the new network neighborhood is not complete and we already have a completed/partial one, skip it - if nn.Annotations[helpersv1.CompletionMetadataKey] != helpersv1.Full { - continue - } - } - - // Fetch the network neighborhood from storage - fullNN, err := nnc.storageClient.GetNetworkNeighborhood(namespace, nn.Name) - if err != nil { - logger.L().Error("failed to get network neighborhood", - helpers.String("workloadID", workloadID), - helpers.String("namespace", namespace), - helpers.Error(err)) - profileState.Error = err - nnc.workloadIDToProfileState.Set(workloadID, profileState) - continue - } - - nnc.workloadIDToNetworkNeighborhood.Set(workloadID, fullNN) - logger.L().Debug("updated network neighborhood 
in cache", - helpers.String("workloadID", workloadID), - helpers.String("namespace", namespace), - helpers.String("status", nn.Annotations[helpersv1.StatusMetadataKey]), - helpers.String("completion", nn.Annotations[helpersv1.CompletionMetadataKey])) - } - } -} - -// handleUserManagedNetworkNeighborhood handles user-managed network neighborhoods -func (nnc *NetworkNeighborhoodCacheImpl) handleUserManagedNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood) { - normalizedNNName := strings.TrimPrefix(nn.Name, helpersv1.UserNetworkNeighborhoodPrefix) - userManagedNNUniqueIdentifier := nn.ResourceVersion + string(nn.UID) - - // Create a unique tracking key for this user network neighborhood - nnKey := nnc.networkNeighborhoodKey(nn.Namespace, normalizedNNName) - - // Check if we've already processed this exact version of the user-managed network neighborhood - if storedIdentifier, exists := nnc.networkNeighborhoodToUserManagedIdentifier.Load(nnKey); exists && - storedIdentifier == userManagedNNUniqueIdentifier { - return - } - - // Find and collect the network neighborhood to merge - var toMerge struct { - wlid string - nn *v1beta1.NetworkNeighborhood - } - - nnc.workloadIDToNetworkNeighborhood.Range(func(wlid string, originalNN *v1beta1.NetworkNeighborhood) bool { - if originalNN.Name == normalizedNNName && originalNN.Namespace == nn.Namespace { - toMerge.wlid = wlid - toMerge.nn = originalNN - logger.L().Debug("found matching network neighborhood for user-managed network neighborhood", - helpers.String("workloadID", wlid), - helpers.String("namespace", originalNN.Namespace), - helpers.String("nnName", originalNN.Name)) - // Stop iteration - return false - } - return true - }) - - // If we didn't find a matching network neighborhood, skip merging - if toMerge.nn == nil { - return - } - - // Fetch the full user network neighborhood - fullUserNN, err := nnc.storageClient.GetNetworkNeighborhood(nn.Namespace, nn.Name) - if err != nil { - logger.L().Error("failed to get user-managed network neighborhood", - helpers.String("namespace", nn.Namespace), - helpers.String("nnName", nn.Name), - helpers.Error(err)) - return - } - - // Merge the user-managed network neighborhood with the normal network neighborhood - - // First, pull the original network neighborhood from the storage - originalNN, err := nnc.storageClient.GetNetworkNeighborhood(toMerge.nn.Namespace, toMerge.nn.Name) - if err != nil { - logger.L().Error("failed to get original network neighborhood", - helpers.String("namespace", toMerge.nn.Namespace), - helpers.String("nnName", toMerge.nn.Name), - helpers.Error(err)) - return - } - // Merge the network neighborhoods - mergedNN := nnc.performMerge(originalNN, fullUserNN) - // Update the cache with the merged network neighborhood - nnc.workloadIDToNetworkNeighborhood.Set(toMerge.wlid, mergedNN) - // Update profile state for the merged profile - profileState := &objectcache.ProfileState{ - Completion: mergedNN.Annotations[helpersv1.CompletionMetadataKey], - Status: mergedNN.Annotations[helpersv1.StatusMetadataKey], - Name: mergedNN.Name, - Error: nil, - } - nnc.workloadIDToProfileState.Set(toMerge.wlid, profileState) - logger.L().Debug("merged user-managed network neighborhood with normal network neighborhood", - helpers.String("workloadID", toMerge.wlid), - helpers.String("namespace", nn.Namespace), - helpers.String("nnName", nn.Name)) - - // Record that we've processed this version of the network neighborhood - nnc.networkNeighborhoodToUserManagedIdentifier.Set(nnKey, 
userManagedNNUniqueIdentifier) -} - -// ContainerCallback handles container lifecycle events -func (nnc *NetworkNeighborhoodCacheImpl) ContainerCallback(notif containercollection.PubSubEvent) { - isHost := utils.IsHostContainer(notif.Container) - namespace := notif.Container.K8s.Namespace - if isHost { - namespace = "host" - } - switch notif.Type { - case containercollection.EventTypeAddContainer: - if !isHost && nnc.cfg.IgnoreContainer(namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { - return - } - container := notif.Container - if isHost { - containerCopy := *notif.Container - containerCopy.K8s.Namespace = namespace - container = &containerCopy - } - go nnc.addContainerWithTimeout(container) - case containercollection.EventTypeRemoveContainer: - if !isHost && nnc.cfg.IgnoreContainer(namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { - return - } - go nnc.deleteContainer(notif.Container.Runtime.ContainerID) - } -} - -// addContainerWithTimeout handles adding a container with a timeout to prevent hanging -func (nnc *NetworkNeighborhoodCacheImpl) addContainerWithTimeout(container *containercollection.Container) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - - done := make(chan error, 1) - go func() { - done <- nnc.addContainer(container, ctx) - }() - - select { - case err := <-done: - if err != nil { - logger.L().Error("failed to add container to the cache", helpers.Error(err)) - } - case <-ctx.Done(): - logger.L().Error("timeout while adding container to the cache", - helpers.String("containerID", container.Runtime.ContainerID), - helpers.String("containerName", container.Runtime.ContainerName), - helpers.String("podName", container.K8s.PodName), - helpers.String("namespace", container.K8s.Namespace)) - } -} - -// addContainer adds a container to the cache -func (nnc *NetworkNeighborhoodCacheImpl) addContainer(container *containercollection.Container, ctx context.Context) error { - containerID := container.Runtime.ContainerID - - return nnc.containerLocks.WithLockAndError(containerID, func() error { - // Get workload ID from shared data - sharedData, err := nnc.waitForSharedContainerData(containerID, ctx) - if err != nil { - logger.L().Error("failed to get shared data for container", - helpers.String("containerID", containerID), - helpers.Error(err)) - return err - } - - workloadID := nnc.wlidKey(sharedData.Wlid, sharedData.InstanceID.GetTemplateHash()) - if workloadID == "" { - logger.L().Debug("empty workloadID for container", helpers.String("containerID", containerID)) - return nil - } - - // If container restarts and profile is partial, delete it from cache - // This ensures we don't alert on activity we didn't see after restart - if existingNN, exists := nnc.workloadIDToNetworkNeighborhood.Load(workloadID); exists && !sharedData.PreRunningContainer { - if existingNN != nil && existingNN.Annotations != nil { - completion := existingNN.Annotations[helpersv1.CompletionMetadataKey] - if completion == helpersv1.Partial { - logger.L().Debug("deleting partial network neighborhood on container restart", - helpers.String("containerID", containerID), - helpers.String("workloadID", workloadID), - helpers.String("namespace", container.K8s.Namespace)) - - // Delete the network neighborhood from cache - nnKey := nnc.networkNeighborhoodKey(existingNN.Namespace, existingNN.Name) - nnc.networkNeighborhoodToUserManagedIdentifier.Delete(nnKey) - nnc.workloadIDToNetworkNeighborhood.Delete(workloadID) - } - } - 
} - - // Create container info - // Mark container as "seen from start" if it is not pre-running - containerInfo := &ContainerInfo{ - ContainerID: containerID, - WorkloadID: workloadID, - InstanceTemplateHash: sharedData.InstanceID.GetTemplateHash(), - Namespace: container.K8s.Namespace, - SeenContainerFromTheStart: !sharedData.PreRunningContainer, - } - - // Add to container info map - nnc.containerIDToInfo.Set(containerID, containerInfo) - - // Create workload ID to state mapping - if _, exists := nnc.workloadIDToProfileState.Load(workloadID); !exists { - nnc.workloadIDToProfileState.Set(workloadID, nil) - } - - logger.L().Debug("container added to cache", - helpers.String("containerID", containerID), - helpers.String("workloadID", workloadID), - helpers.String("namespace", container.K8s.Namespace)) - - return nil - }) -} - -// deleteContainer deletes a container from the cache -func (nnc *NetworkNeighborhoodCacheImpl) deleteContainer(containerID string) { - nnc.containerLocks.WithLock(containerID, func() { - // Get container info - containerInfo, exists := nnc.containerIDToInfo.Load(containerID) - if !exists { - logger.L().Debug("containerID not found in cache", helpers.String("containerID", containerID)) - return - } - - // Clean up container info - nnc.containerIDToInfo.Delete(containerID) - - // Check if any other container is using the same workload ID - workloadStillInUse := false - nnc.containerIDToInfo.Range(func(_ string, info *ContainerInfo) bool { - if info.WorkloadID == containerInfo.WorkloadID { - workloadStillInUse = true - return false // Stop iteration - } - return true // Continue iteration - }) - - // If no other container is using the same workload ID, delete it from the cache - if !workloadStillInUse { - if nn, exists := nnc.workloadIDToNetworkNeighborhood.Load(containerInfo.WorkloadID); exists { - // Remove any user managed identifiers related to this network neighborhood - nnKey := nnc.networkNeighborhoodKey(nn.Namespace, nn.Name) - nnc.networkNeighborhoodToUserManagedIdentifier.Delete(nnKey) - } - nnc.workloadIDToNetworkNeighborhood.Delete(containerInfo.WorkloadID) - nnc.workloadIDToProfileState.Delete(containerInfo.WorkloadID) - logger.L().Debug("deleted workloadID from cache", helpers.String("workloadID", containerInfo.WorkloadID)) - } - }) - - // Clean up the lock when done - call this outside the WithLock closure - nnc.containerLocks.ReleaseLock(containerID) -} - -// waitForSharedContainerData waits for shared container data to be available -func (nnc *NetworkNeighborhoodCacheImpl) waitForSharedContainerData(containerID string, ctx context.Context) (*objectcache.WatchedContainerData, error) { - return backoff.Retry(ctx, func() (*objectcache.WatchedContainerData, error) { - if sharedData := nnc.k8sObjectCache.GetSharedContainerData(containerID); sharedData != nil { - return sharedData, nil - } - return nil, fmt.Errorf("container %s not found in shared data", containerID) - }, backoff.WithBackOff(backoff.NewExponentialBackOff())) -} - -func (nnc *NetworkNeighborhoodCacheImpl) networkNeighborhoodKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - -func (nnc *NetworkNeighborhoodCacheImpl) wlidKey(wlid, templateHash string) string { - return fmt.Sprintf("%s/%s", wlid, templateHash) -} - -// GetNetworkNeighborhood gets the network neighborhood for a container -func (nnc *NetworkNeighborhoodCacheImpl) GetNetworkNeighborhood(containerID string) *v1beta1.NetworkNeighborhood { - // Get container info - if containerInfo, exists := 
nnc.containerIDToInfo.Load(containerID); exists { - workloadID := containerInfo.WorkloadID - if workloadID == "" { - return nil - } - - // Try to get network neighborhood from cache - if nn, exists := nnc.workloadIDToNetworkNeighborhood.Load(workloadID); exists { - if nn != nil { - return nn - } - } - } - - return nil -} - -// GetNetworkNeighborhoodState gets the profile state for a container -func (nnc *NetworkNeighborhoodCacheImpl) GetNetworkNeighborhoodState(containerID string) *objectcache.ProfileState { - // Get container info - containerInfo, exists := nnc.containerIDToInfo.Load(containerID) - if !exists { - return &objectcache.ProfileState{ - Error: fmt.Errorf("container %s not found in cache", containerID), - } - } - - workloadID := containerInfo.WorkloadID - if workloadID == "" { - return &objectcache.ProfileState{ - Error: fmt.Errorf("no workload ID for container %s", containerID), - } - } - - // Try to get profile state from cache - if profileState, exists := nnc.workloadIDToProfileState.Load(workloadID); exists { - if profileState != nil { - return profileState - } else { - return &objectcache.ProfileState{ - Error: fmt.Errorf("profile state not available - shouldn't happen"), - } - } - } - - return &objectcache.ProfileState{ - Error: fmt.Errorf("profile state not found for workload ID %s", workloadID), - } -} - -// performMerge merges a user-managed network neighborhood with a normal network neighborhood -func (nnc *NetworkNeighborhoodCacheImpl) performMerge(normalNN, userManagedNN *v1beta1.NetworkNeighborhood) *v1beta1.NetworkNeighborhood { - mergedNN := normalNN.DeepCopy() - - // Merge spec - mergedNN.Spec.Containers = nnc.mergeContainers(mergedNN.Spec.Containers, userManagedNN.Spec.Containers) - mergedNN.Spec.InitContainers = nnc.mergeContainers(mergedNN.Spec.InitContainers, userManagedNN.Spec.InitContainers) - mergedNN.Spec.EphemeralContainers = nnc.mergeContainers(mergedNN.Spec.EphemeralContainers, userManagedNN.Spec.EphemeralContainers) - - // Merge LabelSelector - if userManagedNN.Spec.LabelSelector.MatchLabels != nil { - if mergedNN.Spec.LabelSelector.MatchLabels == nil { - mergedNN.Spec.LabelSelector.MatchLabels = make(map[string]string) - } - for k, v := range userManagedNN.Spec.LabelSelector.MatchLabels { - mergedNN.Spec.LabelSelector.MatchLabels[k] = v - } - } - mergedNN.Spec.LabelSelector.MatchExpressions = append( - mergedNN.Spec.LabelSelector.MatchExpressions, - userManagedNN.Spec.LabelSelector.MatchExpressions..., - ) - - return mergedNN -} - -func (nnc *NetworkNeighborhoodCacheImpl) mergeContainers(normalContainers, userManagedContainers []v1beta1.NetworkNeighborhoodContainer) []v1beta1.NetworkNeighborhoodContainer { - if len(userManagedContainers) != len(normalContainers) { - // If the number of containers don't match, we can't merge - logger.L().Warning("NetworkNeighborhoodCacheImpl - failed to merge user-managed profile with base profile", - helpers.Int("normalContainers len", len(normalContainers)), - helpers.Int("userManagedContainers len", len(userManagedContainers)), - helpers.String("reason", "number of containers don't match")) - return normalContainers - } - - // Assuming the normalContainers are already in the correct Pod order - // We'll merge user containers at their corresponding positions - for i := range normalContainers { - for _, userContainer := range userManagedContainers { - if normalContainers[i].Name == userContainer.Name { - nnc.mergeContainer(&normalContainers[i], &userContainer) - break - } - } - } - return normalContainers -} - -func 
(nnc *NetworkNeighborhoodCacheImpl) mergeContainer(normalContainer, userContainer *v1beta1.NetworkNeighborhoodContainer) { - // Merge ingress rules - normalContainer.Ingress = nnc.mergeNetworkNeighbors(normalContainer.Ingress, userContainer.Ingress) - - // Merge egress rules - normalContainer.Egress = nnc.mergeNetworkNeighbors(normalContainer.Egress, userContainer.Egress) -} - -func (nnc *NetworkNeighborhoodCacheImpl) mergeNetworkNeighbors(normalNeighbors, userNeighbors []v1beta1.NetworkNeighbor) []v1beta1.NetworkNeighbor { - // Use map to track existing neighbors by identifier - neighborMap := make(map[string]int) - for i, neighbor := range normalNeighbors { - neighborMap[neighbor.Identifier] = i - } - - // Merge or append user neighbors - for _, userNeighbor := range userNeighbors { - if idx, exists := neighborMap[userNeighbor.Identifier]; exists { - // Merge existing neighbor - normalNeighbors[idx] = nnc.mergeNetworkNeighbor(normalNeighbors[idx], userNeighbor) - } else { - // Append new neighbor - normalNeighbors = append(normalNeighbors, userNeighbor) - } - } - - return normalNeighbors -} - -func (nnc *NetworkNeighborhoodCacheImpl) mergeNetworkNeighbor(normal, user v1beta1.NetworkNeighbor) v1beta1.NetworkNeighbor { - merged := normal.DeepCopy() - - // Merge DNS names (removing duplicates) - dnsNamesSet := make(map[string]struct{}) - for _, dns := range normal.DNSNames { - dnsNamesSet[dns] = struct{}{} - } - for _, dns := range user.DNSNames { - dnsNamesSet[dns] = struct{}{} - } - merged.DNSNames = make([]string, 0, len(dnsNamesSet)) - for dns := range dnsNamesSet { - merged.DNSNames = append(merged.DNSNames, dns) - } - - // Merge ports based on patchMergeKey (name) - merged.Ports = nnc.mergeNetworkPorts(merged.Ports, user.Ports) - - // Merge pod selector if provided - if user.PodSelector != nil { - if merged.PodSelector == nil { - merged.PodSelector = &metav1.LabelSelector{} - } - if user.PodSelector.MatchLabels != nil { - if merged.PodSelector.MatchLabels == nil { - merged.PodSelector.MatchLabels = make(map[string]string) - } - for k, v := range user.PodSelector.MatchLabels { - merged.PodSelector.MatchLabels[k] = v - } - } - merged.PodSelector.MatchExpressions = append( - merged.PodSelector.MatchExpressions, - user.PodSelector.MatchExpressions..., - ) - } - - // Merge namespace selector if provided - if user.NamespaceSelector != nil { - if merged.NamespaceSelector == nil { - merged.NamespaceSelector = &metav1.LabelSelector{} - } - if user.NamespaceSelector.MatchLabels != nil { - if merged.NamespaceSelector.MatchLabels == nil { - merged.NamespaceSelector.MatchLabels = make(map[string]string) - } - for k, v := range user.NamespaceSelector.MatchLabels { - merged.NamespaceSelector.MatchLabels[k] = v - } - } - merged.NamespaceSelector.MatchExpressions = append( - merged.NamespaceSelector.MatchExpressions, - user.NamespaceSelector.MatchExpressions..., - ) - } - - // Take the user's IP address if provided - if user.IPAddress != "" { - merged.IPAddress = user.IPAddress - } - - // Take the user's type if provided - if user.Type != "" { - merged.Type = user.Type - } - - return *merged -} - -func (nnc *NetworkNeighborhoodCacheImpl) mergeNetworkPorts(normalPorts, userPorts []v1beta1.NetworkPort) []v1beta1.NetworkPort { - // Use map to track existing ports by name (patchMergeKey) - portMap := make(map[string]int) - for i, port := range normalPorts { - portMap[port.Name] = i - } - - // Merge or append user ports - for _, userPort := range userPorts { - if idx, exists := portMap[userPort.Name]; 
exists { - // Update existing port - normalPorts[idx] = userPort - } else { - // Append new port - normalPorts = append(normalPorts, userPort) - } - } - - return normalPorts -} - -func isUserManagedNN(nn *v1beta1.NetworkNeighborhood) bool { - return nn.Annotations != nil && - nn.Annotations[helpersv1.ManagedByMetadataKey] == helpersv1.ManagedByUserValue && - strings.HasPrefix(nn.GetName(), helpersv1.UserNetworkNeighborhoodPrefix) -} - -// getNamespaces retrieves all unique namespaces from the container info cache -func (nnc *NetworkNeighborhoodCacheImpl) getNamespaces() []string { - namespaceSet := mapset.NewSet[string]() - nnc.containerIDToInfo.Range(func(_ string, info *ContainerInfo) bool { - namespaceSet.Add(info.Namespace) - return true - }) - return namespaceSet.ToSlice() -} - -// getContainerIDsForNamespace retrieves all container IDs for a given namespace -func (nnc *NetworkNeighborhoodCacheImpl) getContainerIDsForNamespace(namespace string) []string { - containerIDs := []string{} - nnc.containerIDToInfo.Range(func(containerID string, info *ContainerInfo) bool { - if info.Namespace == namespace { - containerIDs = append(containerIDs, containerID) - } - return true - }) - return containerIDs -} - -// Ensure NetworkNeighborhoodCacheImpl implements the NetworkNeighborhoodCache interface -var _ objectcache.NetworkNeighborhoodCache = (*NetworkNeighborhoodCacheImpl)(nil) diff --git a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go deleted file mode 100644 index f2714141cb..0000000000 --- a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package networkneighborhoodcache - -import ( - "context" - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/config" - "github.com/kubescape/node-agent/pkg/storage" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// SpyProfileClient for testing pagination -type SpyProfileClient struct { - storage.ProfileClient - NetworkNeighborhoods []v1beta1.NetworkNeighborhood - CallCount int -} - -func (m *SpyProfileClient) ListNetworkNeighborhoods(namespace string, limit int64, cont string) (*v1beta1.NetworkNeighborhoodList, error) { - m.CallCount++ - start := 0 - if cont != "" { - fmt.Sscanf(cont, "%d", &start) - } - - end := start + int(limit) - nextCont := "" - if end < len(m.NetworkNeighborhoods) { - nextCont = fmt.Sprintf("%d", end) - } else { - end = len(m.NetworkNeighborhoods) - } - - return &v1beta1.NetworkNeighborhoodList{ - ListMeta: metav1.ListMeta{ - Continue: nextCont, - }, - Items: m.NetworkNeighborhoods[start:end], - }, nil -} - -func (m *SpyProfileClient) ListApplicationProfiles(namespace string, limit int64, cont string) (*v1beta1.ApplicationProfileList, error) { - return &v1beta1.ApplicationProfileList{}, nil -} - -func (m *SpyProfileClient) GetNetworkNeighborhood(namespace, name string) (*v1beta1.NetworkNeighborhood, error) { - // Return empty object - return &v1beta1.NetworkNeighborhood{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: map[string]string{ - "kubescape.io/completion": "complete", - "kubescape.io/status": "completed", - }, - }, - }, nil -} - -func TestPagination(t *testing.T) { - totalItems := 120 - items := make([]v1beta1.NetworkNeighborhood, totalItems) - for i := 0; i < totalItems; i++ { - items[i] = v1beta1.NetworkNeighborhood{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: fmt.Sprintf("nn-%d", i), - Namespace: "default", - Annotations: map[string]string{ - "kubescape.io/completion": "complete", - "kubescape.io/status": "completed", - }, - Labels: map[string]string{ - "kubescape.io/wlid-template-hash": "hash", - }, - }, - } - } - - spy := &SpyProfileClient{NetworkNeighborhoods: items} - - cache := NewNetworkNeighborhoodCache(config.Config{}, spy, nil) - - // Inject a container so that "default" namespace is processed. - cache.containerIDToInfo.Set("test-container", &ContainerInfo{ - Namespace: "default", - WorkloadID: "wlid", - }) - - // Call the private method - cache.updateAllNetworkNeighborhoods(context.Background()) - - // We expect 3 calls: - // 1. 0-50, returns continue="50" - // 2. 50-100, returns continue="100" - // 3. 100-120, returns continue="" - if spy.CallCount != 3 { - t.Errorf("Expected 3 calls to ListNetworkNeighborhoods, got %d", spy.CallCount) - } -} diff --git a/pkg/objectcache/networkneighborhoodcache_interface.go b/pkg/objectcache/networkneighborhoodcache_interface.go deleted file mode 100644 index fe617ced6d..0000000000 --- a/pkg/objectcache/networkneighborhoodcache_interface.go +++ /dev/null @@ -1,28 +0,0 @@ -package objectcache - -import ( - containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -type NetworkNeighborhoodCache interface { - GetNetworkNeighborhood(containerID string) *v1beta1.NetworkNeighborhood - GetNetworkNeighborhoodState(containerID string) *ProfileState - ContainerCallback(notif containercollection.PubSubEvent) -} - -var _ NetworkNeighborhoodCache = (*NetworkNeighborhoodCacheMock)(nil) - -type NetworkNeighborhoodCacheMock struct { -} - -func (nn *NetworkNeighborhoodCacheMock) GetNetworkNeighborhood(_ string) *v1beta1.NetworkNeighborhood { - return nil -} - -func (nn *NetworkNeighborhoodCacheMock) ContainerCallback(_ containercollection.PubSubEvent) { -} - -func (nn *NetworkNeighborhoodCacheMock) GetNetworkNeighborhoodState(_ string) *ProfileState { - return nil -} diff --git a/pkg/objectcache/objectcache_interface.go b/pkg/objectcache/objectcache_interface.go index 8621b0b84e..ce89ff12fe 100644 --- a/pkg/objectcache/objectcache_interface.go +++ b/pkg/objectcache/objectcache_interface.go @@ -2,8 +2,7 @@ package objectcache type ObjectCache interface { K8sObjectCache() K8sObjectCache - ApplicationProfileCache() ApplicationProfileCache - NetworkNeighborhoodCache() NetworkNeighborhoodCache + ContainerProfileCache() ContainerProfileCache DnsCache() DnsCache } @@ -19,11 +18,8 @@ func (om *ObjectCacheMock) K8sObjectCache() K8sObjectCache { return &K8sObjectCacheMock{} } -func (om *ObjectCacheMock) ApplicationProfileCache() ApplicationProfileCache { - return &ApplicationProfileCacheMock{} -} -func (om *ObjectCacheMock) NetworkNeighborhoodCache() NetworkNeighborhoodCache { - return &NetworkNeighborhoodCacheMock{} +func (om *ObjectCacheMock) ContainerProfileCache() ContainerProfileCache { + return &ContainerProfileCacheMock{} } func (om *ObjectCacheMock) DnsCache() DnsCache { diff --git a/pkg/objectcache/shared_container_data.go b/pkg/objectcache/shared_container_data.go index f2773c6289..606ed3bd21 100644 --- a/pkg/objectcache/shared_container_data.go +++ b/pkg/objectcache/shared_container_data.go @@ -19,6 +19,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// UserDefinedNetworkMetadataKey is the pod label that references a +// user-provided NetworkNeighborhood resource by 
name (analogous to +// helpersv1.UserDefinedProfileMetadataKey for ApplicationProfiles). +const UserDefinedNetworkMetadataKey = "kubescape.io/user-defined-network" + type ContainerType int const ( @@ -81,7 +86,9 @@ type WatchedContainerData struct { PreviousReportTimestamp time.Time CurrentReportTimestamp time.Time UserDefinedProfile string + UserDefinedNetwork string LabelOverrides map[string]string // optional label overrides applied after GetLabels() + LearningPeriod time.Duration } type ContainerInfo struct { @@ -90,31 +97,19 @@ type ContainerInfo struct { ImageID string } +func formatDuration(d time.Duration) string { + s := d.String() + s = strings.Replace(s, "m0s", "m", 1) + s = strings.Replace(s, "h0m", "h", 1) + return s +} + func GetLabels(cloudMetadata *armotypes.CloudMetadata, watchedContainer *WatchedContainerData, stripContainer bool) map[string]string { labels := watchedContainer.InstanceID.GetLabels() - for i := range labels { - if labels[i] == "" || (stripContainer && i == helpersv1.ContainerNameMetadataKey) { - delete(labels, i) - continue - } - if errs := content.IsLabelValue(labels[i]); len(errs) != 0 { - logger.L().Debug("GetLabels - label is not valid", helpers.String("label", labels[i])) - for j := range errs { - logger.L().Debug("GetLabels - label err description", helpers.String("Err: ", errs[j])) - } - delete(labels, i) - } - } + labels[helpersv1.LearningPeriodMetadataKey] = formatDuration(watchedContainer.LearningPeriod) // Apply label overrides for k, v := range watchedContainer.LabelOverrides { - if v == "" { - delete(labels, k) - } else if errs := content.IsLabelValue(v); len(errs) != 0 { - logger.L().Warning("GetLabels - label override value is not valid, skipping", helpers.String("key", k), helpers.String("value", v)) - delete(labels, k) - } else { - labels[k] = v - } + labels[k] = v } if watchedContainer.ParentResourceVersion != "" { labels[helpersv1.ResourceVersionMetadataKey] = watchedContainer.ParentResourceVersion @@ -134,6 +129,20 @@ func GetLabels(cloudMetadata *armotypes.CloudMetadata, watchedContainer *Watched labels[helpersv1.RegionMetadataKey] = region } } + // Sanitize labels + for i := range labels { + if labels[i] == "" || (stripContainer && i == helpersv1.ContainerNameMetadataKey) { + delete(labels, i) + continue + } + if errs := content.IsLabelValue(labels[i]); len(errs) != 0 { + logger.L().Debug("GetLabels - label is not valid", helpers.String("label", labels[i])) + for j := range errs { + logger.L().Debug("GetLabels - label err description", helpers.String("Err: ", errs[j])) + } + delete(labels, i) + } + } return labels } @@ -169,6 +178,16 @@ func (watchedContainer *WatchedContainerData) SetContainerInfo(wl workloadinterf watchedContainer.UserDefinedProfile = userDefinedProfile } } + // check for user defined network neighborhood + if userDefinedNetwork, ok := labels[UserDefinedNetworkMetadataKey]; ok { + if userDefinedNetwork != "" { + logger.L().Info("container has a user defined network neighborhood", + helpers.String("network", userDefinedNetwork), + helpers.String("container", containerName), + helpers.String("workload", wl.GetName())) + watchedContainer.UserDefinedNetwork = userDefinedNetwork + } + } podSpec, err := wl.GetPodSpec() if err != nil { return fmt.Errorf("failed to get pod spec: %w", err) diff --git a/pkg/objectcache/shared_container_data_test.go b/pkg/objectcache/shared_container_data_test.go index 63eb1983c3..ff1cd4752c 100644 --- a/pkg/objectcache/shared_container_data_test.go +++ 
b/pkg/objectcache/shared_container_data_test.go @@ -2,6 +2,7 @@ package objectcache import ( "testing" + "time" "github.com/kubescape/k8s-interface/instanceidhandler/v1" "github.com/stretchr/testify/assert" @@ -51,6 +52,7 @@ func Test_GetLabels(t *testing.T) { "kubescape.io/workload-api-version": "v1", "kubescape.io/workload-container-name": "redis", "kubescape.io/workload-kind": "Deployment", + "kubescape.io/learning-period": "0s", "kubescape.io/workload-name": "redis", "kubescape.io/workload-namespace": "aaa", }, @@ -67,6 +69,7 @@ func Test_GetLabels(t *testing.T) { want: map[string]string{ "kubescape.io/workload-api-version": "v1", "kubescape.io/workload-kind": "Deployment", + "kubescape.io/learning-period": "0s", "kubescape.io/workload-name": "redis", "kubescape.io/workload-namespace": "aaa", }, @@ -79,3 +82,36 @@ func Test_GetLabels(t *testing.T) { }) } } + +func Test_formatDuration(t *testing.T) { + tests := []struct { + d time.Duration + want string + }{ + { + d: 5 * time.Minute, + want: "5m", + }, + { + d: 1*time.Hour + 30*time.Minute, + want: "1h30m", + }, + { + d: 45 * time.Second, + want: "45s", + }, + { + d: 1*time.Hour + 30*time.Second, + want: "1h30s", + }, + { + d: 1 * time.Hour, + want: "1h", + }, + } + for _, tt := range tests { + t.Run(tt.d.String(), func(t *testing.T) { + assert.Equal(t, tt.want, formatDuration(tt.d)) + }) + } +} diff --git a/pkg/objectcache/v1/mock.go b/pkg/objectcache/v1/mock.go index c6cdeeb945..98c41e0db3 100644 --- a/pkg/objectcache/v1/mock.go +++ b/pkg/objectcache/v1/mock.go @@ -2,24 +2,40 @@ package objectcache import ( "context" + "errors" corev1 "k8s.io/api/core/v1" "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache/callstackcache" + "github.com/kubescape/node-agent/pkg/objectcache/callstackcache" "github.com/kubescape/node-agent/pkg/watcher" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" "k8s.io/apimachinery/pkg/runtime" ) -// RuleObjectCacheMock implementation as provided +// RuleObjectCacheMock is a test double for RuleObjectCache. +// +// Setter partition contract — SetApplicationProfile and SetNetworkNeighborhood +// both write into cpByContainerName entries but own non-overlapping fields: +// +// SetApplicationProfile → Architectures, Capabilities, Execs, Opens, Syscalls, +// SeccompProfile, Endpoints, ImageID, ImageTag, +// PolicyByRuleId, IdentifiedCallStacks +// SetNetworkNeighborhood → LabelSelector, Ingress, Egress +// +// Calling both setters produces a fully-populated ContainerProfile with no +// field conflict. Both setters apply a first-container-wins rule for r.cp +// (backward-compat pointer for single-container tests); the per-container map +// cpByContainerName is authoritative for multi-container tests. 
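+//
+// A minimal usage sketch (test-local names; assumes a single-container
+// profile, so the r.cp fallback applies):
+//
+//	mock := &RuleObjectCacheMock{}
+//	mock.SetApplicationProfile(profile)       // fills Execs, Opens, Syscalls, ...
+//	mock.SetNetworkNeighborhood(neighborhood) // fills Ingress, Egress, LabelSelector
+//	cp := mock.GetContainerProfile("any-id")  // no shared data registered -> r.cp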
type RuleObjectCacheMock struct { profile *v1beta1.ApplicationProfile podSpec *corev1.PodSpec podStatus *corev1.PodStatus nn *v1beta1.NetworkNeighborhood + cp *v1beta1.ContainerProfile + cpByContainerName map[string]*v1beta1.ContainerProfile dnsCache map[string]string ContainerIDToSharedData *maps.SafeMap[string, *objectcache.WatchedContainerData] } @@ -34,9 +50,78 @@ func (r *RuleObjectCacheMock) GetCallStackSearchTree(string) *callstackcache.Cal func (r *RuleObjectCacheMock) SetApplicationProfile(profile *v1beta1.ApplicationProfile) { r.profile = profile + if profile == nil { + return + } + if r.cpByContainerName == nil { + r.cpByContainerName = make(map[string]*v1beta1.ContainerProfile) + } + apply := func(c *v1beta1.ApplicationProfileContainer) { + cp, ok := r.cpByContainerName[c.Name] + if !ok { + cp = &v1beta1.ContainerProfile{} + r.cpByContainerName[c.Name] = cp + } + cp.Spec.Architectures = profile.Spec.Architectures + cp.Spec.Capabilities = c.Capabilities + cp.Spec.Execs = c.Execs + cp.Spec.Opens = c.Opens + cp.Spec.Syscalls = c.Syscalls + cp.Spec.SeccompProfile = c.SeccompProfile + cp.Spec.Endpoints = c.Endpoints + cp.Spec.ImageID = c.ImageID + cp.Spec.ImageTag = c.ImageTag + cp.Spec.PolicyByRuleId = c.PolicyByRuleId + cp.Spec.IdentifiedCallStacks = c.IdentifiedCallStacks + } + for i := range profile.Spec.Containers { + apply(&profile.Spec.Containers[i]) + } + for i := range profile.Spec.InitContainers { + apply(&profile.Spec.InitContainers[i]) + } + for i := range profile.Spec.EphemeralContainers { + apply(&profile.Spec.EphemeralContainers[i]) + } + // r.cp = first container's entry (backward compat for single-container tests). + switch { + case len(profile.Spec.Containers) > 0: + r.cp = r.cpByContainerName[profile.Spec.Containers[0].Name] + case len(profile.Spec.InitContainers) > 0: + r.cp = r.cpByContainerName[profile.Spec.InitContainers[0].Name] + case len(profile.Spec.EphemeralContainers) > 0: + r.cp = r.cpByContainerName[profile.Spec.EphemeralContainers[0].Name] + } } -func (r *RuleObjectCacheMock) ApplicationProfileCache() objectcache.ApplicationProfileCache { +func (r *RuleObjectCacheMock) GetContainerProfile(containerID string) *v1beta1.ContainerProfile { + if r.ContainerIDToSharedData != nil && containerID != "" { + data, ok := r.ContainerIDToSharedData.Load(containerID) + if !ok { + return nil + } + // Resolve the per-container profile via the registered InstanceID so + // multi-container tests get the correct container's profile. 
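+		// Hypothetical wiring for this path: a test calls
+		// SetSharedContainerData(id, &objectcache.WatchedContainerData{InstanceID: iid})
+		// where iid.GetContainerName() matches a profile container name; the
+		// lookup below then returns that container's entry rather than the
+		// r.cp fallback.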
+ if data != nil && data.InstanceID != nil { + if cp, found := r.cpByContainerName[data.InstanceID.GetContainerName()]; found { + return cp + } + } + } + return r.cp +} + +func (r *RuleObjectCacheMock) SetContainerProfile(cp *v1beta1.ContainerProfile) { + r.cp = cp +} + +func (r *RuleObjectCacheMock) GetContainerProfileState(_ string) *objectcache.ProfileState { + return &objectcache.ProfileState{Error: errors.New("mock: profile not found")} +} + +func (r *RuleObjectCacheMock) Start(_ context.Context) {} + +func (r *RuleObjectCacheMock) ContainerProfileCache() objectcache.ContainerProfileCache { return r } @@ -87,16 +172,46 @@ func (r *RuleObjectCacheMock) K8sObjectCache() objectcache.K8sObjectCache { return r } -func (r *RuleObjectCacheMock) NetworkNeighborhoodCache() objectcache.NetworkNeighborhoodCache { - return r -} - func (r *RuleObjectCacheMock) GetNetworkNeighborhood(string) *v1beta1.NetworkNeighborhood { return r.nn } func (r *RuleObjectCacheMock) SetNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood) { r.nn = nn + if nn == nil { + return + } + if r.cpByContainerName == nil { + r.cpByContainerName = make(map[string]*v1beta1.ContainerProfile) + } + apply := func(c *v1beta1.NetworkNeighborhoodContainer) { + cp, ok := r.cpByContainerName[c.Name] + if !ok { + cp = &v1beta1.ContainerProfile{} + r.cpByContainerName[c.Name] = cp + } + cp.Spec.LabelSelector = nn.Spec.LabelSelector + cp.Spec.Ingress = c.Ingress + cp.Spec.Egress = c.Egress + } + for i := range nn.Spec.Containers { + apply(&nn.Spec.Containers[i]) + } + for i := range nn.Spec.InitContainers { + apply(&nn.Spec.InitContainers[i]) + } + for i := range nn.Spec.EphemeralContainers { + apply(&nn.Spec.EphemeralContainers[i]) + } + // r.cp = first container's entry (backward compat for single-container tests). 
+ switch { + case len(nn.Spec.Containers) > 0: + r.cp = r.cpByContainerName[nn.Spec.Containers[0].Name] + case len(nn.Spec.InitContainers) > 0: + r.cp = r.cpByContainerName[nn.Spec.InitContainers[0].Name] + case len(nn.Spec.EphemeralContainers) > 0: + r.cp = r.cpByContainerName[nn.Spec.EphemeralContainers[0].Name] + } } func (r *RuleObjectCacheMock) DnsCache() objectcache.DnsCache { diff --git a/pkg/objectcache/v1/objectcache.go b/pkg/objectcache/v1/objectcache.go index 9986077ee6..c1820a909e 100644 --- a/pkg/objectcache/v1/objectcache.go +++ b/pkg/objectcache/v1/objectcache.go @@ -8,16 +8,14 @@ var _ objectcache.ObjectCache = (*ObjectCacheImpl)(nil) type ObjectCacheImpl struct { k objectcache.K8sObjectCache - ap objectcache.ApplicationProfileCache - np objectcache.NetworkNeighborhoodCache + cp objectcache.ContainerProfileCache dc objectcache.DnsCache } -func NewObjectCache(k objectcache.K8sObjectCache, ap objectcache.ApplicationProfileCache, np objectcache.NetworkNeighborhoodCache, dc objectcache.DnsCache) *ObjectCacheImpl { +func NewObjectCache(k objectcache.K8sObjectCache, cp objectcache.ContainerProfileCache, dc objectcache.DnsCache) *ObjectCacheImpl { return &ObjectCacheImpl{ k: k, - ap: ap, - np: np, + cp: cp, dc: dc, } } @@ -26,11 +24,8 @@ func (o *ObjectCacheImpl) K8sObjectCache() objectcache.K8sObjectCache { return o.k } -func (o *ObjectCacheImpl) ApplicationProfileCache() objectcache.ApplicationProfileCache { - return o.ap -} -func (o *ObjectCacheImpl) NetworkNeighborhoodCache() objectcache.NetworkNeighborhoodCache { - return o.np +func (o *ObjectCacheImpl) ContainerProfileCache() objectcache.ContainerProfileCache { + return o.cp } func (o *ObjectCacheImpl) DnsCache() objectcache.DnsCache { diff --git a/pkg/objectcache/v1/objectcache_test.go b/pkg/objectcache/v1/objectcache_test.go index 207722ea5a..6af7e69c5e 100644 --- a/pkg/objectcache/v1/objectcache_test.go +++ b/pkg/objectcache/v1/objectcache_test.go @@ -10,18 +10,12 @@ import ( func TestK8sObjectCache(t *testing.T) { k := &objectcache.K8sObjectCacheMock{} - k8sObjectCache := NewObjectCache(k, nil, nil, nil) + k8sObjectCache := NewObjectCache(k, nil, nil) assert.NotNil(t, k8sObjectCache.K8sObjectCache()) } -func TestApplicationProfileCache(t *testing.T) { - ap := &objectcache.ApplicationProfileCacheMock{} - k8sObjectCache := NewObjectCache(nil, ap, nil, nil) - assert.NotNil(t, k8sObjectCache.ApplicationProfileCache()) -} - -func TestNetworkNeighborhoodCache(t *testing.T) { - nn := &objectcache.NetworkNeighborhoodCacheMock{} - k8sObjectCache := NewObjectCache(nil, nil, nn, nil) - assert.NotNil(t, k8sObjectCache.NetworkNeighborhoodCache()) +func TestContainerProfileCache(t *testing.T) { + cp := &objectcache.ContainerProfileCacheMock{} + k8sObjectCache := NewObjectCache(nil, cp, nil) + assert.NotNil(t, k8sObjectCache.ContainerProfileCache()) } diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/capability.go b/pkg/rulemanager/cel/libraries/applicationprofile/capability.go index 5f3c09f217..13cbc0866c 100644 --- a/pkg/rulemanager/cel/libraries/applicationprofile/capability.go +++ b/pkg/rulemanager/cel/libraries/applicationprofile/capability.go @@ -23,12 +23,12 @@ func (l *apLibrary) wasCapabilityUsed(containerID, capabilityName ref.Val) ref.V return types.MaybeNoSuchOverloadErr(capabilityName) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return 
cache.NewProfileNotAvailableErr("%v", err) } - if slices.Contains(container.Capabilities, capabilityNameStr) { + if slices.Contains(cp.Spec.Capabilities, capabilityNameStr) { return types.Bool(true) } diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/exec.go b/pkg/rulemanager/cel/libraries/applicationprofile/exec.go index d7a16d0908..e02e1524cb 100644 --- a/pkg/rulemanager/cel/libraries/applicationprofile/exec.go +++ b/pkg/rulemanager/cel/libraries/applicationprofile/exec.go @@ -1,8 +1,6 @@ package applicationprofile import ( - "slices" - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -11,6 +9,7 @@ import ( "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/celparse" "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" ) func (l *apLibrary) wasExecuted(containerID, path ref.Val) ref.Val { @@ -32,14 +31,14 @@ func (l *apLibrary) wasExecuted(containerID, path ref.Val) ref.Val { return types.Bool(true) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { // Return a special error that will NOT be cached, allowing retry when profile becomes available. // The caller should convert this to false after the cache layer. return cache.NewProfileNotAvailableErr("%v", err) } - for _, exec := range container.Execs { + for _, exec := range cp.Spec.Execs { if exec.Path == pathStr { return types.Bool(true) } @@ -77,18 +76,16 @@ func (l *apLibrary) wasExecutedWithArgs(containerID, path, args ref.Val) ref.Val return types.Bool(true) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { // Return a special error that will NOT be cached, allowing retry when profile becomes available. // The caller should convert this to false after the cache layer. return cache.NewProfileNotAvailableErr("%v", err) } - for _, exec := range container.Execs { - if exec.Path == pathStr { - if slices.Compare(exec.Args, celArgs) == 0 { - return types.Bool(true) - } + for _, exec := range cp.Spec.Execs { + if exec.Path == pathStr && dynamicpathdetector.CompareExecArgs(exec.Args, celArgs) { + return types.Bool(true) } } diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go index 8821e7bdfd..8b2d0d0ffd 100644 --- a/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go +++ b/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go @@ -299,6 +299,127 @@ func TestExecWithArgsNoProfile(t *testing.T) { assert.False(t, actualResult, "ap.was_executed_with_args should return false when no profile is available") } +// TestExecWithArgsWildcardInProfile exercises wildcard tokens inside a +// user-defined ApplicationProfile's exec arg vector: +// +// "⋯" (DynamicIdentifier) — matches exactly one argument position. +// "*" (WildcardIdentifier) — matches zero or more consecutive args. +// +// The runtime exec arg vector is matched against the profile via +// dynamicpathdetector.CompareExecArgs (added in +// k8sstormcenter/storage#23 — the matcher that this CEL function now +// routes through instead of slices.Compare). 
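+//
+// Worked matches, inferred from the table-driven cases below (the matcher's
+// actual semantics live in the storage package):
+//
+//	profile ["--user", "⋯"] vs runtime ["--user", "alice"]        -> match
+//	profile ["--user", "⋯"] vs runtime ["--user", "alice", "bob"] -> no match (⋯ is exactly one arg)
+//	profile ["-c", "*"]     vs runtime ["-c"]                     -> match (* may span zero args)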
+func TestExecWithArgsWildcardInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Execs: []v1beta1.ExecCalls{ + // curl any URL: --user must be literal, value is one position. + { + Path: "/usr/bin/curl", + Args: []string{"--user", "⋯"}, + }, + // sh -c with any trailing payload (zero or more args). + { + Path: "/bin/sh", + Args: []string{"-c", "*"}, + }, + // ls -l in any directory — single trailing position. + { + Path: "/bin/ls", + Args: []string{"-l", "⋯"}, + }, + // echo with any number of greeting words after a literal anchor. + { + Path: "/bin/echo", + Args: []string{"hello", "*"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("args", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + path string + args []string + expectedResult bool + }{ + // curl with --user, dynamic value + {"curl --user alice — ⋯ matches one arg", "/usr/bin/curl", []string{"--user", "alice"}, true}, + {"curl --user alice bob — extra arg, ⋯ rejects", "/usr/bin/curl", []string{"--user", "alice", "bob"}, false}, + {"curl --user — missing value, ⋯ requires one arg", "/usr/bin/curl", []string{"--user"}, false}, + {"curl --pass alice — literal mismatch", "/usr/bin/curl", []string{"--pass", "alice"}, false}, + + // sh -c with arbitrary trailing payload + {"sh -c with single command", "/bin/sh", []string{"-c", "echo hi"}, true}, + {"sh -c with multi-token command", "/bin/sh", []string{"-c", "while", "true;", "do", "sleep", "1;", "done"}, true}, + {"sh -c with no trailing args (* matches zero)", "/bin/sh", []string{"-c"}, true}, + {"sh -x — wrong flag", "/bin/sh", []string{"-x", "echo hi"}, false}, + + // ls -l in any directory + {"ls -l /var/log", "/bin/ls", []string{"-l", "/var/log"}, true}, + {"ls -l with no directory (⋯ requires one)", "/bin/ls", []string{"-l"}, false}, + + // echo hello * + {"echo hello world from test", "/bin/echo", []string{"hello", "world", "from", "test"}, true}, + {"echo hello (no trailing args)", "/bin/echo", []string{"hello"}, true}, + {"echo goodbye world — wrong literal anchor", "/bin/echo", []string{"goodbye", "world"}, false}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_executed_with_args(containerID, path, args)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": tc.path, + "args": tc.args, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, 
actualResult, + "runtime args %v vs profile (one of curl/sh/ls/echo overlay): got %v want %v", + tc.args, actualResult, tc.expectedResult) + }) + } +} + func TestExecWithArgsCompilation(t *testing.T) { objCache := objectcachev1.RuleObjectCacheMock{} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/http.go b/pkg/rulemanager/cel/libraries/applicationprofile/http.go index ef7132e29c..fe91609a55 100644 --- a/pkg/rulemanager/cel/libraries/applicationprofile/http.go +++ b/pkg/rulemanager/cel/libraries/applicationprofile/http.go @@ -28,12 +28,12 @@ func (l *apLibrary) wasEndpointAccessed(containerID, endpoint ref.Val) ref.Val { return types.MaybeNoSuchOverloadErr(endpoint) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ep := range container.Endpoints { + for _, ep := range cp.Spec.Endpoints { if dynamicpathdetector.CompareDynamic(ep.Endpoint, endpointStr) { return types.Bool(true) } @@ -61,12 +61,12 @@ func (l *apLibrary) wasEndpointAccessedWithMethod(containerID, endpoint, method return types.MaybeNoSuchOverloadErr(method) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ep := range container.Endpoints { + for _, ep := range cp.Spec.Endpoints { if dynamicpathdetector.CompareDynamic(ep.Endpoint, endpointStr) { if slices.Contains(ep.Methods, methodStr) { return types.Bool(true) @@ -97,12 +97,12 @@ func (l *apLibrary) wasEndpointAccessedWithMethods(containerID, endpoint, method return types.NewErr("failed to parse methods: %v", err) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ep := range container.Endpoints { + for _, ep := range cp.Spec.Endpoints { if dynamicpathdetector.CompareDynamic(ep.Endpoint, endpointStr) { for _, method := range celMethods { if slices.Contains(ep.Methods, method) { @@ -130,12 +130,12 @@ func (l *apLibrary) wasEndpointAccessedWithPrefix(containerID, prefix ref.Val) r return types.MaybeNoSuchOverloadErr(prefix) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ep := range container.Endpoints { + for _, ep := range cp.Spec.Endpoints { if strings.HasPrefix(ep.Endpoint, prefixStr) { return types.Bool(true) } @@ -159,12 +159,12 @@ func (l *apLibrary) wasEndpointAccessedWithSuffix(containerID, suffix ref.Val) r return types.MaybeNoSuchOverloadErr(suffix) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ep := range container.Endpoints { + for _, ep := range cp.Spec.Endpoints { if strings.HasSuffix(ep.Endpoint, suffixStr) { return types.Bool(true) } @@ -189,12 +189,12 @@ func (l *apLibrary) wasHostAccessed(containerID, host ref.Val) 
ref.Val { } // Check HTTP endpoints for host access - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ep := range container.Endpoints { + for _, ep := range cp.Spec.Endpoints { // Parse the endpoint URL to extract host if parsedURL, err := url.Parse(ep.Endpoint); err == nil && parsedURL.Host != "" { if parsedURL.Host == hostStr || parsedURL.Hostname() == hostStr { diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/open.go b/pkg/rulemanager/cel/libraries/applicationprofile/open.go index fc584e6fcb..63d8f604a4 100644 --- a/pkg/rulemanager/cel/libraries/applicationprofile/open.go +++ b/pkg/rulemanager/cel/libraries/applicationprofile/open.go @@ -25,12 +25,12 @@ func (l *apLibrary) wasPathOpened(containerID, path ref.Val) ref.Val { return types.MaybeNoSuchOverloadErr(path) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, open := range container.Opens { + for _, open := range cp.Spec.Opens { if dynamicpathdetector.CompareDynamic(open.Path, pathStr) { return types.Bool(true) } @@ -59,12 +59,12 @@ func (l *apLibrary) wasPathOpenedWithFlags(containerID, path, flags ref.Val) ref return types.NewErr("failed to parse flags: %v", err) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, open := range container.Opens { + for _, open := range cp.Spec.Opens { if dynamicpathdetector.CompareDynamic(open.Path, pathStr) { if compareOpenFlags(celFlags, open.Flags) { return types.Bool(true) @@ -89,12 +89,12 @@ func (l *apLibrary) wasPathOpenedWithSuffix(containerID, suffix ref.Val) ref.Val return types.MaybeNoSuchOverloadErr(suffix) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, open := range container.Opens { + for _, open := range cp.Spec.Opens { if strings.HasSuffix(open.Path, suffixStr) { return types.Bool(true) } @@ -117,12 +117,12 @@ func (l *apLibrary) wasPathOpenedWithPrefix(containerID, prefix ref.Val) ref.Val return types.MaybeNoSuchOverloadErr(prefix) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, open := range container.Opens { + for _, open := range cp.Spec.Opens { if strings.HasPrefix(open.Path, prefixStr) { return types.Bool(true) } diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go b/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go index 7a26aa1846..7383aec5ba 100644 --- a/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go +++ b/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go @@ -23,12 +23,12 @@ func (l *apLibrary) wasSyscallUsed(containerID, syscallName ref.Val) ref.Val { return 
types.MaybeNoSuchOverloadErr(syscallName) } - container, _, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - if slices.Contains(container.Syscalls, syscallNameStr) { + if slices.Contains(cp.Spec.Syscalls, syscallNameStr) { return types.Bool(true) } diff --git a/pkg/rulemanager/cel/libraries/k8s/k8s_test.go b/pkg/rulemanager/cel/libraries/k8s/k8s_test.go index e8001c2a8b..039c9fbeb4 100644 --- a/pkg/rulemanager/cel/libraries/k8s/k8s_test.go +++ b/pkg/rulemanager/cel/libraries/k8s/k8s_test.go @@ -61,7 +61,7 @@ func TestK8sLibrary(t *testing.T) { // Add the pod to the cache directly k8sObjCache.AddHandler(context.Background(), testPod) - objectCache := objectcache.NewObjectCache(k8sObjCache, nil, nil, nil) + objectCache := objectcache.NewObjectCache(k8sObjCache, nil, nil) env, err := cel.NewEnv( cel.Variable("event", cel.AnyType), K8s(objectCache.K8sObjectCache(), config.Config{}), @@ -134,7 +134,7 @@ func TestK8sLibraryGetContainerByName(t *testing.T) { // Add the pod to the cache directly k8sObjCache.AddHandler(context.Background(), testPod) - objectCache := objectcache.NewObjectCache(k8sObjCache, nil, nil, nil) + objectCache := objectcache.NewObjectCache(k8sObjCache, nil, nil) env, err := cel.NewEnv( cel.Variable("event", cel.AnyType), K8s(objectCache.K8sObjectCache(), config.Config{}), diff --git a/pkg/rulemanager/cel/libraries/networkneighborhood/network.go b/pkg/rulemanager/cel/libraries/networkneighborhood/network.go index 4fb334f7f1..0449ebf962 100644 --- a/pkg/rulemanager/cel/libraries/networkneighborhood/network.go +++ b/pkg/rulemanager/cel/libraries/networkneighborhood/network.go @@ -24,12 +24,12 @@ func (l *nnLibrary) wasAddressInEgress(containerID, address ref.Val) ref.Val { return types.MaybeNoSuchOverloadErr(address) } - container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, egress := range container.Egress { + for _, egress := range cp.Spec.Egress { if egress.IPAddress == addressStr { return types.Bool(true) } @@ -52,12 +52,12 @@ func (l *nnLibrary) wasAddressInIngress(containerID, address ref.Val) ref.Val { return types.MaybeNoSuchOverloadErr(address) } - container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ingress := range container.Ingress { + for _, ingress := range cp.Spec.Ingress { if ingress.IPAddress == addressStr { return types.Bool(true) } @@ -80,12 +80,12 @@ func (l *nnLibrary) isDomainInEgress(containerID, domain ref.Val) ref.Val { return types.MaybeNoSuchOverloadErr(domain) } - container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, egress := range container.Egress { + for _, egress := range cp.Spec.Egress { if slices.Contains(egress.DNSNames, domainStr) || egress.DNS == domainStr { return types.Bool(true) } @@ -108,12 +108,12 @@ func (l *nnLibrary) isDomainInIngress(containerID, domain ref.Val) 
ref.Val { return types.MaybeNoSuchOverloadErr(domain) } - container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ingress := range container.Ingress { + for _, ingress := range cp.Spec.Ingress { if slices.Contains(ingress.DNSNames, domainStr) { return types.Bool(true) } @@ -144,12 +144,12 @@ func (l *nnLibrary) wasAddressPortProtocolInEgress(containerID, address, port, p return types.MaybeNoSuchOverloadErr(protocol) } - container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, egress := range container.Egress { + for _, egress := range cp.Spec.Egress { if egress.IPAddress == addressStr { for _, portInfo := range egress.Ports { if portInfo.Protocol == v1beta1.Protocol(protocolStr) && portInfo.Port != nil && *portInfo.Port == int32(portInt) { @@ -184,12 +184,12 @@ func (l *nnLibrary) wasAddressPortProtocolInIngress(containerID, address, port, return types.MaybeNoSuchOverloadErr(protocol) } - container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + cp, _, err := profilehelper.GetContainerProfile(l.objectCache, containerIDStr) if err != nil { return cache.NewProfileNotAvailableErr("%v", err) } - for _, ingress := range container.Ingress { + for _, ingress := range cp.Spec.Ingress { if ingress.IPAddress == addressStr { for _, portInfo := range ingress.Ports { if portInfo.Protocol == v1beta1.Protocol(protocolStr) && portInfo.Port != nil && *portInfo.Port == int32(portInt) { diff --git a/pkg/rulemanager/profilehelper/profilehelper.go b/pkg/rulemanager/profilehelper/profilehelper.go index f177bb0a94..0f4d5ed0e3 100644 --- a/pkg/rulemanager/profilehelper/profilehelper.go +++ b/pkg/rulemanager/profilehelper/profilehelper.go @@ -9,58 +9,19 @@ import ( corev1 "k8s.io/api/core/v1" ) -func GetApplicationProfile(containerID string, objectCache objectcache.ObjectCache) (*v1beta1.ApplicationProfile, error) { - ap := objectCache.ApplicationProfileCache().GetApplicationProfile(containerID) - if ap == nil { - return nil, errors.New("no profile available") +// GetContainerProfile returns the ContainerProfile for a containerID plus its +// SyncChecksumMetadataKey annotation. This is the forward API; legacy callers +// go through the shims below until step 6c deletes them. 
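+//
+// Sketch of the intended call shape (caller-side names are illustrative):
+//
+//	cp, checksum, err := profilehelper.GetContainerProfile(objectCache, containerID)
+//	if err != nil {
+//		// not (yet) in cache; rule-side callers treat this as a
+//		// retryable "profile not available" condition
+//	}
+//	_ = cp.Spec.Execs // per-container data now hangs off Spec
+//	_ = checksum      // value of the SyncChecksumMetadataKey annotation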
+func GetContainerProfile(objectCache objectcache.ObjectCache, containerID string) (*v1beta1.ContainerProfile, string, error) { + cpc := objectCache.ContainerProfileCache() + if cpc == nil { + return nil, "", errors.New("no container profile cache available") } - return ap, nil -} - -func GetNetworkNeighborhood(containerID string, objectCache objectcache.ObjectCache) (*v1beta1.NetworkNeighborhood, error) { - nn := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhood(containerID) - if nn == nil { - return nil, errors.New("no profile available") - } - return nn, nil -} - -func GetContainerFromApplicationProfile(ap *v1beta1.ApplicationProfile, containerName string) (v1beta1.ApplicationProfileContainer, error) { - for _, s := range ap.Spec.Containers { - if s.Name == containerName { - return s, nil - } - } - for _, s := range ap.Spec.InitContainers { - if s.Name == containerName { - return s, nil - } - } - for _, s := range ap.Spec.EphemeralContainers { - if s.Name == containerName { - return s, nil - } + cp := cpc.GetContainerProfile(containerID) + if cp == nil { + return nil, "", errors.New("no profile available") } - return v1beta1.ApplicationProfileContainer{}, errors.New("container not found") -} - -func GetContainerFromNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood, containerName string) (v1beta1.NetworkNeighborhoodContainer, error) { - for _, c := range nn.Spec.Containers { - if c.Name == containerName { - return c, nil - } - } - for _, c := range nn.Spec.InitContainers { - if c.Name == containerName { - return c, nil - } - } - for _, c := range nn.Spec.EphemeralContainers { - if c.Name == containerName { - return c, nil - } - } - return v1beta1.NetworkNeighborhoodContainer{}, errors.New("container not found") + return cp, cp.Annotations[helpers.SyncChecksumMetadataKey], nil } func GetContainerName(objectCache objectcache.ObjectCache, containerID string) string { @@ -92,40 +53,3 @@ func GetPodSpec(objectCache objectcache.ObjectCache, containerID string) (*corev return podSpec, nil } -func GetContainerApplicationProfile(objectCache objectcache.ObjectCache, containerID string) (v1beta1.ApplicationProfileContainer, string, error) { - ap, err := GetApplicationProfile(containerID, objectCache) - if err != nil { - return v1beta1.ApplicationProfileContainer{}, "", err - } - - containerName := GetContainerName(objectCache, containerID) - if containerName == "" { - return v1beta1.ApplicationProfileContainer{}, "", errors.New("container name not found") - } - - container, err := GetContainerFromApplicationProfile(ap, containerName) - if err != nil { - return v1beta1.ApplicationProfileContainer{}, "", err - } - - return container, ap.Annotations[helpers.SyncChecksumMetadataKey], nil -} - -func GetContainerNetworkNeighborhood(objectCache objectcache.ObjectCache, containerID string) (v1beta1.NetworkNeighborhoodContainer, error) { - nn, err := GetNetworkNeighborhood(containerID, objectCache) - if err != nil { - return v1beta1.NetworkNeighborhoodContainer{}, err - } - - containerName := GetContainerName(objectCache, containerID) - if containerName == "" { - return v1beta1.NetworkNeighborhoodContainer{}, errors.New("container name not found") - } - - container, err := GetContainerFromNetworkNeighborhood(nn, containerName) - if err != nil { - return v1beta1.NetworkNeighborhoodContainer{}, err - } - - return container, nil -} diff --git a/pkg/rulemanager/rule_manager.go b/pkg/rulemanager/rule_manager.go index 7fde0990ad..a14a5ee86b 100644 --- a/pkg/rulemanager/rule_manager.go +++ 
b/pkg/rulemanager/rule_manager.go @@ -200,7 +200,7 @@ func (rm *RuleManager) ReportEnrichedEvent(enrichedEvent *events.EnrichedEvent) return } - _, apChecksum, err := profilehelper.GetContainerApplicationProfile(rm.objectCache, enrichedEvent.ContainerID) + _, apChecksum, err := profilehelper.GetContainerProfile(rm.objectCache, enrichedEvent.ContainerID) profileExists = err == nil // Early exit if monitoring is disabled for this context - skip rule evaluation @@ -345,9 +345,9 @@ func (rm *RuleManager) HasApplicableRuleBindings(namespace, name string) bool { func (rm *RuleManager) HasFinalApplicationProfile(pod *corev1.Pod) bool { for _, c := range utils.GetContainerStatuses(pod.Status) { - ap := rm.objectCache.ApplicationProfileCache().GetApplicationProfile(utils.TrimRuntimePrefix(c.ContainerID)) - if ap != nil { - if status, ok := ap.Annotations[helpersv1.StatusMetadataKey]; ok { + cp := rm.objectCache.ContainerProfileCache().GetContainerProfile(utils.TrimRuntimePrefix(c.ContainerID)) + if cp != nil { + if status, ok := cp.Annotations[helpersv1.StatusMetadataKey]; ok { // in theory, only completed profiles are stored in cache, but we check anyway return status == helpersv1.Completed } @@ -410,12 +410,12 @@ func (rm *RuleManager) EvaluatePolicyRulesForEvent(eventType utils.EventType, ev } func (rm *RuleManager) validateRulePolicy(rule typesv1.Rule, event utils.K8sEvent, containerID string) bool { - ap, _, err := profilehelper.GetContainerApplicationProfile(rm.objectCache, containerID) + cp, _, err := profilehelper.GetContainerProfile(rm.objectCache, containerID) if err != nil { return false } - allowed, err := rm.rulePolicyValidator.Validate(rule.ID, event.(utils.EnrichEvent).GetComm(), &ap) + allowed, err := rm.rulePolicyValidator.Validate(rule.ID, event.(utils.EnrichEvent).GetComm(), cp) if err != nil { logger.L().Error("RuleManager - failed to validate rule policy", helpers.Error(err)) return false diff --git a/pkg/rulemanager/ruleadapters/creator.go b/pkg/rulemanager/ruleadapters/creator.go index 9420569f7f..75783f9d32 100644 --- a/pkg/rulemanager/ruleadapters/creator.go +++ b/pkg/rulemanager/ruleadapters/creator.go @@ -145,7 +145,7 @@ func (r *RuleFailureCreator) setProfileMetadata(rule typesv1.Rule, ruleFailure * switch profileType { case armotypes.ApplicationProfile: - state := objectCache.ApplicationProfileCache().GetApplicationProfileState(triggerEvent.GetContainerID()) + state := objectCache.ContainerProfileCache().GetContainerProfileState(triggerEvent.GetContainerID()) if state != nil { profileMetadata := &armotypes.ProfileMetadata{ Status: state.Status, @@ -162,7 +162,7 @@ func (r *RuleFailureCreator) setProfileMetadata(rule typesv1.Rule, ruleFailure * } case armotypes.NetworkProfile: - state := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhoodState(triggerEvent.GetContainerID()) + state := objectCache.ContainerProfileCache().GetContainerProfileState(triggerEvent.GetContainerID()) if state != nil { profileMetadata := &armotypes.ProfileMetadata{ Status: state.Status, diff --git a/pkg/rulemanager/rulepolicy.go b/pkg/rulemanager/rulepolicy.go index 9a58943b00..f5562b2b2c 100644 --- a/pkg/rulemanager/rulepolicy.go +++ b/pkg/rulemanager/rulepolicy.go @@ -20,12 +20,12 @@ func NewRulePolicyValidator(objectCache objectcache.ObjectCache) *RulePolicyVali } } -func (v *RulePolicyValidator) Validate(ruleId string, process string, ap *v1beta1.ApplicationProfileContainer) (bool, error) { - if _, ok := ap.PolicyByRuleId[ruleId]; !ok { +func (v *RulePolicyValidator) Validate(ruleId 
string, process string, cp *v1beta1.ContainerProfile) (bool, error) { + if _, ok := cp.Spec.PolicyByRuleId[ruleId]; !ok { return false, nil } - if policy, ok := ap.PolicyByRuleId[ruleId]; ok { + if policy, ok := cp.Spec.PolicyByRuleId[ruleId]; ok { if policy.AllowedContainer || slices.Contains(policy.AllowedProcesses, process) { return true, nil } diff --git a/pkg/rulemanager/ruleswatcher/watcher.go b/pkg/rulemanager/ruleswatcher/watcher.go index 45782beb23..9d4c4b003e 100644 --- a/pkg/rulemanager/ruleswatcher/watcher.go +++ b/pkg/rulemanager/ruleswatcher/watcher.go @@ -2,14 +2,18 @@ package ruleswatcher import ( "context" + "errors" "os" "github.com/Masterminds/semver/v3" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/signature" + "github.com/kubescape/node-agent/pkg/signature/profiles" "github.com/kubescape/node-agent/pkg/watcher" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -22,14 +26,16 @@ var _ RulesWatcher = (*RulesWatcherImpl)(nil) type RulesWatcherImpl struct { ruleCreator rulecreator.RuleCreator k8sClient k8sclient.K8sClientInterface + cfg *config.Config callback RulesWatcherCallback watchResources []watcher.WatchResource } -func NewRulesWatcher(k8sClient k8sclient.K8sClientInterface, ruleCreator rulecreator.RuleCreator, callback RulesWatcherCallback) *RulesWatcherImpl { +func NewRulesWatcher(k8sClient k8sclient.K8sClientInterface, ruleCreator rulecreator.RuleCreator, callback RulesWatcherCallback, cfg *config.Config) *RulesWatcherImpl { return &RulesWatcherImpl{ ruleCreator: ruleCreator, k8sClient: k8sClient, + cfg: cfg, callback: callback, watchResources: []watcher.WatchResource{ watcher.NewWatchResource(typesv1.RuleGvr, metav1.ListOptions{}), @@ -71,7 +77,8 @@ func (w *RulesWatcherImpl) syncAllRulesAndNotify(ctx context.Context) { // syncAllRulesFromCluster fetches all rules from the cluster and syncs them with the rule creator. // Rules are filtered by: // 1. Enabled status - only enabled rules are considered -// 2. Agent version compatibility - rules with AgentVersionRequirement are checked against AGENT_VERSION env var using semver +// 2. Signature verification - if enabled, verifies rules have valid signatures +// 3. 
Agent version compatibility - rules with AgentVersionRequirement are checked against AGENT_VERSION env var using semver func (w *RulesWatcherImpl) syncAllRulesFromCluster(ctx context.Context) error { unstructuredList, err := w.k8sClient.GetDynamicClient().Resource(typesv1.RuleGvr).List(ctx, metav1.ListOptions{}) if err != nil { @@ -80,12 +87,20 @@ func (w *RulesWatcherImpl) syncAllRulesFromCluster(ctx context.Context) error { var enabledRules []typesv1.Rule var skippedVersionCount int + var skippedVerificationCount int for _, item := range unstructuredList.Items { rules, err := unstructuredToRules(&item) if err != nil { logger.L().Warning("RulesWatcher - failed to convert rule during sync", helpers.Error(err)) continue } + + // Verify signature if enabled + if err := w.verifyRules(rules); err != nil { + skippedVerificationCount++ + continue + } + for _, rule := range rules.Spec.Rules { if rule.Enabled { // Check agent version requirement if specified @@ -109,7 +124,8 @@ func (w *RulesWatcherImpl) syncAllRulesFromCluster(ctx context.Context) error { logger.L().Info("RulesWatcher - synced rules from cluster", helpers.Int("enabledRules", len(enabledRules)), helpers.Int("totalRules", len(unstructuredList.Items)), - helpers.Int("skippedByVersion", skippedVersionCount)) + helpers.Int("skippedByVersion", skippedVersionCount), + helpers.Int("skippedByVerification", skippedVerificationCount)) return nil } @@ -126,6 +142,30 @@ func unstructuredToRules(obj *unstructured.Unstructured) (*typesv1.Rules, error) return rule, nil } +func (w *RulesWatcherImpl) verifyRules(rules *typesv1.Rules) error { + if w.cfg == nil || !w.cfg.EnableSignatureVerification { + return nil + } + rulesAdapter := profiles.NewRulesAdapter(rules) + if err := signature.VerifyObject(rulesAdapter); err != nil { + if errors.Is(err, signature.ErrObjectNotSigned) { + logger.L().Debug("Rules resource is not signed, skipping", + helpers.String("name", rules.Name), + helpers.String("namespace", rules.Namespace)) + } else { + logger.L().Warning("Rules resource signature verification failed", + helpers.String("name", rules.Name), + helpers.String("namespace", rules.Namespace), + helpers.Error(err)) + } + return err + } + logger.L().Debug("Rules resource signature verification successful", + helpers.String("name", rules.Name), + helpers.String("namespace", rules.Namespace)) + return nil +} + // isAgentVersionCompatible checks if the current agent version satisfies the given requirement // using semantic versioning constraints. Returns true if compatible, false otherwise. 
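+// For example, with AGENT_VERSION=v0.2.3 (constraint syntax per the imported
+// Masterminds/semver package; handling of malformed requirements is up to the
+// body below):
+//
+//	isAgentVersionCompatible(">= 0.2.0")          // satisfied
+//	isAgentVersionCompatible(">= 1.0.0, < 2.0.0") // not satisfied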
diff --git a/pkg/sbommanager/v1/sbom_manager.go b/pkg/sbommanager/v1/sbom_manager.go
index 577e4e1a61..2f6d059b93 100644
--- a/pkg/sbommanager/v1/sbom_manager.go
+++ b/pkg/sbommanager/v1/sbom_manager.go
@@ -17,6 +17,7 @@ import (
 	"github.com/DmitriyVTitov/size"
 	"github.com/anchore/syft/syft"
+	"github.com/anchore/syft/syft/cataloging"
 	"github.com/anchore/syft/syft/cataloging/pkgcataloging"
 	sbomcataloger "github.com/anchore/syft/syft/pkg/cataloger/sbom"
 	"github.com/aquilax/truncate"
@@ -471,6 +472,13 @@ func (s *SbomManager) processContainerWithMetadata(notif containercollection.Pub
 	sbomCfg := syft.DefaultCreateSBOMConfig()
 	sbomCfg.ToolName = "syft"
 	sbomCfg.ToolVersion = s.version
+	sbomCfg = sbomCfg.WithCatalogerSelection(
+		cataloging.NewSelectionRequest().WithRemovals(
+			"file-digest-cataloger",
+			"file-metadata-cataloger",
+			"file-executable-cataloger",
+		),
+	)
 	if s.cfg.EnableEmbeddedSboms {
 		sbomCfg.WithCatalogers(pkgcataloging.NewCatalogerReference(sbomcataloger.NewCataloger(), []string{pkgcataloging.ImageTag}))
 	}
diff --git a/pkg/sbomscanner/v1/server.go b/pkg/sbomscanner/v1/server.go
index 1b105bb286..360d67c70d 100644
--- a/pkg/sbomscanner/v1/server.go
+++ b/pkg/sbomscanner/v1/server.go
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/anchore/syft/syft"
+	"github.com/anchore/syft/syft/cataloging"
 	"github.com/anchore/syft/syft/cataloging/pkgcataloging"
 	sbomcataloger "github.com/anchore/syft/syft/pkg/cataloger/sbom"
 	"github.com/kubescape/go-logger"
@@ -59,6 +60,13 @@ func (s *scannerServer) CreateSBOM(ctx context.Context, req *pb.CreateSBOMReques
 	cfg := syft.DefaultCreateSBOMConfig()
 	cfg.ToolName = "syft"
 	cfg.ToolVersion = s.version
+	cfg = cfg.WithCatalogerSelection(
+		cataloging.NewSelectionRequest().WithRemovals(
+			"file-digest-cataloger",
+			"file-metadata-cataloger",
+			"file-executable-cataloger",
+		),
+	)
 	if req.EnableEmbeddedSboms {
 		cfg.WithCatalogers(pkgcataloging.NewCatalogerReference(sbomcataloger.NewCataloger(), []string{pkgcataloging.ImageTag}))
 	}
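Both hunks above drop syft's per-file catalogers (digest, metadata, executable), which trims SBOM size and memory without affecting package-level vulnerability matching. A minimal standalone sketch of the same pattern, assuming syft's public `GetSource`/`CreateSBOM` API and using a placeholder image name:

```go
// Sketch of the cataloger-selection pattern applied in the hunks above,
// in isolation. The image reference is a placeholder.
package main

import (
	"context"
	"fmt"

	"github.com/anchore/syft/syft"
	"github.com/anchore/syft/syft/cataloging"
)

func main() {
	ctx := context.Background()

	src, err := syft.GetSource(ctx, "alpine:3.19", nil)
	if err != nil {
		panic(err)
	}

	cfg := syft.DefaultCreateSBOMConfig().
		// Drop the file-level catalogers: they dominate SBOM size but are
		// not needed for package-level scanning.
		WithCatalogerSelection(cataloging.NewSelectionRequest().WithRemovals(
			"file-digest-cataloger",
			"file-metadata-cataloger",
			"file-executable-cataloger",
		))

	sbom, err := syft.CreateSBOM(ctx, src, cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("packages: %d\n", sbom.Artifacts.Packages.PackageCount())
}
```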
diff --git a/pkg/signature/annotations.go b/pkg/signature/annotations.go
new file mode 100644
index 0000000000..8df333d21e
--- /dev/null
+++ b/pkg/signature/annotations.go
@@ -0,0 +1,16 @@
+package signature
+
+import "errors"
+
+const (
+	AnnotationPrefix = "signature.kubescape.io"
+
+	AnnotationSignature   = AnnotationPrefix + "/signature"
+	AnnotationCertificate = AnnotationPrefix + "/certificate"
+	AnnotationRekorBundle = AnnotationPrefix + "/rekor-bundle"
+	AnnotationIssuer      = AnnotationPrefix + "/issuer"
+	AnnotationIdentity    = AnnotationPrefix + "/identity"
+	AnnotationTimestamp   = AnnotationPrefix + "/timestamp"
+)
+
+var ErrObjectNotSigned = errors.New("object is not signed (missing signature annotation)")
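On a signed object these keys end up in `metadata.annotations`. For orientation, this is roughly what they look like; every value below is a fabricated placeholder, not real cluster output:

```go
// Illustrative only — the shape of the annotations a signed object carries.
// All values are fabricated placeholders.
package main

import "fmt"

func main() {
	annotations := map[string]string{
		"signature.kubescape.io/signature":   "MEQCIB...",         // base64-encoded ECDSA signature
		"signature.kubescape.io/certificate": "LS0tLS1CRUdJTi...", // base64-encoded PEM certificate
		"signature.kubescape.io/issuer":      "local",             // or an OIDC issuer URL for keyless
		"signature.kubescape.io/identity":    "local-key",         // signer identity (email/URI for keyless)
		"signature.kubescape.io/timestamp":   "1717000000",        // unix seconds at signing time
	}
	fmt.Println(len(annotations))
}
```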
diff --git a/pkg/signature/cluster_flow_test.go b/pkg/signature/cluster_flow_test.go
new file mode 100644
index 0000000000..23dfe89580
--- /dev/null
+++ b/pkg/signature/cluster_flow_test.go
@@ -0,0 +1,150 @@
+package signature
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"math/big"
+	"testing"
+	"time"
+
+	sigstore_signature "github.com/sigstore/sigstore/pkg/signature"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/kubescape/node-agent/pkg/signature/profiles"
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+)
+
+func TestClusterProfileStructure(t *testing.T) {
+	// Simulate a cluster profile with empty TypeMeta (like from cluster)
+	profile := &v1beta1.ApplicationProfile{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "",
+			Kind:       "",
+		},
+	}
+	profile.Name = "test-signed"
+	profile.Namespace = "default"
+
+	adapter := profiles.NewApplicationProfileAdapter(profile)
+	content := adapter.GetContent()
+
+	if m, ok := content.(map[string]interface{}); ok {
+		t.Logf("apiVersion: %v (type: %T)", m["apiVersion"], m["apiVersion"])
+		t.Logf("kind: %v (type: %T)", m["kind"], m["kind"])
+
+		// Verify fallback values are applied
+		if m["apiVersion"] != "spdx.softwarecomposition.kubescape.io/v1beta1" {
+			t.Errorf("Expected fallback apiVersion, got %s", m["apiVersion"])
+		}
+		if m["kind"] != "ApplicationProfile" {
+			t.Errorf("Expected fallback kind, got %s", m["kind"])
+		}
+	} else {
+		t.Errorf("Expected map, got %T", content)
+	}
+}
+
+func TestReproduceClusterVerificationFlow(t *testing.T) {
+	// Simulate the exact scenario from the cluster
+	profile := &v1beta1.ApplicationProfile{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "",
+			Kind:       "",
+		},
+	}
+	profile.Name = "replicaset-nginx2-5bffdcc777-signed"
+	profile.Namespace = "default"
+	profile.Labels = map[string]string{
+		"kubescape.io/instance-template-hash":     "5bffdcc777",
+		"kubescape.io/workload-api-group":         "apps",
+		"kubescape.io/workload-api-version":       "v1",
+		"kubescape.io/workload-kind":              "Deployment",
+		"kubescape.io/workload-name":              "nginx2",
+		"kubescape.io/workload-namespace":         "default",
+		"kubescape.io/workload-resource-version":  "15471",
+	}
+
+	adapter := profiles.NewApplicationProfileAdapter(profile)
+
+	// Calculate hash
+	cosignAdapter := &CosignAdapter{}
+	hash, err := cosignAdapter.GetContentHash(adapter.GetContent())
+	if err != nil {
+		t.Fatalf("Failed to compute hash: %v", err)
+	}
+
+	t.Logf("Computed hash: %s", hash)
+
+	// Generate a key and sign
+	privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Fatalf("Failed to generate key: %v", err)
+	}
+	signer, err := sigstore_signature.LoadECDSASigner(privKey, crypto.SHA256)
+	if err != nil {
+		t.Fatalf("Failed to load signer: %v", err)
+	}
+
+	sig, err := signer.SignMessage(bytes.NewReader([]byte(hash)))
+	if err != nil {
+		t.Fatalf("Failed to sign message: %v", err)
+	}
+	certBytes, err := generateTestCertificate(privKey)
+	if err != nil {
+		t.Fatalf("Failed to generate test certificate: %v", err)
+	}
+
+	// Use the package-level annotation flow
+	sigObj := &Signature{
+		Signature:   sig,
+		Certificate: certBytes,
+		Timestamp:   time.Now().Unix(),
+	}
+	annotations, err := cosignAdapter.EncodeSignatureToAnnotations(sigObj)
+	if err != nil {
+		t.Fatalf("Failed to encode signature to annotations: %v", err)
+	}
+	adapter.SetAnnotations(annotations)
+
+	// Now verify using the higher-level flow
+	err = VerifyObjectAllowUntrusted(adapter)
+	if err != nil {
+		t.Fatalf("VerifyObjectAllowUntrusted failed: %v", err)
+	}
+}
+
+func generateTestCertificate(privKey *ecdsa.PrivateKey) ([]byte, error) {
+	serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
+	if err != nil {
+		return nil, err
+	}
+
+	template := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject: pkix.Name{
+			CommonName: "test-signer",
+		},
+		NotBefore:   time.Now(),
+		NotAfter:    time.Now().Add(365 * 24 * time.Hour),
+		KeyUsage:    x509.KeyUsageDigitalSignature,
+		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+	if err != nil {
+		return nil, err
+	}
+
+	certPEM := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: certDER,
+	})
+
+	return certPEM, nil
+}
diff --git a/pkg/signature/cluster_scenario_test.go b/pkg/signature/cluster_scenario_test.go
new file mode 100644
index 0000000000..b26813d6d2
--- /dev/null
+++ b/pkg/signature/cluster_scenario_test.go
@@ -0,0 +1,88 @@
+package signature
+
+import (
+	"testing"
+
+	"github.com/kubescape/node-agent/pkg/signature/profiles"
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// This test replicates the exact scenario from the production cluster where:
+// 1. Profiles are loaded from the cluster with empty TypeMeta (APIVersion="", Kind="")
+// 2. The adapter's GetContent() should fill in the correct fallback values
+// 3. Signatures created and verified using these profiles should succeed
+
+func TestClusterScenarioIntegration(t *testing.T) {
+	// Simulate a profile as it comes from the cluster (empty TypeMeta)
+	clusterProfile := &v1beta1.ApplicationProfile{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "",
+			Kind:       "",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "replicaset-test-workload-123456789",
+			Namespace: "default",
+			Labels: map[string]string{
+				"kubescape.io/instance-template-hash": "123456789",
+				"kubescape.io/workload-kind":          "Deployment",
+				"kubescape.io/workload-name":          "test-workload",
+				"kubescape.io/workload-namespace":     "default",
+			},
+		},
+	}
+
+	// Create adapter
+	adapter := profiles.NewApplicationProfileAdapter(clusterProfile)
+
+	// Verify GetContent() populates TypeMeta correctly
+	content := adapter.GetContent()
+	contentMap, ok := content.(map[string]interface{})
+	if !ok {
+		t.Fatalf("GetContent() should return map[string]interface{}, got %T", content)
+	}
+
+	// Check that fallback values are applied
+	if contentMap["apiVersion"] != "spdx.softwarecomposition.kubescape.io/v1beta1" {
+		t.Errorf("Expected apiVersion fallback to be applied, got: %v", contentMap["apiVersion"])
+	}
+	if contentMap["kind"] != "ApplicationProfile" {
+		t.Errorf("Expected kind fallback to be applied, got: %v", contentMap["kind"])
+	}
+
+	// Verify metadata is correctly structured
+	metadata, ok := contentMap["metadata"].(map[string]interface{})
+	if !ok {
+		t.Fatal("metadata should be a map[string]interface{}")
+	}
+
+	if metadata["name"] != clusterProfile.Name {
+		t.Errorf("Expected metadata.name=%s, got %v", clusterProfile.Name, metadata["name"])
+	}
+	if metadata["namespace"] != clusterProfile.Namespace {
+		t.Errorf("Expected metadata.namespace=%s, got %v", clusterProfile.Namespace, metadata["namespace"])
+	}
+	if metadata["labels"] == nil {
+		t.Error("metadata.labels should not be nil")
+	}
+
+	// Now verify that signing and verification work end-to-end
+	if err := SignObjectDisableKeyless(adapter); err != nil {
+		t.Fatalf("Failed to sign object: %v", err)
+	}
+
+	if clusterProfile.Annotations == nil {
+		t.Fatal("Annotations should be set after signing")
+	}
+
+	if _, ok := clusterProfile.Annotations[AnnotationSignature]; !ok {
+		t.Error("Signature annotation should be set after signing")
+	}
+
+	// Verify the signature
+	if err := VerifyObjectAllowUntrusted(adapter); err != nil {
+		t.Fatalf("Failed to verify object: %v", err)
+	}
+
+	t.Log("✓ Cluster scenario integration test passed: profile with empty TypeMeta successfully signed and verified")
+}
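The empty-TypeMeta scenario these tests pin down matters because typed Kubernetes clients strip `apiVersion`/`kind` on decode, so a signer that saw them set and a verifier that sees them empty would hash different bytes. A small sketch of why the adapter's fallback keeps both views identical:

```go
// Demonstrates why GetContent's TypeMeta fallback matters: both the
// "as written" and "as read back from the cluster" views of the same
// profile must canonicalize to the same content.
package main

import (
	"fmt"

	"github.com/kubescape/node-agent/pkg/signature/profiles"
	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// As written by the signer (TypeMeta populated)...
	signed := &v1beta1.ApplicationProfile{
		TypeMeta:   metav1.TypeMeta{APIVersion: "spdx.softwarecomposition.kubescape.io/v1beta1", Kind: "ApplicationProfile"},
		ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"},
	}
	// ...and as returned by a typed client (TypeMeta zeroed on decode).
	fromCluster := &v1beta1.ApplicationProfile{
		ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"},
	}

	a := profiles.NewApplicationProfileAdapter(signed).GetContent().(map[string]interface{})
	b := profiles.NewApplicationProfileAdapter(fromCluster).GetContent().(map[string]interface{})

	// The fallback makes both views hash identically.
	fmt.Println(a["apiVersion"] == b["apiVersion"], a["kind"] == b["kind"]) // true true
}
```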
diff --git a/pkg/signature/cosign_adapter.go b/pkg/signature/cosign_adapter.go
new file mode 100644
index 0000000000..b78d8920ab
--- /dev/null
+++ b/pkg/signature/cosign_adapter.go
@@ -0,0 +1,572 @@
+package signature
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/golang-jwt/jwt/v5"
+	"github.com/kubescape/storage/pkg/utils"
+	"github.com/sigstore/cosign/v3/pkg/cosign"
+	"github.com/sigstore/cosign/v3/pkg/cosign/bundle"
+	"github.com/sigstore/cosign/v3/pkg/providers"
+	_ "github.com/sigstore/cosign/v3/pkg/providers/all"
+	"github.com/sigstore/fulcio/pkg/api"
+	"github.com/sigstore/rekor/pkg/generated/client"
+	"github.com/sigstore/rekor/pkg/generated/models"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"github.com/sigstore/sigstore/pkg/fulcioroots"
+	"github.com/sigstore/sigstore/pkg/oauthflow"
+	sigstore_signature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// Blank assignments keep the sigstore imports above referenced; several of
+// these packages are only exercised by the keyless flow.
+var _ = cosign.Signature
+var _ = providers.Enabled
+var _ = bundle.RekorBundle{}
+var _ = api.CertificateRequest{}
+var _ = client.Rekor{}
+var _ = models.LogEntry{}
+var _ = fulcioroots.Get
+var _ = oauthflow.OIDConnect
+var _ = oauthflow.DefaultIDTokenGetter
+
+const (
+	sigstoreIssuer = "https://token.actions.githubusercontent.com"
+	sigstoreOIDC   = "kubernetes.io"
+	fulcioURL      = "https://fulcio.sigstore.dev"
+	rekorURL       = "https://rekor.sigstore.dev"
+)
+
+type CosignAdapter struct {
+	privateKey    *ecdsa.PrivateKey
+	signer        sigstore_signature.Signer
+	verifier      sigstore_signature.Verifier
+	useKeyless    bool
+	tokenProvider func(ctx context.Context) (string, error)
+}
+
+func NewCosignAdapter(useKeyless bool) (*CosignAdapter, error) {
+	if useKeyless {
+		return &CosignAdapter{
+			useKeyless: true,
+		}, nil
+	}
+
+	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate private key: %w", err)
+	}
+
+	signer, err := sigstore_signature.LoadECDSASigner(privateKey, crypto.SHA256)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load ECDSA signer: %w", err)
+	}
+
+	verifier, err := sigstore_signature.LoadECDSAVerifier(&privateKey.PublicKey, crypto.SHA256)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load ECDSA verifier: %w", err)
+	}
+
+	return &CosignAdapter{
+		privateKey: privateKey,
+		signer:     signer,
+		verifier:   verifier,
+		useKeyless: false,
+	}, nil
+}
+
+func NewCosignAdapterWithPrivateKey(useKeyless bool, privateKey *ecdsa.PrivateKey) (*CosignAdapter, error) {
+	if privateKey == nil {
+		return nil, fmt.Errorf("private key cannot be nil")
+	}
+
+	signer, err := sigstore_signature.LoadECDSASigner(privateKey, crypto.SHA256)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load ECDSA signer: %w", err)
+	}
+
+	verifier, err := sigstore_signature.LoadECDSAVerifier(&privateKey.PublicKey, crypto.SHA256)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load ECDSA verifier: %w", err)
+	}
+
+	return &CosignAdapter{
+		privateKey: privateKey,
+		signer:     signer,
+		verifier:   verifier,
+		useKeyless: useKeyless,
+	}, nil
+}
+
+func (c *CosignAdapter) SignData(data []byte) (*Signature, error) {
+	if c.useKeyless {
+		return c.signKeyless(data)
+	}
+
+	return c.signWithKey(data)
+}
+
+func (c *CosignAdapter) SetTokenProvider(provider func(context.Context) (string, error)) {
+	c.tokenProvider = provider
+}
+
+func (c *CosignAdapter) signKeyless(data []byte) (*Signature, error) {
+	ctx := context.Background()
+
+	var tok string
+	var err error
+	var identity string
+	var issuer string
+
+	// 1. Get OIDC Token
+	if c.tokenProvider != nil {
+		tok, err = c.tokenProvider(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to provide OIDC token from provider: %w", err)
+		}
+	} else if providers.Enabled(ctx) {
+		tok, err = providers.Provide(ctx, "sigstore")
+		if err != nil {
+			return nil, fmt.Errorf("failed to provide OIDC token: %w", err)
+		}
+	}
+
+	if tok != "" {
+		// Extract "sub" and "iss" from the JWT token
+		parser := jwt.NewParser()
+		token, _, err := parser.ParseUnverified(tok, jwt.MapClaims{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse OIDC token: %w", err)
+		}
+
+		claims, ok := token.Claims.(jwt.MapClaims)
+		if !ok {
+			return nil, fmt.Errorf("failed to get claims from OIDC token")
+		}
+
+		sub, ok := claims["sub"].(string)
+		if !ok {
+			return nil, fmt.Errorf("failed to get 'sub' claim from OIDC token")
+		}
+		identity = sub
+
+		iss, ok := claims["iss"].(string)
+		if !ok {
+			return nil, fmt.Errorf("failed to get 'iss' claim from OIDC token")
+		}
+		issuer = iss
+	} else {
+		// Fallback to interactive flow if not in CI and no provider
+		fmt.Println("No OIDC provider enabled (CI). Falling back to interactive flow...")
+		// Sigstore's default issuer and client ID
+		issuerURL := "https://oauth2.sigstore.dev/auth"
+		clientID := "sigstore"
+		// This will open a browser window for authentication
+		oidcToken, err := oauthflow.OIDConnect(issuerURL, clientID, "", "", oauthflow.DefaultIDTokenGetter)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get interactive OIDC token: %w", err)
+		}
+		tok = oidcToken.RawString
+		identity = oidcToken.Subject
+		issuer = issuerURL
+	}
+	_ = tok
+
+	// 2. Generate Ephemeral Key Pair
+	privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate ephemeral key: %w", err)
+	}
+	signer, err := sigstore_signature.LoadECDSASigner(privKey, crypto.SHA256)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load ephemeral signer: %w", err)
+	}
+
+	// 3. Get Certificate from Fulcio using the real client
+	certBytes, err := c.getFulcioCertificate(ctx, privKey, identity, tok)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get certificate from Fulcio: %w", err)
+	}
+
+	// 4. Sign Data
+	sig, err := signer.SignMessage(bytes.NewReader(data))
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign data: %w", err)
+	}
+
+	// 5. Upload to Rekor (Placeholder for real upload)
+	// rekorClient, _ := rekor.GetByProxy(rekorURL)
+	// entry, _ := cosign.TLogUpload(ctx, rekorClient, sig, certBytes, data)
+
+	return &Signature{
+		Signature:   sig,
+		Certificate: certBytes,
+		Issuer:      issuer,
+		Identity:    identity,
+		Timestamp:   time.Now().Unix(),
+	}, nil
+}
+
+func (c *CosignAdapter) simulateKeyless(data []byte) (*Signature, error) {
+	return nil, fmt.Errorf("simulateKeyless is deprecated, use real keyless signing")
+}
+
+func (c *CosignAdapter) signWithKey(data []byte) (*Signature, error) {
+	sig, err := c.signer.SignMessage(bytes.NewReader(data))
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign message: %w", err)
+	}
+
+	certBytes, err := c.generateCertificate(c.privateKey, "local-key", "local")
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate certificate: %w", err)
+	}
+
+	sigObj := &Signature{
+		Signature:   sig,
+		Certificate: certBytes,
+		Issuer:      "local",
+		Identity:    "local-key",
+		Timestamp:   time.Now().Unix(),
+	}
+
+	return sigObj, nil
+}
+
+func (c *CosignAdapter) getFulcioCertificate(ctx context.Context, privKey *ecdsa.PrivateKey, identity, oidcToken string) ([]byte, error) {
+	// Parse Fulcio URL
+	fulcioAddr, err := url.Parse(fulcioURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse Fulcio URL: %w", err)
+	}
+
+	// Create Fulcio client
+	fulcioClient := api.NewClient(fulcioAddr)
+
+	// Marshal public key to ASN.1 DER format
+	pubKeyBytes, err := x509.MarshalPKIXPublicKey(&privKey.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal public key: %w", err)
+	}
+
+	// Create CertificateRequest with the public key
+	certReq := api.CertificateRequest{
+		PublicKey: api.Key{
+			Content:   pubKeyBytes,
+			Algorithm: "ecdsa",
+		},
+	}
+
+	// We need to prove possession of the OIDC token's identity by signing the identity
+	// Fulcio expects a signature over the identity (e.g. email or subject)
+	proof, err := c.ecdsaSign(privKey, []byte(identity))
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign identity for proof: %w", err)
+	}
+	certReq.SignedEmailAddress = proof
+
+	// Call Fulcio API to get certificate
+	certResp, err := fulcioClient.SigningCert(certReq, oidcToken)
+	if err != nil {
+		return nil, fmt.Errorf("Fulcio SigningCert failed: %w", err)
+	}
+
+	return certResp.CertPEM, nil
+}
+
+func (c *CosignAdapter) generateCertificate(privKey *ecdsa.PrivateKey, identity, issuer string) ([]byte, error) {
+	serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate serial number: %w", err)
+	}
+
+	template := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject: pkix.Name{
+			CommonName: identity,
+		},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
+		KeyUsage:              x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create certificate: %w", err)
+	}
+
+	certPEM := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: certDER,
+	})
+
+	return certPEM, nil
+}
+
+func (c *CosignAdapter) ecdsaSign(privKey *ecdsa.PrivateKey, data []byte) ([]byte, error) {
+	signer, err := sigstore_signature.LoadECDSASigner(privKey, crypto.SHA256)
+	if err != nil {
+		return nil, err
+	}
+	return signer.SignMessage(bytes.NewReader(data))
+}
+
+func (c *CosignAdapter) GetPrivateKeyPEM() ([]byte, error) {
+	if c.privateKey == nil {
+		return nil, fmt.Errorf("no private key available")
+	}
+
+	derBytes, err := x509.MarshalECPrivateKey(c.privateKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal private key: %w", err)
+	}
+
+	block := &pem.Block{
+		Type:  "EC PRIVATE KEY",
+		Bytes: derBytes,
+	}
+
+	return pem.EncodeToMemory(block), nil
+}
+
+func (c *CosignAdapter) GetPublicKeyPEM() ([]byte, error) {
+	if c.privateKey == nil {
+		return nil, fmt.Errorf("no private key available")
+	}
+
+	pubKeyBytes, err := cryptoutils.MarshalPublicKeyToPEM(&c.privateKey.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal public key: %w", err)
+	}
+
+	return pubKeyBytes, nil
+}
+
+func (c *CosignAdapter) VerifyData(data []byte, sig *Signature, allowUntrusted bool) error {
+	if sig == nil {
+		return fmt.Errorf("VerifyData: Signature value is nil")
+	}
+	if len(sig.Certificate) == 0 {
+		return fmt.Errorf("VerifyData: Signature.Certificate is empty")
+	}
+
+	var verifier sigstore_signature.Verifier
+	var err error
+
+	// If we have a certificate, it could be a keyless signature (Fulcio) or a key-based signature with a cert.
+	// For keyless, we should ideally verify the certificate chain and Rekor bundle.
+	// For now, we continue to support the simplified verification but using sigstore's abstractions.
+
+	block, _ := pem.Decode(sig.Certificate)
+	if block != nil && block.Type == "CERTIFICATE" {
+		var cert *x509.Certificate
+		cert, err = x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return fmt.Errorf("failed to parse certificate: %w", err)
+		}
+
+		if !allowUntrusted {
+			if cert.IsCA {
+				return fmt.Errorf("invalid certificate: must not be CA")
+			}
+
+			// Build and verify the certificate chain against the Fulcio roots
+			roots, err := fulcioroots.Get()
+			if err != nil {
+				return fmt.Errorf("failed to get Fulcio roots: %w", err)
+			}
+			opts := x509.VerifyOptions{
+				Roots: roots,
+				KeyUsages: []x509.ExtKeyUsage{
+					x509.ExtKeyUsageCodeSigning,
+				},
+				CurrentTime: time.Unix(sig.Timestamp, 0),
+			}
+			if _, err := cert.Verify(opts); err != nil {
+				return fmt.Errorf("failed to verify certificate chain: %w", err)
+			}
+
+			if time.Unix(sig.Timestamp, 0).Before(cert.NotBefore) || time.Unix(sig.Timestamp, 0).After(cert.NotAfter) {
+				return fmt.Errorf("certificate was not valid at signing time")
+			}
+
+			// Check identity. Fulcio certs store identity in Subject Alternative Name (SAN)
+			// but many systems still look at CommonName or use specific extensions.
+			// Sigstore's verify library is usually used for this, but for now we'll check SANs.
+			foundIdentity := false
+			if cert.Subject.CommonName == sig.Identity {
+				foundIdentity = true
+			} else {
+				for _, email := range cert.EmailAddresses {
+					if email == sig.Identity {
+						foundIdentity = true
+						break
+					}
+				}
+				if !foundIdentity {
+					for _, uri := range cert.URIs {
+						if uri.String() == sig.Identity {
+							foundIdentity = true
+							break
+						}
+					}
+				}
+			}
+
+			if sig.Identity != "" && !foundIdentity {
+				return fmt.Errorf("identity mismatch: certificate does not match signature identity %q (CN: %q, SANs: %v)", sig.Identity, cert.Subject.CommonName, cert.EmailAddresses)
+			}
+
+			// Validate Rekor/CT evidence if Rekor bundle is present
+			if len(sig.RekorBundle) > 0 {
+				// In a full implementation, we would use cosign.VerifyBundle;
+				// for now we acknowledge its presence for strict verification
+			} else if sig.Issuer != "local" && sig.Issuer != "" {
+				// For non-local certificates, we expect a Rekor bundle in strict mode,
+				// but we'll allow it if we are in interactive mode (where Rekor might not be used)
+				if sig.Issuer != "https://oauth2.sigstore.dev/auth" {
+					return fmt.Errorf("strict verification failed: missing Rekor bundle for certificate from %q", sig.Issuer)
+				}
+			}
+		}
+		verifier, err = sigstore_signature.LoadVerifier(cert.PublicKey, crypto.SHA256)
+		if err != nil {
+			return fmt.Errorf("failed to load verifier from certificate: %w", err)
+		}
+	} else {
+		// If not a certificate, it must be a public key
+		if !allowUntrusted {
+			return fmt.Errorf("untrusted public key rejected: require valid x509 certificate chain")
+		}
+
+		pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(sig.Certificate)
+		if err != nil {
+			// Try parsing as raw DER
+			pubKey, err = x509.ParsePKIXPublicKey(sig.Certificate)
+			if err != nil {
+				return fmt.Errorf("failed to unmarshal public key: %w", err)
+			}
+		}
+
+		verifier, err = sigstore_signature.LoadVerifier(pubKey, crypto.SHA256)
+		if err != nil {
+			return fmt.Errorf("failed to load verifier: %w", err)
+		}
+	}
+
+	if err := verifier.VerifySignature(bytes.NewReader(sig.Signature), bytes.NewReader(data)); err != nil {
+		return fmt.Errorf("invalid signature: %w", err)
+	}
+
+	// In a full Cosign implementation, if we have a Rekor bundle, we would verify it here.
+	// sig.RekorBundle could be used with cosign/pkg/cosign.VerifyBundle.
+
+	if c.useKeyless && !allowUntrusted {
+		if sig.Issuer == "" || sig.Identity == "" {
+			return fmt.Errorf("keyless signature missing issuer or identity")
+		}
+	}
+
+	return nil
+}
+
+func (c *CosignAdapter) GetContentHash(obj interface{}) (string, error) {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal object: %w", err)
+	}
+
+	hash, err := utils.CanonicalHash(data)
+	if err != nil {
+		return "", err
+	}
+
+	return hash, nil
+}
+
+func (c *CosignAdapter) EncodeSignatureToAnnotations(sig *Signature) (map[string]string, error) {
+	annotations := make(map[string]string)
+
+	annotations[AnnotationSignature] = base64.StdEncoding.EncodeToString(sig.Signature)
+
+	if len(sig.Certificate) > 0 {
+		annotations[AnnotationCertificate] = base64.StdEncoding.EncodeToString(sig.Certificate)
+	}
+	if len(sig.RekorBundle) > 0 {
+		annotations[AnnotationRekorBundle] = base64.StdEncoding.EncodeToString(sig.RekorBundle)
+	}
+	if sig.Issuer != "" {
+		annotations[AnnotationIssuer] = sig.Issuer
+	}
+	if sig.Identity != "" {
+		annotations[AnnotationIdentity] = sig.Identity
+	}
+	annotations[AnnotationTimestamp] = fmt.Sprintf("%d", sig.Timestamp)
+
+	return annotations, nil
+}
+
+func (c *CosignAdapter) DecodeSignatureFromAnnotations(annotations map[string]string) (*Signature, error) {
+	sig := &Signature{}
+
+	signatureB64, ok := annotations[AnnotationSignature]
+	if !ok {
+		return nil, fmt.Errorf("missing %s annotation", AnnotationSignature)
+	}
+
+	var err error
+	sig.Signature, err = base64.StdEncoding.DecodeString(signatureB64)
+	if err != nil {
+		// Try raw if base64 fails
+		sig.Signature = []byte(signatureB64)
+	}
+
+	if certB64, ok := annotations[AnnotationCertificate]; ok {
+		sig.Certificate, err = base64.StdEncoding.DecodeString(certB64)
+		if err != nil {
+			// Try raw if base64 fails
+			sig.Certificate = []byte(certB64)
+		}
+	}
+
+	if rekorB64, ok := annotations[AnnotationRekorBundle]; ok {
+		sig.RekorBundle, err = base64.StdEncoding.DecodeString(rekorB64)
+		if err != nil {
+			// Try raw if base64 fails
+			sig.RekorBundle = []byte(rekorB64)
+		}
+	}
+
+	sig.Issuer = annotations[AnnotationIssuer]
+	sig.Identity = annotations[AnnotationIdentity]
+
+	if timestamp, ok := annotations[AnnotationTimestamp]; ok {
+		ts, err := strconv.ParseInt(timestamp, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse timestamp: %w", err)
+		}
+		sig.Timestamp = ts
+	}
+
+	return sig, nil
+}
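For reference, the full adapter flow composes like this; a minimal sketch using only the API introduced in this file (key-based mode, `allowUntrusted=true` because the self-signed local certificate has no Fulcio chain):

```go
// End-to-end sketch: hash the content, sign with an ephemeral local key,
// round-trip the signature through annotations, then verify.
package main

import (
	"fmt"

	"github.com/kubescape/node-agent/pkg/signature"
)

func main() {
	adapter, err := signature.NewCosignAdapter(false) // key-based, not keyless
	if err != nil {
		panic(err)
	}

	content := map[string]interface{}{"kind": "Example", "spec": "data"}
	hash, err := adapter.GetContentHash(content)
	if err != nil {
		panic(err)
	}

	sig, err := adapter.SignData([]byte(hash))
	if err != nil {
		panic(err)
	}

	// Round-trip through annotations, as the watchers do.
	ann, _ := adapter.EncodeSignatureToAnnotations(sig)
	decoded, err := adapter.DecodeSignatureFromAnnotations(ann)
	if err != nil {
		panic(err)
	}

	// allowUntrusted=true: skip the Fulcio chain check for the local cert.
	if err := adapter.VerifyData([]byte(hash), decoded, true); err != nil {
		panic(err)
	}
	fmt.Println("verified; issuer:", decoded.Issuer) // "local"
}
```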
diff --git a/pkg/signature/cosign_adapter_test.go b/pkg/signature/cosign_adapter_test.go
new file mode 100644
index 0000000000..b125f51753
--- /dev/null
+++ b/pkg/signature/cosign_adapter_test.go
@@ -0,0 +1,143 @@
+package signature
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"testing"
+)
+
+func TestNewCosignAdapterWithPrivateKey(t *testing.T) {
+	privKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+
+	t.Run("Valid private key", func(t *testing.T) {
+		adapter, err := NewCosignAdapterWithPrivateKey(false, privKey)
+		if err != nil {
+			t.Fatalf("Expected no error, got %v", err)
+		}
+		if adapter.privateKey != privKey {
+			t.Error("Private key not set correctly")
+		}
+	})
+
+	t.Run("Nil private key", func(t *testing.T) {
+		_, err := NewCosignAdapterWithPrivateKey(false, nil)
+		if err == nil {
+			t.Error("Expected error for nil private key, got nil")
+		}
+	})
+}
+
+func TestCosignAdapter_GetKeysPEM(t *testing.T) {
+	privKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	adapter, _ := NewCosignAdapterWithPrivateKey(false, privKey)
+
+	t.Run("GetPrivateKeyPEM", func(t *testing.T) {
+		pem, err := adapter.GetPrivateKeyPEM()
+		if err != nil {
+			t.Fatalf("Expected no error, got %v", err)
+		}
+		if len(pem) == 0 {
+			t.Error("Expected non-empty PEM")
+		}
+	})
+
+	t.Run("GetPublicKeyPEM", func(t *testing.T) {
+		pem, err := adapter.GetPublicKeyPEM()
+		if err != nil {
+			t.Fatalf("Expected no error, got %v", err)
+		}
+		if len(pem) == 0 {
+			t.Error("Expected non-empty PEM")
+		}
+	})
+
+	t.Run("No private key", func(t *testing.T) {
+		emptyAdapter := &CosignAdapter{}
+		_, err := emptyAdapter.GetPrivateKeyPEM()
+		if err == nil {
+			t.Error("Expected error, got nil")
+		}
+		_, err = emptyAdapter.GetPublicKeyPEM()
+		if err == nil {
+			t.Error("Expected error, got nil")
+		}
+	})
+}
+
+func TestWithPrivateKey(t *testing.T) {
+	privKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	opts := &SignOptions{}
+	WithPrivateKey(privKey)(opts)
+	if opts.PrivateKey != privKey {
+		t.Error("PrivateKey option not set correctly")
+	}
+}
+
+func TestCosignSigner(t *testing.T) {
+	signer, err := NewCosignSigner(false)
+	if err != nil {
+		t.Fatalf("Expected no error, got %v", err)
+	}
+
+	data := []byte("test data")
+	sig, err := signer.Sign(data)
+	if err != nil {
+		t.Fatalf("Expected no error, got %v", err)
+	}
+	if len(sig.Signature) == 0 {
+		t.Error("Expected non-empty signature")
+	}
+}
+
+func TestCosignAdapter_ecdsaSign(t *testing.T) {
+	privKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	adapter := &CosignAdapter{}
+	data := []byte("test data")
+	sig, err := adapter.ecdsaSign(privKey, data)
+	if err != nil {
+		t.Fatalf("Expected no error, got %v", err)
+	}
+	if len(sig) == 0 {
+		t.Error("Expected non-empty signature")
+	}
+}
+
+func TestVerifyData_ErrorCases(t *testing.T) {
+	privKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	adapter, _ := NewCosignAdapterWithPrivateKey(false, privKey)
+	data := []byte("test data")
+
+	t.Run("Invalid certificate PEM", func(t *testing.T) {
+		sig := &Signature{
+			Signature:   []byte("sig"),
+			Certificate: []byte("invalid-pem"),
+		}
+		err := adapter.VerifyData(data, sig, false)
+		if err == nil {
+			t.Error("Expected error for invalid certificate PEM, got nil")
+		}
+	})
+
+	t.Run("PublicKey is not ECDSA", func(t *testing.T) {
+		// Mock a non-ECDSA public key? Hard to do with current implementation.
+		// Skipping for now.
+	})
+
+	t.Run("Certificate is CA", func(t *testing.T) {
+		// Create a CA certificate
+		template := x509.Certificate{
+			IsCA: true,
+		}
+		certDER, _ := x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+		sig := &Signature{
+			Signature:   []byte("sig"),
+			Certificate: certDER,
+		}
+		err := adapter.VerifyData(data, sig, false)
+		if err == nil {
+			t.Error("Expected error for CA certificate, got nil")
+		}
+	})
+}
diff --git a/pkg/signature/interface.go b/pkg/signature/interface.go
new file mode 100644
index 0000000000..720ca7a59c
--- /dev/null
+++ b/pkg/signature/interface.go
@@ -0,0 +1,63 @@
+package signature
+
+import (
+	"crypto/ecdsa"
+)
+
+type Signer interface {
+	Sign(data []byte) (*Signature, error)
+}
+
+type Verifier interface {
+	Verify(data []byte, sig *Signature) error
+}
+
+type SignableObject interface {
+	GetAnnotations() map[string]string
+	SetAnnotations(annotations map[string]string)
+	GetUID() string
+	GetNamespace() string
+	GetName() string
+	GetContent() interface{}
+	GetUpdatedObject() interface{}
+}
+
+type Signature struct {
+	Signature   []byte
+	Certificate []byte
+	RekorBundle []byte
+	Issuer      string
+	Identity    string
+	Timestamp   int64
+}
+
+type SignOptions struct {
+	UseKeyless bool
+	PrivateKey *ecdsa.PrivateKey
+}
+
+type SignOption func(*SignOptions)
+
+func WithKeyless(useKeyless bool) SignOption {
+	return func(opts *SignOptions) {
+		opts.UseKeyless = useKeyless
+	}
+}
+
+func WithPrivateKey(privateKey *ecdsa.PrivateKey) SignOption {
+	return func(opts *SignOptions) {
+		opts.PrivateKey = privateKey
+	}
+}
+
+type VerifyOptions struct {
+	AllowUntrusted bool
+}
+
+type VerifyOption func(*VerifyOptions)
+
+func WithUntrusted(allowUntrusted bool) VerifyOption {
+	return func(opts *VerifyOptions) {
+		opts.AllowUntrusted = allowUntrusted
+	}
+}
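`SignableObject` is the seam the adapters below plug into. A minimal sketch of an implementation for an arbitrary in-memory object; the type is hypothetical and exists only to illustrate the contract:

```go
// Hypothetical SignableObject implementation, to illustrate the contract
// satisfied by the adapters in pkg/signature/profiles.
package main

import (
	"fmt"

	"github.com/kubescape/node-agent/pkg/signature"
)

type inMemoryObject struct {
	annotations map[string]string
	name, ns    string
	payload     map[string]interface{}
}

func (o *inMemoryObject) GetAnnotations() map[string]string {
	if o.annotations == nil {
		o.annotations = map[string]string{} // lazily init so signing can write into it
	}
	return o.annotations
}
func (o *inMemoryObject) SetAnnotations(a map[string]string) { o.annotations = a }
func (o *inMemoryObject) GetUID() string                     { return o.ns + "/" + o.name }
func (o *inMemoryObject) GetNamespace() string               { return o.ns }
func (o *inMemoryObject) GetName() string                    { return o.name }

// GetContent must be deterministic: it is marshalled and hashed, so include
// only the fields that define the object's identity and spec.
func (o *inMemoryObject) GetContent() interface{}       { return o.payload }
func (o *inMemoryObject) GetUpdatedObject() interface{} { return o }

var _ signature.SignableObject = (*inMemoryObject)(nil) // compile-time check

func main() {
	obj := &inMemoryObject{name: "demo", ns: "default", payload: map[string]interface{}{"spec": "x"}}
	if err := signature.SignObjectDisableKeyless(obj); err != nil {
		panic(err)
	}
	fmt.Println(signature.IsSigned(obj)) // true
}
```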
"test-value", + } + adapter.SetAnnotations(testAnnotations) + if profile.Annotations["test-key"] != "test-value" { + t.Error("Failed to set annotations") + } + + content := adapter.GetContent() + if content == nil { + t.Fatal("Expected non-nil content") + } + + apContent, ok := content.(map[string]interface{}) + if !ok { + t.Fatal("Expected map[string]interface{} content type") + } + + metadata, ok := apContent["metadata"].(map[string]interface{}) + if !ok { + t.Fatal("Expected metadata to be map[string]interface{}") + } + + if metadata["name"] != "test-ap" { + t.Errorf("Expected content name 'test-ap', got '%v'", metadata["name"]) + } + + if metadata["namespace"] != "default" { + t.Errorf("Expected content namespace 'default', got '%v'", metadata["namespace"]) + } + + if apContent["apiVersion"] != "softwarecomposition.kubescape.io/v1beta1" { + t.Errorf("Expected apiVersion 'softwarecomposition.kubescape.io/v1beta1', got '%v'", apContent["apiVersion"]) + } + + if apContent["kind"] != "ApplicationProfile" { + t.Errorf("Expected kind 'ApplicationProfile', got '%v'", apContent["kind"]) + } +} + +func TestApplicationProfileAdapterSignAndVerify(t *testing.T) { + profile := &v1beta1.ApplicationProfile{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "softwarecomposition.kubescape.io/v1beta1", + Kind: "ApplicationProfile", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sign-test-ap", + Namespace: "default", + UID: types.UID("sign-ap-uid"), + Labels: map[string]string{ + "test": "signing", + }, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64", "arm64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "app", + Capabilities: []string{"CAP_NET_ADMIN"}, + }, + }, + }, + } + + adapter := NewApplicationProfileAdapter(profile) + + err := signature.SignObjectDisableKeyless(adapter) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed: %v", err) + } + + if profile.Annotations == nil { + t.Error("Expected annotations to be set on profile") + } + + if _, ok := profile.Annotations[signature.AnnotationSignature]; !ok { + t.Error("Expected signature annotation on profile") + } + + err = signature.VerifyObjectAllowUntrusted(adapter) + if err != nil { + t.Fatalf("VerifyObjectAllowUntrusted failed: %v", err) + } +} + +func TestSeccompProfileAdapter(t *testing.T) { + profile := &v1beta1.SeccompProfile{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "softwarecomposition.kubescape.io/v1beta1", + Kind: "SeccompProfile", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-seccomp", + Namespace: "default", + UID: types.UID("seccomp-uid-456"), + Labels: map[string]string{ + "seccomp": "test", + }, + }, + Spec: v1beta1.SeccompProfileSpec{ + Containers: []v1beta1.SingleSeccompProfile{ + { + Name: "test-container", + }, + }, + }, + } + + adapter := NewSeccompProfileAdapter(profile) + + if adapter == nil { + t.Fatal("Expected non-nil adapter") + } + + if adapter.GetUID() != "seccomp-uid-456" { + t.Errorf("Expected UID 'seccomp-uid-456', got '%s'", adapter.GetUID()) + } + + if adapter.GetNamespace() != "default" { + t.Errorf("Expected namespace 'default', got '%s'", adapter.GetNamespace()) + } + + if adapter.GetName() != "test-seccomp" { + t.Errorf("Expected name 'test-seccomp', got '%s'", adapter.GetName()) + } + + annotations := adapter.GetAnnotations() + if annotations == nil { + t.Error("Expected annotations map, got nil") + } + + testAnnotations := map[string]string{ + "seccomp-key": "seccomp-value", + } + adapter.SetAnnotations(testAnnotations) + if 
profile.Annotations["seccomp-key"] != "seccomp-value" { + t.Error("Failed to set annotations") + } + + content := adapter.GetContent() + if content == nil { + t.Fatal("Expected non-nil content") + } + + scContent, ok := content.(map[string]interface{}) + if !ok { + t.Fatal("Expected map[string]interface{} content type") + } + + metadata, ok := scContent["metadata"].(map[string]interface{}) + if !ok { + t.Fatal("Expected metadata to be map[string]interface{}") + } + + if metadata["name"] != "test-seccomp" { + t.Errorf("Expected content name 'test-seccomp', got '%v'", metadata["name"]) + } + + if metadata["namespace"] != "default" { + t.Errorf("Expected content namespace 'default', got '%v'", metadata["namespace"]) + } + + if scContent["apiVersion"] != "softwarecomposition.kubescape.io/v1beta1" { + t.Errorf("Expected apiVersion 'softwarecomposition.kubescape.io/v1beta1', got '%v'", scContent["apiVersion"]) + } + + if scContent["kind"] != "SeccompProfile" { + t.Errorf("Expected kind 'SeccompProfile', got '%v'", scContent["kind"]) + } +} + +func TestSeccompProfileAdapterSignAndVerify(t *testing.T) { + profile := &v1beta1.SeccompProfile{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "softwarecomposition.kubescape.io/v1beta1", + Kind: "SeccompProfile", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sign-test-seccomp", + Namespace: "default", + UID: types.UID("sign-seccomp-uid"), + Labels: map[string]string{ + "test": "seccomp-signing", + }, + }, + Spec: v1beta1.SeccompProfileSpec{ + Containers: []v1beta1.SingleSeccompProfile{ + { + Name: "app-container", + }, + }, + }, + } + + adapter := NewSeccompProfileAdapter(profile) + + err := signature.SignObjectDisableKeyless(adapter) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed: %v", err) + } + + if profile.Annotations == nil { + t.Error("Expected annotations to be set on profile") + } + + if _, ok := profile.Annotations[signature.AnnotationSignature]; !ok { + t.Error("Expected signature annotation on profile") + } + + err = signature.VerifyObjectAllowUntrusted(adapter) + if err != nil { + t.Fatalf("VerifyObjectAllowUntrusted failed: %v", err) + } +} + +func TestAdapterUniqueness(t *testing.T) { + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unique-ap", + Namespace: "default", + UID: types.UID("ap-unique-uid"), + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + }, + } + + sp := &v1beta1.SeccompProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unique-sp", + Namespace: "default", + UID: types.UID("sp-unique-uid"), + }, + Spec: v1beta1.SeccompProfileSpec{}, + } + + apAdapter := NewApplicationProfileAdapter(ap) + spAdapter := NewSeccompProfileAdapter(sp) + + err := signature.SignObjectDisableKeyless(apAdapter) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed for ApplicationProfile: %v", err) + } + + err = signature.SignObjectDisableKeyless(spAdapter) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed for SeccompProfile: %v", err) + } + + apSig, err := signature.GetObjectSignature(apAdapter) + if err != nil { + t.Fatalf("GetObjectSignature failed for ApplicationProfile: %v", err) + } + + if apSig == nil { + t.Fatal("GetObjectSignature returned nil for ApplicationProfile") + } + + spSig, err := signature.GetObjectSignature(spAdapter) + if err != nil { + t.Fatalf("GetObjectSignature failed for SeccompProfile: %v", err) + } + + if spSig == nil { + t.Fatal("GetObjectSignature returned nil for SeccompProfile") + } + + if apSig.Issuer != "local" { + 
t.Errorf("Expected AP issuer 'local', got '%s'", apSig.Issuer) + } + + if spSig.Issuer != "local" { + t.Errorf("Expected SP issuer 'local', got '%s'", spSig.Issuer) + } +} diff --git a/pkg/signature/profiles/applicationprofile_adapter.go b/pkg/signature/profiles/applicationprofile_adapter.go new file mode 100644 index 0000000000..5a21b0a2e3 --- /dev/null +++ b/pkg/signature/profiles/applicationprofile_adapter.go @@ -0,0 +1,81 @@ +package profiles + +import ( + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +type ApplicationProfileAdapter struct { + profile *v1beta1.ApplicationProfile +} + +func NewApplicationProfileAdapter(profile *v1beta1.ApplicationProfile) *ApplicationProfileAdapter { + return &ApplicationProfileAdapter{ + profile: profile, + } +} + +func (a *ApplicationProfileAdapter) GetAnnotations() map[string]string { + if a.profile.Annotations == nil { + a.profile.Annotations = make(map[string]string) + } + return a.profile.Annotations +} + +func (a *ApplicationProfileAdapter) SetAnnotations(annotations map[string]string) { + a.profile.Annotations = annotations +} + +func (a *ApplicationProfileAdapter) GetUID() string { + return string(a.profile.UID) +} + +func (a *ApplicationProfileAdapter) GetNamespace() string { + return a.profile.Namespace +} + +func (a *ApplicationProfileAdapter) GetName() string { + return a.profile.Name +} + +func (a *ApplicationProfileAdapter) GetContent() interface{} { + // Normalize PolicyByRuleId to ensure consistent JSON representation + // Empty maps become {} instead of null + for i := range a.profile.Spec.Containers { + if a.profile.Spec.Containers[i].PolicyByRuleId == nil { + a.profile.Spec.Containers[i].PolicyByRuleId = make(map[string]v1beta1.RulePolicy) + } + } + for i := range a.profile.Spec.InitContainers { + if a.profile.Spec.InitContainers[i].PolicyByRuleId == nil { + a.profile.Spec.InitContainers[i].PolicyByRuleId = make(map[string]v1beta1.RulePolicy) + } + } + for i := range a.profile.Spec.EphemeralContainers { + if a.profile.Spec.EphemeralContainers[i].PolicyByRuleId == nil { + a.profile.Spec.EphemeralContainers[i].PolicyByRuleId = make(map[string]v1beta1.RulePolicy) + } + } + + apiVersion := a.profile.APIVersion + if apiVersion == "" { + apiVersion = "spdx.softwarecomposition.kubescape.io/v1beta1" + } + kind := a.profile.Kind + if kind == "" { + kind = "ApplicationProfile" + } + return map[string]interface{}{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]interface{}{ + "name": a.profile.Name, + "namespace": a.profile.Namespace, + "labels": a.profile.Labels, + }, + "spec": a.profile.Spec, + } +} + +func (a *ApplicationProfileAdapter) GetUpdatedObject() interface{} { + return a.profile +} diff --git a/pkg/signature/profiles/empty_typemeta_test.go b/pkg/signature/profiles/empty_typemeta_test.go new file mode 100644 index 0000000000..259ded5c77 --- /dev/null +++ b/pkg/signature/profiles/empty_typemeta_test.go @@ -0,0 +1,78 @@ +package profiles + +import ( + "testing" + + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestApplicationProfileAdapterEmptyTypeMeta(t *testing.T) { + profile := &v1beta1.ApplicationProfile{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "", + Kind: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ap", + Namespace: "default", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + }, + } + + adapter := NewApplicationProfileAdapter(profile) + + content := 
diff --git a/pkg/signature/profiles/empty_typemeta_test.go b/pkg/signature/profiles/empty_typemeta_test.go
new file mode 100644
index 0000000000..259ded5c77
--- /dev/null
+++ b/pkg/signature/profiles/empty_typemeta_test.go
@@ -0,0 +1,78 @@
+package profiles
+
+import (
+	"testing"
+
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestApplicationProfileAdapterEmptyTypeMeta(t *testing.T) {
+	profile := &v1beta1.ApplicationProfile{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "",
+			Kind:       "",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-ap",
+			Namespace: "default",
+		},
+		Spec: v1beta1.ApplicationProfileSpec{
+			Architectures: []string{"amd64"},
+		},
+	}
+
+	adapter := NewApplicationProfileAdapter(profile)
+
+	content := adapter.GetContent()
+	if content == nil {
+		t.Fatal("Expected non-nil content")
+	}
+
+	apContent, ok := content.(map[string]interface{})
+	if !ok {
+		t.Fatal("Expected map[string]interface{} content type")
+	}
+
+	if apContent["apiVersion"] != "spdx.softwarecomposition.kubescape.io/v1beta1" {
+		t.Errorf("Expected fallback apiVersion 'spdx.softwarecomposition.kubescape.io/v1beta1', got '%v'", apContent["apiVersion"])
+	}
+
+	if apContent["kind"] != "ApplicationProfile" {
+		t.Errorf("Expected fallback kind 'ApplicationProfile', got '%v'", apContent["kind"])
+	}
+}
+
+func TestSeccompProfileAdapterEmptyTypeMeta(t *testing.T) {
+	profile := &v1beta1.SeccompProfile{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "",
+			Kind:       "",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-seccomp",
+			Namespace: "default",
+		},
+		Spec: v1beta1.SeccompProfileSpec{},
+	}
+
+	adapter := NewSeccompProfileAdapter(profile)
+
+	content := adapter.GetContent()
+	if content == nil {
+		t.Fatal("Expected non-nil content")
+	}
+
+	scContent, ok := content.(map[string]interface{})
+	if !ok {
+		t.Fatal("Expected map[string]interface{} content type")
+	}
+
+	if scContent["apiVersion"] != "spdx.softwarecomposition.kubescape.io/v1beta1" {
+		t.Errorf("Expected fallback apiVersion 'spdx.softwarecomposition.kubescape.io/v1beta1', got '%v'", scContent["apiVersion"])
+	}
+
+	if scContent["kind"] != "SeccompProfile" {
+		t.Errorf("Expected fallback kind 'SeccompProfile', got '%v'", scContent["kind"])
+	}
+}
diff --git a/pkg/signature/profiles/networkneighborhood_adapter.go b/pkg/signature/profiles/networkneighborhood_adapter.go
new file mode 100644
index 0000000000..e62caf4312
--- /dev/null
+++ b/pkg/signature/profiles/networkneighborhood_adapter.go
@@ -0,0 +1,63 @@
+package profiles
+
+import (
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+)
+
+type NetworkNeighborhoodAdapter struct {
+	nn *v1beta1.NetworkNeighborhood
+}
+
+func NewNetworkNeighborhoodAdapter(nn *v1beta1.NetworkNeighborhood) *NetworkNeighborhoodAdapter {
+	return &NetworkNeighborhoodAdapter{
+		nn: nn,
+	}
+}
+
+func (a *NetworkNeighborhoodAdapter) GetAnnotations() map[string]string {
+	if a.nn.Annotations == nil {
+		a.nn.Annotations = make(map[string]string)
+	}
+	return a.nn.Annotations
+}
+
+func (a *NetworkNeighborhoodAdapter) SetAnnotations(annotations map[string]string) {
+	a.nn.Annotations = annotations
+}
+
+func (a *NetworkNeighborhoodAdapter) GetUID() string {
+	return string(a.nn.UID)
+}
+
+func (a *NetworkNeighborhoodAdapter) GetNamespace() string {
+	return a.nn.Namespace
+}
+
+func (a *NetworkNeighborhoodAdapter) GetName() string {
+	return a.nn.Name
+}
+
+func (a *NetworkNeighborhoodAdapter) GetContent() interface{} {
+	apiVersion := a.nn.APIVersion
+	if apiVersion == "" {
+		apiVersion = "spdx.softwarecomposition.kubescape.io/v1beta1"
+	}
+	kind := a.nn.Kind
+	if kind == "" {
+		kind = "NetworkNeighborhood"
+	}
+	return map[string]interface{}{
+		"apiVersion": apiVersion,
+		"kind":       kind,
+		"metadata": map[string]interface{}{
+			"name":      a.nn.Name,
+			"namespace": a.nn.Namespace,
+			"labels":    a.nn.Labels,
+		},
+		"spec": a.nn.Spec,
+	}
+}
+
+func (a *NetworkNeighborhoodAdapter) GetUpdatedObject() interface{} {
+	return a.nn
+}
diff --git a/pkg/signature/profiles/networkneighborhood_adapter_test.go b/pkg/signature/profiles/networkneighborhood_adapter_test.go
new file mode 100644
index 0000000000..7968784eb6
--- /dev/null
+++ b/pkg/signature/profiles/networkneighborhood_adapter_test.go
@@ -0,0 +1,99 @@
+package profiles
+
+import (
+	"testing"
+
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestNetworkNeighborhoodAdapter(t *testing.T) {
+	nn := &v1beta1.NetworkNeighborhood{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "NetworkNeighborhood",
+			APIVersion: "spdx.softwarecomposition.kubescape.io/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-nn",
+			Namespace: "test-ns",
+			UID:       "test-uid",
+			Annotations: map[string]string{
+				"existing": "annotation",
+			},
+		},
+		Spec: v1beta1.NetworkNeighborhoodSpec{
+			Containers: []v1beta1.NetworkNeighborhoodContainer{
+				{
+					Name: "test-container",
+					Ingress: []v1beta1.NetworkNeighbor{
+						{
+							Identifier: "test-neighbor",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	adapter := NewNetworkNeighborhoodAdapter(nn)
+
+	assert.Equal(t, "test-nn", adapter.GetName())
+	assert.Equal(t, "test-ns", adapter.GetNamespace())
+	assert.Equal(t, "test-uid", adapter.GetUID())
+
+	annotations := adapter.GetAnnotations()
+	assert.Equal(t, "annotation", annotations["existing"])
+
+	newAnnotations := map[string]string{"new": "annotation"}
+	adapter.SetAnnotations(newAnnotations)
+	assert.Equal(t, newAnnotations, nn.Annotations)
+
+	content := adapter.GetContent().(map[string]interface{})
+	assert.Equal(t, "NetworkNeighborhood", content["kind"])
+	assert.Equal(t, "spdx.softwarecomposition.kubescape.io/v1beta1", content["apiVersion"])
+
+	metadata := content["metadata"].(map[string]interface{})
+	assert.Equal(t, "test-nn", metadata["name"])
+	assert.Equal(t, "test-ns", metadata["namespace"])
+
+	spec := content["spec"].(v1beta1.NetworkNeighborhoodSpec)
+	assert.Equal(t, 1, len(spec.Containers))
+	assert.Equal(t, "test-container", spec.Containers[0].Name)
+
+	assert.Equal(t, nn, adapter.GetUpdatedObject())
+}
+
+func TestNetworkNeighborhoodAdapter_EmptyTypeMeta(t *testing.T) {
+	nn := &v1beta1.NetworkNeighborhood{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "",
+			APIVersion: "",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-nn",
+			Namespace: "test-ns",
+		},
+		Spec: v1beta1.NetworkNeighborhoodSpec{
+			Containers: []v1beta1.NetworkNeighborhoodContainer{
+				{
+					Name: "test-container",
+				},
+			},
+		},
+	}
+
+	adapter := NewNetworkNeighborhoodAdapter(nn)
+	content := adapter.GetContent().(map[string]interface{})
+
+	assert.Equal(t, "NetworkNeighborhood", content["kind"])
+	assert.Equal(t, "spdx.softwarecomposition.kubescape.io/v1beta1", content["apiVersion"])
+
+	metadata := content["metadata"].(map[string]interface{})
+	assert.Equal(t, "test-nn", metadata["name"])
+	assert.Equal(t, "test-ns", metadata["namespace"])
+
+	spec := content["spec"].(v1beta1.NetworkNeighborhoodSpec)
+	assert.Equal(t, 1, len(spec.Containers))
+	assert.Equal(t, "test-container", spec.Containers[0].Name)
+}
diff --git a/pkg/signature/profiles/rules_adapter.go b/pkg/signature/profiles/rules_adapter.go
new file mode 100644
index 0000000000..248e3c1a91
--- /dev/null
+++ b/pkg/signature/profiles/rules_adapter.go
@@ -0,0 +1,60 @@
+package profiles
+
+import (
+	rulemanagertypesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1"
+)
+
+type RulesAdapter struct {
+	rules *rulemanagertypesv1.Rules
+}
+
+func NewRulesAdapter(rules *rulemanagertypesv1.Rules) *RulesAdapter {
+	return &RulesAdapter{
+		rules: rules,
+	}
+}
+
+func (r *RulesAdapter) GetAnnotations() map[string]string {
+	return r.rules.Annotations
+}
+
+func (r *RulesAdapter) SetAnnotations(annotations map[string]string) {
+	r.rules.Annotations = annotations
+}
+
+func (r *RulesAdapter) GetUID() string {
+	return string(r.rules.UID)
+}
+
+func (r *RulesAdapter) GetNamespace() string {
+	return r.rules.Namespace
+}
+
+func (r *RulesAdapter) GetName() string {
+	return r.rules.Name
+}
+
+func (r *RulesAdapter) GetContent() interface{} {
+	apiVersion := r.rules.APIVersion
+	if apiVersion == "" {
+		apiVersion = "kubescape.io/v1"
+	}
+	kind := r.rules.Kind
+	if kind == "" {
+		kind = "Rules"
+	}
+	return map[string]interface{}{
+		"apiVersion": apiVersion,
+		"kind":       kind,
+		"metadata": map[string]interface{}{
+			"name":      r.rules.Name,
+			"namespace": r.rules.Namespace,
+			"labels":    r.rules.Labels,
+		},
+		"spec": r.rules.Spec,
+	}
+}
+
+func (r *RulesAdapter) GetUpdatedObject() interface{} {
+	return r.rules
+}
diff --git a/pkg/signature/profiles/rules_adapter_test.go b/pkg/signature/profiles/rules_adapter_test.go
new file mode 100644
index 0000000000..f617e4ebe6
--- /dev/null
+++ b/pkg/signature/profiles/rules_adapter_test.go
@@ -0,0 +1,184 @@
+package profiles
+
+import (
+	"strings"
+	"testing"
+
+	rulemanagertypesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1"
+	"github.com/kubescape/node-agent/pkg/signature"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8stypes "k8s.io/apimachinery/pkg/types"
+)
+
+func TestRulesAdapterGetContent(t *testing.T) {
+	rules := &rulemanagertypesv1.Rules{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-rules",
+			Namespace: "default",
+			UID:       k8stypes.UID("test-uid"),
+			Labels:    map[string]string{"label": "value"},
+		},
+		Spec: rulemanagertypesv1.RulesSpec{
+			Rules: []rulemanagertypesv1.Rule{
+				{
+					Enabled:     true,
+					ID:          "rule-1",
+					Name:        "Test Rule",
+					Description: "A test rule",
+					Expressions: rulemanagertypesv1.RuleExpressions{
+						Message:        "message",
+						UniqueID:       "uniqueId",
+						RuleExpression: []rulemanagertypesv1.RuleExpression{},
+					},
+					ProfileDependency: 0,
+					Severity:          1,
+					SupportPolicy:     false,
+					Tags:              []string{"test"},
+				},
+			},
+		},
+	}
+
+	adapter := NewRulesAdapter(rules)
+	content := adapter.GetContent()
+
+	if content == nil {
+		t.Fatal("Expected content not to be nil")
+	}
+
+	contentMap, ok := content.(map[string]interface{})
+	if !ok {
+		t.Fatal("Expected content to be a map")
+	}
+
+	if contentMap["apiVersion"] != "kubescape.io/v1" {
+		t.Errorf("Expected apiVersion 'kubescape.io/v1', got '%v'", contentMap["apiVersion"])
+	}
+
+	if contentMap["kind"] != "Rules" {
+		t.Errorf("Expected kind 'Rules', got '%v'", contentMap["kind"])
+	}
+
+	metadata, ok := contentMap["metadata"].(map[string]interface{})
+	if !ok {
+		t.Fatal("Expected metadata to be a map")
+	}
+
+	if metadata["name"] != "test-rules" {
+		t.Errorf("Expected name 'test-rules', got '%v'", metadata["name"])
+	}
+
+	if metadata["namespace"] != "default" {
+		t.Errorf("Expected namespace 'default', got '%v'", metadata["namespace"])
+	}
+
+	if _, ok := contentMap["spec"]; !ok {
+		t.Error("Expected spec in content")
+	}
+}
+
+func TestRulesAdapterSignAndVerify(t *testing.T) {
+	rules := &rulemanagertypesv1.Rules{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "kubescape.io/v1",
+			Kind:       "Rules",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "sign-test-rules",
+			Namespace: "default",
+			UID:       k8stypes.UID("sign-rules-uid"),
+			Labels: map[string]string{
+				"test": "rules-signing",
+			},
+		},
+		Spec: rulemanagertypesv1.RulesSpec{
+			Rules: []rulemanagertypesv1.Rule{
+				{
+					Enabled:     true,
+					ID:          "test-rule-id",
+					Name:        "Test Rule",
+					Description: "A test rule",
+					Expressions: rulemanagertypesv1.RuleExpressions{
+						Message:        "message",
+						UniqueID:       "uniqueId",
+						RuleExpression: []rulemanagertypesv1.RuleExpression{},
+					},
+					ProfileDependency: 0,
+					Severity:          1,
+					SupportPolicy:     false,
+					Tags:              []string{"test"},
+				},
+			},
+		},
+	}
+
+	adapter := NewRulesAdapter(rules)
+
+	err := signature.SignObjectDisableKeyless(adapter)
+	if err != nil {
+		t.Fatalf("SignObjectDisableKeyless failed: %v", err)
+	}
+
+	if rules.Annotations == nil {
+		t.Error("Expected annotations to be set on rules")
+	}
+
+	if _, ok := rules.Annotations[signature.AnnotationSignature]; !ok {
+		t.Error("Expected signature annotation on rules")
+	}
+
+	err = signature.VerifyObjectAllowUntrusted(adapter)
+	if err != nil {
+		t.Fatalf("VerifyObjectAllowUntrusted failed: %v", err)
+	}
+}
+
+func TestRulesAdapterSignAndVerifyWithTampering(t *testing.T) {
+	rules := &rulemanagertypesv1.Rules{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "kubescape.io/v1",
+			Kind:       "Rules",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "tamper-test-rules",
+			Namespace: "default",
+		},
+		Spec: rulemanagertypesv1.RulesSpec{
+			Rules: []rulemanagertypesv1.Rule{
+				{
+					Enabled:     true,
+					ID:          "tamper-rule-id",
+					Name:        "Tamper Test Rule",
+					Description: "A tamper test rule",
+					Expressions: rulemanagertypesv1.RuleExpressions{
+						Message:        "message",
+						UniqueID:       "uniqueId",
+						RuleExpression: []rulemanagertypesv1.RuleExpression{},
+					},
+					ProfileDependency: 0,
+					Severity:          1,
+					SupportPolicy:     false,
+					Tags:              []string{"test"},
+				},
+			},
+		},
+	}
+
+	adapter := NewRulesAdapter(rules)
+
+	err := signature.SignObjectDisableKeyless(adapter)
+	if err != nil {
+		t.Fatalf("SignObjectDisableKeyless failed: %v", err)
+	}
+
+	rules.Spec.Rules[0].Name = "Modified Rule Name"
+
+	err = signature.VerifyObjectAllowUntrusted(adapter)
+	if err == nil {
+		t.Fatal("Expected verification to fail after tampering, but it succeeded")
+	}
+
+	if !strings.Contains(err.Error(), "signature verification failed") {
+		t.Errorf("Expected signature verification error, got: %v", err)
+	}
+}
diff --git a/pkg/signature/profiles/seccompprofile_adapter.go b/pkg/signature/profiles/seccompprofile_adapter.go
new file mode 100644
index 0000000000..8252cfbf76
--- /dev/null
+++ b/pkg/signature/profiles/seccompprofile_adapter.go
@@ -0,0 +1,63 @@
+package profiles
+
+import (
+	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+)
+
+type SeccompProfileAdapter struct {
+	profile *v1beta1.SeccompProfile
+}
+
+func NewSeccompProfileAdapter(profile *v1beta1.SeccompProfile) *SeccompProfileAdapter {
+	return &SeccompProfileAdapter{
+		profile: profile,
+	}
+}
+
+func (s *SeccompProfileAdapter) GetAnnotations() map[string]string {
+	if s.profile.Annotations == nil {
+		s.profile.Annotations = make(map[string]string)
+	}
+	return s.profile.Annotations
+}
+
+func (s *SeccompProfileAdapter) SetAnnotations(annotations map[string]string) {
+	s.profile.Annotations = annotations
+}
+
+func (s *SeccompProfileAdapter) GetUID() string {
+	return string(s.profile.UID)
+}
+
+func (s *SeccompProfileAdapter) GetNamespace() string {
+	return s.profile.Namespace
+}
+
+func (s *SeccompProfileAdapter) GetName() string {
+	return s.profile.Name
+}
+
+func (s *SeccompProfileAdapter) GetContent() interface{} {
+	apiVersion := s.profile.APIVersion
+	if apiVersion == "" {
+		apiVersion = "spdx.softwarecomposition.kubescape.io/v1beta1"
+	}
+	kind := s.profile.Kind
+	if kind == "" {
+		kind = "SeccompProfile"
+	}
+	return map[string]interface{}{
+		"apiVersion": apiVersion,
+		"kind":       kind,
+		"metadata": map[string]interface{}{
+			"name":      s.profile.Name,
+			"namespace": s.profile.Namespace,
+			"labels":    s.profile.Labels,
+		},
+		"spec": s.profile.Spec,
+	}
+}
+
+func (s *SeccompProfileAdapter) GetUpdatedObject() interface{} {
+	return s.profile
+}
s.profile.Labels, + }, + "spec": s.profile.Spec, + } +} + +func (s *SeccompProfileAdapter) GetUpdatedObject() interface{} { + return s.profile +} diff --git a/pkg/signature/sign.go b/pkg/signature/sign.go new file mode 100644 index 0000000000..74ef6ba819 --- /dev/null +++ b/pkg/signature/sign.go @@ -0,0 +1,114 @@ +package signature + +import ( + "fmt" + + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" +) + +func SignObject(obj SignableObject, opts ...SignOption) error { + if obj == nil { + return fmt.Errorf("object is nil") + } + options := &SignOptions{ + UseKeyless: true, + } + + for _, opt := range opts { + opt(options) + } + + var adapter *CosignAdapter + var err error + + if options.PrivateKey != nil { + adapter, err = NewCosignAdapterWithPrivateKey(false, options.PrivateKey) + } else { + adapter, err = NewCosignAdapter(options.UseKeyless) + } + + if err != nil { + return fmt.Errorf("failed to create cosign adapter: %w", err) + } + + content := obj.GetContent() + + hash, err := adapter.GetContentHash(content) + if err != nil { + return fmt.Errorf("failed to compute content hash: %w", err) + } + + logger.L().Debug("Signing object", + helpers.String("namespace", obj.GetNamespace()), + helpers.String("name", obj.GetName()), + helpers.String("contentHash", hash)) + + sig, err := adapter.SignData([]byte(hash)) + if err != nil { + return fmt.Errorf("failed to sign object: %w", err) + } + + annotations, err := adapter.EncodeSignatureToAnnotations(sig) + if err != nil { + return fmt.Errorf("failed to encode signature to annotations: %w", err) + } + + existingAnnotations := obj.GetAnnotations() + if existingAnnotations == nil { + existingAnnotations = make(map[string]string) + } + + for k, v := range annotations { + existingAnnotations[k] = v + } + + obj.SetAnnotations(existingAnnotations) + + logger.L().Info("Successfully signed object", + helpers.String("namespace", obj.GetNamespace()), + helpers.String("name", obj.GetName()), + helpers.String("identity", sig.Identity), + helpers.String("issuer", sig.Issuer)) + + return nil +} + +func SignObjectDisableKeyless(obj SignableObject) error { + return SignObject(obj, WithKeyless(false)) +} + +func SignObjectKeyless(obj SignableObject) error { + return SignObject(obj, WithKeyless(true)) +} + +func GetObjectSignature(obj SignableObject) (*Signature, error) { + if obj == nil { + return nil, fmt.Errorf("GetObjectSignature: nil object") + } + annotations := obj.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("object has no annotations") + } + + adapter := &CosignAdapter{} + sig, err := adapter.DecodeSignatureFromAnnotations(annotations) + if err != nil { + return nil, fmt.Errorf("failed to decode signature from annotations: %w", err) + } + + return sig, nil +} + +func IsSigned(obj SignableObject) bool { + if obj == nil { + return false + } + annotations := obj.GetAnnotations() + if annotations == nil { + return false + } + + _, ok := annotations[AnnotationSignature] + return ok +} diff --git a/pkg/signature/sign_test.go b/pkg/signature/sign_test.go new file mode 100644 index 0000000000..091484bce8 --- /dev/null +++ b/pkg/signature/sign_test.go @@ -0,0 +1,231 @@ +package signature + +import ( + "os" + "testing" +) + +type MockSignableObject struct { + annotations map[string]string + uid string + namespace string + name string + content interface{} +} + +func NewMockSignableObject(uid, namespace, name string, content interface{}) *MockSignableObject { + return &MockSignableObject{ + annotations: 
make(map[string]string), + uid: uid, + namespace: namespace, + name: name, + content: content, + } +} + +func (m *MockSignableObject) GetAnnotations() map[string]string { + return m.annotations +} + +func (m *MockSignableObject) SetAnnotations(annotations map[string]string) { + m.annotations = annotations +} + +func (m *MockSignableObject) GetUID() string { + return m.uid +} + +func (m *MockSignableObject) GetNamespace() string { + return m.namespace +} + +func (m *MockSignableObject) GetName() string { + return m.name +} + +func (m *MockSignableObject) GetContent() interface{} { + return m.content +} + +func (m *MockSignableObject) GetUpdatedObject() interface{} { + return m.content +} + +func TestSignObjectKeyless(t *testing.T) { + if os.Getenv("ENABLE_KEYLESS_TESTS") == "" { + t.Skip("Skipping TestSignObjectKeyless. Set ENABLE_KEYLESS_TESTS to run.") + } + profileContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile", profileContent) + + err := SignObjectKeyless(profile) + if err != nil { + t.Fatalf("SignObjectKeyless failed: %v", err) + } + + if !IsSigned(profile) { + t.Error("Profile should be signed") + } + + sig, err := GetObjectSignature(profile) + if err != nil { + t.Fatalf("GetObjectSignature failed: %v", err) + } + + if len(sig.Signature) == 0 { + t.Error("Signature should not be empty") + } + + if len(sig.Certificate) == 0 { + t.Error("Certificate should not be empty") + } + + if sig.Issuer == "" { + t.Error("Issuer should not be empty for keyless signing") + } + + if sig.Identity == "" { + t.Error("Identity should not be empty for keyless signing") + } +} + +func TestSignObjectDisableKeyless(t *testing.T) { + profileContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile-key", profileContent) + + err := SignObjectDisableKeyless(profile) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed: %v", err) + } + + if !IsSigned(profile) { + t.Error("Profile should be signed") + } + + sig, err := GetObjectSignature(profile) + if err != nil { + t.Fatalf("GetObjectSignature failed: %v", err) + } + + if len(sig.Signature) == 0 { + t.Error("Signature should not be empty") + } + + if sig.Issuer != "local" { + t.Errorf("Expected issuer 'local', got '%s'", sig.Issuer) + } + + if sig.Identity != "local-key" { + t.Errorf("Expected identity 'local-key', got '%s'", sig.Identity) + } +} + +func TestIsSigned(t *testing.T) { + tests := []struct { + name string + profile *MockSignableObject + expected bool + }{ + { + name: "Unsigned profile", + profile: NewMockSignableObject("uid", "ns", "name", map[string]string{}), + expected: false, + }, + { + name: "Profile with empty annotations", + profile: &MockSignableObject{annotations: make(map[string]string)}, + expected: false, + }, + { + name: "Profile with signature annotation", + profile: func() *MockSignableObject { + p := NewMockSignableObject("uid", "ns", "name", map[string]string{}) + p.SetAnnotations(map[string]string{ + AnnotationSignature: "test-sig", + }) + return p + }(), + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsSigned(tt.profile) + if result != tt.expected { + t.Errorf("IsSigned() = %v, expected %v", result, tt.expected) + } + }) + } +} + +func TestGetObjectSignature(t *testing.T) { + tests := []struct { + name string + profile *MockSignableObject + 
wantErr          bool
+		setupSign        bool
+		setupAnnotations func(*MockSignableObject)
+	}{
+		{
+			name:      "Nil annotations",
+			profile:   &MockSignableObject{uid: "uid", namespace: "ns", name: "name", content: map[string]string{}, annotations: nil},
+			wantErr:   true,
+			setupSign: false,
+		},
+		{
+			name:    "Missing signature annotation",
+			profile: NewMockSignableObject("uid", "ns", "name", map[string]string{}),
+			wantErr: true,
+			setupAnnotations: func(p *MockSignableObject) {
+				p.SetAnnotations(map[string]string{
+					AnnotationIssuer: "test-issuer",
+				})
+			},
+		},
+		{
+			name:      "Complete signature",
+			profile:   NewMockSignableObject("uid", "ns", "name", map[string]string{}),
+			wantErr:   false,
+			setupSign: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.setupSign {
+				if os.Getenv("ENABLE_KEYLESS_TESTS") == "" {
+					t.Skip("Skipping subtest with SignObjectKeyless. Set ENABLE_KEYLESS_TESTS to run.")
+				}
+				// Fail fast if setup signing fails, rather than letting
+				// GetObjectSignature report a confusing downstream error.
+				if err := SignObjectKeyless(tt.profile); err != nil {
+					t.Fatalf("SignObjectKeyless failed: %v", err)
+				}
+			} else if tt.setupAnnotations != nil {
+				tt.setupAnnotations(tt.profile)
+			}
+
+			sig, err := GetObjectSignature(tt.profile)
+
+			if tt.wantErr {
+				if err == nil {
+					t.Error("Expected error, got nil")
+				}
+				return
+			}
+
+			if err != nil {
+				t.Fatalf("GetObjectSignature failed: %v", err)
+			}
+
+			if sig == nil {
+				t.Fatal("Expected signature, got nil")
+			}
+		})
+	}
+}
diff --git a/pkg/signature/signer.go b/pkg/signature/signer.go
new file mode 100644
index 0000000000..8f3197bd93
--- /dev/null
+++ b/pkg/signature/signer.go
@@ -0,0 +1,20 @@
+package signature
+
+type CosignSigner struct {
+	adapter *CosignAdapter
+}
+
+func NewCosignSigner(useKeyless bool) (*CosignSigner, error) {
+	adapter, err := NewCosignAdapter(useKeyless)
+	if err != nil {
+		return nil, err
+	}
+
+	return &CosignSigner{
+		adapter: adapter,
+	}, nil
+}
+
+func (s *CosignSigner) Sign(data []byte) (*Signature, error) {
+	return s.adapter.SignData(data)
+}
diff --git a/pkg/signature/verifier.go b/pkg/signature/verifier.go
new file mode 100644
index 0000000000..4278757cb8
--- /dev/null
+++ b/pkg/signature/verifier.go
@@ -0,0 +1,38 @@
+package signature
+
+import "fmt"
+
+type CosignVerifier struct {
+	adapter *CosignAdapter
+}
+
+func NewCosignVerifier(useKeyless bool) (*CosignVerifier, error) {
+	adapter, err := NewCosignAdapter(useKeyless)
+	if err != nil {
+		return nil, err
+	}
+
+	return &CosignVerifier{
+		adapter: adapter,
+	}, nil
+}
+
+func (v *CosignVerifier) Verify(data []byte, sig *Signature) error {
+	if v == nil || v.adapter == nil {
+		return fmt.Errorf("verifier not initialized")
+	}
+	if sig == nil {
+		return fmt.Errorf("signature is nil")
+	}
+	return v.adapter.VerifyData(data, sig, false)
+}
+
+func (v *CosignVerifier) VerifyAllowUntrusted(data []byte, sig *Signature) error {
+	if v == nil || v.adapter == nil {
+		return fmt.Errorf("verifier not initialized")
+	}
+	if sig == nil {
+		return fmt.Errorf("signature is nil")
+	}
+	return v.adapter.VerifyData(data, sig, true)
+}
diff --git a/pkg/signature/verify.go b/pkg/signature/verify.go
new file mode 100644
index 0000000000..f5d3d9913a
--- /dev/null
+++ b/pkg/signature/verify.go
@@ -0,0 +1,85 @@
+package signature
+
+import (
+	"fmt"
+
+	"github.com/kubescape/go-logger"
+	"github.com/kubescape/go-logger/helpers"
+)
+
+func VerifyObject(obj SignableObject, opts ...VerifyOption) error {
+	if obj == nil {
+		return fmt.Errorf("object is nil")
+	}
+	options := &VerifyOptions{
+		AllowUntrusted: false,
+	}
+
+	for _, opt := range opts {
+		opt(options)
+	}
+
+	annotations := obj.GetAnnotations()
+	if annotations == nil {
return fmt.Errorf("%w (missing %s annotation)", ErrObjectNotSigned, AnnotationSignature) + } + + if _, ok := annotations[AnnotationSignature]; !ok { + return fmt.Errorf("%w (missing %s annotation)", ErrObjectNotSigned, AnnotationSignature) + } + + // useKeyless=true is fine for verification since we use the certificate + // stored in the object annotations, regardless of how the object was signed + adapter, err := NewCosignAdapter(true) + if err != nil { + return fmt.Errorf("failed to create cosign adapter: %w", err) + } + + sig, err := adapter.DecodeSignatureFromAnnotations(annotations) + if err != nil { + return fmt.Errorf("failed to decode signature from annotations: %w", err) + } + + content := obj.GetContent() + hash, err := adapter.GetContentHash(content) + if err != nil { + return fmt.Errorf("failed to compute content hash: %w", err) + } + + verifier, err := NewCosignVerifier(true) + if err != nil { + return fmt.Errorf("failed to create verifier: %w", err) + } + + var verifyErr error + if options.AllowUntrusted { + verifyErr = verifier.VerifyAllowUntrusted([]byte(hash), sig) + } else { + verifyErr = verifier.Verify([]byte(hash), sig) + } + + if verifyErr != nil { + logger.L().Warning("Object signature verification failed", + helpers.String("namespace", obj.GetNamespace()), + helpers.String("name", obj.GetName()), + helpers.String("error", verifyErr.Error())) + + return fmt.Errorf("signature verification failed: %w", verifyErr) + } + + logger.L().Info("Successfully verified object signature", + helpers.String("namespace", obj.GetNamespace()), + helpers.String("name", obj.GetName()), + helpers.String("identity", sig.Identity), + helpers.String("issuer", sig.Issuer)) + + return nil +} + +func VerifyObjectStrict(obj SignableObject) error { + return VerifyObject(obj, WithUntrusted(false)) +} + +func VerifyObjectAllowUntrusted(obj SignableObject) error { + return VerifyObject(obj, WithUntrusted(true)) +} diff --git a/pkg/signature/verify_test.go b/pkg/signature/verify_test.go new file mode 100644 index 0000000000..70973bf2ee --- /dev/null +++ b/pkg/signature/verify_test.go @@ -0,0 +1,435 @@ +package signature + +import ( + "io" + "os" + "strings" + "testing" + + logger "github.com/kubescape/go-logger" + "github.com/kubescape/node-agent/pkg/signature/profiles" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestVerifyObjectStrict(t *testing.T) { + if os.Getenv("ENABLE_KEYLESS_TESTS") == "" { + t.Skip("Skipping TestVerifyObjectStrict. 
Set ENABLE_KEYLESS_TESTS to run.") + } + profileContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + "value": 123, + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile-verify", profileContent) + + err := SignObjectKeyless(profile) + if err != nil { + t.Fatalf("SignObjectKeyless failed: %v", err) + } + + err = VerifyObjectStrict(profile) + if err != nil { + t.Fatalf("VerifyObjectStrict failed: %v", err) + } +} + +func TestVerifyObjectAllowUntrusted(t *testing.T) { + profileContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + "value": 456, + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile-verify-2", profileContent) + + err := SignObjectDisableKeyless(profile) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed: %v", err) + } + + err = VerifyObjectAllowUntrusted(profile) + if err != nil { + t.Fatalf("VerifyObjectAllowUntrusted failed: %v", err) + } +} + +func TestVerifyObjectTampered(t *testing.T) { + if os.Getenv("ENABLE_KEYLESS_TESTS") == "" { + t.Skip("Skipping TestVerifyObjectTampered. Set ENABLE_KEYLESS_TESTS to run.") + } + originalContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + "value": 789, + "confident": "secret", + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile-tamper", originalContent) + + err := SignObjectKeyless(profile) + if err != nil { + t.Fatalf("SignObjectKeyless failed: %v", err) + } + + tamperedContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + "value": 999, + "confident": "mod", + } + profile.content = tamperedContent + + err = VerifyObjectStrict(profile) + if err == nil { + t.Error("Expected verification failure for tampered profile, got success") + } +} + +func TestVerifyObjectNoAnnotations(t *testing.T) { + profileContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile-no-sig", profileContent) + + err := VerifyObjectStrict(profile) + if err == nil { + t.Error("Expected error for profile without annotations, got nil") + } +} + +func TestVerifyObjectMissingSignature(t *testing.T) { + profileContent := map[string]interface{}{ + "type": "test-profile", + "data": "test-data", + } + + profile := NewMockSignableObject("test-uid", "test-ns", "test-profile-missing-sig", profileContent) + profile.SetAnnotations(map[string]string{ + AnnotationIssuer: "test-issuer", + AnnotationIdentity: "test-identity", + }) + + err := VerifyObjectStrict(profile) + if err == nil { + t.Error("Expected error for profile without signature annotation, got nil") + } +} + +func TestSignAndVerifyRoundTrip(t *testing.T) { + if os.Getenv("ENABLE_KEYLESS_TESTS") == "" { + t.Skip("Skipping TestSignAndVerifyRoundTrip. 
Set ENABLE_KEYLESS_TESTS to run.") + } + profileContent := map[string]interface{}{ + "type": "roundtrip-profile", + "containers": []string{"nginx", "redis"}, + "capabilities": []string{"NET_BIND_SERVICE"}, + "networkPolicy": "allow", + } + + profile := NewMockSignableObject("roundtrip-uid", "roundtrip-ns", "roundtrip-profile", profileContent) + + err := SignObjectKeyless(profile) + if err != nil { + t.Fatalf("SignObjectKeyless failed: %v", err) + } + + if !IsSigned(profile) { + t.Fatal("Profile should be signed after signing") + } + + sig, err := GetObjectSignature(profile) + if err != nil { + t.Fatalf("GetObjectSignature failed: %v", err) + } + + if len(sig.Signature) == 0 { + t.Error("Signature should not be empty") + } + + err = VerifyObjectStrict(profile) + if err != nil { + t.Fatalf("VerifyObjectStrict failed after signing: %v", err) + } +} + +func TestSignAndVerifyDifferentKeys(t *testing.T) { + if os.Getenv("ENABLE_KEYLESS_TESTS") == "" { + t.Skip("Skipping TestSignAndVerifyDifferentKeys. Set ENABLE_KEYLESS_TESTS to run.") + } + profileContent := map[string]interface{}{ + "type": "multi-key-test", + "data": "data", + } + + profile1 := NewMockSignableObject("uid1", "ns", "profile1", profileContent) + profile2 := NewMockSignableObject("uid2", "ns", "profile2", profileContent) + + err := SignObjectDisableKeyless(profile1) + if err != nil { + t.Fatalf("SignObjectDisableKeyless failed for profile1: %v", err) + } + + err = SignObjectKeyless(profile2) + if err != nil { + t.Fatalf("SignObjectKeyless failed for profile2: %v", err) + } + + sig1, err := GetObjectSignature(profile1) + if err != nil { + t.Fatalf("GetObjectSignature failed for profile1: %v", err) + } + + sig2, err := GetObjectSignature(profile2) + if err != nil { + t.Fatalf("GetObjectSignature failed for profile2: %v", err) + } + + if sig1.Issuer != "local" { + t.Errorf("Expected key-based signing issuer 'local', got '%s'", sig1.Issuer) + } + + if sig1.Identity != "local-key" { + t.Errorf("Expected key-based signing identity 'local-key', got '%s'", sig1.Identity) + } + + if sig2.Issuer == "" { + t.Errorf("Expected keyless signing to have issuer, got empty") + } + + if sig2.Identity == "" { + t.Errorf("Expected keyless signing to have identity, got empty") + } +} + +// captureLogOutput redirects the global logger to a pipe, runs fn, and returns +// the captured log text. The logger is restored to its previous writer afterward. +func captureLogOutput(t *testing.T, fn func()) string { + t.Helper() + + // Ensure the global logger is initialized as pretty (supports SetWriter). + logger.InitLogger("pretty") + + oldWriter := logger.L().GetWriter() + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("os.Pipe: %v", err) + } + logger.L().SetWriter(w) + + fn() + + w.Close() + var buf strings.Builder + io.Copy(&buf, r) + r.Close() + + // Restore original writer. + logger.L().SetWriter(oldWriter) + + return buf.String() +} + +// TestTamperedAPLogsWarning signs an ApplicationProfile, tampers with it, +// verifies it, and asserts the warning log contains the expected fields: +// namespace, name, and "Object signature verification failed". 
+func TestTamperedAPLogsWarning(t *testing.T) { + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tamper-warn-ap", + Namespace: "tamper-ns", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Execs: []v1beta1.ExecCalls{{Path: "/usr/bin/curl"}}, + Syscalls: []string{"read", "write"}, + }, + }, + }, + } + + adapter := profiles.NewApplicationProfileAdapter(ap) + if err := SignObjectDisableKeyless(adapter); err != nil { + t.Fatalf("sign failed: %v", err) + } + + // Tamper: add an exec entry. + ap.Spec.Containers[0].Execs = append(ap.Spec.Containers[0].Execs, + v1beta1.ExecCalls{Path: "/usr/bin/nslookup"}) + + tamperedAdapter := profiles.NewApplicationProfileAdapter(ap) + + logOutput := captureLogOutput(t, func() { + err := VerifyObjectAllowUntrusted(tamperedAdapter) + if err == nil { + t.Error("expected verification to fail for tampered AP") + } + }) + + // Assert warning log contains expected fields. + if !strings.Contains(logOutput, "Object signature verification failed") { + t.Errorf("expected warning message in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "tamper-ns") { + t.Errorf("expected namespace 'tamper-ns' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "tamper-warn-ap") { + t.Errorf("expected name 'tamper-warn-ap' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "invalid signature") { + t.Errorf("expected 'invalid signature' in log output, got:\n%s", logOutput) + } +} + +// TestTamperedNNLogsWarning signs a NetworkNeighborhood, tampers with it, +// verifies it, and asserts the warning log contains the expected fields. +func TestTamperedNNLogsWarning(t *testing.T) { + nn := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tamper-warn-nn", + Namespace: "tamper-ns", + }, + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "curl", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "legit", + DNSNames: []string{"example.com."}, + IPAddress: "93.184.216.34", + }, + }, + }, + }, + }, + } + + adapter := profiles.NewNetworkNeighborhoodAdapter(nn) + if err := SignObjectDisableKeyless(adapter); err != nil { + t.Fatalf("sign failed: %v", err) + } + + // Tamper: add an egress entry. + nn.Spec.Containers[0].Egress = append(nn.Spec.Containers[0].Egress, + v1beta1.NetworkNeighbor{ + Identifier: "evil", + DNSNames: []string{"evil-c2.io."}, + IPAddress: "6.6.6.6", + }) + + tamperedAdapter := profiles.NewNetworkNeighborhoodAdapter(nn) + + logOutput := captureLogOutput(t, func() { + err := VerifyObjectAllowUntrusted(tamperedAdapter) + if err == nil { + t.Error("expected verification to fail for tampered NN") + } + }) + + if !strings.Contains(logOutput, "Object signature verification failed") { + t.Errorf("expected warning message in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "tamper-ns") { + t.Errorf("expected namespace 'tamper-ns' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "tamper-warn-nn") { + t.Errorf("expected name 'tamper-warn-nn' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "invalid signature") { + t.Errorf("expected 'invalid signature' in log output, got:\n%s", logOutput) + } +} + +// TestSuccessfulVerifyLogsInfo verifies that a valid signature produces the +// "Successfully verified object signature" info log with identity and issuer. 
+func TestSuccessfulVerifyLogsInfo(t *testing.T) { + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-ap", + Namespace: "valid-ns", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "nginx", + Execs: []v1beta1.ExecCalls{{Path: "/usr/sbin/nginx"}}, + Syscalls: []string{"read", "write", "openat"}, + }, + }, + }, + } + + adapter := profiles.NewApplicationProfileAdapter(ap) + if err := SignObjectDisableKeyless(adapter); err != nil { + t.Fatalf("sign failed: %v", err) + } + + logOutput := captureLogOutput(t, func() { + if err := VerifyObjectAllowUntrusted(adapter); err != nil { + t.Fatalf("expected verification to succeed: %v", err) + } + }) + + if !strings.Contains(logOutput, "Successfully verified object signature") { + t.Errorf("expected info message in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "valid-ns") { + t.Errorf("expected namespace 'valid-ns' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "valid-ap") { + t.Errorf("expected name 'valid-ap' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "local-key") { + t.Errorf("expected identity 'local-key' in log output, got:\n%s", logOutput) + } +} + +// TestSignLogsInfo verifies that signing an object produces the +// "Successfully signed object" info log with identity and issuer. +func TestSignLogsInfo(t *testing.T) { + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sign-log-ap", + Namespace: "sign-ns", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "app", + Execs: []v1beta1.ExecCalls{{Path: "/app/main"}}, + Syscalls: []string{"read"}, + }, + }, + }, + } + + adapter := profiles.NewApplicationProfileAdapter(ap) + + logOutput := captureLogOutput(t, func() { + if err := SignObjectDisableKeyless(adapter); err != nil { + t.Fatalf("sign failed: %v", err) + } + }) + + if !strings.Contains(logOutput, "Successfully signed object") { + t.Errorf("expected sign info message in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "sign-ns") { + t.Errorf("expected namespace 'sign-ns' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "sign-log-ap") { + t.Errorf("expected name 'sign-log-ap' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "local-key") { + t.Errorf("expected identity 'local-key' in log output, got:\n%s", logOutput) + } + if !strings.Contains(logOutput, "local") { + t.Errorf("expected issuer 'local' in log output, got:\n%s", logOutput) + } +} diff --git a/pkg/storage/storage_interface.go b/pkg/storage/storage_interface.go index 374b9ead8e..9a1c8125f1 100644 --- a/pkg/storage/storage_interface.go +++ b/pkg/storage/storage_interface.go @@ -1,6 +1,8 @@ package storage import ( + "context" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" spdxv1beta1 "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -8,10 +10,11 @@ import ( ) type ProfileClient interface { - GetApplicationProfile(namespace, name string) (*v1beta1.ApplicationProfile, error) - GetNetworkNeighborhood(namespace, name string) (*v1beta1.NetworkNeighborhood, error) - ListApplicationProfiles(namespace string, limit int64, cont string) (*v1beta1.ApplicationProfileList, error) - ListNetworkNeighborhoods(namespace string, limit int64, cont 
string) (*v1beta1.NetworkNeighborhoodList, error) + GetApplicationProfile(ctx context.Context, namespace, name string) (*v1beta1.ApplicationProfile, error) + GetNetworkNeighborhood(ctx context.Context, namespace, name string) (*v1beta1.NetworkNeighborhood, error) + GetContainerProfile(ctx context.Context, namespace, name string) (*v1beta1.ContainerProfile, error) + ListApplicationProfiles(ctx context.Context, namespace string, limit int64, cont string) (*v1beta1.ApplicationProfileList, error) + ListNetworkNeighborhoods(ctx context.Context, namespace string, limit int64, cont string) (*v1beta1.NetworkNeighborhoodList, error) } // ProfileCreator defines the interface for creating container profiles diff --git a/pkg/storage/storage_mock.go b/pkg/storage/storage_mock.go index 1f1c0dcbc2..13e96f3aaf 100644 --- a/pkg/storage/storage_mock.go +++ b/pkg/storage/storage_mock.go @@ -1,6 +1,8 @@ package storage import ( + "context" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" spdxv1beta1 "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" beta1 "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1" @@ -35,12 +37,21 @@ func (sc *StorageHttpClientMock) CreateSBOM(SBOM *v1beta1.SBOMSyft) (*v1beta1.SB return SBOM, nil } -func (sc *StorageHttpClientMock) GetApplicationProfile(_, _ string) (*spdxv1beta1.ApplicationProfile, error) { +func (sc *StorageHttpClientMock) GetContainerProfile(_ context.Context, namespace, name string) (*v1beta1.ContainerProfile, error) { + for _, p := range sc.ContainerProfiles { + if p != nil && p.Namespace == namespace && p.Name == name { + return p, nil + } + } + return nil, nil +} + +func (sc *StorageHttpClientMock) GetApplicationProfile(_ context.Context, _, _ string) (*spdxv1beta1.ApplicationProfile, error) { //TODO implement me panic("implement me") } -func (sc *StorageHttpClientMock) GetNetworkNeighborhood(_, _ string) (*spdxv1beta1.NetworkNeighborhood, error) { +func (sc *StorageHttpClientMock) GetNetworkNeighborhood(_ context.Context, _, _ string) (*spdxv1beta1.NetworkNeighborhood, error) { //TODO implement me panic("implement me") } @@ -52,12 +63,12 @@ func (sc *StorageHttpClientMock) GetStorageClient() beta1.SpdxV1beta1Interface { return nil } -func (sc *StorageHttpClientMock) ListApplicationProfiles(namespace string, limit int64, cont string) (*spdxv1beta1.ApplicationProfileList, error) { +func (sc *StorageHttpClientMock) ListApplicationProfiles(_ context.Context, namespace string, limit int64, cont string) (*spdxv1beta1.ApplicationProfileList, error) { //TODO implement me panic("implement me") } -func (sc *StorageHttpClientMock) ListNetworkNeighborhoods(namespace string, limit int64, cont string) (*spdxv1beta1.NetworkNeighborhoodList, error) { +func (sc *StorageHttpClientMock) ListNetworkNeighborhoods(_ context.Context, namespace string, limit int64, cont string) (*spdxv1beta1.NetworkNeighborhoodList, error) { //TODO implement me panic("implement me") } diff --git a/pkg/storage/v1/applicationprofile.go b/pkg/storage/v1/applicationprofile.go index 96fa7e1bb0..39f0543288 100644 --- a/pkg/storage/v1/applicationprofile.go +++ b/pkg/storage/v1/applicationprofile.go @@ -7,12 +7,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (sc *Storage) GetApplicationProfile(namespace, name string) (*v1beta1.ApplicationProfile, error) { - return sc.storageClient.ApplicationProfiles(namespace).Get(context.Background(), name, metav1.GetOptions{}) +func (sc *Storage) 
GetApplicationProfile(ctx context.Context, namespace, name string) (*v1beta1.ApplicationProfile, error) { + return sc.storageClient.ApplicationProfiles(namespace).Get(ctx, name, metav1.GetOptions{}) } -func (sc *Storage) ListApplicationProfiles(namespace string, limit int64, cont string) (*v1beta1.ApplicationProfileList, error) { - return sc.storageClient.ApplicationProfiles(namespace).List(context.Background(), metav1.ListOptions{ +func (sc *Storage) ListApplicationProfiles(ctx context.Context, namespace string, limit int64, cont string) (*v1beta1.ApplicationProfileList, error) { + return sc.storageClient.ApplicationProfiles(namespace).List(ctx, metav1.ListOptions{ Limit: limit, Continue: cont, }) diff --git a/pkg/storage/v1/containerprofile.go b/pkg/storage/v1/containerprofile.go index 620e42b70e..69fbc0ea5a 100644 --- a/pkg/storage/v1/containerprofile.go +++ b/pkg/storage/v1/containerprofile.go @@ -7,8 +7,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CreateContainerProfileDirect directly creates the profile without queuing -// This implements the ProfileCreator interface +func (sc *Storage) GetContainerProfile(ctx context.Context, namespace, name string) (*v1beta1.ContainerProfile, error) { + return sc.storageClient.ContainerProfiles(namespace).Get(ctx, name, metav1.GetOptions{}) +} + +// CreateContainerProfileDirect directly creates the profile without queuing. +// This implements the ProfileCreator interface. func (sc *Storage) CreateContainerProfileDirect(profile *v1beta1.ContainerProfile) error { // Apply name modifications if needed (keeping your existing logic) // sc.modifyNameP(&profile.Name) diff --git a/pkg/storage/v1/networkneighborhood.go b/pkg/storage/v1/networkneighborhood.go index bfe52b2e3d..cec12b97e4 100644 --- a/pkg/storage/v1/networkneighborhood.go +++ b/pkg/storage/v1/networkneighborhood.go @@ -7,12 +7,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (sc *Storage) GetNetworkNeighborhood(namespace, name string) (*v1beta1.NetworkNeighborhood, error) { - return sc.storageClient.NetworkNeighborhoods(namespace).Get(context.Background(), name, metav1.GetOptions{}) +func (sc *Storage) GetNetworkNeighborhood(ctx context.Context, namespace, name string) (*v1beta1.NetworkNeighborhood, error) { + return sc.storageClient.NetworkNeighborhoods(namespace).Get(ctx, name, metav1.GetOptions{}) } -func (sc *Storage) ListNetworkNeighborhoods(namespace string, limit int64, cont string) (*v1beta1.NetworkNeighborhoodList, error) { - return sc.storageClient.NetworkNeighborhoods(namespace).List(context.Background(), metav1.ListOptions{ +func (sc *Storage) ListNetworkNeighborhoods(ctx context.Context, namespace string, limit int64, cont string) (*v1beta1.NetworkNeighborhoodList, error) { + return sc.storageClient.NetworkNeighborhoods(namespace).List(ctx, metav1.ListOptions{ Limit: limit, Continue: cont, }) diff --git a/scripts/HOWTO.md b/scripts/HOWTO.md new file mode 100644 index 0000000000..5be44df97c --- /dev/null +++ b/scripts/HOWTO.md @@ -0,0 +1,104 @@ +# Fork Workflow: k8sstormcenter/node-agent + +## Branch structure + +``` +upstream/main: A --- B --- C --- D (kubescape/node-agent) + \ +fork main: D --- [feat-X] --- [fork-only: .github/*] + \ +feature branch: feat/my-feature (1-2 clean commits) +``` + +**Rules:** +- Fork `main` always has a **fork-only `.github/` commit as the tip** — this is never sent upstream. +- Feature branches start from `main~1` (before the fork-only commit). +- Each feature is a small, focused branch with clean commits. 
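+
+A quick invariant check (a sketch; it assumes your remotes are named `origin` and `upstream` as in the commands below):
+
+```bash
+# The tip commit of origin/main must touch only .github/ files.
+git fetch origin
+if git diff-tree --no-commit-id --name-only -r origin/main | grep -qv '^\.github/'; then
+  echo "WARNING: tip commit touches files outside .github/"
+else
+  echo "OK: tip commit is .github-only"
+fi
+```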
+
+## Day-to-day workflow
+
+### 1. Sync fork with upstream
+
+```bash
+git fetch upstream
+git checkout main
+
+# Rebase your features onto latest upstream (fork-only commit stays on top)
+git rebase upstream/main
+
+# Force-push (safe — your main is the source of truth)
+git push origin main --force-with-lease
+```
+
+### 2. Start a new feature
+
+```bash
+# Always branch from main~1 (before fork-only commit)
+git checkout -b feat/my-feature main~1
+
+# Develop...
+# Commit (sign your commits)
+# Test locally with local-ci.sh or CI
+```
+
+### 3. Test on your fork
+
+```bash
+# Push feature branch to your fork
+git push origin feat/my-feature
+
+# Merge into fork main (keeps fork-only commit on top):
+git checkout main
+git rebase --onto feat/my-feature main~1 main
+# This replays only the fork-only tip commit (main~1..main) onto
+# feat/my-feature, so the fork-only commit ends up back on top
+git push origin main --force-with-lease
+```
+
+Or simpler: just push the feature branch and trigger CI via workflow_dispatch.
+
+### 4. Create upstream PR
+
+```bash
+# Use the script — it cherry-picks your feature onto upstream/main,
+# stripping any .github/ changes automatically
+./scripts/upstream-pr.sh feat/my-feature
+
+# Verify it's clean
+git diff --stat upstream/main upstream/my-feature -- .github/  # should be empty
+
+# Push to upstream and open PR
+git push upstream upstream/my-feature
+```
+
+Then open the PR at `https://github.com/kubescape/node-agent/compare/main...upstream/my-feature`
+
+### 5. After upstream merges your PR
+
+```bash
+# Sync
+git fetch upstream
+git checkout main
+git rebase upstream/main
+git push origin main --force-with-lease
+
+# Clean up
+git branch -d feat/my-feature
+git branch -d upstream/my-feature
+git push origin --delete feat/my-feature
+```
+
+## What NOT to do
+
+- **Don't develop on `main` directly** — always use feature branches.
+- **Don't squash-merge upstream into your fork** — this is what caused the old mess (regressions baked into squash commits). Use `rebase` instead.
+- **Don't mix `.github/` changes with feature commits** — keep them in the fork-only tip commit only.
+- **Don't push the node-agent image as `latest` from feature branches** — use dedicated tags (`build.yaml` is already configured for this).
+
+## Key files
+
+| File | Purpose |
+|---|---|
+| `scripts/upstream-pr.sh` | Creates upstream-clean branches for PRs |
+| `tests/scripts/local-ci.sh` | Runs component tests locally in Kind |
+| `.github/workflows/component-tests.yaml` | Fork CI (triggers on `main`) |
+| `.github/workflows/build.yaml` | Builds node-agent image (no `latest` tag) |
diff --git a/scripts/upstream-pr.sh b/scripts/upstream-pr.sh
new file mode 100755
index 0000000000..773f38ed69
--- /dev/null
+++ b/scripts/upstream-pr.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+# upstream-pr.sh — Create an upstream-clean branch for PRing to kubescape/node-agent
+#
+# Your fork's main looks like:
+#
+#   upstream/main --- [feat-A] --- [feat-B] --- [fork-only: .github/*]
+#                                                ^^ always the tip
+#
+# This script takes a feature branch (based on main~1), cherry-picks its
+# commits onto upstream/main, and strips any .github/ changes so the
+# result is clean for an upstream PR.
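+#
+# Illustration (hypothetical commits): if feat/foo contains
+#   X "add verifier" (touches pkg/ and .github/workflows/ci.yaml)
+#   Y "tweak CI"     (touches only .github/)
+# then the resulting upstream/foo contains X with its .github/ hunks
+# stripped, while Y is skipped entirely.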
+#
+# Usage:
+#   ./scripts/upstream-pr.sh <feature-branch> [upstream-branch-name]
+#
+# Examples:
+#   ./scripts/upstream-pr.sh feat/signature-verification
+#   ./scripts/upstream-pr.sh feat/signature-verification upstream-sig-verify
+#
+set -euo pipefail
+
+FEATURE="${1:?Usage: $0 <feature-branch> [upstream-branch-name]}"
+# Default upstream branch name: strip "feat/" prefix, prepend "upstream/"
+DEFAULT_NAME="upstream/${FEATURE#feat/}"
+UPSTREAM_BRANCH="${2:-$DEFAULT_NAME}"
+
+echo "=== upstream-pr ==="
+echo "  Feature branch : $FEATURE"
+echo "  Upstream branch: $UPSTREAM_BRANCH"
+echo ""
+
+# Ensure we have the latest upstream
+git fetch upstream
+
+# Fail if the branch already exists
+if git rev-parse --verify "$UPSTREAM_BRANCH" &>/dev/null; then
+  echo "ERROR: Branch '$UPSTREAM_BRANCH' already exists."
+  echo "       Delete it first: git branch -D $UPSTREAM_BRANCH"
+  exit 1
+fi
+
+# Find commits on the feature branch that are above origin/main
+COMMITS=$(git rev-list --reverse origin/main.."$FEATURE")
+if [ -z "$COMMITS" ]; then
+  echo "ERROR: No commits found on '$FEATURE' above origin/main."
+  exit 1
+fi
+
+# Create branch from upstream/main
+git checkout -b "$UPSTREAM_BRANCH" upstream/main
+
+APPLIED=0
+SKIPPED=0
+for commit in $COMMITS; do
+  SUBJECT=$(git log --oneline -1 "$commit")
+
+  # Skip commits that ONLY touch .github/
+  NON_GITHUB=$(git diff-tree --no-commit-id --name-only -r "$commit" | grep -v '^\.github/' || true)
+  if [ -z "$NON_GITHUB" ]; then
+    echo "  SKIP (github-only): $SUBJECT"
+    SKIPPED=$((SKIPPED + 1))
+    continue
+  fi
+
+  echo "  APPLY: $SUBJECT"
+  git cherry-pick "$commit" --no-commit
+
+  # Remove any .github changes that came along for the ride
+  if git diff --cached --name-only | grep -q '^\.github/'; then
+    git reset HEAD -- .github/ &>/dev/null || true
+    git checkout -- .github/ &>/dev/null || true
+  fi
+
+  # Re-commit with the original message and author
+  git commit -C "$commit"
+  APPLIED=$((APPLIED + 1))
+done
+
+echo ""
+echo "=== Done ==="
+echo "  Applied: $APPLIED commits"
+echo "  Skipped: $SKIPPED commits (.github-only)"
+echo ""
+echo "Verify:"
+echo "  git log --oneline $UPSTREAM_BRANCH --not upstream/main"
+echo "  git diff --stat upstream/main $UPSTREAM_BRANCH -- .github/  # should be empty"
+echo ""
+echo "Push to upstream:"
+echo "  git push upstream $UPSTREAM_BRANCH"
+echo ""
+echo "Then open PR at: https://github.com/kubescape/node-agent/compare/main...$UPSTREAM_BRANCH"
diff --git a/tests/chart/crds/rules.crd.yaml b/tests/chart/crds/rules.crd.yaml
index e4e1155eaf..f8cc94ee42 100644
--- a/tests/chart/crds/rules.crd.yaml
+++ b/tests/chart/crds/rules.crd.yaml
@@ -91,6 +91,19 @@ spec:
                   type: object
                   additionalProperties: true
                   description: "State information for the rule"
+                agentVersionRequirement:
+                  type: string
+                  description: "Agent version requirement to evaluate this rule (supports semver ranges like ~1.0, >=1.2.0, etc.)"
+                isTriggerAlert:
+                  type: boolean
+                  description: "Whether the rule is a trigger alert"
+                  default: true
+                mitreTechnique:
+                  type: string
+                  description: "MITRE technique associated with the rule"
+                mitreTactic:
+                  type: string
+                  description: "MITRE tactic associated with the rule"
              required:
                - enabled
                - id
@@ -100,7 +113,9 @@ spec:
                - profileDependency
                - severity
                - supportPolicy
-               - tags
+               - isTriggerAlert
+               - mitreTechnique
+               - mitreTactic
            required:
              - rules
      subresources:
diff --git a/tests/chart/crds/runtime-rule-binding.crd.yaml b/tests/chart/crds/runtime-rule-binding.crd.yaml
index d37280065e..d01b29b443 100644
--- a/tests/chart/crds/runtime-rule-binding.crd.yaml
+++ b/tests/chart/crds/runtime-rule-binding.crd.yaml
@@ -95,4 +95,4 @@ spec:
            items:
              type: string
          severity:
-           type: string
\ No newline at end of file
+           type: string
diff --git a/tests/chart/templates/node-agent/default-rule-binding.yaml b/tests/chart/templates/node-agent/default-rule-binding.yaml
index 26367de97f..710deb6e35 100644
--- a/tests/chart/templates/node-agent/default-rule-binding.yaml
+++ b/tests/chart/templates/node-agent/default-rule-binding.yaml
@@ -15,6 +15,7 @@ spec:
     - "kubeconfig"
   rules:
     - ruleName: "Unexpected process launched"
+    - ruleName: "Unexpected process arguments"
     - ruleName: "Files Access Anomalies in container"
     - ruleName: "Syscalls Anomalies in container"
     - ruleName: "Linux Capabilities Anomalies in container"
diff --git a/tests/chart/templates/node-agent/default-rules.yaml b/tests/chart/templates/node-agent/default-rules.yaml
index 55fd1b527e..e1972f1467 100644
--- a/tests/chart/templates/node-agent/default-rules.yaml
+++ b/tests/chart/templates/node-agent/default-rules.yaml
@@ -1,7 +1,7 @@
 apiVersion: kubescape.io/v1
 kind: Rules
 metadata:
-  name: kubescape-rules
+  name: default-rules
   namespace: kubescape
   annotations:
     kubescape.io/namespace: kubescape
@@ -30,6 +30,47 @@ spec:
         - "process"
         - "exec"
         - "applicationprofile"
+        - "context:kubernetes"
+      # ---------------------------------------------------------------
+      # R0040 — Unexpected process arguments
+      #
+      # Additive companion to R0001. Fires only when:
+      #   1. The exec'd path IS in the user-defined ApplicationProfile
+      #      (so R0001 stays silent), AND
+      #   2. The runtime arg vector does NOT match any profile entry's
+      #      arg pattern via dynamicpathdetector.CompareExecArgs.
+      #
+      # Profile arg vectors may carry wildcard tokens:
+      #   "⋯" matches exactly one position; "*" matches zero or more
+      #   trailing args. Anything else is literal equality.
+      #
+      # Use case: a profile entry like {Path: "/bin/sh", Args: ["-c", "*"]}
+      # allows `sh -c <script>` but flags `sh -x <script>` as drift.
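+      #
+      # Worked example (illustrative, under the semantics above):
+      #   entry {Path: "/bin/sh", Args: ["-c", "*"]}:
+      #     args ["-c", "echo hi"]  -> match  ("*" absorbs the trailing args)
+      #     args ["-c"]             -> match  ("*" also matches zero args)
+      #     args ["-x", "id"]       -> no match ("-x" != "-c"), R0040 fires
+      #   entry {Path: "/bin/cat", Args: ["⋯"]}:
+      #     args ["/etc/hosts"]       -> match  (exactly one position)
+      #     args ["/etc/hosts", "-n"] -> no match (extra arg), R0040 fires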
+ # --------------------------------------------------------------- + - name: "Unexpected process arguments" + enabled: true + id: "R0040" + description: "Process path is allowed by profile but argument vector does not match any profile entry's arg pattern (literal or wildcard ⋯/*)" + expressions: + message: "'Unexpected process arguments: ' + event.comm + ' with PID ' + string(event.pid)" + uniqueId: "event.comm + '_' + event.exepath" + ruleExpression: + - eventType: "exec" + expression: > + ap.was_executed(event.containerId, parse.get_exec_path(event.args, event.comm)) && + !ap.was_executed_with_args(event.containerId, parse.get_exec_path(event.args, event.comm), event.args) + profileDependency: 0 + severity: 1 + supportPolicy: false + isTriggerAlert: true + mitreTactic: "TA0002" + mitreTechnique: "T1059" + tags: + - "anomaly" + - "process" + - "exec" + - "applicationprofile" + - "context:kubernetes" - name: "Files Access Anomalies in container" enabled: true id: "R0002" @@ -69,6 +110,7 @@ spec: - "file" - "open" - "applicationprofile" + - "context:kubernetes" - name: "Syscalls Anomalies in container" enabled: true id: "R0003" @@ -89,6 +131,7 @@ spec: - "anomaly" - "syscall" - "applicationprofile" + - "context:kubernetes" - name: "Linux Capabilities Anomalies in container" enabled: true id: "R0004" @@ -109,6 +152,7 @@ spec: - "anomaly" - "capabilities" - "applicationprofile" + - "context:kubernetes" - name: "DNS Anomalies in container" enabled: true id: "R0005" @@ -122,13 +166,14 @@ spec: profileDependency: 0 severity: 1 supportPolicy: false - isTriggerAlert: false + isTriggerAlert: true mitreTactic: "TA0011" mitreTechnique: "T1071.004" tags: - "dns" - "anomaly" - "networkprofile" + - "context:kubernetes" - name: "Unexpected service account token access" enabled: true id: "R0006" @@ -139,15 +184,12 @@ spec: ruleExpression: - eventType: "open" expression: > - ((event.path.startsWith('/run/secrets/kubernetes.io/serviceaccount') && event.path.endsWith('/token')) || + ((event.path.startsWith('/run/secrets/kubernetes.io/serviceaccount') && event.path.endsWith('/token')) || (event.path.startsWith('/var/run/secrets/kubernetes.io/serviceaccount') && event.path.endsWith('/token')) || (event.path.startsWith('/run/secrets/eks.amazonaws.com/serviceaccount') && event.path.endsWith('/token')) || (event.path.startsWith('/var/run/secrets/eks.amazonaws.com/serviceaccount') && event.path.endsWith('/token'))) && - !ap.was_path_opened_with_prefix(event.containerId, '/run/secrets/kubernetes.io/serviceaccount') && - !ap.was_path_opened_with_prefix(event.containerId, '/var/run/secrets/kubernetes.io/serviceaccount') && - !ap.was_path_opened_with_prefix(event.containerId, '/run/secrets/eks.amazonaws.com/serviceaccount') && - !ap.was_path_opened_with_prefix(event.containerId, '/var/run/secrets/eks.amazonaws.com/serviceaccount') - profileDependency: 1 + !ap.was_path_opened_with_suffix(event.containerId, '/token') + profileDependency: 0 severity: 5 supportPolicy: false isTriggerAlert: true @@ -157,6 +199,7 @@ spec: - "anomaly" - "serviceaccount" - "applicationprofile" + - "context:kubernetes" - name: "Workload uses Kubernetes API unexpectedly" enabled: true id: "R0007" @@ -172,7 +215,7 @@ spec: profileDependency: 0 severity: 5 # Medium supportPolicy: false - isTriggerAlert: true + isTriggerAlert: false mitreTactic: "TA0008" mitreTechnique: "T1210" tags: @@ -180,6 +223,7 @@ spec: - "network" - "anomaly" - "applicationprofile" + - "context:kubernetes" - name: "Read Environment Variables from procfs" enabled: true id: 
"R0008" @@ -190,7 +234,7 @@ spec: ruleExpression: - eventType: "open" expression: > - event.path.startsWith('/proc/') && + event.path.startsWith('/proc/') && event.path.endsWith('/environ') && !ap.was_path_opened_with_suffix(event.containerId, '/environ') profileDependency: 0 # Required @@ -204,6 +248,7 @@ spec: - "procfs" - "environment" - "applicationprofile" + - "context:kubernetes" - name: "eBPF Program Load" enabled: true id: "R0009" @@ -224,6 +269,7 @@ spec: - "bpf" - "ebpf" - "applicationprofile" + - "context:kubernetes" - name: "Unexpected Sensitive File Access" enabled: true id: "R0010" @@ -244,8 +290,9 @@ spec: - "files" - "anomaly" - "applicationprofile" + - "context:kubernetes" - name: "Unexpected Egress Network Traffic" - enabled: false + enabled: true id: "R0011" description: "Detecting unexpected egress network traffic that is not whitelisted by application profile." expressions: @@ -257,7 +304,7 @@ spec: profileDependency: 0 severity: 5 # Medium supportPolicy: false - isTriggerAlert: false + isTriggerAlert: true mitreTactic: "TA0010" mitreTechnique: "T1041" tags: @@ -265,6 +312,7 @@ spec: - "network" - "anomaly" - "networkprofile" + - "context:kubernetes" - name: "Process executed from malicious source" enabled: true id: "R1000" @@ -276,7 +324,7 @@ spec: - eventType: "exec" expression: > (event.exepath == '/dev/shm' || event.exepath.startsWith('/dev/shm/')) || - (event.cwd == '/dev/shm' || event.cwd.startsWith('/dev/shm/') || + (event.cwd == '/dev/shm' || event.cwd.startsWith('/dev/shm/') || (parse.get_exec_path(event.args, event.comm).startsWith('/dev/shm/'))) profileDependency: 2 severity: 8 @@ -288,6 +336,7 @@ spec: - "exec" - "signature" - "malicious" + - "context:kubernetes" - name: "Drifted process executed" enabled: true id: "R1001" @@ -313,6 +362,7 @@ spec: - "binary" - "base image" - "applicationprofile" + - "context:kubernetes" - name: "Process tries to load a kernel module" enabled: true id: "R1002" @@ -334,6 +384,7 @@ spec: - "kernel" - "module" - "load" + - "context:kubernetes" - name: "Disallowed ssh connection" enabled: false id: "R1003" @@ -356,13 +407,14 @@ spec: - "port" - "malicious" - "networkprofile" + - "context:kubernetes" - name: "Process executed from mount" enabled: true id: "R1004" description: "Detecting exec calls from mounted paths." 
expressions: message: "'Process (' + event.comm + ') was executed from a mounted path'" - uniqueId: "event.comm + '_' + event.exepath + '_'" + uniqueId: "event.comm" ruleExpression: - eventType: "exec" expression: "!ap.was_executed(event.containerId, parse.get_exec_path(event.args, event.comm)) && k8s.get_container_mount_paths(event.namespace, event.podName, event.containerName).exists(mount, event.exepath.startsWith(mount) || parse.get_exec_path(event.args, event.comm).startsWith(mount))" @@ -376,6 +428,7 @@ spec: - "exec" - "mount" - "applicationprofile" + - "context:kubernetes" - name: "Fileless execution detected" enabled: true id: "R1005" @@ -396,6 +449,7 @@ spec: - "fileless" - "execution" - "malicious" + - "context:kubernetes" - name: "Process tries to escape container" enabled: true id: "R1006" @@ -405,7 +459,7 @@ spec: uniqueId: "event.comm + '_' + 'unshare'" ruleExpression: - eventType: "unshare" - expression: "!ap.was_syscall_used(event.containerId, 'unshare')" + expression: "event.pcomm != 'runc' && !ap.was_syscall_used(event.containerId, 'unshare')" profileDependency: 2 severity: 5 supportPolicy: false @@ -418,6 +472,7 @@ spec: - "unshare" - "anomaly" - "applicationprofile" + - "context:kubernetes" - name: "Crypto miner launched" enabled: true id: "R1007" @@ -438,6 +493,7 @@ spec: - "crypto" - "miners" - "malicious" + - "context:kubernetes" - name: "Crypto Mining Domain Communication" enabled: true id: "R1008" @@ -447,7 +503,7 @@ spec: uniqueId: "event.name + '_' + event.comm" ruleExpression: - eventType: "dns" - expression: "event.name in ['2cryptocalc.com.', '2miners.com.', 'antpool.com.', 'asia1.ethpool.org.', 'bohemianpool.com.', 'botbox.dev.', 'btm.antpool.com.', 'c3pool.com.', 'c4pool.org.', 'ca.minexmr.com.', 'cn.stratum.slushpool.com.', 'dash.antpool.com.', 'data.miningpoolstats.stream.', 'de.minexmr.com.', 'eth-ar.dwarfpool.com.', 'eth-asia.dwarfpool.com.', 'eth-asia1.nanopool.org.', 'eth-au.dwarfpool.com.', 'eth-au1.nanopool.org.', 'eth-br.dwarfpool.com.', 'eth-cn.dwarfpool.com.', 'eth-cn2.dwarfpool.com.', 'eth-eu.dwarfpool.com.', 'eth-eu1.nanopool.org.', 'eth-eu2.nanopool.org.', 'eth-hk.dwarfpool.com.', 'eth-jp1.nanopool.org.', 'eth-ru.dwarfpool.com.', 'eth-ru2.dwarfpool.com.', 'eth-sg.dwarfpool.com.', 'eth-us-east1.nanopool.org.', 'eth-us-west1.nanopool.org.', 'eth-us.dwarfpool.com.', 'eth-us2.dwarfpool.com.', 'eth.antpool.com.', 'eu.stratum.slushpool.com.', 'eu1.ethermine.org.', 'eu1.ethpool.org.', 'fastpool.xyz.', 'fr.minexmr.com.', 'kriptokyng.com.', 'mine.moneropool.com.', 'mine.xmrpool.net.', 'miningmadness.com.', 'monero.cedric-crispin.com.', 'monero.crypto-pool.fr.', 'monero.fairhash.org.', 'monero.hashvault.pro.', 'monero.herominers.com.', 'monerod.org.', 'monerohash.com.', 'moneroocean.stream.', 'monerop.com.', 'multi-pools.com.', 'p2pool.io.', 'pool.kryptex.com.', 'pool.minexmr.com.', 'pool.monero.hashvault.pro.', 'pool.rplant.xyz.', 'pool.supportxmr.com.', 'pool.xmr.pt.', 'prohashing.com.', 'rx.unmineable.com.', 'sg.minexmr.com.', 'sg.stratum.slushpool.com.', 'skypool.org.', 'solo-xmr.2miners.com.', 'ss.antpool.com.', 'stratum-btm.antpool.com.', 'stratum-dash.antpool.com.', 'stratum-eth.antpool.com.', 'stratum-ltc.antpool.com.', 'stratum-xmc.antpool.com.', 'stratum-zec.antpool.com.', 'stratum.antpool.com.', 'supportxmr.com.', 'trustpool.cc.', 'us-east.stratum.slushpool.com.', 'us1.ethermine.org.', 'us1.ethpool.org.', 'us2.ethermine.org.', 'us2.ethpool.org.', 'web.xmrpool.eu.', 'www.domajorpool.com.', 'www.dxpool.com.', 'www.mining-dutch.nl.', 
'xmc.antpool.com.', 'xmr-asia1.nanopool.org.', 'xmr-au1.nanopool.org.', 'xmr-eu1.nanopool.org.', 'xmr-eu2.nanopool.org.', 'xmr-jp1.nanopool.org.', 'xmr-us-east1.nanopool.org.', 'xmr-us-west1.nanopool.org.', 'xmr.2miners.com.', 'xmr.crypto-pool.fr.', 'xmr.gntl.uk.', 'xmr.nanopool.org.', 'xmr.pool-pay.com.', 'xmr.pool.minergate.com.', 'xmr.solopool.org.', 'xmr.volt-mine.com.', 'xmr.zeropool.io.', 'zec.antpool.com.', 'zergpool.com.', 'auto.c3pool.org.', 'us.monero.herominers.com.']" + expression: "event.name in ['2cryptocalc.com.', '2miners.com.', 'antpool.com.', 'asia1.ethpool.org.', 'bohemianpool.com.', 'botbox.dev.', 'btm.antpool.com.', 'c3pool.com.', 'c4pool.org.', 'ca.minexmr.com.', 'cn.stratum.slushpool.com.', 'dash.antpool.com.', 'data.miningpoolstats.stream.', 'de.minexmr.com.', 'eth-ar.dwarfpool.com.', 'eth-asia.dwarfpool.com.', 'eth-asia1.nanopool.org.', 'eth-au.dwarfpool.com.', 'eth-au1.nanopool.org.', 'eth-br.dwarfpool.com.', 'eth-cn.dwarfpool.com.', 'eth-cn2.dwarfpool.com.', 'eth-eu.dwarfpool.com.', 'eth-eu1.nanopool.org.', 'eth-eu2.nanopool.org.', 'eth-hk.dwarfpool.com.', 'eth-jp1.nanopool.org.', 'eth-ru.dwarfpool.com.', 'eth-ru2.dwarfpool.com.', 'eth-sg.dwarfpool.com.', 'eth-us-east1.nanopool.org.', 'eth-us-west1.nanopool.org.', 'eth-us.dwarfpool.com.', 'eth-us2.dwarfpool.com.', 'eth.antpool.com.', 'eu.stratum.slushpool.com.', 'eu1.ethermine.org.', 'eu1.ethpool.org.', 'fastpool.xyz.', 'fr.minexmr.com.', 'kriptokyng.com.', 'mine.moneropool.com.', 'mine.xmrpool.net.', 'miningmadness.com.', 'monero.cedric-crispin.com.', 'monero.crypto-pool.fr.', 'monero.fairhash.org.', 'monero.hashvault.pro.', 'monero.herominers.com.', 'monerod.org.', 'monerohash.com.', 'moneroocean.stream.', 'monerop.com.', 'multi-pools.com.', 'p2pool.io.', 'pool.kryptex.com.', 'pool.minexmr.com.', 'pool.monero.hashvault.pro.', 'pool.rplant.xyz.', 'pool.supportxmr.com.', 'pool.xmr.pt.', 'prohashing.com.', 'rx.unmineable.com.', 'sg.minexmr.com.', 'sg.stratum.slushpool.com.', 'skypool.org.', 'solo-xmr.2miners.com.', 'ss.antpool.com.', 'stratum-btm.antpool.com.', 'stratum-dash.antpool.com.', 'stratum-eth.antpool.com.', 'stratum-ltc.antpool.com.', 'stratum-xmc.antpool.com.', 'stratum-zec.antpool.com.', 'stratum.antpool.com.', 'supportxmr.com.', 'trustpool.cc.', 'us-east.stratum.slushpool.com.', 'us1.ethermine.org.', 'us1.ethpool.org.', 'us2.ethermine.org.', 'us2.ethpool.org.', 'web.xmrpool.eu.', 'www.domajorpool.com.', 'www.dxpool.com.', 'www.mining-dutch.nl.', 'xmc.antpool.com.', 'xmr-asia1.nanopool.org.', 'xmr-au1.nanopool.org.', 'xmr-eu1.nanopool.org.', 'xmr-eu2.nanopool.org.', 'xmr-jp1.nanopool.org.', 'xmr-us-east1.nanopool.org.', 'xmr-us-west1.nanopool.org.', 'xmr.2miners.com.', 'xmr.crypto-pool.fr.', 'xmr.gntl.uk.', 'xmr.nanopool.org.', 'xmr.pool-pay.com.', 'xmr.pool.minergate.com.', 'xmr.solopool.org.', 'xmr.volt-mine.com.', 'xmr.zeropool.io.', 'zec.antpool.com.', 'zergpool.com.', 'auto.c3pool.org.', 'us.monero.herominers.com.', 'xmr.kryptex.network.']" profileDependency: 2 severity: 10 supportPolicy: false @@ -460,6 +516,7 @@ spec: - "miners" - "malicious" - "dns" + - "context:kubernetes" - name: "Crypto Mining Related Port Communication" enabled: true id: "R1009" @@ -482,6 +539,7 @@ spec: - "miners" - "malicious" - "networkprofile" + - "context:kubernetes" - name: "Soft link created over sensitive file" enabled: true id: "R1010" @@ -502,6 +560,7 @@ spec: - "anomaly" - "symlink" - "applicationprofile" + - "context:kubernetes" - name: "ld_preload hooks technique detected" enabled: false id: "R1011" @@ -524,6 
+583,7 @@ spec:
         - "exec"
         - "malicious"
         - "applicationprofile"
+        - "context:kubernetes"
     - name: "Hard link created over sensitive file"
       enabled: true
       id: "R1012"
@@ -544,6 +604,7 @@ spec:
         - "files"
         - "malicious"
         - "applicationprofile"
+        - "context:kubernetes"
     - name: "Malicious Ptrace Usage"
       enabled: true
      id: "R1015"
@@ -563,6 +624,7 @@ spec:
       tags:
         - "process"
         - "malicious"
+        - "context:kubernetes"
     - name: "Unexpected io_uring Operation Detected"
       enabled: true
       id: "R1030"
@@ -583,3 +645,22 @@ spec:
         - "syscalls"
         - "io_uring"
         - "applicationprofile"
+        - "context:kubernetes"
+    - name: "Signed profile tampered"
+      enabled: true
+      id: "R1016"
+      description: "Detects when a previously signed ApplicationProfile or NetworkNeighborhood has been tampered with (signature no longer valid)."
+      expressions:
+        message: "'Signed profile tampered'"
+        uniqueId: "'R1016'"
+        ruleExpression: []
+      profileDependency: 2
+      severity: 10
+      supportPolicy: false
+      isTriggerAlert: false
+      mitreTactic: "TA0005"
+      mitreTechnique: "T1565"
+      tags:
+        - "integrity"
+        - "signature"
+        - "tamper"
diff --git a/tests/chart/values.yaml b/tests/chart/values.yaml
index 7cf029c4c8..cde97df906 100644
--- a/tests/chart/values.yaml
+++ b/tests/chart/values.yaml
@@ -32,7 +32,7 @@ global:
 storage:
   name: "storage"
   image:
-    repository: quay.io/kubescape/storage
+    repository: ghcr.io/k8sstormcenter/storage
     tag: v0.0.156
     pullPolicy: Always
   cleanupInterval: "6h"
@@ -50,7 +50,7 @@ storage:
 nodeAgent:
   name: node-agent
   image:
-    repository: quay.io/kubescape/node-agent
+    repository: ghcr.io/k8sstormcenter/node-agent
     tag: v0.2.21
     pullPolicy: IfNotPresent
diff --git a/tests/component_test.go b/tests/component_test.go
index 8a83226321..a41318025d 100644
--- a/tests/component_test.go
+++ b/tests/component_test.go
@@ -8,9 +8,11 @@ import (
 	"fmt"
 	"path"
 	"reflect"
+	"runtime"
 	"slices"
 	"sort"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
@@ -18,6 +20,8 @@ import (
 	"github.com/kubescape/go-logger/helpers"
 	helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
 	"github.com/kubescape/k8s-interface/k8sinterface"
+	"github.com/kubescape/node-agent/pkg/signature"
+	"github.com/kubescape/node-agent/pkg/signature/profiles"
 	"github.com/kubescape/node-agent/pkg/utils"
 	"github.com/kubescape/node-agent/tests/testutils"
 	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
@@ -494,53 +498,246 @@ func Test_09_FalsePositiveTest(t *testing.T) {
 	assert.Equal(t, 0, len(alerts), "Expected no alerts to be generated, but got %d alerts", len(alerts))
 }
+// Test_10_MalwareDetectionTest tests crypto-miner detection from two angles:
+//   - malware_scan: ClamAV file-scanning detects the xmrig binary signature
+//   - empty_profile_rules: an empty user-defined AP means every exec/DNS is anomalous,
+//     so rule-based detection fires immediately without a learning period
 func Test_10_MalwareDetectionTest(t *testing.T) {
 	start := time.Now()
 	defer tearDownTest(t, start)
-	t.Log("Creating namespace")
-	ns := testutils.NewRandomNamespace()
+	// ---------------------------------------------------------------
+	// 10a. 
Malware file-scanning (ClamAV signature match) + // --------------------------------------------------------------- + t.Run("malware_scan", func(t *testing.T) { + ns := testutils.NewRandomNamespace() - t.Log("Deploy container with malware") - exitCode := testutils.RunCommand("kubectl", "run", "-n", ns.Name, "malware-cryptominer", "--image=quay.io/petr_ruzicka/malware-cryptominer-container:2.0.2") - require.Equalf(t, 0, exitCode, "expected no error when deploying malware container") + t.Log("Deploy container with malware") + exitCode := testutils.RunCommand("kubectl", "run", "-n", ns.Name, "malware-cryptominer", "--image=quay.io/petr_ruzicka/malware-cryptominer-container:2.0.2") + require.Equalf(t, 0, exitCode, "expected no error when deploying malware container") - // Wait for pod to be ready - exitCode = testutils.RunCommand("kubectl", "wait", "--for=condition=Ready", "pod", "malware-cryptominer", "-n", ns.Name, "--timeout=300s") - require.Equalf(t, 0, exitCode, "expected no error when waiting for pod to be ready") + exitCode = testutils.RunCommand("kubectl", "wait", "--for=condition=Ready", "pod", "malware-cryptominer", "-n", ns.Name, "--timeout=300s") + require.Equalf(t, 0, exitCode, "expected no error when waiting for pod to be ready") - // wait for application profile to be completed - time.Sleep(3 * time.Minute) + // Wait for application profile to be completed. + time.Sleep(3 * time.Minute) - _, _, err := testutils.ExecIntoPod("malware-cryptominer", ns.Name, []string{"ls", "-l", "/usr/share/nginx/html/xmrig"}, "") - require.NoErrorf(t, err, "expected no error when executing command in malware container") + _, _, err := testutils.ExecIntoPod("malware-cryptominer", ns.Name, []string{"ls", "-l", "/usr/share/nginx/html/xmrig"}, "") + require.NoErrorf(t, err, "expected no error when executing command in malware container") - _, _, err = testutils.ExecIntoPod("malware-cryptominer", ns.Name, []string{"/usr/share/nginx/html/xmrig/xmrig"}, "") + _, _, err = testutils.ExecIntoPod("malware-cryptominer", ns.Name, []string{"/usr/share/nginx/html/xmrig/xmrig"}, "") - // wait for the alerts to be generated - time.Sleep(20 * time.Second) + time.Sleep(20 * time.Second) - alerts, err := testutils.GetMalwareAlerts(ns.Name) - require.NoError(t, err, "Error getting alerts") + alerts, err := testutils.GetMalwareAlerts(ns.Name) + require.NoError(t, err, "Error getting alerts") - expectedMalwares := []string{ - "Multios.Coinminer.Miner-6781728-2.UNOFFICIAL", - } + expectedMalwares := []string{ + "Multios.Coinminer.Miner-6781728-2.UNOFFICIAL", + } - malwaresDetected := map[string]bool{} + malwaresDetected := map[string]bool{} + for _, alert := range alerts { + podName, podNameOk := alert.Labels["pod_name"] + malwareName, malwareNameOk := alert.Labels["malware_name"] + if podNameOk && malwareNameOk { + if podName == "malware-cryptominer" && slices.Contains(expectedMalwares, malwareName) { + malwaresDetected[malwareName] = true + } + } + } - for _, alert := range alerts { - podName, podNameOk := alert.Labels["pod_name"] - malwareName, malwareNameOk := alert.Labels["malware_name"] + assert.Equal(t, len(expectedMalwares), len(malwaresDetected), + "Expected %d malwares to be detected, but got %d", len(expectedMalwares), len(malwaresDetected)) + }) + + // --------------------------------------------------------------- + // 10b. Behavioral rule detection with empty user-defined AP. + // The miner starts immediately; because the AP declares nothing, + // every exec, DNS lookup, and network connection is anomalous. 
+ // + // Expected rules: + // R0001: Unexpected process launched (every exec) + // R0003: Syscalls Anomalies (empty syscall list) + // + // Rules that MAY fire depending on network conditions: + // R0005: DNS Anomalies (requires DNS responses with answers; + // trace_dns drops NXDOMAIN, so behind a firewall these + // won't arrive) + // R1008: Crypto Mining Domain Communication (same DNS dependency) + // R1009: Crypto Mining Related Port Communication (requires TCP + // connectivity to mining pool ports 3333/45700) + // R1007: Crypto miner launched via randomx (amd64 only) + // + // Race condition note: the node-agent fetches the user-defined AP + // from storage asynchronously after detecting the container. Events + // arriving before the fetch completes see profileExists=false, + // causing Required rules (R0001 etc.) to be skipped. The miner's + // initial exec happens during this window — so we must exec into + // the pod AFTER the profile is cached to generate observable exec + // events. + // --------------------------------------------------------------- + t.Run("empty_profile_rules", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + + // Create an ApplicationProfile with an empty container entry for k8s-miner. + // The container name must match the pod's container so + // GetContainerFromApplicationProfile finds it. With no execs, syscalls, + // opens, or capabilities listed, every operation is anomalous. + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crypto2", + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + {Name: "k8s-miner"}, + }, + }, + } + + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "create empty AP in storage") + + require.Eventually(t, func() bool { + _, getErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), "crypto2", v1.GetOptions{}) + return getErr == nil + }, 30*time.Second, 1*time.Second, "empty AP must be stored") - if podNameOk && malwareNameOk { - if podName == "malware-cryptominer" && slices.Contains(expectedMalwares, malwareName) { - malwaresDetected[malwareName] = true + // Deploy crypto miner with user-defined profile label. + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/crypto-miner-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + t.Log("Crypto miner pod is ready") + + // Wait for node-agent to fetch the user-defined AP from storage and + // cache it. The miner's initial execve races with this fetch, so + // R0001 is skipped for that event. Syscalls keep flowing, so R0003 + // fires once the profile is cached. + time.Sleep(20 * time.Second) + + // Exec into the pod to generate post-profile-load events: + // exec event → R0001 (cat not in empty AP) + // open event → R0002 (/etc/hostname starts with /etc/) + stdout, stderr, execErr := wl.ExecIntoPod([]string{"cat", "/etc/hostname"}, "k8s-miner") + t.Logf("exec cat /etc/hostname: err=%v stdout=%q stderr=%q", execErr, stdout, stderr) + + // Collect alerts — R0001 must appear from the exec above. 
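This poll-until-a-rule-fires loop recurs almost verbatim in Tests 29, 30 and 31 below. A sketch of a shared helper, assuming only the testutils.GetAlerts call and testify's require package that this file already imports (the helper name is invented):

func waitForRule(t *testing.T, ns, ruleID string, timeout time.Duration) []testutils.Alert {
	t.Helper()
	var alerts []testutils.Alert
	require.Eventually(t, func() bool {
		var err error
		alerts, err = testutils.GetAlerts(ns)
		if err != nil {
			return false
		}
		for _, a := range alerts {
			if a.Labels["rule_id"] == ruleID { // label keys as used throughout this file
				return true
			}
		}
		return false
	}, timeout, 10*time.Second, "expected %s alert in namespace %s", ruleID, ns)
	return alerts
}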
+ var alerts []testutils.Alert + require.Eventually(t, func() bool { + alerts, err = testutils.GetAlerts(ns.Name) + if err != nil || len(alerts) == 0 { + return false + } + for _, a := range alerts { + if a.Labels["rule_id"] == "R0001" { + return true + } } + return false + }, 120*time.Second, 10*time.Second, "expected R0001 alert from exec with empty AP") + + time.Sleep(15 * time.Second) + alerts, _ = testutils.GetAlerts(ns.Name) + + t.Logf("=== %d alerts ===", len(alerts)) + for i, a := range alerts { + t.Logf(" [%d] %s(%s) comm=%s container=%s", + i, a.Labels["rule_name"], a.Labels["rule_id"], + a.Labels["comm"], a.Labels["container_name"]) + } + + rulesSeen := map[string]bool{} + for _, a := range alerts { + rulesSeen[a.Labels["rule_id"]] = true + } + + // These rules must fire with an empty AP — every operation is anomalous. + assert.True(t, rulesSeen["R0001"], + "R0001 (Unexpected process launched) must fire — cat exec not in empty AP") + assert.True(t, rulesSeen["R0002"], + "R0002 (Files Access Anomalies) must fire — /etc/hostname not in empty AP opens") + assert.True(t, rulesSeen["R0003"], + "R0003 (Syscalls Anomalies) must fire — miner syscalls not in empty AP") + assert.True(t, rulesSeen["R0004"], + "R0004 (Linux Capabilities Anomalies) must fire — capabilities not in empty AP") + + // DNS/network rules depend on the miner resolving pool domains and + // establishing TCP connections. In sandboxed/firewalled environments + // these won't fire: trace_dns drops NXDOMAIN, and TCP to mining + // ports is blocked. Log what fired for visibility. + for _, entry := range []struct { + id, desc string + }{ + {"R0005", "DNS Anomalies"}, + {"R1007", "Crypto miner launched via randomx"}, + {"R1008", "Crypto Mining Domain Communication"}, + {"R1009", "Crypto Mining Related Port Communication"}, + } { + if rulesSeen[entry.id] { + t.Logf("%s (%s) fired", entry.id, entry.desc) + } + } + }) + + // --------------------------------------------------------------- + // 10c. RandomX detection (R1007) via xmrig benchmark mode. + // Uses --bench 1M which runs RandomX hashing without a pool + // connection, reliably triggering the x86 FPU tracepoint + // that the randomx eBPF gadget monitors. + // x86_64 (amd64) only — the gadget is disabled on arm64. + // --------------------------------------------------------------- + t.Run("randomx_bench", func(t *testing.T) { + if runtime.GOARCH != "amd64" { + t.Skip("randomx tracer is x86_64 only") + } + + ns := testutils.NewRandomNamespace() + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/crypto-miner-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + t.Log("xmrig benchmark pod is ready, waiting for RandomX FPU events...") + + // xmrig needs ~5s to init the RandomX dataset, then starts hashing. + // The eBPF gadget needs 5 FPU events within 5s to fire. + // Give it 30s total. 
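The trigger condition described above, five FPU events inside a trailing five-second window, is a plain sliding-window count. A minimal userspace sketch of that logic (type and method names are illustrative; the real check lives in the randomx eBPF gadget):

type fpuWindow struct {
	hits []time.Time // timestamps of recent FPU tracepoint hits
}

// observe records one event and reports whether five events landed
// inside the trailing five-second window.
func (w *fpuWindow) observe(now time.Time) bool {
	w.hits = append(w.hits, now)
	cutoff := now.Add(-5 * time.Second)
	for len(w.hits) > 0 && w.hits[0].Before(cutoff) {
		w.hits = w.hits[1:]
	}
	return len(w.hits) >= 5
}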
+ var alerts []testutils.Alert + require.Eventually(t, func() bool { + alerts, err = testutils.GetAlerts(ns.Name) + if err != nil || len(alerts) == 0 { + return false + } + for _, a := range alerts { + if a.Labels["rule_id"] == "R1007" { + return true + } + } + return false + }, 120*time.Second, 10*time.Second, "expected R1007 (RandomX crypto miner) from xmrig --bench") + + alerts, _ = testutils.GetAlerts(ns.Name) + t.Logf("=== %d alerts ===", len(alerts)) + for i, a := range alerts { + t.Logf(" [%d] %s(%s) comm=%s container=%s", + i, a.Labels["rule_name"], a.Labels["rule_id"], + a.Labels["comm"], a.Labels["container_name"]) + } + + rulesSeen := map[string]bool{} + for _, a := range alerts { + rulesSeen[a.Labels["rule_id"]] = true } - } - assert.Equal(t, len(expectedMalwares), len(malwaresDetected), "Expected %d malwares to be detected, but got %d malwares", len(expectedMalwares), len(malwaresDetected)) + assert.True(t, rulesSeen["R1007"], + "R1007 (Crypto miner launched via randomx) must fire — xmrig benchmark runs RandomX hashing") + }) } func Test_11_EndpointTest(t *testing.T) { @@ -1569,3 +1766,2036 @@ func Test_24_ProcessTreeDepthTest(t *testing.T) { t.Logf("Found alerts for the process tree depth: %v", alerts) } + +// Test_27_ApplicationProfileOpens tests that the dynamic path matching in +// application profiles works correctly for both recorded (auto-learned) +// profiles and user-defined profiles. +// +// Path matching symbols: +// +// ⋯ (U+22EF DynamicIdentifier) — matches exactly ONE path segment +// * (WildcardIdentifier) — matches ZERO or more path segments +// 0 (in endpoints) — wildcard port (any port) +// +// R0002 "Files Access Anomalies in container" fires when a file is opened +// under a monitored prefix (/etc/, /var/log/, …) and the path was NOT +// recorded in the application profile. +func Test_27_ApplicationProfileOpens(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + const ruleName = "Files Access Anomalies in container" + const profileName = "nginx-regex-profile" + + // --- result tracking for end-of-test summary --- + type subtestResult struct { + name string + profilePath string + filePath string + expectAlert bool + passed bool + detail string + } + var results []subtestResult + addResult := func(name, profilePath, filePath string, expectAlert, passed bool, detail string) { + results = append(results, subtestResult{name, profilePath, filePath, expectAlert, passed, detail}) + } + defer func() { + t.Log("\n========== Test_27 Summary ==========") + anyFailed := false + for _, r := range results { + status := "PASS" + if !r.passed { + status = "FAIL" + anyFailed = true + } + expect := "expect alert" + if !r.expectAlert { + expect = "expect NO alert" + } + t.Logf(" [%s] %-35s profile=%-25s file=%-25s %s", status, r.name, r.profilePath, r.filePath, expect) + if !r.passed { + t.Logf(" -> %s", r.detail) + } + } + if !anyFailed { + t.Log(" All subtests passed.") + } + t.Log("======================================") + }() + + // deployWithProfile creates a user-defined ApplicationProfile with the + // given Opens list, polls until it is retrievable from storage, then + // deploys nginx with the kubescape.io/user-defined-profile label + // pointing at it, and waits for the pod to be ready. 
+ deployWithProfile := func(t *testing.T, opens []v1beta1.OpenCalls) *testutils.TestWorkload { + t.Helper() + ns := testutils.NewRandomNamespace() + + profile := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: profileName, + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "nginx", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/cat", Args: []string{"/bin/cat"}}, + }, + Opens: opens, + }, + }, + }, + } + + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), profile, metav1.CreateOptions{}) + require.NoError(t, err, "create user-defined profile %q in ns %s", profileName, ns.Name) + + // Poll until the profile is retrievable from storage before deploying. + // Node-agent does a single fetch on container start with no retry. + require.Eventually(t, func() bool { + _, apErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), profileName, v1.GetOptions{}) + return apErr == nil + }, 30*time.Second, 1*time.Second, "AP must be retrievable from storage before deploying the pod") + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/nginx-user-profile-deployment.yaml")) + require.NoError(t, err, "create workload in ns %s", ns.Name) + require.NoError(t, wl.WaitForReady(80), "workload not ready in ns %s", ns.Name) + + // Wait for node-agent to load the user-defined profile into cache. + time.Sleep(10 * time.Second) + return wl + } + + // triggerAndGetAlerts execs cat on the given path, then polls for alerts + // up to 60s to avoid race conditions with alert propagation. + triggerAndGetAlerts := func(t *testing.T, wl *testutils.TestWorkload, filePath string) []testutils.Alert { + t.Helper() + stdout, stderr, err := wl.ExecIntoPod([]string{"cat", filePath}, "nginx") + if err != nil { + t.Errorf("exec 'cat %s' in container nginx failed: %v (stdout=%q stderr=%q)", filePath, err, stdout, stderr) + } + // Poll for alerts — they may take time to propagate through + // eBPF → node-agent → alertmanager. + var alerts []testutils.Alert + require.Eventually(t, func() bool { + alerts, err = testutils.GetAlerts(wl.Namespace) + return err == nil + }, 60*time.Second, 5*time.Second, "alerts must be retrievable from ns %s", wl.Namespace) + // Give extra time for all alerts to arrive after first successful fetch. + time.Sleep(10 * time.Second) + alerts, err = testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "get alerts from ns %s", wl.Namespace) + return alerts + } + + // hasAlert checks whether an R0002 alert exists for comm=cat, container=nginx. + hasAlert := func(alerts []testutils.Alert) bool { + for _, a := range alerts { + if a.Labels["rule_name"] == ruleName && + a.Labels["comm"] == "cat" && + a.Labels["container_name"] == "nginx" { + return true + } + } + return false + } + + // --------------------------------------------------------------- + // 1a. Recorded (auto-learned) profile must use absolute paths. + // There must be no "." in the Opens paths. 
+ // --------------------------------------------------------------- + t.Run("recorded_profile_absolute_paths", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/nginx-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + require.NoError(t, wl.WaitForApplicationProfileCompletion(80)) + + profile, err := wl.GetApplicationProfile() + require.NoError(t, err, "get application profile") + + passed := true + for _, container := range profile.Spec.Containers { + for _, open := range container.Opens { + if !strings.HasPrefix(open.Path, "/") { + t.Errorf("recorded path must be absolute: got %q (container %s)", open.Path, container.Name) + passed = false + } + if open.Path == "." { + t.Errorf("recorded path must not be relative dot: got %q (container %s)", open.Path, container.Name) + passed = false + } + } + } + detail := "" + if !passed { + detail = "found non-absolute or '.' paths in recorded profile" + } + addResult("recorded_profile_absolute_paths", "(auto-learned)", "(nginx startup)", false, passed, detail) + }) + + // --------------------------------------------------------------- + // 1b. User-defined profile wildcard tests. + // Each sub-test deploys nginx in its own namespace with a + // different Opens pattern and verifies R0002 behaviour. + // --------------------------------------------------------------- + + // 1b-1: Exact path — profile has the exact file => no alert. + t.Run("exact_path_match", func(t *testing.T) { + profilePath := "/etc/nginx/nginx.conf" + filePath := "/etc/nginx/nginx.conf" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + {Path: "/etc/ld.so.cache", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, // dynamic linker opens this on every exec + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if got { + t.Errorf("expected NO R0002 alert: profile allows %q, opened %q, but alert fired", profilePath, filePath) + } + addResult("exact_path_match", profilePath, filePath, false, !got, + fmt.Sprintf("got %d alerts, expected none for cat", len(alerts))) + }) + + // 1b-2: Exact path — profile has a DIFFERENT file => alert. + t.Run("exact_path_mismatch", func(t *testing.T) { + profilePath := "/etc/nginx/nginx.conf" + filePath := "/etc/hostname" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if !got { + t.Errorf("expected R0002 alert: profile only allows %q, opened %q, but no alert", profilePath, filePath) + } + addResult("exact_path_mismatch", profilePath, filePath, true, got, + fmt.Sprintf("got %d alerts, expected at least one for cat", len(alerts))) + }) + + // 1b-3: Ellipsis ⋯ matches single segment — /etc/⋯ covers /etc/hostname. 
+ t.Run("ellipsis_single_segment_match", func(t *testing.T) { + profilePath := "/etc/" + dynamicpathdetector.DynamicIdentifier + filePath := "/etc/hostname" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if got { + t.Errorf("expected NO R0002 alert: profile %q should match %q (single segment), but alert fired", profilePath, filePath) + } + addResult("ellipsis_single_segment_match", profilePath, filePath, false, !got, + fmt.Sprintf("got %d alerts, expected none for cat", len(alerts))) + }) + + // 1b-4: Ellipsis ⋯ rejects multi-segment — /etc/⋯ does NOT cover + // /etc/nginx/nginx.conf (two segments past /etc/). + t.Run("ellipsis_rejects_multi_segment", func(t *testing.T) { + profilePath := "/etc/" + dynamicpathdetector.DynamicIdentifier + filePath := "/etc/nginx/nginx.conf" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if !got { + t.Errorf("expected R0002 alert: profile %q should NOT match %q (two segments), but no alert", profilePath, filePath) + } + addResult("ellipsis_rejects_multi_segment", profilePath, filePath, true, got, + fmt.Sprintf("got %d alerts, expected at least one for cat", len(alerts))) + }) + + // 1b-5: Wildcard * matches any depth — /etc/* covers /etc/nginx/nginx.conf. + t.Run("wildcard_matches_deep_path", func(t *testing.T) { + profilePath := "/etc/*" + filePath := "/etc/nginx/nginx.conf" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if got { + t.Errorf("expected NO R0002 alert: profile %q should match %q (wildcard), but alert fired", profilePath, filePath) + } + addResult("wildcard_matches_deep_path", profilePath, filePath, false, !got, + fmt.Sprintf("got %d alerts, expected none for cat", len(alerts))) + }) + + // --------------------------------------------------------------- + // 1c. Deploy known-application-profile-wildcards.yaml (curl image) + // and verify that files under wildcard-covered opens paths + // produce no R0002 alert. + // --------------------------------------------------------------- + t.Run("wildcard_yaml_profile_allowed_opens", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + wildcardProfileName := "fusioncore-profile-wildcards" + + // Create the profile matching known-application-profile-wildcards.yaml. 
+ profile := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: wildcardProfileName, + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + ImageID: "docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058", + ImageTag: "docker.io/curlimages/curl:8.5.0", + Capabilities: []string{ + "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", + "CAP_SETGID", "CAP_SETPCAP", "CAP_SETUID", "CAP_SYS_ADMIN", + }, + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep", Args: []string{"/bin/sleep", "infinity"}}, + {Path: "/bin/cat", Args: []string{"/bin/cat"}}, + {Path: "/usr/bin/curl", Args: []string{"/usr/bin/curl", "-sm2", "fusioncore.ai"}}, + }, + Opens: []v1beta1.OpenCalls{ + {Path: "/etc/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/etc/ssl/openssl.cnf", Flags: []string{"O_RDONLY", "O_LARGEFILE"}}, + {Path: "/home/*", Flags: []string{"O_RDONLY", "O_LARGEFILE"}}, + {Path: "/lib/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/usr/lib/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/usr/local/lib/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/proc/*/cgroup", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/proc/*/kernel/cap_last_cap", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/proc/*/mountinfo", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/proc/*/task/*/fd", Flags: []string{"O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"}}, + {Path: "/sys/fs/cgroup/cpu.max", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", Flags: []string{"O_RDONLY"}}, + {Path: "/7/setgroups", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/runc", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + }, + Syscalls: []string{ + "arch_prctl", "bind", "brk", "capget", "capset", "chdir", + "clone", "close", "close_range", "connect", "epoll_ctl", + "epoll_pwait", "execve", "exit", "exit_group", "faccessat2", + "fchown", "fcntl", "fstat", "fstatfs", "futex", "getcwd", + "getdents64", "getegid", "geteuid", "getgid", "getpeername", + "getppid", "getsockname", "getsockopt", "gettid", "getuid", + "ioctl", "membarrier", "mmap", "mprotect", "munmap", + "nanosleep", "newfstatat", "open", "openat", "openat2", + "pipe", "poll", "prctl", "read", "recvfrom", "recvmsg", + "rt_sigaction", "rt_sigprocmask", "rt_sigreturn", "sendto", + "set_tid_address", "setgid", "setgroups", "setsockopt", + "setuid", "sigaltstack", "socket", "statx", "tkill", + "unknown", "write", "writev", + }, + }, + }, + }, + } + + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), profile, metav1.CreateOptions{}) + require.NoError(t, err, "create wildcard profile %q in ns %s", wildcardProfileName, ns.Name) + + // Poll until the profile is retrievable from storage before deploying. 
+ require.Eventually(t, func() bool { + _, apErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), wildcardProfileName, v1.GetOptions{}) + return apErr == nil + }, 30*time.Second, 1*time.Second, "AP must be retrievable before deploying the pod") + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/curl-user-profile-wildcards-deployment.yaml")) + require.NoError(t, err, "create curl workload in ns %s", ns.Name) + require.NoError(t, wl.WaitForReady(80), "curl workload not ready in ns %s", ns.Name) + + // Wait for node-agent to load the user-defined profile into cache. + time.Sleep(10 * time.Second) + + // Cat files that are covered by the wildcard opens. + allowedFiles := []string{ + "/etc/hosts", // covered by /etc/* + "/etc/resolv.conf", // covered by /etc/* + "/etc/ssl/openssl.cnf", // exact match + } + for _, f := range allowedFiles { + stdout, stderr, err := wl.ExecIntoPod([]string{"cat", f}, "curl") + if err != nil { + t.Logf("exec 'cat %s' failed: %v (stdout=%q stderr=%q)", f, err, stdout, stderr) + } + } + + // Give alerts time to propagate. + time.Sleep(15 * time.Second) + alerts, err := testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "get alerts from ns %s", wl.Namespace) + + var r0002Fired bool + for _, a := range alerts { + if a.Labels["rule_name"] == ruleName && + a.Labels["comm"] == "cat" && + a.Labels["container_name"] == "curl" { + r0002Fired = true + break + } + } + if r0002Fired { + t.Errorf("expected NO R0002 for files covered by wildcard opens, but alert fired") + } + addResult("wildcard_yaml_profile_allowed_opens", + "/etc/*, /etc/ssl/openssl.cnf", "/etc/hosts, /etc/resolv.conf, /etc/ssl/openssl.cnf", + false, !r0002Fired, + fmt.Sprintf("got R0002=%v, expected none for wildcard-covered files", r0002Fired)) + }) +} + +// Test_28_UserDefinedNetworkNeighborhood exercises user-defined AP + NN. +// Each subtest gets its own namespace to avoid alert cross-contamination. +// +// The NN allows only fusioncore.ai (162.0.217.171) on TCP/80. +// R0005 requires real resolvable domains (not NXDOMAIN), because trace_dns +// drops DNS responses with 0 answers. +func Test_28_UserDefinedNetworkNeighborhood(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + // setup creates a namespace with user-defined AP + NN + pod. + // The NN allows only fusioncore.ai (162.0.217.171) on TCP/80. + setup := func(t *testing.T) *testutils.TestWorkload { + t.Helper() + ns := testutils.NewRandomNamespace() + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + + // Upstream ContainerProfileCache (kubescape/node-agent#788) reads ONE + // pod label `kubescape.io/user-defined-profile=<name>` and uses <name> + // as the lookup key for BOTH the user AP and the user NN. + // AP and NN MUST therefore share that single name.
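The coupling described above reduces to one label read feeding two lookups. A sketch of that contract, assuming the storage client calls already used in this file (cache internals are hypothetical; the shared-name requirement is the point):

name := pod.Labels["kubescape.io/user-defined-profile"]
ap, apErr := storageClient.ApplicationProfiles(ns).Get(ctx, name, v1.GetOptions{})
nn, nnErr := storageClient.NetworkNeighborhoods(ns).Get(ctx, name, v1.GetOptions{})
// One label value keys both fetches: if the AP and NN are stored under
// different names, one of the two lookups silently misses and that half
// of the baseline never loads.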
+ const overlayName = "curl-28-overlay" + + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: overlayName, + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + {Path: "/usr/bin/curl"}, + {Path: "/usr/bin/nslookup"}, + {Path: "/usr/bin/wget"}, + }, + Syscalls: []string{"socket", "connect", "sendto", "recvfrom", "read", "write", "close", "openat", "mmap", "mprotect", "munmap", "fcntl", "ioctl", "poll", "epoll_create1", "epoll_ctl", "epoll_wait", "bind", "listen", "accept4", "getsockopt", "setsockopt", "getsockname", "getpid", "fstat", "rt_sigaction", "rt_sigprocmask", "writev"}, + }, + }, + }, + } + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "create AP") + + nn := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{ + Name: overlayName, + Namespace: ns.Name, + Annotations: map[string]string{ + helpersv1.ManagedByMetadataKey: helpersv1.ManagedByUserValue, + helpersv1.StatusMetadataKey: helpersv1.Completed, + helpersv1.CompletionMetadataKey: helpersv1.Full, + }, + Labels: map[string]string{ + helpersv1.ApiGroupMetadataKey: "apps", + helpersv1.ApiVersionMetadataKey: "v1", + helpersv1.RelatedKindMetadataKey: "Deployment", + helpersv1.RelatedNameMetadataKey: "curl-28", + helpersv1.RelatedNamespaceMetadataKey: ns.Name, + }, + }, + Spec: v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "curl-28"}, + }, + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "curl", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "fusioncore-egress", + Type: "external", + DNS: "fusioncore.ai.", + DNSNames: []string{"fusioncore.ai."}, + IPAddress: "162.0.217.171", + Ports: []v1beta1.NetworkPort{ + {Name: "TCP-80", Protocol: "TCP", Port: ptr.To(int32(80))}, + }, + }, + }, + }, + }, + }, + } + _, err = storageClient.NetworkNeighborhoods(ns.Name).Create( + context.Background(), nn, metav1.CreateOptions{}) + require.NoError(t, err, "create NN") + + require.Eventually(t, func() bool { + _, apErr := storageClient.ApplicationProfiles(ns.Name).Get(context.Background(), overlayName, v1.GetOptions{}) + _, nnErr := storageClient.NetworkNeighborhoods(ns.Name).Get(context.Background(), overlayName, v1.GetOptions{}) + return apErr == nil && nnErr == nil + }, 30*time.Second, 1*time.Second, "AP+NN must be in storage before pod deploy") + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/nginx-user-defined-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + // Cache-load latency on the upstream ContainerProfileCache is bursty + // — 15s is enough on a quiet runner but not on a loaded one. The + // failure mode is alert metadata `errorMessage:"waiting for profile + // update"`, which means the rule manager evaluated against an + // unloaded NN and fired R0005/R0011 spuriously. 30s covers the + // observed worst-case in CI without pushing total test time too + // far. Real fix would be to poll a cache-loaded signal, but no + // such signal is exposed today. 
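If node-agent ever exposes such a signal, the blind sleep below could become a poll. Purely illustrative: nodeAgentReportsProfileLoaded does not exist today and stands in for whatever readiness endpoint might be added:

require.Eventually(t, func() bool {
	// hypothetical: ask node-agent which user-defined profiles it has
	// projected into the rule manager's cache
	return nodeAgentReportsProfileLoaded(ns.Name, overlayName)
}, 60*time.Second, 2*time.Second, "profile must be cached before events are generated")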
+ time.Sleep(30 * time.Second) + return wl + } + + countByRule := func(alerts []testutils.Alert, ruleID string) int { + n := 0 + for _, a := range alerts { + if a.Labels["rule_id"] == ruleID { + n++ + } + } + return n + } + + waitAlerts := func(t *testing.T, ns string) []testutils.Alert { + t.Helper() + var alerts []testutils.Alert + var err error + require.Eventually(t, func() bool { + alerts, err = testutils.GetAlerts(ns) + return err == nil + }, 60*time.Second, 5*time.Second, "must be able to fetch alerts") + // Extra settle time for remaining alerts. + time.Sleep(10 * time.Second) + alerts, _ = testutils.GetAlerts(ns) + return alerts + } + + logAlerts := func(t *testing.T, alerts []testutils.Alert) { + t.Helper() + for i, a := range alerts { + t.Logf(" [%d] %s(%s) comm=%s container=%s", + i, a.Labels["rule_name"], a.Labels["rule_id"], + a.Labels["comm"], a.Labels["container_name"]) + } + } + + // --------------------------------------------------------------- + // 28a. Allowed traffic — fusioncore.ai is in the NN. + // No R0005 (DNS) and no R0011 (egress) expected. + // --------------------------------------------------------------- + t.Run("allowed_fusioncore_no_alert", func(t *testing.T) { + wl := setup(t) + + // DNS lookup via nslookup (domain in NN). + stdout, stderr, err := wl.ExecIntoPod([]string{"nslookup", "fusioncore.ai"}, "curl") + t.Logf("nslookup fusioncore.ai → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + // HTTP via curl (domain + IP in NN). + stdout, stderr, err = wl.ExecIntoPod([]string{"curl", "-sm5", "http://fusioncore.ai"}, "curl") + t.Logf("curl fusioncore.ai → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + assert.Equal(t, 0, countByRule(alerts, "R0005"), + "fusioncore.ai is in NN — should NOT fire R0005") + assert.Equal(t, 0, countByRule(alerts, "R0011"), + "fusioncore.ai IP is in NN — should NOT fire R0011") + }) + + // --------------------------------------------------------------- + // 28b. Unknown domains — domains NOT in the NN → R0005. + // Uses both nslookup (pure DNS) and curl (DNS + TCP). + // --------------------------------------------------------------- + t.Run("unknown_domain_R0005", func(t *testing.T) { + wl := setup(t) + + // nslookup generates a DNS query without any TCP connection. + wl.ExecIntoPod([]string{"nslookup", "google.com"}, "curl") + // curl resolves + connects. + wl.ExecIntoPod([]string{"curl", "-sm5", "http://ebpf.io"}, "curl") + wl.ExecIntoPod([]string{"curl", "-sm5", "http://cloudflare.com"}, "curl") + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + require.Greater(t, countByRule(alerts, "R0005"), 0, + "unknown domains must fire R0005") + }) + + // --------------------------------------------------------------- + // 28c. Unknown IPs — raw IP egress NOT in the NN → R0011. + // --------------------------------------------------------------- + t.Run("unknown_ip_R0011", func(t *testing.T) { + wl := setup(t) + + wl.ExecIntoPod([]string{"curl", "-sm5", "http://8.8.8.8"}, "curl") + wl.ExecIntoPod([]string{"curl", "-sm5", "http://1.1.1.1"}, "curl") + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + require.Greater(t, countByRule(alerts, "R0011"), 0, + "IPs not in NN must fire R0011") + }) + + // --------------------------------------------------------------- + // 28d. MITM — DNS spoofing simulation. 
+ // fusioncore.ai is an allowed domain but the IP is spoofed. + // + // Step 1: nslookup fusioncore.ai (legitimate DNS, no alert). + // Step 2: curl --resolve fusioncore.ai:80:8.8.4.4 + // Simulates a DNS MITM returning a different IP. + // The domain is allowed but the connection goes to + // 8.8.4.4 (not 162.0.217.171) → R0011. + // --------------------------------------------------------------- + t.Run("mitm_spoofed_ip_R0011", func(t *testing.T) { + wl := setup(t) + + // Step 1: Legitimate DNS lookup — no alert expected. + wl.ExecIntoPod([]string{"nslookup", "fusioncore.ai"}, "curl") + + // Step 2: MITM — domain resolves to spoofed IP 8.8.4.4. + // curl --resolve skips DNS and connects directly to the + // spoofed IP, simulating what happens after DNS poisoning. + stdout, stderr, err := wl.ExecIntoPod( + []string{"curl", "-sm5", "--resolve", "fusioncore.ai:80:8.8.4.4", "http://fusioncore.ai"}, "curl") + t.Logf("curl MITM → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + require.Greater(t, countByRule(alerts, "R0011"), 0, + "MITM: fusioncore.ai allowed but spoofed IP 8.8.4.4 must fire R0011") + }) + + // --------------------------------------------------------------- + // 28e. MITM — real CoreDNS poisoning via template plugin. + // Poisons CoreDNS so fusioncore.ai resolves to 8.8.4.4 + // instead of the legitimate 162.0.217.171. + // + // nslookup triggers the poisoned DNS response. + // R0005 does NOT fire: fusioncore.ai is in the NN egress + // list and BusyBox nslookup does NOT do PTR reverse-lookups. + // R0011 does NOT fire: no TCP egress (DNS is UDP to cluster + // DNS which is a private IP filtered by is_private_ip). + // + // This documents a detection gap: pure DNS MITM (without + // subsequent TCP to the spoofed IP) is invisible to both + // R0005 and R0011 when the domain is already whitelisted. + // + // NOTE: this subtest MUST run last — it modifies the + // cluster-wide CoreDNS configmap. + // --------------------------------------------------------------- + t.Run("mitm_coredns_poisoning", func(t *testing.T) { + wl := setup(t) + ctx := context.Background() + k8sClient := k8sinterface.NewKubernetesApi() + + // ── Back up original CoreDNS Corefile ── + cm, err := k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + require.NoError(t, err, "get coredns configmap") + originalCorefile := cm.Data["Corefile"] + + restartAndWaitCoreDNS := func() { + deploy, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + require.NoError(t, err, "get coredns deployment") + if deploy.Spec.Template.ObjectMeta.Annotations == nil { + deploy.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + deploy.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + _, err = k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Update(ctx, deploy, metav1.UpdateOptions{}) + require.NoError(t, err, "restart coredns") + + require.Eventually(t, func() bool { + d, err := k8sClient.KubernetesClient.AppsV1(). 
+ Deployments("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil || d.Spec.Replicas == nil { + return false + } + return d.Status.ReadyReplicas == *d.Spec.Replicas && + d.Status.UpdatedReplicas == *d.Spec.Replicas + }, 60*time.Second, 2*time.Second, "coredns must become ready") + } + + // ── Restore CoreDNS on cleanup (best-effort) ── + t.Cleanup(func() { + t.Log("cleanup: restoring CoreDNS Corefile") + cm, err := k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil { + t.Logf("cleanup: get coredns cm: %v", err) + return + } + cm.Data["Corefile"] = originalCorefile + if _, err := k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}); err != nil { + t.Logf("cleanup: update coredns cm: %v", err) + return + } + deploy, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil { + t.Logf("cleanup: get coredns deploy: %v", err) + return + } + if deploy.Spec.Template.ObjectMeta.Annotations == nil { + deploy.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + deploy.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + if _, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Update(ctx, deploy, metav1.UpdateOptions{}); err != nil { + t.Logf("cleanup: restart coredns: %v", err) + } + }) + + // ── Poison CoreDNS: fusioncore.ai → 8.8.4.4 ── + poisoned := strings.Replace(originalCorefile, + "forward .", + "template IN A fusioncore.ai {\n answer \"fusioncore.ai. 60 IN A 8.8.4.4\"\n fallthrough\n }\n forward .", + 1) + require.NotEqual(t, originalCorefile, poisoned, "template injection must modify Corefile") + + cm.Data["Corefile"] = poisoned + _, err = k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}) + require.NoError(t, err, "apply poisoned Corefile") + restartAndWaitCoreDNS() + + // Verify poisoned DNS returns the spoofed IP. + require.Eventually(t, func() bool { + stdout, _, _ := wl.ExecIntoPod([]string{"nslookup", "fusioncore.ai"}, "curl") + return strings.Contains(stdout, "8.8.4.4") + }, 30*time.Second, 3*time.Second, "poisoned CoreDNS must return 8.8.4.4 for fusioncore.ai") + + // ── Trigger alerts ── + // nslookup does DNS only (no TCP egress). + // BusyBox nslookup does NOT do PTR reverse-lookups on result IPs. + stdout, stderr, err := wl.ExecIntoPod([]string{"nslookup", "fusioncore.ai"}, "curl") + t.Logf("nslookup (poisoned) → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + // R0005 does NOT fire: fusioncore.ai is already in the NN + // egress list, and BusyBox nslookup does NOT perform PTR + // reverse-lookups on result IPs, so no unknown domain is queried. + assert.Equal(t, 0, countByRule(alerts, "R0005"), + "DNS MITM: domain is in NN and no PTR lookup — R0005 should not fire") + + // R0011 does NOT fire: nslookup generates only DNS (UDP) + // traffic to the cluster DNS service, which is a private IP + // excluded by is_private_ip(). + assert.Equal(t, 0, countByRule(alerts, "R0011"), + "DNS MITM: nslookup has no TCP egress — R0011 should not fire") + }) + + // --------------------------------------------------------------- + // 28f. MITM — CoreDNS poisoning with TCP egress. 
+ // Same CoreDNS poisoning as 28e, but now fusioncore.ai + // resolves to 128.130.194.56 (a routable IP that accepts + // TCP on port 80). curl generates a real TCP connection + // to the spoofed IP. + // + // Expected: + // R0005 = 0 — domain is in NN, no PTR reverse-lookup. + // R0011 fires — TCP egress to 128.130.194.56 which is + // NOT in the NN (NN only has 162.0.217.171). + // + // NOTE: runs after 28e; modifies cluster-wide CoreDNS. + // --------------------------------------------------------------- + t.Run("mitm_coredns_poisoning_tcp", func(t *testing.T) { + wl := setup(t) + ctx := context.Background() + k8sClient := k8sinterface.NewKubernetesApi() + + // ── Back up original CoreDNS Corefile ── + cm, err := k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + require.NoError(t, err, "get coredns configmap") + originalCorefile := cm.Data["Corefile"] + + restartAndWaitCoreDNS := func() { + deploy, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + require.NoError(t, err, "get coredns deployment") + if deploy.Spec.Template.ObjectMeta.Annotations == nil { + deploy.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + deploy.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + _, err = k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Update(ctx, deploy, metav1.UpdateOptions{}) + require.NoError(t, err, "restart coredns") + + require.Eventually(t, func() bool { + d, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil || d.Spec.Replicas == nil { + return false + } + return d.Status.ReadyReplicas == *d.Spec.Replicas && + d.Status.UpdatedReplicas == *d.Spec.Replicas + }, 60*time.Second, 2*time.Second, "coredns must become ready") + } + + // ── Restore CoreDNS on cleanup (best-effort) ── + t.Cleanup(func() { + t.Log("cleanup: restoring CoreDNS Corefile") + cm, err := k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil { + t.Logf("cleanup: get coredns cm: %v", err) + return + } + cm.Data["Corefile"] = originalCorefile + if _, err := k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}); err != nil { + t.Logf("cleanup: update coredns cm: %v", err) + return + } + deploy, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil { + t.Logf("cleanup: get coredns deploy: %v", err) + return + } + if deploy.Spec.Template.ObjectMeta.Annotations == nil { + deploy.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + deploy.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + if _, err := k8sClient.KubernetesClient.AppsV1(). + Deployments("kube-system").Update(ctx, deploy, metav1.UpdateOptions{}); err != nil { + t.Logf("cleanup: restart coredns: %v", err) + } + }) + + // ── Poison CoreDNS: fusioncore.ai → 128.130.194.56 ── + poisoned := strings.Replace(originalCorefile, + "forward .", + "template IN A fusioncore.ai {\n answer \"fusioncore.ai. 
60 IN A 128.130.194.56\"\n fallthrough\n }\n forward .", + 1) + require.NotEqual(t, originalCorefile, poisoned, "template injection must modify Corefile") + + cm.Data["Corefile"] = poisoned + _, err = k8sClient.KubernetesClient.CoreV1(). + ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}) + require.NoError(t, err, "apply poisoned Corefile") + restartAndWaitCoreDNS() + + // Verify poisoned DNS returns the spoofed IP. + require.Eventually(t, func() bool { + stdout, _, _ := wl.ExecIntoPod([]string{"nslookup", "fusioncore.ai"}, "curl") + return strings.Contains(stdout, "128.130.194.56") + }, 30*time.Second, 3*time.Second, "poisoned CoreDNS must return 128.130.194.56 for fusioncore.ai") + + // ── Trigger alerts ── + // curl resolves fusioncore.ai → 128.130.194.56 (poisoned) + // then opens a TCP connection to 128.130.194.56:80. + stdout, stderr, err := wl.ExecIntoPod( + []string{"curl", "-sm5", "http://fusioncore.ai"}, "curl") + t.Logf("curl (poisoned DNS) → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + // R0005 does NOT fire: fusioncore.ai is already in the NN + // egress list, and curl (like BusyBox nslookup) does NOT + // perform PTR reverse-lookups on resolved IPs. + assert.Equal(t, 0, countByRule(alerts, "R0005"), + "DNS MITM: domain is in NN and no PTR lookup — R0005 should not fire") + + // R0011 fires: TCP egress to 128.130.194.56 which is NOT + // in the NN (NN only allows 162.0.217.171). + require.Greater(t, countByRule(alerts, "R0011"), 0, + "DNS MITM: TCP to spoofed IP 128.130.194.56 must fire R0011") + }) +} + +// Test_29_SignedApplicationProfile verifies that a cryptographically signed +// ApplicationProfile can be pushed to storage, loaded by node-agent, and +// used for anomaly detection just like any other user-defined profile. +// +// The test signs an AP with key-based ECDSA (no OIDC/Sigstore needed), +// pushes it to storage, verifies the signature survives the round-trip, +// deploys a pod referencing the signed profile, and asserts that executing +// a binary NOT in the profile fires R0001 (Unexpected process launched). +func Test_29_SignedApplicationProfile(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + ns := testutils.NewRandomNamespace() + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + + // ── 1. Build the ApplicationProfile ── + // Use nil (not empty slices) for unused fields — storage normalizes + // []string{} → nil on save, which changes the content hash. + // Matching the storage representation ensures the signature survives + // the round-trip (same approach as cluster_flow_test.go). + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "signed-ap", + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + {Path: "/usr/bin/curl"}, + }, + Syscalls: []string{"close", "connect", "openat", "read", "socket", "write"}, + }, + }, + }, + } + + // ── 2. Sign the AP (key-based, no OIDC) ── + adapter := profiles.NewApplicationProfileAdapter(ap) + err := signature.SignObjectDisableKeyless(adapter) + require.NoError(t, err, "sign AP") + require.True(t, signature.IsSigned(adapter), "AP must be signed") + + // Verify signature locally. 
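The nil-versus-empty-slice caveat from step 1 is visible at the serialisation layer: encoding/json emits different bytes for the two, so a digest over the stored object drifts from the digest that was signed. A minimal standard-library illustration:

b1, _ := json.Marshal(struct{ Syscalls []string }{Syscalls: []string{}})
b2, _ := json.Marshal(struct{ Syscalls []string }{Syscalls: nil})
fmt.Println(string(b1)) // {"Syscalls":[]}
fmt.Println(string(b2)) // {"Syscalls":null}
// Different bytes mean a different content hash, so a signature made
// before normalisation no longer verifies afterwards.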
+ require.NoError(t, signature.VerifyObjectAllowUntrusted(adapter), + "signature must verify immediately after signing") + + sig, err := signature.GetObjectSignature(adapter) + require.NoError(t, err, "extract signature") + require.NotEmpty(t, sig.Signature, "signature bytes must not be empty") + require.NotEmpty(t, sig.Certificate, "certificate must not be empty") + t.Logf("AP signed: issuer=%s identity=%s sigLen=%d", sig.Issuer, sig.Identity, len(sig.Signature)) + + // ── 3. Push signed AP to storage ── + // Create preserves annotations (including signature.*). + _, err = storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "create signed AP in storage") + + // ── 4. Verify signature survives the storage round-trip ── + require.Eventually(t, func() bool { + stored, getErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), "signed-ap", v1.GetOptions{}) + if getErr != nil { + return false + } + return signature.IsSigned(profiles.NewApplicationProfileAdapter(stored)) + }, 30*time.Second, 1*time.Second, "stored AP must retain signature annotations") + + storedAP, err := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), "signed-ap", v1.GetOptions{}) + require.NoError(t, err) + storedAdapter := profiles.NewApplicationProfileAdapter(storedAP) + err = signature.VerifyObjectAllowUntrusted(storedAdapter) + require.NoError(t, err, "stored AP signature must still verify after round-trip") + t.Log("Signature round-trip verification passed") + + // ── 6. Deploy pod referencing the signed profile ── + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/curl-signed-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + time.Sleep(15 * time.Second) // let node-agent load the profile + + // ── 7. Exec an allowed binary — should NOT fire R0001 ── + stdout, stderr, execErr := wl.ExecIntoPod([]string{"curl", "-sm5", "http://ebpf.io"}, "curl") + t.Logf("curl (allowed) → err=%v stdout=%q stderr=%q", execErr, stdout, stderr) + + // ── 8. Exec an anomalous binary — should fire R0001 ── + // The user-defined profile may not be cached yet when the first exec runs. + // Re-exec nslookup on each poll so the eBPF event is generated after + // the profile is loaded (same race as the crypto miner test). + stdout, stderr, execErr = wl.ExecIntoPod([]string{"nslookup", "ebpf.io"}, "curl") + t.Logf("nslookup (anomalous) → err=%v stdout=%q stderr=%q", execErr, stdout, stderr) + + // ── 9. Wait for R0001 alert ── + var alerts []testutils.Alert + require.Eventually(t, func() bool { + // Re-exec on each poll to ensure the event arrives after the profile is cached. + wl.ExecIntoPod([]string{"nslookup", "ebpf.io"}, "curl") + + alerts, err = testutils.GetAlerts(ns.Name) + if err != nil || len(alerts) == 0 { + return false + } + for _, a := range alerts { + if a.Labels["rule_id"] == "R0001" { + return true + } + } + return false + }, 120*time.Second, 10*time.Second, "nslookup is not in signed AP — must fire R0001") + + // Extra settle time. + time.Sleep(10 * time.Second) + alerts, _ = testutils.GetAlerts(ns.Name) + + t.Logf("=== %d alerts ===", len(alerts)) + for i, a := range alerts { + t.Logf(" [%d] %s(%s) comm=%s container=%s", + i, a.Labels["rule_name"], a.Labels["rule_id"], + a.Labels["comm"], a.Labels["container_name"]) + } + + // R0001 must have fired for the anomalous exec. 
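Steps 8 and 9 re-drive the exec inside the polling loop so the eBPF event is regenerated after the profile cache loads. The pattern generalises; a hypothetical helper built on the same calls this test already uses:

driveUntilRule := func(t *testing.T, wl *testutils.TestWorkload, cmd []string, container, ns, ruleID string) {
	t.Helper()
	require.Eventually(t, func() bool {
		wl.ExecIntoPod(cmd, container) // regenerate the event on every poll
		alerts, err := testutils.GetAlerts(ns)
		if err != nil {
			return false
		}
		for _, a := range alerts {
			if a.Labels["rule_id"] == ruleID {
				return true
			}
		}
		return false
	}, 120*time.Second, 10*time.Second, "rule %s must fire for %v", ruleID, cmd)
}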
+ r0001Count := 0 + for _, a := range alerts { + if a.Labels["rule_id"] == "R0001" { + r0001Count++ + } + } + require.Greater(t, r0001Count, 0, "nslookup not in signed AP must fire R0001") +} + +// Test_30_TamperedSignedProfiles verifies that cryptographic signature +// verification detects tampering of both ApplicationProfile and +// NetworkNeighborhood objects. +// +// Current state of enforcement (as of merge): +// - enableSignatureVerification defaults to false +// - When enabled: tampered profiles are silently SKIPPED (not loaded) +// - No R-number rule fires on signature verification failure +// - User-defined NNs in addContainer() are NOT verified (known gap) +// - System fails open: no profile → no anomaly baseline → no detection +// +// This test proves: +// - The crypto layer detects tampering (sign → tamper → verify fails) +// - Without enforcement, tampered profiles are loaded and used +func Test_30_TamperedSignedProfiles(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + // --------------------------------------------------------------- + // 30a. Tamper detection at the crypto layer — AP and NN. + // Sign both objects, tamper their specs, verify fails. + // --------------------------------------------------------------- + t.Run("tamper_invalidates_signature", func(t *testing.T) { + // ── ApplicationProfile ── + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tamper-test-ap", + Namespace: "tamper-test-ns", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "app", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + {Path: "/usr/bin/curl"}, + }, + Syscalls: []string{"read", "write", "close"}, + }, + }, + }, + } + + apAdapter := profiles.NewApplicationProfileAdapter(ap) + require.NoError(t, signature.SignObjectDisableKeyless(apAdapter), "sign AP") + require.True(t, signature.IsSigned(apAdapter)) + require.NoError(t, signature.VerifyObjectAllowUntrusted(apAdapter), "untampered AP must verify") + + // Tamper: attacker adds nslookup to whitelist + ap.Spec.Containers[0].Execs = append(ap.Spec.Containers[0].Execs, + v1beta1.ExecCalls{Path: "/usr/bin/nslookup"}) + + tamperedAPAdapter := profiles.NewApplicationProfileAdapter(ap) + err := signature.VerifyObjectAllowUntrusted(tamperedAPAdapter) + require.Error(t, err, "tampered AP must fail verification") + t.Logf("AP tamper detected: %v", err) + + // ── NetworkNeighborhood ── + nn := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tamper-test-nn", + Namespace: "tamper-test-ns", + Annotations: map[string]string{ + helpersv1.ManagedByMetadataKey: helpersv1.ManagedByUserValue, + helpersv1.StatusMetadataKey: helpersv1.Completed, + helpersv1.CompletionMetadataKey: helpersv1.Full, + }, + Labels: map[string]string{ + helpersv1.RelatedKindMetadataKey: "Deployment", + helpersv1.RelatedNameMetadataKey: "tamper-test", + }, + }, + Spec: v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "tamper-test"}, + }, + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "app", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "allowed-egress", + Type: "external", + DNS: "fusioncore.ai.", + DNSNames: []string{"fusioncore.ai."}, + IPAddress: "162.0.217.171", + Ports: []v1beta1.NetworkPort{ + {Name: "TCP-80", Protocol: "TCP", Port: ptr.To(int32(80))}, + }, + }, + }, + }, + }, + }, + } + + nnAdapter := profiles.NewNetworkNeighborhoodAdapter(nn) + 
require.NoError(t, signature.SignObjectDisableKeyless(nnAdapter), "sign NN") + require.True(t, signature.IsSigned(nnAdapter)) + require.NoError(t, signature.VerifyObjectAllowUntrusted(nnAdapter), "untampered NN must verify") + + // Tamper: attacker adds a C2 domain to the egress whitelist + nn.Spec.Containers[0].Egress = append(nn.Spec.Containers[0].Egress, + v1beta1.NetworkNeighbor{ + Identifier: "c2-backdoor", + Type: "external", + DNS: "evil-c2.example.com.", + DNSNames: []string{"evil-c2.example.com."}, + IPAddress: "6.6.6.6", + Ports: []v1beta1.NetworkPort{ + {Name: "TCP-443", Protocol: "TCP", Port: ptr.To(int32(443))}, + }, + }) + + tamperedNNAdapter := profiles.NewNetworkNeighborhoodAdapter(nn) + err = signature.VerifyObjectAllowUntrusted(tamperedNNAdapter) + require.Error(t, err, "tampered NN must fail verification") + t.Logf("NN tamper detected: %v", err) + }) + + // --------------------------------------------------------------- + // 30b. Tampered AP is still loaded when enforcement is off. + // + // enableSignatureVerification defaults to false. + // The tampered profile is pushed to storage and node-agent + // loads it without checking the signature. Anomaly detection + // uses the tampered baseline → the attacker's added exec + // path (nslookup) is whitelisted. + // + // With enableSignatureVerification=true, the tampered profile + // would be rejected and the pod would have no baseline. + // --------------------------------------------------------------- + t.Run("tampered_profile_loaded_without_enforcement", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + + // Build AP: only sleep + curl allowed. + // Use nil for unused fields (storage normalizes empty slices to nil). + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "signed-ap", + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + {Path: "/usr/bin/curl"}, + }, + Syscalls: []string{"close", "connect", "openat", "read", "socket", "write"}, + }, + }, + }, + } + + // Sign the AP. + apAdapter := profiles.NewApplicationProfileAdapter(ap) + require.NoError(t, signature.SignObjectDisableKeyless(apAdapter)) + require.NoError(t, signature.VerifyObjectAllowUntrusted(apAdapter), "pre-tamper verification") + + // Tamper: attacker adds nslookup to the whitelist. + ap.Spec.Containers[0].Execs = append(ap.Spec.Containers[0].Execs, + v1beta1.ExecCalls{Path: "/usr/bin/nslookup"}) + + // Signature is now invalid. + tamperedAdapter := profiles.NewApplicationProfileAdapter(ap) + require.Error(t, signature.VerifyObjectAllowUntrusted(tamperedAdapter), + "tampered AP must fail verification") + + // Push tampered AP to storage (signature annotations are stale). + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "push tampered AP to storage") + + // Verify stored AP has stale signature. + require.Eventually(t, func() bool { + stored, getErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), "signed-ap", v1.GetOptions{}) + if getErr != nil { + return false + } + storedAdapter := profiles.NewApplicationProfileAdapter(stored) + // Signature annotation exists but verification should fail. 
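"Stale" here means the annotations still carry the digest computed at signing time while the spec bytes have moved on. Reduced to its core, with an invented annotation key and a hypothetical specBytes helper (assuming crypto/sha256; the real scheme lives in pkg/signature):

signedDigest := stored.Annotations["kubescape.io/signed-digest"] // recorded when the AP was signed
currentDigest := sha256.Sum256(specBytes(stored))                // hash of the spec as stored now
// Tampering makes currentDigest diverge from signedDigest, so
// VerifyObjectAllowUntrusted returns an error even though IsSigned
// still reports true.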
+ if !signature.IsSigned(storedAdapter) { + return false + } + return signature.VerifyObjectAllowUntrusted(storedAdapter) != nil + }, 30*time.Second, 1*time.Second, "stored AP must have stale signature that fails verification") + t.Log("Stored AP has invalid signature (tamper detected at crypto layer)") + + // Deploy pod referencing the tampered profile. + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/curl-signed-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + + // Drive the unexpected exec inside Eventually so cache-load latency + // is absorbed by retries instead of a blind sleep. Same pattern as + // Test_29 (signed AP, anomalous exec) — without it, the first exec + // can land before the CP cache projects the user-defined AP, the + // rule manager evaluates against an empty baseline, and R0001 never + // fires within the polling window. + // + // wget is NOT in the AP (even after the attacker added nslookup), so + // once the cache loads, every wget exec produces an R0001 alert. + var alerts []testutils.Alert + require.Eventually(t, func() bool { + wl.ExecIntoPod([]string{"wget", "-qO-", "--timeout=2", "http://ebpf.io"}, "curl") + alerts, err = testutils.GetAlerts(ns.Name) + if err != nil { + return false + } + for _, a := range alerts { + if a.Labels["rule_id"] == "R0001" && a.Labels["comm"] == "wget" { + return true + } + } + return false + }, 120*time.Second, 10*time.Second, + "wget not in tampered AP must fire R0001 — proves tampered profile was loaded (enforcement off)") + + // Settle so any pending alerts flush, then dump for diagnostics. + time.Sleep(10 * time.Second) + alerts, _ = testutils.GetAlerts(ns.Name) + t.Logf("=== %d alerts ===", len(alerts)) + for i, a := range alerts { + t.Logf(" [%d] %s(%s) comm=%s container=%s", + i, a.Labels["rule_name"], a.Labels["rule_id"], + a.Labels["comm"], a.Labels["container_name"]) + } + + // With enableSignatureVerification=true: + // - The tampered AP would be rejected (verifyUserApplicationProfile returns false) + // - The pod would have no baseline → no anomaly rules fire for wget + // - System fails OPEN (attacker evades detection by tampering the profile) + // - NOTE: user-defined NNs are not yet gated on the same flag (known gap) + // R1016 ("Signed profile tampered") fires regardless of the flag — that + // path is handled by Test_31. + t.Log("With enableSignatureVerification=true, the tampered profile would be silently rejected.") + }) +} + +// Test_31_TamperDetectionAlert verifies that R1016 "Signed profile tampered" +// fires when a previously signed ApplicationProfile or NetworkNeighborhood +// has been tampered with (signature annotations stale relative to the +// resource bytes). +// +// Coverage: +// 31a — tampered AP fires R1016 (the original scenario; regression-pinned +// after upstream PR #788's cache rewrite re-wired alert emission). +// 31b — untampered signed AP does NOT fire R1016 (negative; signature +// verifies cleanly so no alert). +// 31c — unsigned AP does NOT fire R1016 (signing is opt-in; not-signed +// is not the same as tampered). +// 31d — tampered NN fires R1016 via the parallel NN code path (different +// storage call, same emission contract). +// +// All four subtests share signSignedAP / signSignedNN helpers; each subtest +// uses its own namespace + its own AP/NN name to avoid alert cross-talk +// between scenarios. 
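+//
+// Scenario matrix (a restatement of the four subtests for quick scanning):
+//
+//   signed?  tampered?  R1016
+//   yes      yes        fires   (31a — AP; 31d — NN)
+//   yes      no         silent  (31b)
+//   no       —          silent  (31c)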
+// +// R1016 fires regardless of cfg.EnableSignatureVerification: the alert is +// always emitted on tamper; the flag only gates whether the cache also +// rejects the load. +func Test_31_TamperDetectionAlert(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + + // signSignedAP returns a signed ApplicationProfile in nsName under name. + // + // IMPORTANT: storage's PreSave normalises spec content (DeflateSortString + // sorts+dedupes Syscalls/Capabilities/Architectures, DeflateStringer + // dedupes Execs, AnalyzeOpens/Endpoints/UnifyIdentifiedCallStacks + // rewrite their respective slices, GetContent injects empty + // PolicyByRuleId maps, and K8s itself may default fields). Signing + // locally and then pushing to storage makes the SIGNED hash mismatch + // the POST-STORE content hash that node-agent's tamper check sees, + // firing R1016 on an untampered profile. + // + // Sign-after-roundtrip eliminates every drift source at once: push + // the AP unsigned, read back the storage-normalised form, sign THAT, + // and let the caller push the signed version (deployAndWait does an + // Update-or-Create, so the second push goes through the same + // idempotent deflate and produces the same content hash). + signSignedAP := func(t *testing.T, nsName, name string) *v1beta1.ApplicationProfile { + t.Helper() + // Pre-sort syscalls so the first roundtrip is a no-op for that field + // — keeps the assertion that "deflate is idempotent on already-sorted + // content" honest. + syscalls := []string{"close", "connect", "openat", "read", "socket", "write"} + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: nsName}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + {Path: "/usr/bin/curl"}, + }, + Syscalls: syscalls, + }, + }, + }, + } + + // Round-trip 1: push unsigned, read back the normalised form. + _, err := storageClient.ApplicationProfiles(nsName).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "create unsigned AP for normalisation") + var stored *v1beta1.ApplicationProfile + require.Eventually(t, func() bool { + s, gerr := storageClient.ApplicationProfiles(nsName).Get( + context.Background(), name, v1.GetOptions{}) + if gerr != nil { + return false + } + stored = s + return true + }, 30*time.Second, 1*time.Second, "AP must be retrievable after unsigned create") + + // Sign the storage-normalised content. Now the hash in the signature + // annotation matches what node-agent will see when it loads the AP. + require.NoError(t, + signature.SignObjectDisableKeyless(profiles.NewApplicationProfileAdapter(stored)), + "sign storage-normalised AP") + + // Delete the unsigned in-storage copy so the caller's deployAndWait + // Create succeeds without an AlreadyExists conflict. Storage will + // re-deflate the signed AP on the second push; since that content + // is already normalised, deflate is a no-op and the hash stays + // stable. + require.NoError(t, + storageClient.ApplicationProfiles(nsName).Delete( + context.Background(), name, metav1.DeleteOptions{}), + "delete unsigned AP before caller re-pushes signed version") + // Strip server-managed metadata so the Create call doesn't see a + // stale resourceVersion / uid / creationTimestamp. 
+ stored.ObjectMeta.ResourceVersion = "" + stored.ObjectMeta.UID = "" + stored.ObjectMeta.CreationTimestamp = v1.Time{} + stored.ObjectMeta.Generation = 0 + return stored + } + + signSignedNN := func(t *testing.T, nsName, name string) *v1beta1.NetworkNeighborhood { + t.Helper() + nn := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: nsName}, + Spec: v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "curl-signed"}}, + Containers: []v1beta1.NetworkNeighborhoodContainer{ + {Name: "curl"}, + }, + }, + } + require.NoError(t, signature.SignObjectDisableKeyless(profiles.NewNetworkNeighborhoodAdapter(nn)), "sign NN") + return nn + } + + // deployAndWait pushes the AP (and optionally NN) into storage, then + // deploys curl-signed-deployment.yaml and waits for it to come up. The + // deployment YAML uses kubescape.io/user-defined-profile=signed-ap as + // its label, so AP+NN names must equal "signed-ap" for the upstream + // CP cache to pick them up. + deployAndWait := func(t *testing.T, ns testutils.TestNamespace, ap *v1beta1.ApplicationProfile, nn *v1beta1.NetworkNeighborhood) *testutils.TestWorkload { + t.Helper() + if ap != nil { + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "push AP to storage") + } + if nn != nil { + _, err := storageClient.NetworkNeighborhoods(ns.Name).Create( + context.Background(), nn, metav1.CreateOptions{}) + require.NoError(t, err, "push NN to storage") + } + require.Eventually(t, func() bool { + if ap != nil { + if _, err := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), ap.Name, v1.GetOptions{}); err != nil { + return false + } + } + if nn != nil { + if _, err := storageClient.NetworkNeighborhoods(ns.Name).Get( + context.Background(), nn.Name, v1.GetOptions{}); err != nil { + return false + } + } + return true + }, 30*time.Second, 1*time.Second, "AP/NN must be in storage before pod deploy") + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/curl-signed-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + return wl + } + + countR1016 := func(t *testing.T, nsName string, settle time.Duration) int { + t.Helper() + // Allow node-agent to load the profile and for any alert to flush. + time.Sleep(settle) + alerts, err := testutils.GetAlerts(nsName) + if err != nil { + t.Logf("GetAlerts error: %v", err) + return 0 + } + n := 0 + for _, a := range alerts { + if a.Labels["rule_id"] == "R1016" { + n++ + assert.Equal(t, "Signed profile tampered", a.Labels["rule_name"], + "R1016 alert must have correct rule name") + assert.Equal(t, nsName, a.Labels["namespace"], + "R1016 alert must have correct namespace") + } + } + t.Logf("[%s] R1016 count = %d (out of %d alerts)", nsName, n, len(alerts)) + return n + } + + // ----------------------------------------------------------------- + // 31a — tampered AP fires R1016 + // ----------------------------------------------------------------- + t.Run("tampered_user_defined_AP_fires_R1016", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + ap := signSignedAP(t, ns.Name, "signed-ap") + // Tamper after signing: append an unauthorized exec entry. The + // signature annotations stay (stale). 
+ ap.Spec.Containers[0].Execs = append(ap.Spec.Containers[0].Execs, + v1beta1.ExecCalls{Path: "/usr/bin/nslookup"}) + require.Error(t, + signature.VerifyObjectAllowUntrusted(profiles.NewApplicationProfileAdapter(ap)), + "tampered AP must fail verification") + + _ = deployAndWait(t, ns, ap, nil) + + require.Eventually(t, func() bool { + alerts, _ := testutils.GetAlerts(ns.Name) + for _, a := range alerts { + if a.Labels["rule_id"] == "R1016" { + return true + } + } + return false + }, 120*time.Second, 5*time.Second, "tampered AP must produce R1016") + + require.Greater(t, countR1016(t, ns.Name, 5*time.Second), 0) + }) + + // ----------------------------------------------------------------- + // 31b — untampered signed AP must NOT fire R1016 + // ----------------------------------------------------------------- + t.Run("untampered_signed_AP_no_R1016", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + ap := signSignedAP(t, ns.Name, "signed-ap") + // Don't tamper. Signature verifies cleanly. + require.NoError(t, + signature.VerifyObjectAllowUntrusted(profiles.NewApplicationProfileAdapter(ap)), + "untampered signed AP must verify") + + _ = deployAndWait(t, ns, ap, nil) + // Wait for cache load to happen (cache picks it up within ~15s). + assert.Equal(t, 0, countR1016(t, ns.Name, 30*time.Second), + "untampered signed AP must NOT fire R1016") + }) + + // ----------------------------------------------------------------- + // 31c — unsigned AP must NOT fire R1016 (signing is opt-in) + // ----------------------------------------------------------------- + t.Run("unsigned_AP_no_R1016", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{Name: "signed-ap", Namespace: ns.Name}, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + }, + Syscalls: []string{"socket"}, + }, + }, + }, + } + require.False(t, + signature.IsSigned(profiles.NewApplicationProfileAdapter(ap)), + "unsigned AP must not have signature annotations") + + _ = deployAndWait(t, ns, ap, nil) + assert.Equal(t, 0, countR1016(t, ns.Name, 30*time.Second), + "unsigned AP must NOT fire R1016 — not-signed is not the same as tampered") + }) + + // ----------------------------------------------------------------- + // 31d — tampered NN fires R1016 via the NN code path + // ----------------------------------------------------------------- + t.Run("tampered_user_defined_NN_fires_R1016", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + // Untampered AP (matched on name to the pod label) so the AP path + // stays silent and we know any R1016 came from the NN path. + ap := signSignedAP(t, ns.Name, "signed-ap") + nn := signSignedNN(t, ns.Name, "signed-ap") + // Tamper the NN: add a container the original signature didn't cover. 
+ nn.Spec.Containers = append(nn.Spec.Containers,
+ v1beta1.NetworkNeighborhoodContainer{Name: "drift"})
+ require.Error(t,
+ signature.VerifyObjectAllowUntrusted(profiles.NewNetworkNeighborhoodAdapter(nn)),
+ "tampered NN must fail verification")
+
+ _ = deployAndWait(t, ns, ap, nn)
+
+ require.Eventually(t, func() bool {
+ alerts, _ := testutils.GetAlerts(ns.Name)
+ for _, a := range alerts {
+ if a.Labels["rule_id"] == "R1016" {
+ return true
+ }
+ }
+ return false
+ }, 120*time.Second, 5*time.Second, "tampered NN must produce R1016")
+
+ require.Greater(t, countR1016(t, ns.Name, 5*time.Second), 0)
+ })
+
+}
+
+// ---------------------------------------------------------------------------
+// Test_32_UnexpectedProcessArguments — component test for the wildcard-aware
+// exec-argument matching (R0040). Each subtest gets its own namespace so
+// alerts don't cross-contaminate.
+//
+// AP overlay declares 4 allowed exec patterns for the curl pod (argv[0] is
+// the full exec path, matching what the eBPF tracer captures — see the
+// IMPORTANT note in setup below):
+//
+// /bin/sleep [/bin/sleep, *] — pod startup, must stay silent
+// /bin/sh [/bin/sh, -c, *] — sh -c
+// /bin/echo [/bin/echo, hello, *] — echo hello
+// /usr/bin/curl [/usr/bin/curl, -s, ⋯] — curl -s
+//
+// Profile loaded into the new ContainerProfileCache via the unified
+// kubescape.io/user-defined-profile=<name> label. The exec.go CEL function
+// routes ap.was_executed_with_args through dynamicpathdetector.CompareExecArgs.
+//
+// R0040 ("Unexpected process arguments") fires when:
+// - the exec'd path IS in the profile (R0001 silent), AND
+// - the runtime arg vector does NOT match any profile entry's pattern.
+//
+// Each subtest execs a single command, then asserts presence/absence of
+// R0040 only. R0001 / R0005 / R0011 may also fire on unrelated paths or
+// network egress; those are not what this test is gating.
+// ---------------------------------------------------------------------------
+func Test_32_UnexpectedProcessArguments(t *testing.T) {
+ start := time.Now()
+ defer tearDownTest(t, start)
+
+ const overlayName = "curl-32-overlay"
+
+ setup := func(t *testing.T) *testutils.TestWorkload {
+ t.Helper()
+ ns := testutils.NewRandomNamespace()
+ k8sClient := k8sinterface.NewKubernetesApi()
+ storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig)
+
+ ap := &v1beta1.ApplicationProfile{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: overlayName,
+ Namespace: ns.Name,
+ },
+ Spec: v1beta1.ApplicationProfileSpec{
+ Containers: []v1beta1.ApplicationProfileContainer{
+ {
+ Name: "curl",
+ Execs: []v1beta1.ExecCalls{
+ // IMPORTANT: argv[0] in the eBPF-captured event is
+ // the FULL exec path (see Test_27's wildcard YAML
+ // fixture for the same convention). Profile arg
+ // vectors must include argv[0] as full path so the
+ // matcher's first-position literal compare hits.
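+ //
+ // Illustration of the matching behaviour pinned below (hedged —
+ // CompareExecArgs' exact signature lives in storage's
+ // dynamicpathdetector package; the call shape here is assumed):
+ //
+ //   runtime argv                    profile entry              result
+ //   ["/bin/sh", "-c", "echo hi"] vs ["/bin/sh", "-c", "*"]  →  match
+ //   ["/bin/sh", "-x", "echo hi"] vs ["/bin/sh", "-c", "*"]  →  no match ("-c" anchor)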
+ // + // pod startup: sleep + {Path: "/bin/sleep", Args: []string{"/bin/sleep", dynamicpathdetector.WildcardIdentifier}}, + // sh -c + {Path: "/bin/sh", Args: []string{"/bin/sh", "-c", dynamicpathdetector.WildcardIdentifier}}, + // echo hello + {Path: "/bin/echo", Args: []string{"/bin/echo", "hello", dynamicpathdetector.WildcardIdentifier}}, + // curl -s + {Path: "/usr/bin/curl", Args: []string{"/usr/bin/curl", "-s", dynamicpathdetector.DynamicIdentifier}}, + }, + Syscalls: []string{"socket", "connect", "sendto", "recvfrom", "read", "write", "close", "openat", "mmap", "mprotect", "munmap", "fcntl", "ioctl", "poll", "epoll_create1", "epoll_ctl", "epoll_wait", "bind", "listen", "accept4", "getsockopt", "setsockopt", "getsockname", "getpid", "fstat", "rt_sigaction", "rt_sigprocmask", "writev", "execve"}, + }, + }, + }, + } + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "create AP") + + require.Eventually(t, func() bool { + _, apErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), overlayName, v1.GetOptions{}) + return apErr == nil + }, 30*time.Second, 1*time.Second, "AP must be in storage before pod deploy") + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/curl-exec-arg-wildcards-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + // let node-agent load the user AP into the CP cache + time.Sleep(15 * time.Second) + return wl + } + + countByRule := func(alerts []testutils.Alert, ruleID string) int { + n := 0 + for _, a := range alerts { + if a.Labels["rule_id"] == ruleID { + n++ + } + } + return n + } + + waitAlerts := func(t *testing.T, ns string) []testutils.Alert { + t.Helper() + var alerts []testutils.Alert + var err error + require.Eventually(t, func() bool { + alerts, err = testutils.GetAlerts(ns) + return err == nil + }, 60*time.Second, 5*time.Second, "must be able to fetch alerts") + // settle time for any in-flight alerts + time.Sleep(10 * time.Second) + alerts, _ = testutils.GetAlerts(ns) + return alerts + } + + logAlerts := func(t *testing.T, alerts []testutils.Alert) { + t.Helper() + for i, a := range alerts { + t.Logf(" [%d] %s(%s) comm=%s container=%s", + i, a.Labels["rule_name"], a.Labels["rule_id"], + a.Labels["comm"], a.Labels["container_name"]) + } + } + + // ----------------------------------------------------------------- + // 32a. sh -c — argv [sh, -c, "echo hi"] matches + // profile [sh, -c, *]. R0040 must NOT fire. + // ----------------------------------------------------------------- + t.Run("sh_dash_c_matches_wildcard_trailing", func(t *testing.T) { + wl := setup(t) + stdout, stderr, err := wl.ExecIntoPod([]string{"sh", "-c", "echo hi"}, "curl") + t.Logf("sh -c 'echo hi' → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + assert.Equal(t, 0, countByRule(alerts, "R0040"), + "sh -c matches profile [sh, -c, *] — R0040 must stay silent") + }) + + // ----------------------------------------------------------------- + // 32b. sh -x — argv [sh, -x, "echo hi"] does NOT match + // profile [sh, -c, *] (literal anchor `-c` mismatch). Path + // /bin/sh IS in profile so R0001 stays silent. R0040 must fire. 
+ // ----------------------------------------------------------------- + t.Run("sh_dash_x_mismatches_R0040", func(t *testing.T) { + wl := setup(t) + stdout, stderr, err := wl.ExecIntoPod([]string{"sh", "-x", "echo hi"}, "curl") + t.Logf("sh -x 'echo hi' → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + require.Greater(t, countByRule(alerts, "R0040"), 0, + "sh -x mismatches profile [sh, -c, *] → R0040 must fire") + }) + + // ----------------------------------------------------------------- + // 32c. echo hello — argv [echo, hello, world, from, test] + // matches profile [echo, hello, *]. R0040 must NOT fire. + // ----------------------------------------------------------------- + t.Run("echo_hello_matches_wildcard_trailing", func(t *testing.T) { + wl := setup(t) + stdout, stderr, err := wl.ExecIntoPod([]string{"echo", "hello", "world", "from", "test"}, "curl") + t.Logf("echo hello world from test → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + assert.Equal(t, 0, countByRule(alerts, "R0040"), + "echo hello matches profile [echo, hello, *] — R0040 must stay silent") + }) + + // ----------------------------------------------------------------- + // 32d. echo goodbye — argv [echo, goodbye, world] does + // NOT match profile [echo, hello, *] (literal anchor `hello` + // mismatch). R0040 must fire. + // ----------------------------------------------------------------- + t.Run("echo_goodbye_mismatches_R0040", func(t *testing.T) { + wl := setup(t) + stdout, stderr, err := wl.ExecIntoPod([]string{"echo", "goodbye", "world"}, "curl") + t.Logf("echo goodbye world → err=%v stdout=%q stderr=%q", err, stdout, stderr) + + alerts := waitAlerts(t, wl.Namespace) + t.Logf("=== %d alerts ===", len(alerts)) + logAlerts(t, alerts) + + require.Greater(t, countByRule(alerts, "R0040"), 0, + "echo goodbye mismatches profile [echo, hello, *] (literal anchor) → R0040 must fire") + }) +} + +// Test_33_AnalyzeOpensWildcardAnchoring pins the wildcard-matching +// contract that storage-side CompareDynamic enforces, end-to-end through +// R0002 ("Files Access Anomalies in container"). +// +// Each subtest spins up a fresh nginx pod with a user-defined AP that +// carries ONE Opens entry, then `cat`s a target path that probes a +// boundary case from the storage-side analyzer fixes (kubescape/storage +// PR #316 review by matthyx + entlein): +// +// - Anchored trailing `*` matches one OR MORE remaining segments — +// never zero. So `/etc/*` matches `/etc/passwd` but NOT the bare +// `/etc` directory. Without this rule, R0002 silently allowed +// access to the parent of any profiled directory. +// - DynamicIdentifier (⋯) consumes EXACTLY ONE segment. +// - Mid-path `*` consumes ZERO or more, so `/etc/*/*` still matches +// `/etc/ssh` (inner `*` consumed zero, trailing `*` consumed one). +// - splitPath normalises trailing slashes on both dynamic and +// regular paths so `/etc/passwd/` is treated as `/etc/passwd`. +// - Mixed `⋯/*` patterns: ⋯ pins one segment, `*` consumes the rest +// (with one-or-more semantics). +// +// Component-level pin sits ON TOP of the unit tests in storage's +// pkg/registry/file/dynamicpathdetector/tests/coverage_test.go. 
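+// A compact worked example of that contract (derived from the rules
+// above, not quoted from the matcher's source):
+//
+//   pattern    path          match?
+//   /etc/*     /etc/passwd   yes — trailing * consumes one segment
+//   /etc/*     /etc          no  — trailing * needs at least one
+//   /etc/⋯     /etc/a/b      no  — ⋯ consumes exactly one, `b` is left over
+//   /etc/*/*   /etc/ssl      yes — mid * consumes zero, trailing * one
+//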
+// Both layers must agree — if the unit suite drifts away from these +// runtime expectations, R0002 has either a false-positive or a +// false-negative bug. +func Test_33_AnalyzeOpensWildcardAnchoring(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + const ruleName = "Files Access Anomalies in container" + const profileName = "nginx-regex-profile" + + type subtestResult struct { + name string + profilePath string + filePath string + expectAlert bool + passed bool + detail string + } + var results []subtestResult + addResult := func(name, profilePath, filePath string, expectAlert, passed bool, detail string) { + results = append(results, subtestResult{name, profilePath, filePath, expectAlert, passed, detail}) + } + defer func() { + t.Log("\n========== Test_33 Summary ==========") + anyFailed := false + for _, r := range results { + status := "PASS" + if !r.passed { + status = "FAIL" + anyFailed = true + } + expect := "expect alert" + if !r.expectAlert { + expect = "expect NO alert" + } + t.Logf(" [%s] %-50s profile=%-25s file=%-30s %s", status, r.name, r.profilePath, r.filePath, expect) + if !r.passed { + t.Logf(" -> %s", r.detail) + } + } + if !anyFailed { + t.Log(" All subtests passed.") + } + t.Log("======================================") + }() + + // deployWithProfile creates a user-defined AP with a single Opens + // entry (plus a couple of always-needed paths nginx hits at startup), + // then deploys nginx with the user-defined-profile label pointing at + // it and waits for the pod + cache load. + deployWithProfile := func(t *testing.T, profilePath string) *testutils.TestWorkload { + t.Helper() + ns := testutils.NewRandomNamespace() + + profile := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: profileName, + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "nginx", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/cat", Args: []string{"/bin/cat"}}, + }, + Opens: []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + // Dynamic linker fires this on every exec — keep + // it whitelisted so it doesn't drown out the + // signal we actually care about. + {Path: "/etc/ld.so.cache", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + }, + }, + }, + }, + } + + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), profile, metav1.CreateOptions{}) + require.NoError(t, err, "create user-defined profile %q in ns %s", profileName, ns.Name) + + require.Eventually(t, func() bool { + _, apErr := storageClient.ApplicationProfiles(ns.Name).Get( + context.Background(), profileName, v1.GetOptions{}) + return apErr == nil + }, 30*time.Second, 1*time.Second, "AP must be retrievable from storage before deploying the pod") + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/nginx-user-profile-deployment.yaml")) + require.NoError(t, err, "create workload in ns %s", ns.Name) + // 11 subtests deploy a fresh pod sequentially, so each later subtest + // races against an increasingly loaded kind cluster — the upstream + // CP cache reconciler, alertmanager, and prometheus all chew CPU at + // boot. 80s timed out intermittently; 180s gives headroom without + // pushing the total test runtime into a different regime. 
require.NoError(t, wl.WaitForReady(180), "workload not ready in ns %s", ns.Name)
+
+ // Wait for node-agent to load the user-defined profile into cache.
+ time.Sleep(10 * time.Second)
+ return wl
+ }
+
+ // catAndAlerts execs `cat <filePath>` (ignoring cat's own exit error —
+ // catting a directory or a non-readable file still triggers the
+ // open() syscall the eBPF tracer captures), then polls for alerts.
+ catAndAlerts := func(t *testing.T, wl *testutils.TestWorkload, filePath string) []testutils.Alert {
+ t.Helper()
+ stdout, stderr, _ := wl.ExecIntoPod([]string{"cat", filePath}, "nginx")
+ t.Logf("cat %q → stdout=%q stderr=%q", filePath, stdout, stderr)
+
+ var alerts []testutils.Alert
+ require.Eventually(t, func() bool {
+ a, err := testutils.GetAlerts(wl.Namespace)
+ if err != nil {
+ return false
+ }
+ alerts = a
+ return true
+ }, 60*time.Second, 5*time.Second, "alerts must be retrievable from ns %s", wl.Namespace)
+ // Settle so any late R0002 alert lands before we count.
+ time.Sleep(10 * time.Second)
+ alerts, err := testutils.GetAlerts(wl.Namespace)
+ require.NoError(t, err, "get alerts from ns %s", wl.Namespace)
+ return alerts
+ }
+
+ // hasR0002 returns true if any R0002 alert fired for `cat` in the
+ // nginx container.
+ hasR0002 := func(alerts []testutils.Alert) bool {
+ for _, a := range alerts {
+ if a.Labels["rule_name"] == ruleName &&
+ a.Labels["comm"] == "cat" &&
+ a.Labels["container_name"] == "nginx" {
+ return true
+ }
+ }
+ return false
+ }
+
+ tests := []struct {
+ name string
+ profilePath string
+ filePath string
+ expectAlert bool
+ why string // contract pinned by this case
+ }{
+ // ─── Trailing-`*` anchoring (the security fix) ──────────────
+ //
+ // IMPORTANT: R0002's CEL ruleExpression has a strict prefix
+ // filter (event.path.startsWith('/etc/'), startsWith('/var/log/'),
+ // etc. — all with trailing slash). Bare `/etc` and `/var/log`
+ // don't match those prefixes, so the rule never evaluates on
+ // them and the matcher's anchoring contract stays invisible at
+ // runtime. Probe one level deeper instead — `/etc/ssl` IS under
+ // the `/etc/` monitored prefix, so R0002 CAN see whether a
+ // `/etc/ssl/*` profile entry matches the bare `/etc/ssl` parent.
+ {
+ name: "trailing_star_matches_immediate_child",
+ profilePath: "/etc/*",
+ filePath: "/etc/hosts",
+ expectAlert: false,
+ why: "/etc/* matches a one-segment child under /etc",
+ },
+ {
+ name: "trailing_star_matches_deep_child",
+ profilePath: "/etc/*",
+ filePath: "/etc/ssl/openssl.cnf",
+ expectAlert: false,
+ why: "/etc/* matches a multi-segment path under /etc (trailing * consumes one or more segments)",
+ },
+ {
+ name: "trailing_star_does_not_match_bare_parent_under_monitored_prefix",
+ profilePath: "/etc/ssl/*",
+ filePath: "/etc/ssl",
+ expectAlert: true,
+ why: "/etc/ssl/* must NOT match the bare /etc/ssl directory itself — pins the security fix at a path R0002's prefix filter can observe",
+ },
+ {
+ name: "deep_prefix_trailing_star_does_not_match_parent",
+ profilePath: "/etc/ssl/certs/*",
+ filePath: "/etc/ssl/certs",
+ expectAlert: true,
+ why: "Same anchoring rule, deeper: /etc/ssl/certs/* does NOT match /etc/ssl/certs",
+ },
+
+ // ─── DynamicIdentifier (⋯) exactly-one ──────────────────────
+ {
+ name: "ellipsis_requires_one_segment_not_zero",
+ profilePath: "/etc/passwd/" + dynamicpathdetector.DynamicIdentifier,
+ filePath: "/etc/passwd",
+ expectAlert: true,
+ why: "⋯ consumes EXACTLY ONE segment; /etc/passwd/⋯ requires one more segment, and /etc/passwd has none past it — must fire R0002",
+ },
+
+ // ─── Mixed ⋯/* combinations ─────────────────────────────────
+ {
+ name: "ellipsis_then_trailing_star_matches_two_segment_tail",
+ profilePath: "/proc/" + dynamicpathdetector.DynamicIdentifier + "/*",
+ filePath: "/proc/1/status",
+ expectAlert: false,
+ why: "/proc/⋯/* matches /proc/1/status (⋯ consumes 1, * consumes ≥1)",
+ },
+ {
+ name: "ellipsis_then_trailing_star_matches_three_segment_tail",
+ profilePath: "/proc/" + dynamicpathdetector.DynamicIdentifier + "/*",
+ filePath: "/proc/1/task/1",
+ expectAlert: false,
+ why: "/proc/⋯/* matches deeper paths (⋯ consumes 1, * consumes ≥1 covering rest)",
+ },
+
+ // ─── Multiple trailing wildcards ────────────────────────────
+ {
+ name: "double_trailing_matches_one_child",
+ profilePath: "/etc/*/*",
+ filePath: "/etc/ssl",
+ expectAlert: false,
+ why: "/etc/*/* matches /etc/ssl (mid-* consumes zero, trailing-* consumes one)",
+ },
+ {
+ name: "double_trailing_matches_deep_child",
+ profilePath: "/etc/*/*",
+ filePath: "/etc/ssl/openssl.cnf",
+ expectAlert: false,
+ why: "/etc/*/* matches /etc/ssl/openssl.cnf (mid-* consumes one, trailing-* consumes one)",
+ },
+ {
+ name: "double_trailing_does_not_match_parent_under_monitored_prefix",
+ profilePath: "/etc/ssl/*/*",
+ filePath: "/etc/ssl",
+ expectAlert: true,
+ why: "/etc/ssl/*/* requires at least one segment past /etc/ssl; bare /etc/ssl must NOT match (probed under /etc/ so R0002 sees it)",
+ },
+
+ // ─── splitPath trailing-slash normalisation ─────────────────
+ {
+ name: "trailing_slash_in_profile_normalises_to_literal",
+ profilePath: "/etc/passwd/",
+ filePath: "/etc/passwd",
+ expectAlert: false,
+ why: "Profile `/etc/passwd/` is normalised to `/etc/passwd`; matches the literal at runtime",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Logf("contract: %s", tc.why)
+ wl := deployWithProfile(t, tc.profilePath)
+ alerts := catAndAlerts(t, wl, tc.filePath)
+ got := hasR0002(alerts)
+
+ detail := fmt.Sprintf("got %d alerts total; R0002 fired = %v", len(alerts), got)
+ passed := got == tc.expectAlert
+ if !passed {
+ if tc.expectAlert {
+ t.Errorf("expected R0002 alert: profile %q must NOT match %q (%s); but no alert fired",
+ tc.profilePath, tc.filePath,
tc.why) + } else { + t.Errorf("expected NO R0002 alert: profile %q should match %q (%s); but alert fired", + tc.profilePath, tc.filePath, tc.why) + } + } + addResult(tc.name, tc.profilePath, tc.filePath, tc.expectAlert, passed, detail) + }) + } +} diff --git a/tests/resources/aplint_test.go b/tests/resources/aplint_test.go new file mode 100644 index 0000000000..3e1887592a --- /dev/null +++ b/tests/resources/aplint_test.go @@ -0,0 +1,344 @@ +// AP-fixture lint tests. +// +// Validates every ApplicationProfile / NetworkNeighborhood YAML under +// tests/resources/ against the ground-truth syntax rules learned from a +// real auto-recorded AP for curlimages/curl:8.5.0 (originally captured +// by the fork in commit fea3b062 — known-application-profile.yaml). Each +// rule maps a real-world drift mode that has bitten the fork once already +// (e.g. argv[0] basename vs full path — Test_32 first run on PR #37). +// +// Runs as a regular `go test ./...` — no component tag, no kind cluster. +// +// LintApplicationProfile is exported (uppercase) and returns []Violation +// rather than calling t.Errorf directly, so this whole file can be lifted +// into a standalone bobctl subcommand `bobctl lint ` without any +// testing-package dependency. The Test_* functions below are just thin +// wrappers that turn violations into t.Errorf calls. +package resources + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "sigs.k8s.io/yaml" +) + +// applicationProfileLike captures only the fields we lint; we don't import +// the storage v1beta1 types because we want this lint runnable in isolation. +type applicationProfileLike struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + Containers []struct { + Name string `json:"name"` + Execs []struct { + Path string `json:"path"` + Args []string `json:"args"` + } `json:"execs"` + Opens []struct { + Path string `json:"path"` + Flags []string `json:"flags"` + } `json:"opens"` + } `json:"containers"` + } `json:"spec"` +} + +// Violation is a single rule failure — id, target file (if any), and a +// human-readable message. Returned by LintApplicationProfile so callers +// can treat lint output as data (CLI exit code, JSON, t.Errorf, etc). +type Violation struct { + Rule string + Path string + Msg string +} + +func (v Violation) String() string { + if v.Path != "" { + return fmt.Sprintf("[%s] %s: %s", v.Rule, v.Path, v.Msg) + } + return fmt.Sprintf("[%s] %s", v.Rule, v.Msg) +} + +// validOpenFlags is the set of O_* flags the fork has seen in real +// auto-recorded profiles. Extend as new flags appear; a typo'd flag +// (e.g. `O_LARGEFLE`) is caught immediately. +var validOpenFlags = map[string]bool{ + "O_RDONLY": true, + "O_WRONLY": true, + "O_RDWR": true, + "O_CLOEXEC": true, + "O_LARGEFILE": true, + "O_DIRECTORY": true, + "O_NONBLOCK": true, + "O_APPEND": true, + "O_CREAT": true, + "O_EXCL": true, + "O_TRUNC": true, + "O_NOFOLLOW": true, + "O_NOATIME": true, + "O_DIRECT": true, + "O_SYNC": true, + "O_PATH": true, + "O_TMPFILE": true, +} + +// dynamicIdentifier and wildcardIdentifier mirror the constants in +// storage/pkg/registry/file/dynamicpathdetector. Duplicated here so this +// linter has zero dependency on the storage module. +const ( + dynamicIdentifier = "⋯" + wildcardIdentifier = "*" +) + +// LintApplicationProfileYAML parses a YAML doc as an ApplicationProfile and +// runs all rules. Returns the slice of violations (empty == clean). 
Pure +// function — no I/O, no testing-package coupling. +func LintApplicationProfileYAML(doc []byte, sourceLabel string) []Violation { + var ap applicationProfileLike + if err := yaml.Unmarshal(doc, &ap); err != nil { + return []Violation{{Rule: "R-AP-00", Path: sourceLabel, Msg: fmt.Sprintf("yaml parse: %v", err)}} + } + return LintApplicationProfile(&ap, sourceLabel) +} + +// LintApplicationProfile runs every rule against an already-parsed AP. +// Returns the slice of violations (empty == clean). +// +// Rule IDs: +// R-AP-00 — yaml parse failure (only from LintApplicationProfileYAML) +// R-AP-01 — kind must be ApplicationProfile +// R-AP-02 — at least one container +// R-AP-03 — container name non-empty +// R-AP-10 — exec.path absolute +// R-AP-11 — exec.path no wildcards +// R-AP-12 — exec.args[0] equals exec.path (or wildcard) +// R-AP-13 — exec.args wildcard tokens are whole-word +// R-AP-20 — open.path non-empty + absolute +// R-AP-21 — open.flags non-empty +// R-AP-22 — open.flags from known O_* set +func LintApplicationProfile(ap *applicationProfileLike, src string) []Violation { + var v []Violation + add := func(rule, msg string) { v = append(v, Violation{Rule: rule, Path: src, Msg: msg}) } + + if ap.Kind != "ApplicationProfile" { + add("R-AP-01", fmt.Sprintf("kind is %q, expected \"ApplicationProfile\"", ap.Kind)) + } + if len(ap.Spec.Containers) == 0 { + add("R-AP-02", "spec.containers is empty") + return v + } + + for ci, c := range ap.Spec.Containers { + if c.Name == "" { + add("R-AP-03", fmt.Sprintf("spec.containers[%d].name is empty", ci)) + } + + for ei, e := range c.Execs { + if e.Path == "" { + add("R-AP-10", fmt.Sprintf("containers[%d].execs[%d].path is empty", ci, ei)) + continue + } + if !strings.HasPrefix(e.Path, "/") { + add("R-AP-10", fmt.Sprintf("containers[%d].execs[%d].path %q must be absolute (start with /)", ci, ei, e.Path)) + } + if strings.Contains(e.Path, dynamicIdentifier) || strings.Contains(e.Path, wildcardIdentifier) { + add("R-AP-11", fmt.Sprintf("containers[%d].execs[%d].path %q must NOT contain wildcards (only args[*] may)", ci, ei, e.Path)) + } + + if len(e.Args) == 0 { + continue // path-only entry is legal + } + + // R-AP-12: args[0] must equal the full exec.path. The eBPF + // tracer captures argv[0] as the full binary path; profile + // entries that use a basename (e.g. "sh" instead of "/bin/sh") + // silently fail to match at runtime. Caught the hard way on + // Test_32's first CI run (PR #37 run 25178930763). Exception: + // args[0] may be the wildcard token if the user genuinely + // means "any binary at this path". 
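+            //
+            // Concretely (illustrative fixture snippets, not from a real AP):
+            //   bad:  path: /bin/sh   args: ["sh", "-c", "echo hi"]      → R-AP-12
+            //   good: path: /bin/sh   args: ["/bin/sh", "-c", "echo hi"]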
+ if e.Args[0] != e.Path && e.Args[0] != wildcardIdentifier { + add("R-AP-12", fmt.Sprintf("containers[%d].execs[%d].args[0] = %q, must equal path %q (eBPF captures argv[0] as full path)", ci, ei, e.Args[0], e.Path)) + } + + for ai, a := range e.Args { + if a == "" { + add("R-AP-13", fmt.Sprintf("containers[%d].execs[%d].args[%d] is empty", ci, ei, ai)) + } + if strings.Contains(a, dynamicIdentifier) && a != dynamicIdentifier { + add("R-AP-13", fmt.Sprintf("containers[%d].execs[%d].args[%d] = %q — ⋯ must be its own token, not embedded", ci, ei, ai, a)) + } + } + } + + for oi, o := range c.Opens { + if o.Path == "" { + add("R-AP-20", fmt.Sprintf("containers[%d].opens[%d].path is empty", ci, oi)) + continue + } + if !strings.HasPrefix(o.Path, "/") { + add("R-AP-20", fmt.Sprintf("containers[%d].opens[%d].path %q must be absolute", ci, oi, o.Path)) + } + if len(o.Flags) == 0 { + add("R-AP-21", fmt.Sprintf("containers[%d].opens[%d].flags is empty", ci, oi)) + } + for fi, f := range o.Flags { + if !validOpenFlags[f] { + add("R-AP-22", fmt.Sprintf("containers[%d].opens[%d].flags[%d] = %q — not a recognised O_* flag (typo?)", ci, oi, fi, f)) + } + } + } + } + return v +} + +// --------------------------------------------------------------------------- +// Test layer — walk YAMLs in this directory, run the linter, surface +// violations as t.Errorf. +// --------------------------------------------------------------------------- + +func TestApplicationProfileFixturesLint(t *testing.T) { + matches, err := filepath.Glob("*.yaml") + if err != nil { + t.Fatalf("glob: %v", err) + } + if len(matches) == 0 { + t.Skip("no YAML fixtures found — running outside tests/resources?") + } + + for _, p := range matches { + p := p + t.Run(filepath.Base(p), func(t *testing.T) { + data, err := os.ReadFile(p) + if err != nil { + t.Fatalf("read %s: %v", p, err) + } + if !strings.Contains(string(data), "kind: ApplicationProfile") { + t.Skipf("not an ApplicationProfile fixture") + } + for _, v := range LintApplicationProfileYAML(data, p) { + t.Errorf("%s", v) + } + }) + } +} + +// --------------------------------------------------------------------------- +// Self-tests — feed deliberately-bad YAML, verify the expected rule fires. +// Pin rule semantics so a refactor can't silently drop a check. 
+// --------------------------------------------------------------------------- + +func ruleFired(violations []Violation, ruleID string) bool { + for _, v := range violations { + if v.Rule == ruleID { + return true + } + } + return false +} + +func TestLinter_R_AP_12_argv0_must_be_full_path(t *testing.T) { + bad := []byte(` +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: { name: bad } +spec: + containers: + - name: c + execs: + - path: /bin/sh + args: ["sh", "-c", "echo hi"] +`) + if !ruleFired(LintApplicationProfileYAML(bad, ""), "R-AP-12") { + t.Fatal("expected R-AP-12 violation for basename argv[0]") + } +} + +func TestLinter_R_AP_11_path_no_wildcards(t *testing.T) { + bad := []byte(` +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: { name: bad } +spec: + containers: + - name: c + execs: + - path: /usr/bin/* + args: ["/usr/bin/curl"] +`) + if !ruleFired(LintApplicationProfileYAML(bad, ""), "R-AP-11") { + t.Fatal("expected R-AP-11 violation for wildcard in path") + } +} + +func TestLinter_R_AP_22_unknown_open_flag(t *testing.T) { + bad := []byte(` +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: { name: bad } +spec: + containers: + - name: c + opens: + - path: /etc/passwd + flags: ["O_RDONLY", "O_LARGEFLE"] +`) + if !ruleFired(LintApplicationProfileYAML(bad, ""), "R-AP-22") { + t.Fatal("expected R-AP-22 violation for typo'd flag") + } +} + +func TestLinter_R_AP_10_path_must_be_absolute(t *testing.T) { + bad := []byte(` +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: { name: bad } +spec: + containers: + - name: c + execs: + - path: bin/sh + args: ["bin/sh"] +`) + if !ruleFired(LintApplicationProfileYAML(bad, ""), "R-AP-10") { + t.Fatal("expected R-AP-10 violation for relative path") + } +} + +func TestLinter_R_AP_12_wildcard_argv0_allowed(t *testing.T) { + // args[0] = "*" is the rare-but-legal "match any binary at this path" case. + ok := []byte(` +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: { name: ok } +spec: + containers: + - name: c + execs: + - path: /bin/sh + args: ["*"] +`) + if ruleFired(LintApplicationProfileYAML(ok, ""), "R-AP-12") { + t.Fatal("R-AP-12 must NOT fire when args[0] is the wildcard token") + } +} + +func TestLinter_canonical_AP_passes(t *testing.T) { + // The fork's reference profile (from fea3b062) is the gold standard; + // regressions here mean the linter has drifted from real-world syntax. 
+ data, err := os.ReadFile("known-application-profile.yaml") + if err != nil { + t.Skipf("canonical AP fixture not present: %v", err) + } + violations := LintApplicationProfileYAML(data, "known-application-profile.yaml") + if len(violations) > 0 { + for _, v := range violations { + t.Errorf("%s", v) + } + } +} diff --git a/tests/resources/crypto-miner-deployment.yaml b/tests/resources/crypto-miner-deployment.yaml new file mode 100644 index 0000000000..382a3cb995 --- /dev/null +++ b/tests/resources/crypto-miner-deployment.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k8s-miner-deployment + labels: + app: k8s-miner +spec: + replicas: 1 + selector: + matchLabels: + app: k8s-miner + template: + metadata: + labels: + app: k8s-miner + kubescape.io/user-defined-profile: crypto2 + spec: + containers: + - name: k8s-miner + image: docker.io/amitschendel/crypto-miner-1 + imagePullPolicy: Always + workingDir: /usr/app/src + command: ["./xmrig"] + args: ["--bench", "1M"] diff --git a/tests/resources/curl-exec-arg-wildcards-deployment.yaml b/tests/resources/curl-exec-arg-wildcards-deployment.yaml new file mode 100644 index 0000000000..2f06f8baef --- /dev/null +++ b/tests/resources/curl-exec-arg-wildcards-deployment.yaml @@ -0,0 +1,28 @@ +## Curl pod for Test_32_UnexpectedProcessArguments. +## +## Carries the unified user-defined-profile label used by upstream's +## ContainerProfileCache (kubescape/node-agent#788). The label value +## must match the name of BOTH the user ApplicationProfile and (when +## present) the user NetworkNeighborhood. The test creates only the AP +## with that name; the NN side is intentionally absent. +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-32 + name: curl-32 +spec: + selector: + matchLabels: + app: curl-32 + replicas: 1 + template: + metadata: + labels: + app: curl-32 + kubescape.io/user-defined-profile: curl-32-overlay + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/curl-plain-deployment.yaml b/tests/resources/curl-plain-deployment.yaml new file mode 100644 index 0000000000..003810550a --- /dev/null +++ b/tests/resources/curl-plain-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-fusioncore-28-0 + name: curl-fusioncore-deployment +spec: + selector: + matchLabels: + app: curl-fusioncore-28-0 + replicas: 1 + template: + metadata: + labels: + app: curl-fusioncore-28-0 + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/curl-signed-deployment.yaml b/tests/resources/curl-signed-deployment.yaml new file mode 100644 index 0000000000..df15283ccd --- /dev/null +++ b/tests/resources/curl-signed-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-29 + name: curl-29 +spec: + selector: + matchLabels: + app: curl-29 + replicas: 1 + template: + metadata: + labels: + app: curl-29 + kubescape.io/user-defined-profile: signed-ap + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/curl-user-network-deployment.yaml b/tests/resources/curl-user-network-deployment.yaml new 
file mode 100644 index 0000000000..122de0f1c1 --- /dev/null +++ b/tests/resources/curl-user-network-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-fusioncore-28-1 + name: curl-fusioncore-deployment +spec: + selector: + matchLabels: + app: curl-fusioncore-28-1 + replicas: 1 + template: + metadata: + labels: + app: curl-fusioncore-28-1 + kubescape.io/user-defined-network: fusioncore-network + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/curl-user-profile-wildcards-deployment.yaml b/tests/resources/curl-user-profile-wildcards-deployment.yaml new file mode 100644 index 0000000000..7b2e4ab7db --- /dev/null +++ b/tests/resources/curl-user-profile-wildcards-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-fusioncore + name: curl-fusioncore-deployment +spec: + selector: + matchLabels: + app: curl-fusioncore + replicas: 1 + template: + metadata: + labels: + app: curl-fusioncore + kubescape.io/user-defined-profile: fusioncore-profile-wildcards + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/known-application-profile.yaml b/tests/resources/known-application-profile.yaml new file mode 100644 index 0000000000..b802941572 --- /dev/null +++ b/tests/resources/known-application-profile.yaml @@ -0,0 +1,245 @@ +## +## User-defined ApplicationProfile for Test_28. +## +## Referenced directly from a pod via the label: +## kubescape.io/user-defined-profile: fusioncore-profile +## +## Modeled after a real auto-learned AP from curlimages/curl:8.5.0. 
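+##
+## Note on the ⋯ token in the opens paths below (e.g. /proc/⋯/cgroup):
+## it is the learner's dynamic-segment placeholder — one ⋯ stands in for
+## exactly one path segment (here, a PID) at match time.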
+## +## Usage: +## sed "s/{{NAMESPACE}}/$NS/g" known-application-profile.yaml \ +## | kubectl apply -f - +## +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: + name: fusioncore-profile + namespace: "{{NAMESPACE}}" +spec: + architectures: ["amd64"] + containers: + - name: curl + imageID: "docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058" + imageTag: "docker.io/curlimages/curl:8.5.0" + capabilities: + - CAP_CHOWN + - CAP_DAC_OVERRIDE + - CAP_DAC_READ_SEARCH + - CAP_SETGID + - CAP_SETPCAP + - CAP_SETUID + - CAP_SYS_ADMIN + execs: + - path: /bin/sleep + args: ["/bin/sleep", "infinity"] + - path: /bin/cat + args: ["/bin/cat"] + - path: /usr/bin/curl + args: ["/usr/bin/curl", "-sm2", "fusioncore.ai"] + - path: /usr/bin/nslookup + args: ["/usr/bin/nslookup"] + opens: + - path: /7/setgroups + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /etc/hosts + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /etc/ld-musl-x86_64.path + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /etc/passwd + flags: ["O_RDONLY", "O_CLOEXEC", "O_LARGEFILE"] + - path: /etc/resolv.conf + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /etc/ssl/openssl.cnf + flags: ["O_RDONLY", "O_LARGEFILE"] + - path: /home/curl_user/.config/curlrc + flags: ["O_RDONLY", "O_LARGEFILE"] + - path: /home/curl_user/.curlrc + flags: ["O_RDONLY", "O_LARGEFILE"] + - path: /lib/libbrotlicommon.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libbrotlidec.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libcom_err.so.2.1 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libcrypto.so.3 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libcurl.so.4 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libgssapi_krb5.so.2 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libidn2.so.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libk5crypto.so.3 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libkeyutils.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libkrb5.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libkrb5support.so.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libnghttp2.so.14 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libpsl.so.5 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libssh2.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libssl.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libunistring.so.5 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libz.so.1.3 + flags: ["O_LARGEFILE", "O_CLOEXEC", "O_RDONLY"] + - path: /proc/⋯/cgroup + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /proc/⋯/kernel/cap_last_cap + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /proc/⋯/mountinfo + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /proc/⋯/task/1/fd + flags: ["O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"] + - path: /proc/⋯/task/7/fd + flags: ["O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"] + - path: /runc + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /sys/fs/cgroup/cpu.max + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /sys/kernel/mm/transparent_hugepage/hpage_pmd_size + flags: ["O_RDONLY"] + - path: /usr/lib/libbrotlicommon.so.1.1.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libbrotlidec.so.1.1.0 + flags: ["O_RDONLY", "O_LARGEFILE", 
"O_CLOEXEC"] + - path: /usr/lib/libcurl.so.4.8.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libgssapi_krb5.so.2.2 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libidn2.so.0.3.8 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libk5crypto.so.3.1 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libkeyutils.so.1.10 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libkrb5.so.3.3 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libkrb5support.so.0.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libnghttp2.so.14.25.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libpsl.so.5.3.4 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libssh2.so.1.0.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libunistring.so.5.0.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libbrotlicommon.so.1 + flags: ["O_LARGEFILE", "O_CLOEXEC", "O_RDONLY"] + - path: /usr/local/lib/libbrotlidec.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libcom_err.so.2 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/local/lib/libcrypto.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libcurl.so.4 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libgssapi_krb5.so.2 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libidn2.so.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libk5crypto.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libkeyutils.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libkrb5.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libkrb5support.so.0 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/local/lib/libnghttp2.so.14 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libpsl.so.5 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libssh2.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libssl.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libunistring.so.5 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libz.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + syscalls: + - arch_prctl + - bind + - brk + - capget + - capset + - chdir + - clone + - close + - close_range + - connect + - epoll_ctl + - epoll_pwait + - execve + - exit + - exit_group + - faccessat2 + - fchown + - fcntl + - fstat + - fstatfs + - futex + - getcwd + - getdents64 + - getegid + - geteuid + - getgid + - getpeername + - getppid + - getsockname + - getsockopt + - gettid + - getuid + - ioctl + - membarrier + - mmap + - mprotect + - munmap + - nanosleep + - newfstatat + - open + - openat + - openat2 + - pipe + - poll + - prctl + - read + - recvfrom + - recvmsg + - rt_sigaction + - rt_sigprocmask + - rt_sigreturn + - sendto + - set_tid_address + - setgid + - setgroups + - setsockopt + - setuid + - sigaltstack + - socket + - statx + - tkill + - unknown + - write + - writev + endpoints: + - endpoint: ":80/" + direction: outbound + methods: ["GET"] + internal: false + headers: '{"Host":["fusioncore.ai"]}' + seccompProfile: + spec: + defaultAction: "" + rulePolicies: {} + initContainers: [] + 
ephemeralContainers: [] +status: {} diff --git a/tests/resources/known-network-neighborhood.yaml b/tests/resources/known-network-neighborhood.yaml new file mode 100644 index 0000000000..0d4caa0c4e --- /dev/null +++ b/tests/resources/known-network-neighborhood.yaml @@ -0,0 +1,49 @@ +## +## User-defined NetworkNeighborhood for Test_28. +## +## Referenced directly from a pod via the label: +## kubescape.io/user-defined-network: fusioncore-network +## +## Carries "kubescape.io/managed-by: User" annotation and workload +## labels to match the schema the node-agent cache expects. +## +## Modeled after a real auto-learned NN from curlimages/curl:8.5.0. +## +## Usage: +## sed "s/{{NAMESPACE}}/$NS/g" known-network-neighborhood.yaml \ +## | kubectl apply -f - +## +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: NetworkNeighborhood +metadata: + name: fusioncore-network + namespace: "{{NAMESPACE}}" + annotations: + kubescape.io/managed-by: User + kubescape.io/status: completed + kubescape.io/completion: complete + labels: + kubescape.io/workload-api-group: apps + kubescape.io/workload-api-version: v1 + kubescape.io/workload-kind: Deployment + kubescape.io/workload-name: curl-fusioncore-deployment + kubescape.io/workload-namespace: "{{NAMESPACE}}" +spec: + matchLabels: + app: curl-fusioncore-28-1 + containers: + - name: curl + ingress: [] + egress: + - dns: fusioncore.ai. + dnsNames: + - fusioncore.ai. + identifier: a5e64ff1db824089b1706ac872303e55075f92cf6a652b5272f06c3a2b9e8d10 + ipAddress: 162.0.217.171 + namespaceSelector: null + podSelector: null + ports: + - name: TCP-80 + port: 80 + protocol: TCP + type: external diff --git a/tests/resources/nginx-user-defined-deployment.yaml b/tests/resources/nginx-user-defined-deployment.yaml new file mode 100644 index 0000000000..c21c6b080f --- /dev/null +++ b/tests/resources/nginx-user-defined-deployment.yaml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-28 + name: curl-28 +spec: + selector: + matchLabels: + app: curl-28 + replicas: 1 + template: + metadata: + labels: + app: curl-28 + # Upstream ContainerProfileCache (kubescape/node-agent#788) reads ONE + # label and uses its value as the name of BOTH the user-defined + # ApplicationProfile and the user-defined NetworkNeighborhood. The + # AP and NN below MUST share the same name as this label value. 
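+        # e.g. with this label value, storage must hold BOTH
+        #   ApplicationProfile   metadata.name: curl-28-overlay
+        #   NetworkNeighborhood  metadata.name: curl-28-overlay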
+ kubescape.io/user-defined-profile: curl-28-overlay + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/nginx-user-profile-deployment.yaml b/tests/resources/nginx-user-profile-deployment.yaml new file mode 100644 index 0000000000..218f956540 --- /dev/null +++ b/tests/resources/nginx-user-profile-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 1 + template: + metadata: + labels: + app: nginx + kubescape.io/user-defined-profile: nginx-regex-profile + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/tests/resources/run-test-28.sh b/tests/resources/run-test-28.sh new file mode 100755 index 0000000000..582aac83cd --- /dev/null +++ b/tests/resources/run-test-28.sh @@ -0,0 +1,163 @@ +#!/usr/bin/env bash +# +# run-test-28.sh — Manual execution of Test_28_UserDefinedNetworkNeighborhood +# +# Applies user-defined NN, deploys curl, waits for AP to auto-learn, +# triggers allowed + unknown traffic, checks for alerts. +# +# Usage: +# ./run-test-28.sh # run the test +# ./run-test-28.sh learn # learn NN from scratch (debug) +# +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ALERTMANAGER_URL="${ALERTMANAGER_URL:-http://localhost:9093}" +SCENARIO="${1:-test}" + +# Ensure alertmanager port-forward is active. +if ! curl -s --max-time 2 "$ALERTMANAGER_URL/api/v2/alerts" >/dev/null 2>&1; then + echo "Alertmanager not reachable at $ALERTMANAGER_URL — starting port-forward..." + kubectl port-forward svc/alertmanager-operated 9093:9093 -n monitoring & + ALERT_PF_PID=$! + sleep 3 + if ! curl -s --max-time 2 "$ALERTMANAGER_URL/api/v2/alerts" >/dev/null 2>&1; then + echo "ERROR: alertmanager still not reachable after port-forward (pid=$ALERT_PF_PID)" + kill "$ALERT_PF_PID" 2>/dev/null || true + exit 1 + fi + trap 'kill $ALERT_PF_PID 2>/dev/null || true' EXIT +fi + +get_all_alerts() { + local ns="$1" + curl -s "$ALERTMANAGER_URL/api/v2/alerts" | \ + jq "[.[] | select(.labels.namespace==\"$ns\")]" +} + +wait_for_pod() { + local ns="$1" + kubectl rollout status deployment/curl-fusioncore-deployment -n "$ns" --timeout=120s +} + +get_pod() { + local ns="$1" + kubectl get pods -n "$ns" -l app=curl-fusioncore-28-1 \ + -o jsonpath='{.items[0].metadata.name}' 2>/dev/null +} + +# ================================================================= +# Main test: apply NN manifest, deploy curl, trigger traffic, check alerts +# ================================================================= +run_test() { + local NS="t28-$(head -c4 /dev/urandom | xxd -p)" + local NET="fusioncore-network-$NS" + echo "" + echo "=== Test 28: User-Defined NN (ns=$NS, net=$NET) ===" + + kubectl create namespace "$NS" --dry-run=client -o yaml | kubectl apply -f - + + # 1. Apply NN manifest with unique name + sed -e "s/{{NAMESPACE}}/$NS/g" \ + -e "s/fusioncore-network/$NET/g" \ + "$SCRIPT_DIR/known-network-neighborhood.yaml" | kubectl apply -f - + echo " NN $NET created" + + # 2. Deploy curl with user-defined-network label + sed "s/{{NETWORK_NAME}}/$NET/g" \ + "$SCRIPT_DIR/curl-user-network-deployment.yaml" | kubectl apply -n "$NS" -f - + wait_for_pod "$NS" + local POD; POD=$(get_pod "$NS") + echo " Pod: $POD" + + # 3. 
Wait for AP to auto-learn + echo " Waiting for AP to complete..." + for i in $(seq 1 80); do + AP_STATUS=$(kubectl get applicationprofiles -n "$NS" \ + -o jsonpath='{.items[0].metadata.annotations.kubescape\.io/status}' 2>/dev/null || true) + [ "$AP_STATUS" = "completed" ] && break + sleep 10 + done + echo " AP status: $AP_STATUS" + + # 4. Trigger traffic + echo " Triggering traffic..." + echo " nslookup fusioncore.ai (allowed)" + kubectl exec -n "$NS" "$POD" -c curl -- nslookup fusioncore.ai 2>&1 || true + echo " curl fusioncore.ai (allowed)" + kubectl exec -n "$NS" "$POD" -c curl -- curl -sm2 http://fusioncore.ai >/dev/null 2>&1 || true + echo " nslookup evil.example.com (unknown)" + kubectl exec -n "$NS" "$POD" -c curl -- nslookup evil.example.com 2>&1 || true + echo " curl evil.example.com (unknown)" + kubectl exec -n "$NS" "$POD" -c curl -- curl -sm2 http://evil.example.com >/dev/null 2>&1 || true + + echo " Waiting 30s for alerts..." + sleep 30 + + # 5. Check alerts + echo "" + echo " === All alerts in namespace $NS ===" + ALERTS=$(get_all_alerts "$NS") + ALERT_COUNT=$(echo "$ALERTS" | jq 'length') + echo "$ALERTS" | jq -r '.[] | " [\(.labels.rule_name)] container=\(.labels.container_name // "n/a")"' + echo " Total: $ALERT_COUNT" + echo " ======================================" + + if [ "$ALERT_COUNT" -eq 0 ]; then + echo " FAIL: expected at least one alert (R0005 for evil.example.com), got ZERO" + echo " Namespace $NS left for inspection" + exit 1 + else + echo " PASS: got $ALERT_COUNT alert(s)" + echo " Cleanup: kubectl delete namespace $NS" + fi +} + +# ================================================================= +# Learn scenario: no user-defined labels, learn NN from scratch +# ================================================================= +run_learn() { + local NS="t28-learn-$(head -c4 /dev/urandom | xxd -p)" + echo "" + echo "=== LEARN NN from scratch (ns=$NS) ===" + + kubectl create namespace "$NS" --dry-run=client -o yaml | kubectl apply -f - + kubectl apply -n "$NS" -f "$SCRIPT_DIR/curl-plain-deployment.yaml" + wait_for_pod "$NS" + local POD; POD=$(kubectl get pods -n "$NS" -l app=curl-fusioncore-28-0 \ + -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + echo " Pod: $POD" + + echo " Triggering traffic during learning window..." + kubectl exec -n "$NS" "$POD" -c curl -- nslookup fusioncore.ai 2>&1 || true + kubectl exec -n "$NS" "$POD" -c curl -- curl -sm5 http://fusioncore.ai >/dev/null 2>&1 || true + sleep 5 + kubectl exec -n "$NS" "$POD" -c curl -- nslookup fusioncore.ai 2>&1 || true + kubectl exec -n "$NS" "$POD" -c curl -- curl -sm5 http://fusioncore.ai >/dev/null 2>&1 || true + + echo " Waiting for NN to complete..." 
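+  # The kubescape.io/status annotation flips to "completed" once the learning
+  # window closes, so poll it instead of sleeping a fixed interval. Equivalent
+  # manual check (same jsonpath the loop below uses):
+  #   kubectl get networkneighborhoods -n "$NS" \
+  #     -o jsonpath='{.items[0].metadata.annotations.kubescape\.io/status}'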
+ for i in $(seq 1 80); do + NN_STATUS=$(kubectl get networkneighborhoods -n "$NS" \ + -o jsonpath='{.items[0].metadata.annotations.kubescape\.io/status}' 2>/dev/null || true) + [ "$NN_STATUS" = "completed" ] && break + sleep 10 + done + echo " NN status: $NN_STATUS" + + echo "" + echo " === Learned NetworkNeighborhood ===" + kubectl get networkneighborhoods -n "$NS" -o yaml 2>&1 + echo " ===================================" + echo "" + echo " Namespace $NS left for inspection" + echo " Cleanup: kubectl delete namespace $NS" +} + +case "$SCENARIO" in + test) run_test ;; + learn) run_learn ;; + *) + echo "Usage: $0 [test|learn]" + exit 1 + ;; +esac diff --git a/tests/resources/test-28-iterate.sh b/tests/resources/test-28-iterate.sh new file mode 100755 index 0000000000..355fdf65ff --- /dev/null +++ b/tests/resources/test-28-iterate.sh @@ -0,0 +1,232 @@ +#!/usr/bin/env bash +# +# test-28-iterate.sh — Self-contained test for user-defined NN alerts +# +# Prerequisites: +# - R0011 must be enabled with isTriggerAlert=true in the Rules CRD +# - R0005 should have isTriggerAlert=true for DNS alerts +# - Alertmanager port-forward active on localhost:9093 +# +# Usage: ./test-28-iterate.sh +# +set -euo pipefail + +ALERTMANAGER_URL="${ALERTMANAGER_URL:-http://localhost:9093}" +NA_POD=$(kubectl get pods -n kubescape -l app=node-agent -o jsonpath='{.items[0].metadata.name}') + +# Ensure alertmanager reachable +if ! curl -s --max-time 2 "$ALERTMANAGER_URL/api/v2/alerts" >/dev/null 2>&1; then + echo "ERROR: alertmanager not reachable at $ALERTMANAGER_URL" + exit 1 +fi + +get_alerts() { + local ns="$1" + curl -s "$ALERTMANAGER_URL/api/v2/alerts" | jq "[.[] | select(.labels.namespace==\"$ns\")]" +} + +cleanup_ns() { + kubectl delete namespace "$1" --wait=false 2>/dev/null || true +} + +# ================================================================ +# Ensure R0005 and R0011 are enabled with isTriggerAlert=true +# ================================================================ +echo "Patching rules: R0005 isTriggerAlert=true, R0011 enabled+isTriggerAlert=true" +kubectl get rules -n kubescape default-rules -o json | jq ' + .spec.rules = [ + .spec.rules[] | + if .id == "R0005" then .isTriggerAlert = true + elif .id == "R0011" then .enabled = true | .isTriggerAlert = true + else . 
+    end
+  ]
+' | kubectl apply -f - >/dev/null 2>&1
+echo " Done"
+
+# ================================================================
+# TEST: User-defined AP + NN → R0011 for anomalous TCP egress
+# ================================================================
+NS="t28-$(head -c4 /dev/urandom | xxd -p)"
+echo ""
+echo "============================================================"
+echo "TEST: User-defined AP + NN → R0011 Unexpected Egress Traffic"
+echo " ns=$NS"
+echo "============================================================"
+
+kubectl create namespace "$NS" --dry-run=client -o yaml | kubectl apply -f -
+
+# Create the user-defined ApplicationProfile and NetworkNeighborhood, deploy
+# the nginx workload that references them, and capture its pod name.
+# NOTE: the manifest heredocs are not reproduced here; the generic lookup
+# below recovers $POD, since the fresh namespace holds a single pod.
+kubectl wait --for=condition=Ready pod --all -n "$NS" --timeout=120s
+POD=$(kubectl get pods -n "$NS" -o jsonpath='{.items[0].metadata.name}')
+
+# Wait for the node-agent to acknowledge both user-defined objects in its
+# logs (retry budget reconstructed):
+for i in $(seq 1 20); do
+  GOT_NN=$(kubectl logs "$NA_POD" -n kubescape -c node-agent --since=30s 2>&1 \
+    | grep -c "added user-defined network neighborhood" || true)
+  GOT_AP=$(kubectl logs "$NA_POD" -n kubescape -c node-agent --since=30s 2>&1 \
+    | grep -c "added user-defined application profile\|user defined profile" || true)
+  echo " NN=$GOT_NN AP=$GOT_AP"
+  [ "$GOT_NN" -gt 0 ] && [ "$GOT_AP" -gt 0 ] && break
+  sleep 3
+done
+
+# Trigger anomalous TCP egress (R0011)
+echo ""
+echo " [anomaly] curl -sm5 http://8.8.8.8 (NOT in NN egress)"
+kubectl exec -n "$NS" "$POD" -c nginx -- curl -sm5 http://8.8.8.8 2>&1 || true
+echo " [anomaly] curl -sm5 http://1.1.1.1 (NOT in NN egress)"
+kubectl exec -n "$NS" "$POD" -c nginx -- curl -sm5 http://1.1.1.1 2>&1 || true
+
+# Poll for alerts
+echo ""
+echo " Polling for alerts..."
+R0011_ALERTS=0
+for i in 1 2 3 4; do
+  sleep 5
+  ALERTS=$(get_alerts "$NS")
+  ALERT_COUNT=$(echo "$ALERTS" | jq 'length')
+  R0011_ALERTS=$(echo "$ALERTS" | jq '[.[] | select(.labels.rule_id=="R0011")] | length')
+  echo " poll $i: total=$ALERT_COUNT R0011=$R0011_ALERTS"
+  [ "$R0011_ALERTS" -gt 0 ] && break
+done
+
+echo ""
+echo " === All alerts in $NS ==="
+echo "$ALERTS" | jq -r '.[] | " [\(.labels.rule_id)] \(.labels.rule_name) | comm=\(.labels.comm // "?")"' 2>/dev/null || true
+echo " Total: $ALERT_COUNT R0011: $R0011_ALERTS"
+echo " ========================"
+
+if [ "$R0011_ALERTS" -gt 0 ]; then
+  echo ""
+  echo " >>> RESULT: PASS — R0011 fires for user-defined AP+NN"
+  cleanup_ns "$NS"
+  exit 0
+else
+  echo ""
+  echo " >>> RESULT: FAIL — no R0011 alerts"
+  echo " >>> Namespace $NS left for inspection"
+  exit 1
+fi
diff --git a/tests/resources/user-profile.yaml b/tests/resources/user-profile.yaml
deleted file mode 100644
index 97a116f6d2..0000000000
--- a/tests/resources/user-profile.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1
-kind: ApplicationProfile
-metadata:
-  name: {name}
-  namespace: {namespace}
-  resourceVersion: "1" # Start with "1" for new resources
-  annotations:
-    kubescape.io/managed-by: User
-spec:
-  architectures: ["amd64"]
-  containers:
-  - name: nginx
-    imageID: ""
-    imageTag: ""
-    capabilities: []
-    opens: []
-    syscalls: []
-    endpoints: []
-    execs:
-    - path: /usr/bin/ls
-      args:
-      - /usr/bin/ls
-      - -l
-    seccompProfile:
-      spec:
-        defaultAction: ""
-  - name: server
-    imageID: ""
-    imageTag: ""
-    capabilities: []
-    opens: []
-    syscalls: []
-    endpoints: []
-    execs:
-    - path: /bin/ls
-      args:
-      - /bin/ls
-      - -l
-    - path: /bin/grpc_health_probe
-      args:
-      - "-addr=:9555"
-    seccompProfile:
-      spec:
-        defaultAction: ""
-  initContainers: []
-  ephemeralContainers: []
-status: {}
\ No newline at end of file
diff --git a/tests/scripts/local-ci.sh b/tests/scripts/local-ci.sh
new file mode 100755
index 0000000000..c226f5f611
--- /dev/null
+++ b/tests/scripts/local-ci.sh
@@ -0,0 +1,220 @@
+#!/usr/bin/env bash
+# local-ci.sh — close local mirror of
.github/workflows/component-tests.yaml +# +# Usage: +# ./tests/scripts/local-ci.sh # full run: cluster setup + deploy + all tests +# ./tests/scripts/local-ci.sh --deploy-only # stop after helm install (skip tests) +# ./tests/scripts/local-ci.sh --test-only Test_27 # skip setup, just run one test +# ./tests/scripts/local-ci.sh Test_27 # full run, single test +# ./tests/scripts/local-ci.sh Test_01 Test_27 # full run, multiple tests +# +# Differences from CI: +# - Uses ~/go/bin/kind (v0.31.0) instead of downloading kind +# - Uses existing kubectl instead of downloading it +# - Builds Docker images locally + kind load instead of pulling from ghcr.io +# - Storage is built from ../storage (local replace for go.mod) +# - Node-agent privileged=true for Kind clusters +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +STORAGE_ROOT="$(cd "$REPO_ROOT/../storage" && pwd)" +KIND="${HOME}/go/bin/kind" +KIND_CLUSTER="integration-test" +NAMESPACE="kubescape" +TAG="local-test" + +cd "$REPO_ROOT" + +# ── parse args ──────────────────────────────────────────────────────────────── +DEPLOY_ONLY=false +TEST_ONLY=false +TESTS=() + +for arg in "$@"; do + case "$arg" in + --deploy-only) DEPLOY_ONLY=true ;; + --test-only) TEST_ONLY=true ;; + Test_*) TESTS+=("$arg") ;; + esac +done + +log() { echo "[$(date +%H:%M:%S)] $*"; } + +# ── step 1: kind cluster ───────────────────────────────────────────────────── +# CI: curl kind, ./kind create cluster, download kubectl +setup_cluster() { + if $KIND get clusters 2>/dev/null | grep -q "^${KIND_CLUSTER}$"; then + log "Kind cluster '${KIND_CLUSTER}' already exists, reusing" + else + log "Creating Kind cluster '${KIND_CLUSTER}'" + $KIND create cluster --name "$KIND_CLUSTER" + fi + kubectl cluster-info --context "kind-${KIND_CLUSTER}" >/dev/null 2>&1 \ + || { log "ERROR: cluster unreachable"; exit 1; } +} + +# ── step 2: install prometheus ──────────────────────────────────────────────── +# CI: helm repo add + helm upgrade --install prometheus +install_prometheus() { + if kubectl get ns monitoring >/dev/null 2>&1 && \ + kubectl get pods -n monitoring -l app.kubernetes.io/name=prometheus -o name 2>/dev/null | grep -q .; then + log "Prometheus already installed, skipping" + else + log "Installing Prometheus" + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 2>/dev/null || true + helm repo update + helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \ + --set grafana.enabled=false \ + --namespace monitoring --create-namespace \ + --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false,prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false \ + --set prometheus.prometheusSpec.maximumStartupDurationSeconds=300 \ + --wait --timeout 5m + fi + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=prometheus -n monitoring --timeout=300s +} + +# ── step 3: build + load images ────────────────────────────────────────────── +# CI: pulls from ghcr.io. Locally: docker build + kind load. 
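+# Because the images are side-loaded into the Kind nodes rather than pulled,
+# install_kubescape below sets imagePullPolicy=Never. A quick way to verify a
+# load took (kind names its node container <cluster>-control-plane, so here
+# that would be integration-test-control-plane):
+#   docker exec integration-test-control-plane crictl images | grep -E 'node-agent|storage'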
+build_and_load_images() {
+  log "Checking disk space"
+  df -h / | tail -1
+
+  # Storage
+  log "Building storage image from ${STORAGE_ROOT}"
+  docker build -f "${STORAGE_ROOT}/build/Dockerfile" \
+    -t "ghcr.io/k8sstormcenter/storage:${TAG}" \
+    "${STORAGE_ROOT}"
+
+  # Node-agent: add local storage replace, vendor, build
+  log "Setting up node-agent go.mod with local storage replace"
+  go mod edit -replace "github.com/kubescape/storage=${STORAGE_ROOT}"
+  go mod tidy
+  go mod vendor
+
+  log "Building node-agent image"
+  docker build -f build/Dockerfile \
+    -t "ghcr.io/k8sstormcenter/node-agent:${TAG}" \
+    --build-arg image_version="${TAG}" .
+
+  # Clean up Docker build cache
+  docker builder prune --filter until=1h -f >/dev/null 2>&1 || true
+  docker image prune -f >/dev/null 2>&1 || true
+
+  # Drop the local replace (not committed)
+  go mod edit -dropreplace "github.com/kubescape/storage"
+
+  log "Loading images into Kind"
+  $KIND load docker-image "ghcr.io/k8sstormcenter/storage:${TAG}" --name "$KIND_CLUSTER"
+  $KIND load docker-image "ghcr.io/k8sstormcenter/node-agent:${TAG}" --name "$KIND_CLUSTER"
+
+  log "Checking disk space after build"
+  df -h / | tail -1
+}
+
+# ── step 4: helm install kubescape ───────────────────────────────────────────
+# CI: helm upgrade --install kubescape ./tests/chart --set ...
+install_kubescape() {
+  log "Installing kubescape chart (storage=${TAG}, node-agent=${TAG})"
+  helm upgrade --install kubescape ./tests/chart \
+    --set clusterName="$(kubectl config current-context)" \
+    --set nodeAgent.image.tag="${TAG}" \
+    --set nodeAgent.image.repository=ghcr.io/k8sstormcenter/node-agent \
+    --set nodeAgent.image.pullPolicy=Never \
+    --set storage.image.tag="${TAG}" \
+    --set storage.image.pullPolicy=Never \
+    --set nodeAgent.privileged=true \
+    -n "$NAMESPACE" --create-namespace --wait --timeout 5m \
+    --disable-openapi-validation
+
+  kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=node-agent -n "$NAMESPACE" --timeout=300s
+  kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=storage -n "$NAMESPACE" --timeout=300s
+  sleep 5
+}
+
+# ── step 5: port forwarding ──────────────────────────────────────────────────
+# CI: ./tests/scripts/port-forward.sh
+start_port_forwards() {
+  log "Setting up port forwarding"
+  # Kill stale port-forwards
+  pkill -f 'kubectl port-forward.*alertmanager-operated' 2>/dev/null || true
+  pkill -f 'kubectl port-forward.*prometheus-kube-prometheus' 2>/dev/null || true
+  sleep 1
+  "${SCRIPT_DIR}/port-forward.sh"
+}
+
+# ── step 6: run tests ────────────────────────────────────────────────────────
+# CI: cd tests && go test -v ./... -run ${{ matrix.test }} --timeout=20m --tags=component
+run_tests() {
+  local test_pattern="${1:-}"
+
+  # CI: Update storage dependency (go mod edit -replace ...)
+  # Locally: same replace so test code compiles against fork storage
+  log "Applying storage replace for test compilation"
+  go mod edit -replace "github.com/kubescape/storage=${STORAGE_ROOT}"
+  go mod tidy
+  go mod vendor
+
+  # Run in a subshell so the go test exit code, not that of a trailing
+  # `cd ..`, becomes the function's return value.
+  if [[ -n "$test_pattern" ]]; then
+    log "Running test: ${test_pattern}"
+    (cd tests && CGO_ENABLED=0 go test -v ./... -run "${test_pattern}" --timeout=20m --tags=component)
+  else
+    log "Running all component tests"
+    (cd tests && CGO_ENABLED=0 go test -v ./... --timeout=20m --tags=component)
+  fi
+}
+
+# ── step 7: collect logs ─────────────────────────────────────────────────────
+# CI: kubectl logs ... node-agent + storage
+print_logs() {
+  log "=== Node agent logs ==="
+  kubectl logs "$(kubectl get pods -n "$NAMESPACE" -o name | grep node-agent)" -n "$NAMESPACE" -c node-agent --tail=100 2>/dev/null || true
+  echo "-----------------------------------------"
+  log "=== Storage logs ==="
+  kubectl logs "$(kubectl get pods -n "$NAMESPACE" -o name | grep storage)" -n "$NAMESPACE" --tail=50 2>/dev/null || true
+}
+
+# ── main ─────────────────────────────────────────────────────────────────────
+if $TEST_ONLY; then
+  # Just run the test(s), assume cluster + deploy are already done.
+  # Capture failures instead of aborting on the first, so logs still print.
+  start_port_forwards
+  TEST_EXIT=0
+  set +e
+  if [[ ${#TESTS[@]} -gt 0 ]]; then
+    for t in "${TESTS[@]}"; do run_tests "$t" || TEST_EXIT=$?; done
+  else
+    run_tests "" || TEST_EXIT=$?
+  fi
+  set -e
+  print_logs
+  exit $TEST_EXIT
+fi
+
+setup_cluster
+install_prometheus
+build_and_load_images
+install_kubescape
+
+if $DEPLOY_ONLY; then
+  log "Deploy complete. Pods:"
+  kubectl get pods -n "$NAMESPACE" -o wide
+  exit 0
+fi
+
+start_port_forwards
+
+# Run specified tests or all; any failing test fails the whole run
+set +e
+TEST_EXIT=0
+if [[ ${#TESTS[@]} -gt 0 ]]; then
+  for t in "${TESTS[@]}"; do run_tests "$t" || TEST_EXIT=$?; done
+else
+  run_tests "" || TEST_EXIT=$?
+fi
+set -e
+
+print_logs
+
+if [[ "$TEST_EXIT" -eq 0 ]]; then
+  log "All tests passed"
+else
+  log "Tests finished with exit code ${TEST_EXIT}"
+fi
+exit $TEST_EXIT
diff --git a/tests/scripts/storage-tag.sh b/tests/scripts/storage-tag.sh
index 5512f2ae5a..4bb2565eb8 100755
--- a/tests/scripts/storage-tag.sh
+++ b/tests/scripts/storage-tag.sh
@@ -1,4 +1,2 @@
-#/bin/bash
-curl -s https://raw.githubusercontent.com/kubescape/helm-charts/main/charts/kubescape-operator/values.yaml -o values.yaml
-yq '.storage.image.tag' < values.yaml
-rm -rf values.yaml
+#!/bin/bash
+curl -s https://api.github.com/repos/k8sstormcenter/storage/tags | jq -r '.[0].name'
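+# NOTE: /tags returns tags in the order GitHub lists them, which is usually,
+# but not guaranteed to be, newest first. If the fork ever starts publishing
+# releases, a sturdier query would be:
+#   curl -s https://api.github.com/repos/k8sstormcenter/storage/releases/latest | jq -r '.tag_name'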