From 254cbf045eb625aaa7108742eef8935dee17860e Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 16 Dec 2025 11:05:26 +0100 Subject: [PATCH 01/15] CSPL-4360 Secret reference added for Bus CR --- api/v4/bus_types.go | 4 + api/v4/zz_generated.deepcopy.go | 13 +- .../bases/enterprise.splunk.com_buses.yaml | 33 +++++ ...enterprise.splunk.com_indexerclusters.yaml | 34 +++++ ...nterprise.splunk.com_ingestorclusters.yaml | 34 +++++ .../templates/enterprise_v4_buses.yaml | 4 + .../01-assert.yaml | 133 +----------------- .../01-create-se-secret.yaml | 7 + .../02-assert.yaml | 111 ++++++++++++++- ...stall-setup.yaml => 02-install-setup.yaml} | 0 .../03-assert.yaml | 33 +++++ ...ingestor.yaml => 03-scaleup-ingestor.yaml} | 0 ...all-setup.yaml => 04-uninstall-setup.yaml} | 0 .../splunk_index_ingest_sep.yaml | 3 + pkg/splunk/enterprise/indexercluster.go | 32 +++-- pkg/splunk/enterprise/indexercluster_test.go | 15 +- pkg/splunk/enterprise/ingestorcluster.go | 29 +++- pkg/splunk/enterprise/ingestorcluster_test.go | 11 +- pkg/splunk/enterprise/util.go | 19 +++ .../index_and_ingestion_separation_test.go | 20 +++ test/testenv/remote_index_utils.go | 8 ++ 21 files changed, 384 insertions(+), 159 deletions(-) create mode 100644 kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml rename kuttl/tests/helm/index-and-ingest-separation/{01-install-setup.yaml => 02-install-setup.yaml} (100%) create mode 100644 kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml rename kuttl/tests/helm/index-and-ingest-separation/{02-scaleup-ingestor.yaml => 03-scaleup-ingestor.yaml} (100%) rename kuttl/tests/helm/index-and-ingest-separation/{03-uninstall-setup.yaml => 04-uninstall-setup.yaml} (100%) diff --git a/api/v4/bus_types.go b/api/v4/bus_types.go index 4d9cd3a42..a45be59d6 100644 --- a/api/v4/bus_types.go +++ b/api/v4/bus_types.go @@ -61,6 +61,10 @@ type SQSSpec struct { // +kubebuilder:validation:Pattern=`^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` + + // +optional + // List of remote storage volumes + VolList []VolumeSpec `json:"volumes,omitempty"` } // BusStatus defines the observed state of Bus diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index dc19b7f10..eb142f146 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -185,7 +185,7 @@ func (in *Bus) DeepCopyInto(out *Bus) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -234,7 +234,7 @@ func (in *BusList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BusSpec) DeepCopyInto(out *BusSpec) { *out = *in - out.SQS = in.SQS + in.SQS.DeepCopyInto(&out.SQS) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusSpec. 
@@ -637,7 +637,7 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { if in.Bus != nil { in, out := &in.Bus, &out.Bus *out = new(BusSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.LargeMessageStore != nil { in, out := &in.LargeMessageStore, &out.LargeMessageStore @@ -740,7 +740,7 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { if in.Bus != nil { in, out := &in.Bus, &out.Bus *out = new(BusSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.LargeMessageStore != nil { in, out := &in.LargeMessageStore, &out.LargeMessageStore @@ -1104,6 +1104,11 @@ func (in *S3Spec) DeepCopy() *S3Spec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in + if in.VolList != nil { + in, out := &in.VolList, &out.VolList + *out = make([]VolumeSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSSpec. diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml index 54d498834..db62f351c 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -78,6 +78,39 @@ spec: description: Region of the resources pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where apps + reside. Used for aws, if provided. Not used for minio + and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3, + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' + type: string + type: object + type: array required: - dlq - name diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 67e1021f6..3389a98d5 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8368,6 +8368,40 @@ spec: description: Region of the resources pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where + apps reside. Used for aws, if provided. Not used for + minio and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3, blob, gcs. 
s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' + type: string + type: object + type: array required: - dlq - name diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 4ecaa8d32..5b065baa5 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4618,6 +4618,40 @@ spec: description: Region of the resources pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where + apps reside. Used for aws, if provided. Not used for + minio and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' + type: string + type: object + type: array required: - dlq - name diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml index bbf162332..e5b881717 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml @@ -29,6 +29,10 @@ spec: {{- if .region }} region: {{ .region | quote }} {{- end }} + {{- if .volumes }} + volumes: + {{ toYaml . 
| indent 4 }} + {{- end }} {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index f34dd2e6c..1a4e4a60a 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,136 +1,5 @@ --- -# assert for bus custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: Bus -metadata: - name: bus -spec: - provider: sqs - sqs: - name: sqs-test - region: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test -status: - phase: Ready - ---- -# assert for large message store custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore -metadata: - name: lms -spec: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test -status: - phase: Ready - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: indexer -spec: - replicas: 3 - busRef: - name: bus -status: - phase: Ready - bus: - provider: sqs - sqs: - name: sqs-test - region: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-indexer-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-indexer-indexer-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IngestorCluster -metadata: - name: ingestor -spec: - replicas: 3 - busRef: - name: bus -status: - phase: Ready - bus: - provider: sqs - sqs: - name: sqs-test - region: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-ingestor-ingestor -status: - replicas: 3 - ---- -# check if secret object are created apiVersion: v1 kind: Secret metadata: - name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file + name: s3-secret \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml new file mode 100644 index 000000000..8f1b1b95f --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl create secret generic s3-secret --from-literal=s3_access_key=$AWS_ACCESS_KEY_ID 
--from-literal=s3_secret_key=$AWS_SECRET_ACCESS_KEY --namespace $NAMESPACE + background: false + skipLogOutput: true \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 291eddeba..f34dd2e6c 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -1,11 +1,107 @@ --- -# assert for ingestor cluster custom resource to be ready +# assert for bus custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: Bus +metadata: + name: bus +spec: + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test +status: + phase: Ready + +--- +# assert for large message store custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +status: + phase: Ready + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer +spec: + replicas: 3 + busRef: + name: bus +status: + phase: Ready + bus: + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-indexer-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-indexer-indexer-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 kind: IngestorCluster metadata: name: ingestor spec: - replicas: 4 + replicas: 3 busRef: name: bus status: @@ -24,10 +120,17 @@ status: path: s3://ingestion/smartbus-test --- -# check for stateful sets and replicas updated +# check for stateful set and replicas as configured apiVersion: apps/v1 kind: StatefulSet metadata: name: splunk-ingestor-ingestor status: - replicas: 4 + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml rename to kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml new file mode 100644 index 000000000..291eddeba --- /dev/null +++ 
b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -0,0 +1,33 @@ +--- +# assert for ingestor cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 4 + busRef: + name: bus +status: + phase: Ready + bus: + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 4 diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml rename to kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml rename to kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index a73c51ac2..f75668cf1 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -14,6 +14,9 @@ bus: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test + volumes: + - name: helm-bus-secret-ref-test + secretRef: s3-secret largeMessageStore: enabled: true diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 2170e914a..88b75af70 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -295,9 +295,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -618,9 +617,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -1328,7 +1326,21 @@ func (mgr 
*indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) + // Secret reference + s3AccessKey, s3SecretKey := "", "" + if bus.Spec.Provider == "sqs" { + for _, vol := range bus.Spec.SQS.VolList { + if vol.SecretRef != "" { + s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) + if err != nil { + scopedLog.Error(err, "Failed to get bus remote volume secrets") + return err + } + } + } + } + + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range busChangedFieldsOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -1354,7 +1366,7 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne } // getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { +func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { // Compare bus fields oldPB := busIndexerStatus.Status.Bus if oldPB == nil { @@ -1369,7 +1381,7 @@ func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.La newLMS := lms.Spec // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) + busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete, s3AccessKey, s3SecretKey) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1387,7 +1399,7 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { +func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) { busProvider := "" if newBus.Provider == "sqs" { busProvider = "sqs_smartbus" @@ -1400,6 +1412,10 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { inputs = append(inputs, []string{"remote_queue.type", busProvider}) } + if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), 
newBus.SQS.Region}) } diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index ff10e453d..da3f1dfe2 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2063,6 +2063,9 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } @@ -2095,10 +2098,14 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false) - assert.Equal(t, 8, len(busChangedFieldsInputs)) + key := "key" + secret := "secret" + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false, key, secret) + assert.Equal(t, 10, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, @@ -2108,9 +2115,11 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, }, busChangedFieldsInputs) - assert.Equal(t, 10, len(busChangedFieldsOutputs)) + assert.Equal(t, 12, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 524f183b5..5582166b9 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -259,9 +259,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } // If bus is updated - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -370,7 +369,21 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) + // Secret reference + s3AccessKey, s3SecretKey := "", "" + if bus.Spec.Provider == "sqs" { + for _, vol := range bus.Spec.SQS.VolList { + if vol.SecretRef != "" { + s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) + if err != nil { + scopedLog.Error(err, "Failed to get bus 
remote volume secrets") + return err + } + } + } + } + + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range busChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -390,7 +403,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { +func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (busChangedFields, pipelineChangedFields [][]string) { oldPB := busIngestorStatus.Status.Bus if oldPB == nil { oldPB = &enterpriseApi.BusSpec{} @@ -404,7 +417,7 @@ func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.L newLMS := &lms.Spec // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) + busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete, s3AccessKey, s3SecretKey) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -443,7 +456,7 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { +func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (output [][]string) { busProvider := "" if newBus.Provider == "sqs" { busProvider = "sqs_smartbus" @@ -456,6 +469,10 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { output = append(output, []string{"remote_queue.type", busProvider}) } + if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 75cc14ec5..6136b3f2f 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -434,6 +434,9 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } @@ -467,11 +470,15 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false) + key := "key" + secret := "secret" + busChangedFields, 
pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false, key, secret)
 
-	assert.Equal(t, 10, len(busChangedFields))
+	assert.Equal(t, 12, len(busChangedFields))
 	assert.Equal(t, [][]string{
 		{"remote_queue.type", provider},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
 		{fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region},
 		{fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint},
 		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint},
diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go
index e8f0736b3..c68b2ca71 100644
--- a/pkg/splunk/enterprise/util.go
+++ b/pkg/splunk/enterprise/util.go
@@ -417,6 +417,25 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.
 	return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil
 }
 
+// GetBusRemoteVolumeSecrets is used to retrieve the access key and secret key for Index & Ingestion separation
+func GetBusRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) {
+	namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef)
+	if err != nil {
+		return "", "", err
+	}
+
+	accessKey := string(namespaceScopedSecret.Data[s3AccessKey])
+	secretKey := string(namespaceScopedSecret.Data[s3SecretKey])
+
+	if accessKey == "" {
+		return "", "", errors.New("access Key is missing")
+	} else if secretKey == "" {
+		return "", "", errors.New("secret Key is missing")
+	}
+
+	return accessKey, secretKey, nil
+}
+
 // getLocalAppFileName generates the local app file name
 // For e.g., if the app package name is sample_app.tgz
 // and etag is "abcd1234", then it will be downloaded locally as sample_app.tgz_abcd1234
diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
index 1b3d27c70..6868dd168 100644
--- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
+++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
@@ -79,6 +79,11 @@ var _ = Describe("indingsep test", func() {
 			testcaseEnvInst.Log.Info("Create Service Account")
 			testcaseEnvInst.CreateServiceAccount(serviceAccountName)
 
+			// TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+
+			// Secret reference
+			volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())}
+			bus.SQS.VolList = volumeSpec
+
 			// Deploy Bus
 			testcaseEnvInst.Log.Info("Deploy Bus")
 			b, err := deployment.DeployBus(ctx, "bus", bus)
@@ -152,6 +157,11 @@ var _ = Describe("indingsep test", func() {
 			testcaseEnvInst.Log.Info("Create Service Account")
 			testcaseEnvInst.CreateServiceAccount(serviceAccountName)
 
+			// TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+
+			// Secret reference
+			volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())}
+			bus.SQS.VolList = volumeSpec
+
 			// Deploy Bus
 			testcaseEnvInst.Log.Info("Deploy Bus")
 			bc, err := deployment.DeployBus(ctx, "bus", bus)
@@ -256,6 +266,11 @@ var _ = Describe("indingsep test", func() {
 			testcaseEnvInst.Log.Info("Create Service Account")
 			testcaseEnvInst.CreateServiceAccount(serviceAccountName)
 
+			// TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+
+			// Secret reference
+			volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())}
+			bus.SQS.VolList = volumeSpec
+
 			// Deploy Bus
 			testcaseEnvInst.Log.Info("Deploy Bus")
 			bc, err := deployment.DeployBus(ctx, "bus", bus)
@@ -363,6 +378,11 @@ var _ = Describe("indingsep test", func() {
 			testcaseEnvInst.Log.Info("Create Service Account")
 			testcaseEnvInst.CreateServiceAccount(serviceAccountName)
 
+			// TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+
+			// Secret reference
+			volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())}
+			bus.SQS.VolList = volumeSpec
+
 			// Deploy Bus
 			testcaseEnvInst.Log.Info("Deploy Bus")
 			bc, err := deployment.DeployBus(ctx, "bus", bus)
diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go
index 0eb2b485c..84e5c0709 100644
--- a/test/testenv/remote_index_utils.go
+++ b/test/testenv/remote_index_utils.go
@@ -86,6 +86,14 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string,
 	return true
 }
 
+// GenerateBusVolumeSpec returns a VolumeSpec struct with the given values
+func GenerateBusVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec {
+	return enterpriseApi.VolumeSpec{
+		Name:      name,
+		SecretRef: secretRef,
+	}
+}
+
 // GenerateIndexVolumeSpec return VolumeSpec struct with given values
 func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string, provider string, storageType string, region string) enterpriseApi.VolumeSpec {
 	return enterpriseApi.VolumeSpec{

From f992c40483f60cb3426d2639a5e84c71973ca2e6 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Tue, 16 Dec 2025 12:44:42 +0100
Subject: [PATCH 02/15] CSPL-4360 Fix failing tests

---
 .../templates/enterprise_v4_indexercluster.yaml        | 1 +
 .../index_and_ingestion_separation_test.go             | 1 +
 2 files changed, 2 insertions(+)

diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml
index 0e6a96673..62497d0e6 100644
--- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml
@@ -169,6 +169,7 @@ items:
       {{- if .namespace }}
       namespace: {{ .namespace }}
       {{- end }}
+      {{- end }}
       {{- with $.Values.indexerCluster.largeMessageStoreRef }}
       largeMessageStoreRef:
         name: {{ .name }}
diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
index 6868dd168..17ab5903b 100644
--- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
+++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go
@@ -382,6 +382,7 @@ var _ = Describe("indingsep test", func() {
 			// Secret reference
 			volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())}
 			bus.SQS.VolList = volumeSpec
+			updateBus.SQS.VolList = volumeSpec
 
 			// Deploy Bus
 			testcaseEnvInst.Log.Info("Deploy Bus")

From 143dbe0917e34256ce09fbf51a060e15e3f13f19 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Tue, 16 Dec 2025 12:54:28 +0100
Subject: [PATCH 03/15] CSPL-4360 Add Splunk restart

---
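Note on the restart flow this patch adds: whenever the Bus or LargeMessageStore spec recorded in the CR status drifts from the referenced CRs, the operator rewrites the conf files and then restarts splunkd on every StatefulSet member through the management REST API. A condensed sketch of that per-pod loop follows; the `restartAllPods` helper and its parameters are hypothetical, while `splclient.NewSplunkClient` and `RestartSplunk` are the actual client calls used in the diff below.

```go
// Sketch only: approximates the per-replica restart loop added to
// ApplyIndexerCluster{,Manager} and ApplyIngestorCluster in this patch.
package main

import (
	"fmt"

	splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
)

// restartAllPods restarts splunkd on each StatefulSet member
// <statefulSet>-0..N-1 via the Splunk management port (8089).
func restartAllPods(statefulSet, namespace, adminPwd string, replicas int32) error {
	for i := int32(0); i < replicas; i++ {
		// Headless-service FQDN, in the same shape the pod managers build.
		fqdn := fmt.Sprintf("%s-%d.%s-headless.%s.svc.cluster.local",
			statefulSet, i, statefulSet, namespace)
		c := splclient.NewSplunkClient(fmt.Sprintf("https://%s:8089", fqdn), "admin", adminPwd)
		if err := c.RestartSplunk(); err != nil {
			return err // first failure aborts, matching the patch behaviour
		}
	}
	return nil
}
```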
 docs/IndexIngestionSeparation.md              | 18 ++++++++--
 pkg/splunk/enterprise/indexercluster.go       | 18 ++++++++++
 pkg/splunk/enterprise/ingestorcluster.go      | 36 +++++++++++++++++--
 pkg/splunk/enterprise/ingestorcluster_test.go | 12 +++----
 pkg/splunk/enterprise/util_test.go            |  5 +++
 .../index_and_ingestion_separation_test.go    |  8 -----
 6 files changed, 77 insertions(+), 20 deletions(-)

diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md
index e8c6211d7..195338c7d 100644
--- a/docs/IndexIngestionSeparation.md
+++ b/docs/IndexIngestionSeparation.md
@@ -1,3 +1,9 @@
+---
+title: Index and Ingestion Separation
+parent: Deploy & Configure
+nav_order: 6
+---
+
 # Background
 
 Separation between ingestion and indexing services within Splunk Operator for Kubernetes enables the operator to independently manage the ingestion service while maintaining seamless integration with the indexing service.
@@ -10,7 +16,7 @@ This separation enables:
 
 # Important Note
 
 > [!WARNING]
-> **As of now, only brand new deployments are supported for Index and Ingestion Separation. No migration path is implemented, described or tested for existing deployments to move from a standard model to Index & Ingestion separation model.**
+> **For customers deploying SmartBus on CMP, the Splunk Operator for Kubernetes (SOK) manages the configuration and lifecycle of the ingestor tier. The following SOK guide provides implementation details for setting up ingestion separation and integrating with existing indexers. This reference is primarily intended for CMP users leveraging SOK-managed ingestors.**
 
 # Document Variables
@@ -38,7 +44,7 @@ SQS message bus inputs can be found in the table below.
 | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint
 | dlq | string | [Required] Name of the dead letter queue |
 
-Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed.
+**First provisioning or update of any of the bus inputs requires an Ingestor Cluster and Indexer Cluster Splunkd restart; this restart is performed automatically by SOK.**
 
 ## Example
 ```
@@ -425,6 +431,14 @@ In the following example, the dashboard presents ingestion and indexing data in
 
 - [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
 
+# App Installation for Ingestor Cluster Instances
+
+Application installation is supported for Ingestor Cluster instances. However, as of now, applications are installed with local scope, and if an application requires a Splunk restart, the Splunk Operator has no automated way to detect that and trigger the restart.
+
+Therefore, to enforce a Splunk restart on each of the Ingestor Cluster pods, it is recommended to add or update annotations/labels on the IngestorCluster CR and apply the new configuration, which triggers a rolling restart of the Ingestor Cluster Splunk pods.
+
+We are investigating how to make this fully automated. Ideally, updating annotations and labels should not trigger a pod restart at all, and we are also investigating how to eventually fix this behaviour.
+
 # Example
 
 1. Install CRDs and Splunk Operator for Kubernetes.
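The Go changes that follow wire the documented behaviour into the controllers. For orientation, here is a minimal sketch of the credential-resolution step both pod managers repeat, assuming the `GetBusRemoteVolumeSecrets` helper from patch 01; the `resolveBusCredentials` wrapper itself is hypothetical, and patch 04 additionally skips the lookup when a ServiceAccount is set, preferring IRSA.

```go
// In package enterprise, alongside GetBusRemoteVolumeSecrets (util.go).
// Hypothetical wrapper approximating the loop in handlePushBusChange and
// handlePullBusChange: empty keys mean "no static credentials configured,
// rely on ambient credentials such as IRSA".
func resolveBusCredentials(
	ctx context.Context,
	k8s splcommon.ControllerClient,
	bus *enterpriseApi.Bus,
	cr splcommon.MetaObject,
) (accessKey, secretKey string, err error) {
	// Static keys apply only to the SQS provider.
	if bus.Spec.Provider != "sqs" {
		return "", "", nil
	}
	// Use the first volume that names a secretRef (a simplification; the
	// patch iterates all volumes, so the last secretRef wins there).
	for _, vol := range bus.Spec.SQS.VolList {
		if vol.SecretRef != "" {
			return GetBusRemoteVolumeSecrets(ctx, vol, k8s, cr)
		}
	}
	return "", "", nil
}
```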
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 88b75af70..d22b7008e 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -305,6 +305,15 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } cr.Status.Bus = &bus.Spec + + for i := int32(0); i < cr.Spec.Replicas; i++ { + idxcClient := mgr.getClient(ctx, i) + err = idxcClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "indexer", i) + } } } @@ -627,6 +636,15 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } cr.Status.Bus = &bus.Spec + + for i := int32(0); i < cr.Spec.Replicas; i++ { + idxcClient := mgr.getClient(ctx, i) + err = idxcClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "indexer", i) + } } } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 5582166b9..94d51a8f7 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -260,7 +260,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // If bus is updated if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { - mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -269,6 +269,15 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } cr.Status.Bus = &bus.Spec + + for i := int32(0); i < cr.Spec.Replicas; i++ { + ingClient := mgr.getClient(ctx, i) + err = ingClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "ingestor", i) + } } // Upgrade fron automated MC to MC CRD @@ -311,6 +320,27 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, nil } +// getClient for ingestorClusterPodManager returns a SplunkClient for the member n +func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *splclient.SplunkClient { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ingestorClusterPodManager.getClient").WithValues("name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace()) + + // Get Pod Name + memberName := GetSplunkStatefulsetPodName(SplunkIngestor, mgr.cr.GetName(), n) + + // Get Fully Qualified Domain Name + fqdnName := splcommon.GetServiceFQDN(mgr.cr.GetNamespace(), + fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIngestor, mgr.cr.GetName(), true))) + + // Retrieve admin password from Pod + adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, mgr.c, memberName, mgr.cr.GetNamespace(), "password") + if err != nil { + scopedLog.Error(err, "Couldn't retrieve the admin password from pod") + } + + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPwd) +} + // validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong func validateIngestorClusterSpec(ctx 
context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error { // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster @@ -426,6 +456,7 @@ func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.L } type ingestorClusterPodManager struct { + c splcommon.ControllerClient log logr.Logger cr *enterpriseApi.IngestorCluster secrets *corev1.Secret @@ -433,12 +464,13 @@ type ingestorClusterPodManager struct { } // newIngestorClusterPodManager function to create pod manager this is added to write unit test case -var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) ingestorClusterPodManager { +var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 6136b3f2f..a72179453 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -25,15 +25,14 @@ import ( "github.com/go-logr/logr" enterpriseApi "github.com/splunk/splunk-operator/api/v4" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { @@ -56,11 +55,7 @@ func TestApplyIngestorCluster(t *testing.T) { ctx := context.TODO() - scheme := runtime.NewScheme() - _ = enterpriseApi.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - c := fake.NewClientBuilder().WithScheme(scheme).Build() + c := spltest.NewMockClient() // Object definitions provider := "sqs_smartbus" @@ -273,8 +268,9 @@ func TestApplyIngestorCluster(t *testing.T) { // outputs.conf origNew := newIngestorClusterPodManager mockHTTPClient := &spltest.MockHTTPClient{} - newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc) ingestorClusterPodManager { + newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ + c: c, log: l, cr: cr, secrets: secret, newSplunkClient: func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ManagementURI: uri, Username: user, Password: pass, Client: mockHTTPClient} diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index f5405b2cf..6ea7b021e 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2624,6 +2624,9 @@ func TestUpdateCRStatus(t *testing.T) { WithStatusSubresource(&enterpriseApi.Standalone{}). WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). 
WithStatusSubresource(&enterpriseApi.IndexerCluster{}). + WithStatusSubresource(&enterpriseApi.Bus{}). + WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). + WithStatusSubresource(&enterpriseApi.IngestorCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}) c := builder.Build() ctx := context.TODO() @@ -3304,6 +3307,8 @@ func TestGetCurrentImage(t *testing.T) { WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). + WithStatusSubresource(&enterpriseApi.Bus{}). + WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, ¤t) diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 17ab5903b..a21146e11 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -433,14 +433,6 @@ var _ = Describe("indingsep test", func() { err = deployment.UpdateCR(ctx, bus) Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") - // Ensure that Ingestor Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster has not been restarted") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Ingestor Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") ingest := &enterpriseApi.IngestorCluster{} From 3c7b2d7c2ae00e126903579ab7d797ab853e4c6f Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 16 Dec 2025 16:05:02 +0100 Subject: [PATCH 04/15] CSPL-4360 Fix failing tests --- pkg/splunk/enterprise/indexercluster.go | 8 ++-- pkg/splunk/enterprise/indexercluster_test.go | 27 ++++++++++- pkg/splunk/enterprise/ingestorcluster.go | 8 ++-- pkg/splunk/enterprise/ingestorcluster_test.go | 47 ++++++++----------- 4 files changed, 54 insertions(+), 36 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index d22b7008e..a5ebdbaa1 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -1346,7 +1346,7 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne // Secret reference s3AccessKey, s3SecretKey := "", "" - if bus.Spec.Provider == "sqs" { + if bus.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { for _, vol := range bus.Spec.SQS.VolList { if vol.SecretRef != "" { s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) @@ -1431,8 +1431,10 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter inputs = append(inputs, []string{"remote_queue.type", busProvider}) } if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + if s3AccessKey != "" && s3SecretKey != "" { + inputs = append(inputs, 
[]string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index da3f1dfe2..00f20656f 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2404,7 +2404,7 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - bus := enterpriseApi.Bus{ + bus := &enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", @@ -2423,7 +2423,26 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { }, }, } - c.Create(ctx, &bus) + c.Create(ctx, bus) + + lms := &enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, lms) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2449,6 +2468,10 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { Name: bus.Name, Namespace: bus.Namespace, }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, + }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ Name: "cm", diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 94d51a8f7..90c067494 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -401,7 +401,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n // Secret reference s3AccessKey, s3SecretKey := "", "" - if bus.Spec.Provider == "sqs" { + if bus.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { for _, vol := range bus.Spec.SQS.VolList { if vol.SecretRef != "" { s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) @@ -502,8 +502,10 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter output = append(output, []string{"remote_queue.type", busProvider}) } if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) - output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + if s3AccessKey != "" && s3SecretKey != "" { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index a72179453..0f5fae8fa 100644 
--- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -32,7 +32,8 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { @@ -55,7 +56,11 @@ func TestApplyIngestorCluster(t *testing.T) { ctx := context.TODO() - c := spltest.NewMockClient() + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions provider := "sqs_smartbus" @@ -81,7 +86,7 @@ func TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, bus) - lms := enterpriseApi.LargeMessageStore{ + lms := &enterpriseApi.LargeMessageStore{ TypeMeta: metav1.TypeMeta{ Kind: "LargeMessageStore", APIVersion: "enterprise.splunk.com/v4", @@ -98,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &lms) + c.Create(ctx, lms) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -112,7 +117,8 @@ func TestApplyIngestorCluster(t *testing.T) { Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 3, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Mock: true, + Mock: true, + ServiceAccount: "sa", }, BusRef: corev1.ObjectReference{ Name: bus.Name, @@ -242,29 +248,6 @@ func TestApplyIngestorCluster(t *testing.T) { assert.True(t, result.Requeue) assert.NotEqual(t, enterpriseApi.PhaseError, cr.Status.Phase) - // Ensure stored StatefulSet status reflects readiness after any reconcile modifications - fetched := &appsv1.StatefulSet{} - _ = c.Get(ctx, types.NamespacedName{Name: "splunk-test-ingestor", Namespace: "test"}, fetched) - fetched.Status.Replicas = replicas - fetched.Status.ReadyReplicas = replicas - fetched.Status.UpdatedReplicas = replicas - if fetched.Status.UpdateRevision == "" { - fetched.Status.UpdateRevision = "v1" - } - c.Update(ctx, fetched) - - // Guarantee all pods have matching revision label - for _, pn := range []string{"splunk-test-ingestor-0", "splunk-test-ingestor-1", "splunk-test-ingestor-2"} { - p := &corev1.Pod{} - if err := c.Get(ctx, types.NamespacedName{Name: pn, Namespace: "test"}, p); err == nil { - if p.Labels == nil { - p.Labels = map[string]string{} - } - p.Labels["controller-revision-hash"] = fetched.Status.UpdateRevision - c.Update(ctx, p) - } - } - // outputs.conf origNew := newIngestorClusterPodManager mockHTTPClient := &spltest.MockHTTPClient{} @@ -280,6 +263,7 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ + {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, @@ -318,6 +302,13 @@ func TestApplyIngestorCluster(t *testing.T) { } } + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { + podName := fmt.Sprintf("splunk-test-ingestor-%d", i) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/services/server/control/restart", podName, cr.GetName(), cr.GetNamespace()) + req, _ := http.NewRequest("POST", baseURL, nil) + mockHTTPClient.AddHandler(req, 200, "", nil) + } + // Second reconcile should now yield Ready cr.Status.TelAppInstalled = true result, err = 
ApplyIngestorCluster(ctx, c, cr) From e4e083a981061529ca4d948105997901879a1355 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Wed, 17 Dec 2025 11:37:38 +0100 Subject: [PATCH 05/15] CSPL-4360 Fix failing tests --- .../enterprise_v4_largemessagestores.yaml | 16 ++++++++-------- ...se-secret.yaml => 01-create-s3-secret.yaml} | 0 .../index-and-ingest-separation/02-assert.yaml | 4 ++++ .../index-and-ingest-separation/03-assert.yaml | 2 ++ pkg/splunk/enterprise/indexercluster.go | 4 ++++ pkg/splunk/enterprise/ingestorcluster.go | 2 ++ .../index_and_ingestion_separation_test.go | 18 ++++++++++++++---- 7 files changed, 34 insertions(+), 12 deletions(-) rename kuttl/tests/helm/index-and-ingest-separation/{01-create-se-secret.yaml => 01-create-s3-secret.yaml} (100%) diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml index 77ef09e69..1e4e9b5db 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml @@ -1,21 +1,21 @@ -{{- if .Values.largemessagestore }} -{{- if .Values.largemessagestore.enabled }} +{{- if .Values.largeMessageStore }} +{{- if .Values.largeMessageStore.enabled }} apiVersion: enterprise.splunk.com/v4 kind: LargeMessageStore metadata: - name: {{ .Values.largemessagestore.name }} - namespace: {{ default .Release.Namespace .Values.largemessagestore.namespaceOverride }} - {{- with .Values.largemessagestore.additionalLabels }} + name: {{ .Values.largeMessageStore.name }} + namespace: {{ default .Release.Namespace .Values.largeMessageStore.namespaceOverride }} + {{- with .Values.largeMessageStore.additionalLabels }} labels: {{ toYaml . | nindent 4 }} {{- end }} - {{- with .Values.largemessagestore.additionalAnnotations }} + {{- with .Values.largeMessageStore.additionalAnnotations }} annotations: {{ toYaml . 
| nindent 4 }} {{- end }} spec: - provider: {{ .Values.largemessagestore.provider | quote }} - {{- with .Values.largemessagestore.s3 }} + provider: {{ .Values.largeMessageStore.provider | quote }} + {{- with .Values.largeMessageStore.s3 }} s3: {{- if .endpoint }} endpoint: {{ .endpoint | quote }} diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml rename to kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index f34dd2e6c..42e003418 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -63,6 +63,8 @@ spec: replicas: 3 busRef: name: bus + largeMessageStoreRef: + name: lms status: phase: Ready bus: @@ -104,6 +106,8 @@ spec: replicas: 3 busRef: name: bus + largeMessageStoreRef: + name: lms status: phase: Ready bus: diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml index 291eddeba..819620baa 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -8,6 +8,8 @@ spec: replicas: 4 busRef: name: bus + largeMessageStoreRef: + name: lms status: phase: Ready bus: diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index a5ebdbaa1..4acbc3d11 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -79,6 +79,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -305,6 +306,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) @@ -407,6 +409,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -636,6 +639,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 90c067494..1a1dcd428 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -74,6 +74,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr if cr.Status.Replicas 
< cr.Spec.Replicas { cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -269,6 +270,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec for i := int32(0); i < cr.Spec.Replicas; i++ { ingClient := mgr.getClient(ctx, i) diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index a21146e11..4b90db6bd 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -83,6 +83,7 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} bus.SQS.VolList = volumeSpec + updateBus.SQS.VolList = volumeSpec // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") @@ -161,6 +162,7 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} bus.SQS.VolList = volumeSpec + updateBus.SQS.VolList = volumeSpec // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") @@ -316,7 +318,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") + Expect(*ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -326,7 +328,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") + Expect(*index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -433,6 +435,10 @@ var _ = Describe("indingsep test", func() { err = deployment.UpdateCR(ctx, bus) Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Get instance of current Ingestor Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") ingest := &enterpriseApi.IngestorCluster{} @@ -441,7 +447,11 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") + Expect(*ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) // Get instance of 
current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -451,7 +461,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") + Expect(*index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") From 3cb9148536edda1fbfd229c009bb6b7dd1ef9ba4 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Wed, 17 Dec 2025 13:25:54 +0100 Subject: [PATCH 06/15] CSPL-4360 Fix errors with failing validation on status --- pkg/splunk/enterprise/indexercluster.go | 31 +++++++++---------- pkg/splunk/enterprise/ingestorcluster.go | 25 +++++++-------- pkg/splunk/enterprise/ingestorcluster_test.go | 5 ++- 3 files changed, 30 insertions(+), 31 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 4acbc3d11..b9b644599 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -77,10 +77,6 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} - cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} - } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -296,7 +292,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { + if cr.Status.Bus == nil || cr.Status.LargeMessageStore == nil || !reflect.DeepEqual(*cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(*cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { @@ -305,9 +301,6 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } - cr.Status.Bus = &bus.Spec - cr.Status.LargeMessageStore = &lms.Spec - for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) err = idxcClient.RestartSplunk() @@ -316,6 +309,9 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } scopedLog.Info("Restarted splunk", "indexer", i) } + + cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec } } @@ -407,10 +403,6 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // updates status after function completes cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} - cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} - } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -629,7 +621,7 @@ func ApplyIndexerCluster(ctx 
context.Context, client splcommon.ControllerClient, // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { + if cr.Status.Bus == nil || cr.Status.LargeMessageStore == nil || !reflect.DeepEqual(*cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(*cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { @@ -638,9 +630,6 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, return result, err } - cr.Status.Bus = &bus.Spec - cr.Status.LargeMessageStore = &lms.Spec - for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) err = idxcClient.RestartSplunk() @@ -649,6 +638,9 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } scopedLog.Info("Restarted splunk", "indexer", i) } + + cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec } } @@ -1336,6 +1328,13 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne } splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + if newCR.Status.Bus == nil { + newCR.Status.Bus = &enterpriseApi.BusSpec{} + } + if newCR.Status.LargeMessageStore == nil { + newCR.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} + } + afterDelete := false if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 1a1dcd428..f87a1eaa7 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -72,10 +72,6 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} - cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} - } cr.Status.Replicas = cr.Spec.Replicas // If needed, migrate the app framework status @@ -260,7 +256,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } // If bus is updated - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { + if cr.Status.Bus == nil || cr.Status.LargeMessageStore == nil || !reflect.DeepEqual(*cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(*cr.Status.LargeMessageStore, lms.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { @@ -269,9 +265,6 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } - cr.Status.Bus = &bus.Spec - cr.Status.LargeMessageStore = &lms.Spec - for i := int32(0); i < cr.Spec.Replicas; i++ { ingClient := mgr.getClient(ctx, i) err = ingClient.RestartSplunk() @@ -280,6 +273,9 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } scopedLog.Info("Restarted splunk", "ingestor", i) } + + cr.Status.Bus = &bus.Spec + 
cr.Status.LargeMessageStore = &lms.Spec } // Upgrade fron automated MC to MC CRD @@ -392,6 +388,13 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + if newCR.Status.Bus == nil { + newCR.Status.Bus = &enterpriseApi.BusSpec{} + } + if newCR.Status.LargeMessageStore == nil { + newCR.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} + } + afterDelete := false if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { @@ -437,15 +440,9 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (busChangedFields, pipelineChangedFields [][]string) { oldPB := busIngestorStatus.Status.Bus - if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} - } newPB := &bus.Spec oldLMS := busIngestorStatus.Status.LargeMessageStore - if oldLMS == nil { - oldLMS = &enterpriseApi.LargeMessageStoreSpec{} - } newLMS := &lms.Spec // Push changed bus fields diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 0f5fae8fa..63d94facb 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -454,7 +454,10 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Name: lms.Name, }, }, - Status: enterpriseApi.IngestorClusterStatus{}, + Status: enterpriseApi.IngestorClusterStatus{ + Bus: &enterpriseApi.BusSpec{}, + LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, + }, } key := "key" From fafed270b1c068601a18f4bfeb4c073e625b2fa9 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 11:03:36 +0100 Subject: [PATCH 07/15] CSPL-4360 Fixing tests after merge --- pkg/splunk/enterprise/indexercluster_test.go | 2 ++ pkg/splunk/enterprise/util.go | 4 ++-- pkg/splunk/enterprise/util_test.go | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 503f8beab..4f788d31a 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2111,6 +2111,8 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, }, queueChangedFieldsInputs) assert.Equal(t, 12, len(queueChangedFieldsOutputs)) diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index bdc5d16ab..882a96ff3 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -417,8 +417,8 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. 
return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil } -// GetBusRemoteVolumeSecrets is used to retrieve access key and secrete key for Index & Ingestion separation -func GetBusRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { +// GetQueueRemoteVolumeSecrets is used to retrieve access key and secret key for Index & Ingestion separation +func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) if err != nil { return "", "", err } diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 6ea7b021e..35523a028 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2624,8 +2624,8 @@ func TestUpdateCRStatus(t *testing.T) { WithStatusSubresource(&enterpriseApi.Standalone{}). WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). - WithStatusSubresource(&enterpriseApi.Bus{}). - WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). + WithStatusSubresource(&enterpriseApi.Queue{}). + WithStatusSubresource(&enterpriseApi.ObjectStorage{}). WithStatusSubresource(&enterpriseApi.IngestorCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}) c := builder.Build() @@ -3307,8 +3307,8 @@ func TestGetCurrentImage(t *testing.T) { WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). - WithStatusSubresource(&enterpriseApi.Bus{}). - WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). + WithStatusSubresource(&enterpriseApi.Queue{}). + WithStatusSubresource(&enterpriseApi.ObjectStorage{}). 
WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, &current) From e0a10ba9fc5b8993f55d5dae4e1e1f189f76c47f Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 13:20:33 +0100 Subject: [PATCH 08/15] CSPL-4360 Fix validation that fails for status --- pkg/splunk/enterprise/indexercluster.go | 38 +++++++------------ pkg/splunk/enterprise/indexercluster_test.go | 6 ++- pkg/splunk/enterprise/ingestorcluster.go | 28 ++++++-------- pkg/splunk/enterprise/ingestorcluster_test.go | 2 +- 4 files changed, 32 insertions(+), 42 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 88b6a31d0..37e81afd4 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -1327,20 +1327,22 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - if newCR.Status.Queue == nil { - newCR.Status.Queue = &enterpriseApi.QueueSpec{} + newCrStatusQueue := newCR.Status.Queue + if newCrStatusQueue == nil { + newCrStatusQueue = &enterpriseApi.QueueSpec{} } - if newCR.Status.ObjectStorage == nil { - newCR.Status.ObjectStorage = &enterpriseApi.ObjectStorageSpec{} + newCrStatusObjectStorage := newCR.Status.ObjectStorage + if newCrStatusObjectStorage == nil { + newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{} } afterDelete := false - if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || - (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) || + (queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -1360,7 +1362,7 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range queueChangedFieldsOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -1386,22 +1388,10 @@ } // getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods -func 
getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare queue fields - oldQueue := queueIndexerStatus.Status.Queue - if oldQueue == nil { - oldQueue = &enterpriseApi.QueueSpec{} - } - newQueue := queue.Spec - - oldOS := queueIndexerStatus.Status.ObjectStorage - if oldOS == nil { - oldOS = &enterpriseApi.ObjectStorageSpec{} - } - newOS := os.Spec - +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { // Push all queue fields - queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldQueue, &newQueue, oldOS, &newOS, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) + // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 4f788d31a..c891f1dd4 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2096,11 +2096,15 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { Name: os.Name, }, }, + Status: enterpriseApi.IndexerClusterStatus{ + Queue: &enterpriseApi.QueueSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + }, } key := "key" secret := "secret" - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, false, key, secret) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) assert.Equal(t, 10, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index f3db2a1fa..5aa41dd45 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -388,17 +388,19 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - if newCR.Status.Queue == nil { - newCR.Status.Queue = &enterpriseApi.QueueSpec{} + newCrStatusQueue := newCR.Status.Queue + if newCrStatusQueue == nil { + newCrStatusQueue = &enterpriseApi.QueueSpec{} } - if newCR.Status.ObjectStorage == nil { - newCR.Status.ObjectStorage = &enterpriseApi.ObjectStorageSpec{} + newCrStatusObjectStorage := newCR.Status.ObjectStorage + if newCrStatusObjectStorage == nil { + newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{} } afterDelete := false - if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || - (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { - if err := 
splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) || + (queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -418,7 +420,7 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage,afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range queueChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -438,15 +440,9 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { - oldQueue := queueIngestorStatus.Status.Queue - newQueue := &queue.Spec - - oldOS := queueIngestorStatus.Status.ObjectStorage - newOS := &os.Spec - +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { // Push changed bus fields - queueChangedFields = pushQueueChanged(oldQueue, newQueue, oldOS, newOS, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFields = pushQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 448929572..995e52ff8 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -462,7 +462,7 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { key := "key" secret := "secret" - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, false, key, secret) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) assert.Equal(t, 12, len(queueChangedFields)) assert.Equal(t, [][]string{ From 155b21a49fda387472a95a93391c27865d16cf1b Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 14:58:10 +0100 Subject: [PATCH 09/15] CSPL-4360 Fix failing to get k8s secret --- pkg/splunk/enterprise/indexercluster.go | 17 +++++++++-------- pkg/splunk/enterprise/indexercluster_test.go | 3 ++- pkg/splunk/enterprise/ingestorcluster.go | 4 ++-- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git 
a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 37e81afd4..558f862b1 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -115,7 +115,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError } - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) // Check if we have configured enough number(<= RF) of replicas if mgr.cr.Status.ClusterManagerPhase == enterpriseApi.PhaseReady { err = VerifyRFPeers(ctx, mgr, client) @@ -248,7 +248,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller if cr.Spec.QueueRef.Namespace != "" { ns = cr.Spec.QueueRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ + err = client.Get(ctx, types.NamespacedName{ Name: cr.Spec.QueueRef.Name, Namespace: ns, }, &queue) @@ -272,7 +272,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller if cr.Spec.ObjectStorageRef.Namespace != "" { ns = cr.Spec.ObjectStorageRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ + err = client.Get(ctx, types.NamespacedName{ Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, }, &os) @@ -292,7 +292,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // If bus is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) @@ -443,7 +443,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError } - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) // Check if we have configured enough number(<= RF) of replicas if mgr.cr.Status.ClusterMasterPhase == enterpriseApi.PhaseReady { err = VerifyRFPeers(ctx, mgr, client) @@ -621,7 +621,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // If bus is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", 
err.Error())) @@ -722,12 +722,13 @@ type indexerClusterPodManager struct { } // newIndexerClusterPodManager function to create pod manager this is added to write unit test case -var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager { +var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager { return indexerClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } @@ -1391,7 +1392,7 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { // Push all queue fields queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) - + // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index c891f1dd4..2b4026ac5 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1569,7 +1569,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { return nil } - newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager { + newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager { return indexerClusterPodManager{ log: log, cr: cr, @@ -1579,6 +1579,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { c.Client = mclient return c }, + c: c, } } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 5aa41dd45..62693e1b5 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -238,7 +238,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr if cr.Spec.ObjectStorageRef.Namespace != "" { ns = cr.Spec.ObjectStorageRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ + err = client.Get(ctx, types.NamespacedName{ Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, }, &os) @@ -420,7 +420,7 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage,afterDelete, s3AccessKey, s3SecretKey) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range queueChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { From f8afd5a7790c489e2997921ba08060e2dd87c075 Mon Sep 17 
00:00:00 2001 From: Kasia Koziol Date: Mon, 22 Dec 2025 13:51:03 +0100 Subject: [PATCH 10/15] CSPL-4360 Fix failing integ and helm tests --- api/v4/objectstorage_types.go | 2 +- .../enterprise.splunk.com_objectstorages.yaml | 2 +- docs/CustomResources.md | 10 +-- docs/IndexIngestionSeparation.md | 24 +++--- .../enterprise_v4_indexercluster.yaml | 4 +- .../enterprise_v4_objectstorages.yaml | 2 +- .../templates/enterprise_v4_queues.yaml | 4 +- .../02-assert.yaml | 50 +++++------ .../03-assert.yaml | 20 ++--- .../splunk_index_ingest_sep.yaml | 8 +- pkg/splunk/enterprise/indexercluster.go | 18 ++-- pkg/splunk/enterprise/ingestorcluster.go | 13 +-- pkg/splunk/enterprise/types.go | 2 +- ...dex_and_ingestion_separation_suite_test.go | 28 +++---- .../index_and_ingestion_separation_test.go | 83 ++++++++++--------- test/testenv/remote_index_utils.go | 4 +- test/testenv/util.go | 8 +- 17 files changed, 147 insertions(+), 135 deletions(-) diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 9e95392ce..08205743f 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -55,7 +55,7 @@ type S3Spec struct { // ObjectStorageStatus defines the observed state of ObjectStorage. type ObjectStorageStatus struct { - // Phase of the large message store + // Phase of the object storage Phase Phase `json:"phase"` // Resource revision tracker diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 2fac45707..c84474921 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -87,7 +87,7 @@ spec: description: Auxillary message describing CR status type: string phase: - description: Phase of the large message store + description: Phase of the object storage enum: - Pending - Ready diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 157a9b123..bd85c05ca 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -404,21 +404,21 @@ spec: endpoint: https://s3.us-west-2.amazonaws.com ``` -ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of large message store (Allowed values: s3) | -| s3 | S3 | [Required if provider=s3] S3 large message store inputs | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | -S3 large message store inputs can be found in the table below. +S3 object storage inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
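The restart behavior described above is driven from the operator's reconcile loops rather than from the Splunk pods themselves: the Queue and ObjectStorage specs recorded in the CR status act as the last-applied configuration, and any drift against the referenced specs triggers a conf rewrite followed by a rolling Splunkd restart before the status is refreshed. A minimal Go sketch of that step, condensed from the ApplyIngestorCluster/ApplyIndexerCluster hunks in this series (the names are the patch's own; error handling and logging are trimmed, so treat it as an outline rather than the verbatim implementation):

```
// Condensed sketch of the drift-detection step in this series.
// A nil status or any difference between the recorded status and the
// referenced specs forces a conf rewrite and a restart of every peer.
if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil ||
	!reflect.DeepEqual(*cr.Status.Queue, queue.Spec) ||
	!reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) {
	// Rewrite the remote_queue settings on each pod over the REST API.
	if err := mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client); err != nil {
		return result, err
	}
	// Restart Splunkd on every replica so the refreshed conf is consumed.
	for i := int32(0); i < cr.Spec.Replicas; i++ {
		if err := mgr.getClient(ctx, i).RestartSplunk(); err != nil {
			return result, err
		}
	}
	// Record the applied specs only after all restarts succeed; a failure
	// leaves the status stale, so the block retries on the next reconcile.
	cr.Status.Queue = &queue.Spec
	cr.Status.ObjectStorage = &os.Spec
}
```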
## MonitoringConsole Resource Spec Parameters diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index d532e189c..c7b05dcae 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -44,7 +44,7 @@ SQS message queue inputs can be found in the table below. | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -**First provisioning or update of any of the bus inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** +**First provisioning or update of any of the queue inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** ## Example ``` @@ -67,21 +67,21 @@ ObjectStorage is introduced to store large message (messages that exceed the siz ## Spec -ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of large message store (Allowed values: s3) | -| s3 | S3 | [Required if provider=s3] S3 large message store inputs | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | -S3 large message store inputs can be found in the table below. +S3 object storage inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## Example ``` @@ -108,13 +108,13 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| objectStorageRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | ## Example The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. 
The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -145,13 +145,13 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| objectStorageRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | ## Example The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. 
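As a concrete cross-check of what these inputs turn into on disk, the integration suite updated later in this series asserts the generated remote_queue stanza line by line. The sketch below lists the expected inputs.conf properties in the same string-slice form the suite uses, but with the names from this documentation example substituted for the test fixtures, so the values are illustrative rather than the suite's own; the full IndexerCluster example follows.

```
// Illustrative expectation list in the style of the integration suite;
// queue, region, and bucket names mirror the documentation example below.
inputs := []string{
	"[remote_queue:sqs-test]",
	"remote_queue.type = sqs_smartbus",
	"remote_queue.sqs_smartbus.auth_region = us-west-2",
	"remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com",
	"remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test",
	"remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com",
	"remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test",
	"remote_queue.sqs_smartbus.retry_policy = max_count",
	"remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4",
}
```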
``` apiVersion: enterprise.splunk.com/v4 @@ -717,7 +717,7 @@ Spec: Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - Large Message Store Ref: + Object Storage Ref: Name: os Namespace: default Replicas: 3 @@ -741,7 +741,7 @@ Status: Endpoint: https://sqs.us-west-2.amazonaws.com Name: sqs-test Provider: sqs - Large Message Store: + Object Storage: S3: Endpoint: https://s3.us-west-2.amazonaws.com Path: s3://ingestion/smartbus-test diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 235505530..e5541e017 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -170,8 +170,8 @@ items: namespace: {{ .namespace }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.objectStoreRef }} - objectStoreRef: + {{- with $.Values.indexerCluster.objectStorageRef }} + objectStorageRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml index 7cd5bdca0..033aed904 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml @@ -1,4 +1,4 @@ -{{- if .Values.objectStorage.enabled }} +{{- if .Values.objectStorage }} {{- if .Values.objectStorage.enabled }} apiVersion: enterprise.splunk.com/v4 kind: ObjectStorage diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml index 09cd949dc..06a3c5dbd 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -26,8 +26,8 @@ spec: {{- if .name }} name: {{ .name | quote }} {{- end }} - {{- if .region }} - region: {{ .region | quote }} + {{- if .authRegion }} + authRegion: {{ .authRegion | quote }} {{- end }} {{- if .volumes }} volumes: diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 547f2a358..ca56ca5ef 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -1,30 +1,30 @@ --- -# assert for bus custom resource to be ready +# assert for queue custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test + dlq: index-ingest-separation-test-dlq status: phase: Ready --- -# assert for large message store custom resource to be ready +# assert for object storage custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test status: phase: Ready @@ -61,24 +61,24 @@ metadata: name: indexer spec: replicas: 3 - busRef: - name: bus - largeMessageStoreRef: - name: lms + queueRef: + name: queue + 
objectStorageRef: + name: os status: phase: Ready - bus: + queue: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: + dlq: index-ingest-separation-test-dlq + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful set and replicas as configured @@ -103,7 +103,7 @@ kind: IngestorCluster metadata: name: ingestor spec: - replicas: 4 + replicas: 3 queueRef: name: queue objectStorageRef: @@ -113,15 +113,15 @@ status: queue: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test + dlq: index-ingest-separation-test-dlq objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful set and replicas as configured diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml index 819620baa..765a22192 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -6,24 +6,24 @@ metadata: name: ingestor spec: replicas: 4 - busRef: - name: bus - largeMessageStoreRef: - name: lms + queueRef: + name: queue + objectStorageRef: + name: os status: phase: Ready - bus: + queue: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: + dlq: index-ingest-separation-test-dlq + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful sets and replicas updated diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 7bec8ee7d..46ef7fce3 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -10,10 +10,10 @@ queue: name: queue provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test + dlq: index-ingest-separation-test-dlq volumes: - name: helm-bus-secret-ref-test secretRef: s3-secret @@ -24,7 +24,7 @@ objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test ingestorCluster: enabled: true diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 558f862b1..3808539cc 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -76,6 +76,10 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + 
cr.Status.Queue = nil + cr.Status.ObjectStorage = nil + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -265,7 +269,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - // Large Message Store + // Object Storage os := enterpriseApi.ObjectStorage{} if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() @@ -281,7 +285,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - // Can not override original large message store spec due to comparison in the later code + // Can not override original object storage spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { @@ -289,7 +293,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - // If bus is updated + // If queue is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) @@ -402,6 +406,10 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // updates status after function completes cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.Queue = nil + cr.Status.ObjectStorage = nil + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -594,7 +602,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } - // Large Message Store + // Object Storage os := enterpriseApi.ObjectStorage{} if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() @@ -618,7 +626,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } - // If bus is updated + // If queue is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 62693e1b5..78a51ede2 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -71,7 +71,10 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) - + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.Queue = nil + cr.Status.ObjectStorage = nil + } cr.Status.Replicas = cr.Spec.Replicas // If needed, migrate the app framework status @@ -231,7 +234,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - // Large Message Store + // Object Storage os := enterpriseApi.ObjectStorage{} if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() @@ -255,7 +258,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - // If bus is updated + // If queue is updated 
if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) @@ -439,9 +442,9 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, return updateErr } -// getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods +// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { - // Push changed bus fields + // Push changed queue fields queueChangedFields = pushQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) // Always changed pipeline fields diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index fe96430e4..4267662d8 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -66,7 +66,7 @@ const ( // SplunkQueue is the queue instance SplunkQueue InstanceType = "queue" - // SplunkObjectStorage is the large message store instance + // SplunkObjectStorage is the object storage instance SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 86231df14..8aac52220 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -42,29 +42,29 @@ var ( queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", + Name: "index-ingest-separation-test-q", AuthRegion: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue", + DLQ: "index-ingest-separation-test-dlq", }, } objectStorage = enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://test-bucket/smartbus-test", + Path: "s3://index-ingest-separation-test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" inputs = []string{ - "[remote_queue:test-queue]", + "[remote_queue:index-ingest-separation-test-q]", "remote_queue.type = sqs_smartbus", "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", 
"remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s") @@ -88,21 +88,21 @@ var ( updateQueue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue-updated", + Name: "index-ingest-separation-test-q-updated", AuthRegion: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue-updated", + DLQ: "index-ingest-separation-test-dlq-updated", }, } updatedInputs = []string{ - "[remote_queue:test-queue-updated]", + "[remote_queue:index-ingest-separation-test-q-updated]", "remote_queue.type = sqs_smartbus", "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue-updated", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq-updated", "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket-updated/smartbus-test", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max", "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") @@ -116,9 +116,9 @@ var ( updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") inputsShouldNotContain = []string{ - "[remote_queue:test-queue]", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "[remote_queue:index-ingest-separation-test-q]", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s") diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index b5e0449f8..85069a071 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -75,13 +75,13 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", 
testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -97,7 +97,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -107,7 +107,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -137,11 +137,11 @@ var _ = Describe("indingsep test", func() { Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) // Delete the Queue - queue := &enterpriseApi.Queue{} - err = deployment.GetInstance(ctx, "queue", queue) - Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", queue) - err = deployment.DeleteCR(ctx, queue) - Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) + q = &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", q) + Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", q) + err = deployment.DeleteCR(ctx, q) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", q) // Delete the ObjectStorage objStorage = &enterpriseApi.ObjectStorage{} @@ -154,13 +154,13 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -174,24 +174,19 @@ var _ = Describe("indingsep test", func() { objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) 
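// Illustrative sketch, not part of this patch: the SecretRef set on the
// volumeSpec above is assumed to resolve to a Secret carrying the
// s3_access_key and s3_secret_key data keys (the same keys the kuttl step
// and testenv helpers in the later commits create), e.g.:
//
//	secret := &v1.Secret{
//		ObjectMeta: metav1.ObjectMeta{Name: testcaseEnvInst.GetIndexSecretName(), Namespace: testcaseEnvInst.GetName()},
//		Data: map[string][]byte{
//			"s3_access_key": []byte(accessKey), // accessKey/secretKey are hypothetical placeholders
//			"s3_secret_key": []byte(secretKey),
//		},
//	}
//
// GetQueueRemoteVolumeSecrets then reads those keys so the operator can render
// the remote_queue.*.access_key and remote_queue.*.secret_key conf entries.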
Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") - // Upload apps to S3 - testcaseEnvInst.Log.Info("Upload apps to S3") - appFileList := testenv.GetAppFileList(appListV1) - _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) - Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") - // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test) appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, s3TestDir, 60) appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5) ic := &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: deployment.GetName() + "-ingest", - Namespace: testcaseEnvInst.GetName(), + Name: deployment.GetName() + "-ingest", + Namespace: testcaseEnvInst.GetName(), + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, Spec: enterpriseApi.IngestorClusterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - ServiceAccount: serviceAccountName, + // ServiceAccount: serviceAccountName, LivenessInitialDelaySeconds: 600, ReadinessInitialDelaySeconds: 50, StartupProbe: &enterpriseApi.Probe{ @@ -217,10 +212,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - QueueRef: v1.ObjectReference{Name: q.Name}, - ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + QueueRef: v1.ObjectReference{Name: q.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -232,6 +227,12 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Upload apps to S3 + testcaseEnvInst.Log.Info("Upload apps to S3") + appFileList := testenv.GetAppFileList(appListV1) + _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + // Verify Ingestor Cluster Pods have apps installed testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed") ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)} @@ -264,15 +265,15 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec - + // Deploy Queue 
testcaseEnvInst.Log.Info("Deploy Queue") q, err := deployment.DeployQueue(ctx, "queue", queue) @@ -285,7 +286,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -295,7 +296,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -376,13 +377,13 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -398,7 +399,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -408,7 +409,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: 
objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go index 84e5c0709..f696a4a17 100644 --- a/test/testenv/remote_index_utils.go +++ b/test/testenv/remote_index_utils.go @@ -86,8 +86,8 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string, return true } -// GeneratBusVolumeSpec return VolumeSpec struct with given values -func GenerateBusVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec { +// GenerateQueueVolumeSpec return VolumeSpec struct with given values +func GenerateQueueVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec { return enterpriseApi.VolumeSpec{ Name: name, SecretRef: secretRef, diff --git a/test/testenv/util.go b/test/testenv/util.go index d9c6d5807..366ea3668 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -396,8 +396,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - QueueRef: queue, + Replicas: int32(replicas), + QueueRef: queue, ObjectStorageRef: os, }, } @@ -426,8 +426,8 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue Image: splunkImage, }, }, - Replicas: int32(replicas), - QueueRef: queue, + Replicas: int32(replicas), + QueueRef: queue, ObjectStorageRef: os, }, } From 47d1a354b4025f47cbaea5a4fce44bf77a368157 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Thu, 8 Jan 2026 17:32:52 +0100 Subject: [PATCH 11/15] CSPL-4360 Fixing failing tests due to incorrect secret ref --- ...AL2023-build-test-push-workflow-AL2023.yml | 2 + .../arm-AL2023-int-test-workflow.yml | 2 + .../arm-RHEL-build-test-push-workflow.yml | 2 + .../workflows/arm-RHEL-int-test-workflow.yml | 2 + .../arm-Ubuntu-build-test-push-workflow.yml | 2 + .../arm-Ubuntu-int-test-workflow.yml | 2 + .../workflows/build-test-push-workflow.yml | 2 + .../distroless-build-test-push-workflow.yml | 2 + .../distroless-int-test-workflow.yml | 2 + .github/workflows/helm-test-workflow.yml | 2 + .github/workflows/int-test-workflow.yml | 2 + .../workflows/manual-int-test-workflow.yml | 2 + .../namespace-scope-int-workflow.yml | 2 + .../workflows/nightly-int-test-workflow.yml | 2 + .../01-assert.yaml | 2 +- .../01-create-s3-secret.yaml | 2 +- .../splunk_index_ingest_sep.yaml | 2 +- .../index_and_ingestion_separation_test.go | 8 +- test/testenv/testcaseenv.go | 99 ++++++++++++------- test/testenv/testenv.go | 64 ++++++------ 20 files changed, 134 insertions(+), 71 deletions(-) diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml index 8ccaf2e65..f3a9e38f5 100644 --- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml +++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml index bdd7fe563..9003cb439 100644 --- a/.github/workflows/arm-AL2023-int-test-workflow.yml +++ 
b/.github/workflows/arm-AL2023-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml index d108005e7..0f473836e 100644 --- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml +++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml index 681491b61..1718b316b 100644 --- a/.github/workflows/arm-RHEL-int-test-workflow.yml +++ b/.github/workflows/arm-RHEL-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml index 356812323..8e0d6aa3d 100644 --- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml +++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml index ebbea6176..3ddeaa82d 100644 --- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml +++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index 6c79f58a9..7e8af7d45 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -190,6 +190,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml index 
c47d72ab7..bb99d1742 100644 --- a/.github/workflows/distroless-build-test-push-workflow.yml +++ b/.github/workflows/distroless-build-test-push-workflow.yml @@ -191,6 +191,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-int-test-workflow.yml b/.github/workflows/distroless-int-test-workflow.yml index da4719183..a73d194c5 100644 --- a/.github/workflows/distroless-int-test-workflow.yml +++ b/.github/workflows/distroless-int-test-workflow.yml @@ -88,6 +88,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 6e83bcc63..d5e58c914 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -65,6 +65,8 @@ jobs: HELM_REPO_PATH: "../../../../helm-chart" INSTALL_OPERATOR: "true" TEST_VPC_ENDPOINT_URL: ${{ secrets.TEST_VPC_ENDPOINT_URL }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - uses: chrisdickinson/setup-yq@3d931309f27270ebbafd53f2daee773a82ea1822 - name: Checking YQ installation diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index e5b12b5dc..c09b6c305 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -84,6 +84,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index b76b3d515..c042347aa 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -45,6 +45,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: ${{ github.event.inputs.CLUSTER_WIDE }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml index b32dcee92..9153bd950 100644 --- a/.github/workflows/namespace-scope-int-workflow.yml +++ b/.github/workflows/namespace-scope-int-workflow.yml @@ -40,6 +40,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "false" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + 
AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 4bc4c199c..41fbf3d74 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -81,6 +81,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index e3dd6765c..a4aaa0824 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: Secret metadata: - name: s3-secret + name: index-ing-sep-secret diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml index 8f1b1b95f..591aa8fd5 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml @@ -2,6 +2,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - script: kubectl create secret generic s3-secret --from-literal=s3_access_key=$AWS_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_SECRET_ACCESS_KEY --namespace $NAMESPACE + - script: kubectl create secret generic index-ing-sep-secret --from-literal=s3_access_key=$AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY --namespace $NAMESPACE background: false skipLogOutput: true \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 46ef7fce3..1cdbc33b8 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -16,7 +16,7 @@ queue: dlq: index-ingest-separation-test-dlq volumes: - name: helm-bus-secret-ref-test - secretRef: s3-secret + secretRef: index-ing-sep-secret objectStorage: enabled: true diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 85069a071..6fe07597a 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -81,7 +81,7 @@ var _ = Describe("indingsep test", func() { // testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -160,7 +160,7 @@ var _ = Describe("indingsep test", func() { // 
testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -271,7 +271,7 @@ var _ = Describe("indingsep test", func() { // testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec // Deploy Queue @@ -383,7 +383,7 @@ var _ = Describe("indingsep test", func() { // testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go index a1081e0a0..737aaa9a6 100644 --- a/test/testenv/testcaseenv.go +++ b/test/testenv/testcaseenv.go @@ -35,24 +35,25 @@ import ( // TestCaseEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run test cases against type TestCaseEnv struct { - kubeClient client.Client - name string - namespace string - serviceAccountName string - roleName string - roleBindingName string - operatorName string - operatorImage string - splunkImage string - initialized bool - SkipTeardown bool - licenseFilePath string - licenseCMName string - s3IndexSecret string - Log logr.Logger - cleanupFuncs []cleanupFunc - debug string - clusterWideOperator string + kubeClient client.Client + name string + namespace string + serviceAccountName string + roleName string + roleBindingName string + operatorName string + operatorImage string + splunkImage string + initialized bool + SkipTeardown bool + licenseFilePath string + licenseCMName string + s3IndexSecret string + indexIngestSepSecret string + Log logr.Logger + cleanupFuncs []cleanupFunc + debug string + clusterWideOperator string } // GetKubeClient returns the kube client to talk to kube-apiserver @@ -79,21 +80,22 @@ func NewTestCaseEnv(kubeClient client.Client, name string, operatorImage string, } testenv := &TestCaseEnv{ - kubeClient: kubeClient, - name: name, - namespace: name, - serviceAccountName: name, - roleName: name, - roleBindingName: name, - operatorName: "splunk-op-" + name, - operatorImage: operatorImage, - splunkImage: splunkImage, - SkipTeardown: specifiedSkipTeardown, - licenseCMName: name, - licenseFilePath: licenseFilePath, - s3IndexSecret: "splunk-s3-index-" + name, - debug: os.Getenv("DEBUG"), - clusterWideOperator: installOperatorClusterWide, + kubeClient: kubeClient, + name: name, + namespace: name, + serviceAccountName: name, + roleName: name, + roleBindingName: name, + operatorName: "splunk-op-" + name, + operatorImage: operatorImage, + splunkImage: splunkImage, + SkipTeardown: specifiedSkipTeardown, + licenseCMName: name, + licenseFilePath: 
licenseFilePath, + s3IndexSecret: "splunk-s3-index-" + name, + indexIngestSepSecret: "splunk-index-ingest-sep-" + name, + debug: os.Getenv("DEBUG"), + clusterWideOperator: installOperatorClusterWide, } testenv.Log = logf.Log.WithValues("testcaseenv", testenv.name) @@ -156,6 +158,7 @@ func (testenv *TestCaseEnv) setup() error { switch ClusterProvider { case "eks": testenv.createIndexSecret() + testenv.createIndexIngestSepSecret() case "azure": testenv.createIndexSecretAzure() case "gcp": @@ -588,11 +591,41 @@ func (testenv *TestCaseEnv) createIndexSecretAzure() error { return nil } +// createIndexIngestSepSecret creates secret object +func (testenv *TestCaseEnv) createIndexIngestSepSecret() error { + secretName := testenv.indexIngestSepSecret + ns := testenv.namespace + + data := map[string][]byte{"s3_access_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID")), + "s3_secret_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY"))} + secret := newSecretSpec(ns, secretName, data) + + if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil { + testenv.Log.Error(err, "Unable to create index and ingestion sep secret object") + return err + } + + testenv.pushCleanupFunc(func() error { + err := testenv.GetKubeClient().Delete(context.TODO(), secret) + if err != nil { + testenv.Log.Error(err, "Unable to delete index and ingestion sep secret object") + return err + } + return nil + }) + return nil +} + // GetIndexSecretName return index secret object name func (testenv *TestCaseEnv) GetIndexSecretName() string { return testenv.s3IndexSecret } +// GetIndexIngestSepSecretName return index and ingestion separation secret object name +func (testenv *TestCaseEnv) GetIndexIngestSepSecretName() string { + return testenv.indexIngestSepSecret +} + // GetLMConfigMap Return name of license config map func (testenv *TestCaseEnv) GetLMConfigMap() string { return testenv.licenseCMName diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go index f82310015..06fe304d4 100644 --- a/test/testenv/testenv.go +++ b/test/testenv/testenv.go @@ -160,24 +160,25 @@ type cleanupFunc func() error // TestEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run tests against type TestEnv struct { - kubeAPIServer string - name string - namespace string - serviceAccountName string - roleName string - roleBindingName string - operatorName string - operatorImage string - splunkImage string - initialized bool - SkipTeardown bool - licenseFilePath string - licenseCMName string - s3IndexSecret string - kubeClient client.Client - Log logr.Logger - cleanupFuncs []cleanupFunc - debug string + kubeAPIServer string + name string + namespace string + serviceAccountName string + roleName string + roleBindingName string + operatorName string + operatorImage string + splunkImage string + initialized bool + SkipTeardown bool + licenseFilePath string + licenseCMName string + s3IndexSecret string + indexIngestSepSecret string + kubeClient client.Client + Log logr.Logger + cleanupFuncs []cleanupFunc + debug string } func init() { @@ -231,19 +232,20 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, licenseFilePath st } testenv := &TestEnv{ - name: envName, - namespace: envName, - serviceAccountName: envName, - roleName: envName, - roleBindingName: envName, - operatorName: "splunk-op-" + envName, - operatorImage: operatorImage, - splunkImage: splunkImage, - SkipTeardown: specifiedSkipTeardown, - licenseCMName: envName, - licenseFilePath: licenseFilePath, -
s3IndexSecret: "splunk-s3-index-" + envName, - debug: os.Getenv("DEBUG"), + name: envName, + namespace: envName, + serviceAccountName: envName, + roleName: envName, + roleBindingName: envName, + operatorName: "splunk-op-" + envName, + operatorImage: operatorImage, + splunkImage: splunkImage, + SkipTeardown: specifiedSkipTeardown, + licenseCMName: envName, + licenseFilePath: licenseFilePath, + s3IndexSecret: "splunk-s3-index-" + envName, + indexIngestSepSecret: "splunk--index-ingest-sep-" + name, + debug: os.Getenv("DEBUG"), } testenv.Log = logf.Log.WithValues("testenv", testenv.name) From 532ca28f6a955a74d62360f8d196bf132eadca43 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 11:36:36 +0100 Subject: [PATCH 12/15] CSPL-4360 Addressing comments --- api/v4/indexercluster_types.go | 2 + api/v4/ingestorcluster_types.go | 2 + api/v4/objectstorage_types.go | 2 + api/v4/queue_types.go | 2 + ...enterprise.splunk.com_indexerclusters.yaml | 8 + ...nterprise.splunk.com_ingestorclusters.yaml | 8 + .../enterprise.splunk.com_objectstorages.yaml | 4 + .../bases/enterprise.splunk.com_queues.yaml | 4 + pkg/splunk/client/enterprise.go | 19 -- pkg/splunk/client/enterprise_test.go | 32 ---- pkg/splunk/enterprise/indexercluster.go | 147 +++++--------- pkg/splunk/enterprise/indexercluster_test.go | 92 ++++----- pkg/splunk/enterprise/ingestorcluster.go | 139 +++++--------- pkg/splunk/enterprise/ingestorcluster_test.go | 94 ++++----- ...dex_and_ingestion_separation_suite_test.go | 30 --- .../index_and_ingestion_separation_test.go | 181 ------------------ 16 files changed, 213 insertions(+), 553 deletions(-) diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index e74f900a7..34eb0ba3e 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -40,10 +40,12 @@ type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` // +optional + // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +optional + // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index f2e061284..15dc47640 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -40,10 +40,12 @@ type IngestorClusterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` // +kubebuilder:validation:Required + // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required + // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 08205743f..587738d20 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -28,6 +28,8 @@ const ( ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.s3 == oldSelf.s3",message="s3 is immutable once created" // +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" // ObjectStorageSpec defines the desired state of ObjectStorage type ObjectStorageSpec struct { diff --git 
a/api/v4/queue_types.go b/api/v4/queue_types.go index 4c3ff9861..d689a4acd 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -28,6 +28,8 @@ const ( QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs == oldSelf.sqs",message="sqs is immutable once created" // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" // QueueSpec defines the desired state of Queue type QueueSpec struct { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index af672ce67..2d01798e3 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8410,6 +8410,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) peers: @@ -8523,6 +8527,10 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs is immutable once created + rule: self.sqs == oldSelf.sqs - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 6ce4c8488..194fdac86 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4621,6 +4621,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) phase: @@ -4704,6 +4708,10 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs is immutable once created + rule: self.sqs == oldSelf.sqs - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index c84474921..23d5b437b 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -78,6 +78,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) status: diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index f4ed36a45..454d1700b 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ 
b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -120,6 +120,10 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs is immutable once created + rule: self.sqs == oldSelf.sqs - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go index 6eb4d2f87..e51688661 100644 --- a/pkg/splunk/client/enterprise.go +++ b/pkg/splunk/client/enterprise.go @@ -1015,22 +1015,3 @@ func (c *SplunkClient) UpdateConfFile(scopedLog logr.Logger, fileName, property } return err } - -// Deletes conf files properties -func (c *SplunkClient) DeleteConfFileProperty(scopedLog logr.Logger, fileName, property string) error { - endpoint := fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property) - - scopedLog.Info("Deleting conf file object", "fileName", fileName, "property", property) - request, err := http.NewRequest("DELETE", endpoint, nil) - if err != nil { - scopedLog.Error(err, "Failed to delete conf file object", "fileName", fileName, "property", property) - return err - } - - expectedStatus := []int{200, 201, 404} - err = c.Do(request, expectedStatus, nil) - if err != nil { - scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object deletion", expectedStatus), "fileName", fileName, "property", property) - } - return err -} diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go index 6b97c24d7..4934eedfc 100644 --- a/pkg/splunk/client/enterprise_test.go +++ b/pkg/splunk/client/enterprise_test.go @@ -705,35 +705,3 @@ func TestUpdateConfFile(t *testing.T) { t.Errorf("UpdateConfFile expected error on update, got nil") } } - -func TestDeleteConfFileProperty(t *testing.T) { - // Test successful deletion of conf property - property := "myproperty" - fileName := "outputs" - - reqLogger := log.FromContext(context.TODO()) - scopedLog := reqLogger.WithName("TestDeleteConfFileProperty") - - wantDeleteRequest, _ := http.NewRequest("DELETE", fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), nil) - - mockSplunkClient := &spltest.MockHTTPClient{} - mockSplunkClient.AddHandler(wantDeleteRequest, 200, "", nil) - - c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd") - c.Client = mockSplunkClient - - err := c.DeleteConfFileProperty(scopedLog, fileName, property) - if err != nil { - t.Errorf("DeleteConfFileProperty err = %v", err) - } - mockSplunkClient.CheckRequests(t, "TestDeleteConfFileProperty") - - // Negative test: error on delete - mockSplunkClient = &spltest.MockHTTPClient{} - mockSplunkClient.AddHandler(wantDeleteRequest, 500, "", nil) - c.Client = mockSplunkClient - err = c.DeleteConfFileProperty(scopedLog, fileName, property) - if err == nil { - t.Errorf("DeleteConfFileProperty expected error on delete, got nil") - } -} diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 3808539cc..af981be2c 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -19,7 +19,6 @@ import ( "context" "errors" "fmt" - "reflect" "regexp" "sort" "strconv" @@ -260,12 +259,9 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } } - - // Can not override original queue spec due to comparison in the later code - 
queueCopy := queue - if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -284,20 +280,17 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } } - - // Can not override original object storage spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } // If queue is updated if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { + if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -593,12 +586,9 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, return result, err } } - - // Can not override original queue spec due to comparison in the later code - queueCopy := queue - if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -612,25 +602,21 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, err = client.Get(context.Background(), types.NamespacedName{ Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &queue) + }, &os) if err != nil { return result, err } } - - // Can not override original queue spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } - // If queue is updated if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) 
|| !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) {
+	if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil {
 		mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
-		err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client)
+		err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client)
 		if err != nil {
 			eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error()))
 			scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation")
@@ -1317,10 +1303,10 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri
 
 var newSplunkClientForQueuePipeline = splclient.NewSplunkClient
 
-// Checks if only PullQueue or Pipeline config changed, and updates the conf file if so
-func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s rclient.Client) error {
+// updateIndexerConfFiles checks whether the Queue or Pipeline inputs are being created for the first time and, if so, updates the conf files
+func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s rclient.Client) error {
 	reqLogger := log.FromContext(ctx)
-	scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())
+	scopedLog := reqLogger.WithName("updateIndexerConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())
 
 	// Only update config for pods that exist
 	readyReplicas := newCR.Status.ReadyReplicas
@@ -1336,31 +1322,10 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context,
 		}
 		splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd))
 
-		newCrStatusQueue := newCR.Status.Queue
-		if newCrStatusQueue == nil {
-			newCrStatusQueue = &enterpriseApi.QueueSpec{}
-		}
-		newCrStatusObjectStorage := newCR.Status.ObjectStorage
-		if newCrStatusObjectStorage == nil {
-			newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{}
-		}
-
-		afterDelete := false
-		if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) ||
-			(queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) {
-			if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil {
-				updateErr = err
-			}
-			if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil {
-				updateErr = err
-			}
-			afterDelete = true
-		}
-
 		// Secret reference
 		s3AccessKey, s3SecretKey := "", ""
-		if queue.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" {
-			for _, vol := range queue.Spec.SQS.VolList {
+		if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" {
+			for _, vol := range queue.SQS.VolList {
 				if vol.SecretRef != "" {
 					s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR)
 					if err != nil {
@@ -1371,38 +1336,37 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context,
 			}
 		}
 
-		queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey)
+		queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey)
 
-		for _, pbVal := range queueChangedFieldsOutputs {
-			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil {
+		for _, pbVal := range queueOutputs {
+			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil {
 				updateErr = err
 			}
 		}
-		for _, pbVal := range queueChangedFieldsInputs {
-			if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil {
+		for _, pbVal := range queueInputs {
+			if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil {
 				updateErr = err
 			}
 		}
-		for _, field := range pipelineChangedFields {
+		for _, field := range pipelineInputs {
 			if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil {
 				updateErr = err
 			}
 		}
 	}
 
-	// Do NOT restart Splunk
 	return updateErr
 }
 
-// getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods
-func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) {
-	// Push all queue fields
-	queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey)
+// getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for the indexer pods' conf files
+func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) {
+	// Queue inputs
+	queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey)
 
-	// Always set all pipeline fields, not just changed ones
-	pipelineChangedFields = pipelineConfig(true)
+	// Pipeline inputs
+	pipelineInputs = getPipelineInputsForConfFile(true)
 
 	return
 }
@@ -1418,45 +1382,34 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool {
 	return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9")
 }
 
-func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) {
+// getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files
+func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) {
 	queueProvider := ""
-	if newQueue.Provider == "sqs" {
+	if queue.Provider == "sqs" {
 		queueProvider = "sqs_smartbus"
 	}
 	osProvider := ""
-	if newOS.Provider == "s3" {
+	if os.Provider == "s3" {
 		osProvider = "sqs_smartbus"
 	}
-
-	if oldQueue.Provider != newQueue.Provider || afterDelete {
-		inputs = append(inputs, []string{"remote_queue.type", queueProvider})
-	}
-	if !reflect.DeepEqual(oldQueue.SQS.VolList, newQueue.SQS.VolList) || afterDelete {
-		if s3AccessKey != "" && s3SecretKey != "" {
-			inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey})
-			inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey})
-		}
-	}
-	if oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion})
-	}
-	if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint})
-	}
-	if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint})
-	}
-	if oldOS.S3.Path != newOS.S3.Path || afterDelete {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path})
-	}
-	if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ})
-	}
 	inputs = append(inputs,
+		[]string{"remote_queue.type", queueProvider},
+		[]string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion},
+		[]string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path},
+		[]string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ},
 		[]string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"},
 		[]string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"},
 	)
 
+	// TODO: Handle credentials change
+	if s3AccessKey != "" && s3SecretKey != "" {
+		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey})
+		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey})
+	}
+
 	outputs = inputs
 	outputs = append(outputs,
 		[]string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"},
diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index 2b4026ac5..9d1bf0118 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -2046,10 +2046,10 @@ func TestImageUpdatedTo9(t *testing.T) {
 	}
 }
 
-func TestGetChangedQueueFieldsForIndexer(t *testing.T) {
+func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) {
 	provider := "sqs_smartbus"
 
-	queue := enterpriseApi.Queue{
+	queue := &enterpriseApi.Queue{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Queue",
 			APIVersion: "enterprise.splunk.com/v4",
 		},
@@ -2071,7 +2071,7 @@
 		},
 	}
 
-	os := enterpriseApi.ObjectStorage{
+	os := &enterpriseApi.ObjectStorage{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "ObjectStorage",
 			APIVersion: "enterprise.splunk.com/v4",
 		},
@@ -2088,29 +2088,13 @@ func
TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, } - newCR := &enterpriseApi.IndexerCluster{ - Spec: enterpriseApi.IndexerClusterSpec{ - QueueRef: corev1.ObjectReference{ - Name: queue.Name, - }, - ObjectStorageRef: corev1.ObjectReference{ - Name: os.Name, - }, - }, - Status: enterpriseApi.IndexerClusterStatus{ - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, - }, - } - key := "key" secret := "secret" - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) + + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getQueueAndPipelineInputsForIndexerConfFiles(&queue.Spec, &os.Spec, key, secret) assert.Equal(t, 10, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, - {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, @@ -2118,13 +2102,13 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, }, queueChangedFieldsInputs) assert.Equal(t, 12, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, - {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, @@ -2132,6 +2116,8 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, }, queueChangedFieldsOutputs) @@ -2146,11 +2132,14 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePullQueueChange(t *testing.T) { +func TestUpdateIndexerConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions provider := "sqs_smartbus" - queue := enterpriseApi.Queue{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", @@ -2169,6 +2158,7 @@ func TestHandlePullQueueChange(t *testing.T) { }, }, } + c.Create(ctx, queue) os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ @@ -2187,8 +2177,9 @@ func TestHandlePullQueueChange(t 
*testing.T) { }, }, } + c.Create(ctx, &os) - newCR := &enterpriseApi.IndexerCluster{ + cr := &enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -2211,6 +2202,7 @@ func TestHandlePullQueueChange(t *testing.T) { ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } + c.Create(ctx, cr) pod0 := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -2252,6 +2244,10 @@ func TestHandlePullQueueChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-indexer-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -2262,19 +2258,9 @@ func TestHandlePullQueueChange(t *testing.T) { }, } - // Mock pods - c := spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, &queue) - c.Create(ctx, &os) - c.Create(ctx, newCR) - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // Mock secret @@ -2283,9 +2269,9 @@ func TestHandlePullQueueChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // outputs.conf @@ -2304,22 +2290,22 @@ func TestHandlePullQueueChange(t *testing.T) { propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza - mgr = newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // default-mode.conf @@ -2331,7 +2317,7 @@ func TestHandlePullQueueChange(t *testing.T) { {"pipeline:typing", "disabled", "true"}, } - for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-indexer-%d", i) baseURL := fmt.Sprintf("https://%s.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName) @@ -2345,9 +2331,9 @@ func TestHandlePullQueueChange(t *testing.T) { } } - mgr = 
newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.Nil(t, err) } @@ -2365,25 +2351,25 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-indexer-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPullQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { +func newTestIndexerQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 78a51ede2..55f0e7d35 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -225,12 +225,9 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } } - - // Can not override original queue spec due to comparison in the later code - queueCopy := queue - if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -249,19 +246,16 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } } - - // Can not override original queue spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } // 
If queue/object storage status is not yet recorded, write the conf entries once
-	if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) {
+	if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil {
 		mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
-		err = mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client)
+		err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, client)
 		if err != nil {
 			eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error()))
 			scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation")
@@ -344,7 +338,7 @@ func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *s
 
 // validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong
 func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error {
-	// We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster
+	// We cannot have 0 replicas in IngestorCluster spec since this refers to the number of ingestion pods in the ingestor cluster
 	if cr.Spec.Replicas < 3 {
 		cr.Spec.Replicas = 3
 	}
@@ -372,10 +366,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie
 	return ss, nil
 }
 
-// Checks if only Queue or Pipeline config changed, and updates the conf file if so
-func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s client.Client) error {
+// updateIngestorConfFiles checks whether the Queue or Pipeline inputs are being created for the first time and, if so, updates the conf files
+func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s client.Client) error {
 	reqLogger := log.FromContext(ctx)
-	scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())
+	scopedLog := reqLogger.WithName("updateIngestorConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())
 
 	// Only update config for pods that exist
 	readyReplicas := newCR.Status.Replicas
@@ -391,28 +385,10 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context,
 		}
 		splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd))
 
-		newCrStatusQueue := newCR.Status.Queue
-		if newCrStatusQueue == nil {
-			newCrStatusQueue = &enterpriseApi.QueueSpec{}
-		}
-		newCrStatusObjectStorage := newCR.Status.ObjectStorage
-		if newCrStatusObjectStorage == nil {
-			newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{}
-		}
-
-		afterDelete := false
-		if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) ||
-			(queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) {
-			if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil {
-				updateErr = err
-			}
-			afterDelete = true
-		}
-
 		// Secret reference
 		s3AccessKey, s3SecretKey := "", ""
-		if queue.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" {
-			for _, vol := range queue.Spec.SQS.VolList {
+		if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" {
+			for _, vol := range queue.SQS.VolList {
 				if vol.SecretRef != "" {
 					s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR)
 					if err != nil {
@@ -423,32 +399,31 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context,
 			}
 		}
 
-		queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey)
+		queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey)
 
-		for _, pbVal := range queueChangedFields {
-			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil {
+		for _, input := range queueInputs {
+			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil {
 				updateErr = err
 			}
 		}
-		for _, field := range pipelineChangedFields {
-			if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil {
+		for _, input := range pipelineInputs {
+			if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", input[0], [][]string{{input[1], input[2]}}); err != nil {
 				updateErr = err
 			}
 		}
 	}
 
-	// Do NOT restart Splunk
 	return updateErr
 }
 
-// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods
-func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) {
-	// Push changed queue fields
-	queueChangedFields = pushQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey)
+// getQueueAndPipelineInputsForIngestorConfFiles returns a list of queue and pipeline inputs for the ingestor pods' conf files
+func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, pipelineInputs [][]string) {
+	// Queue inputs
+	queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey)
 
-	// Always changed pipeline fields
-	pipelineChangedFields = pipelineConfig(false)
+	// Pipeline inputs
+	pipelineInputs = getPipelineInputsForConfFile(false)
 
 	return
 }
@@ -461,7 +436,7 @@ type ingestorClusterPodManager struct {
 	newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient
 }
 
-// newIngestorClusterPodManager function to create pod manager this is added to write unit test case
+// newIngestorClusterPodManager creates a pod manager; declared as a variable so unit tests can override it
 var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager {
 	return ingestorClusterPodManager{
 		log:             log,
@@ -472,8 +447,9 @@ var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.Inges
 	}
 }
 
-func pipelineConfig(isIndexer bool) (output [][]string) {
-	output = append(output,
+// getPipelineInputsForConfFile returns a list of pipeline inputs for the conf file
+func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) {
+	config = append(config,
 		[]string{"pipeline:remotequeueruleset", "disabled", "false"},
 		[]string{"pipeline:ruleset", "disabled", "true"},
 		[]string{"pipeline:remotequeuetyping", "disabled", "false"},
@@ -481,51 +457,40 @@
 		[]string{"pipeline:typing", "disabled", "true"},
 	)
 	if !isIndexer {
-		output = append(output, []string{"pipeline:indexerPipe", "disabled", "true"})
+		config = append(config, []string{"pipeline:indexerPipe", "disabled", "true"})
 	}
-	return output
+
+	return
 }
 
-func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (output [][]string) {
+// getQueueAndObjectStorageInputsForIngestorConfFiles returns a list of queue and object storage inputs for conf files
+func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (config [][]string) {
 	queueProvider := ""
-	if newQueue.Provider == "sqs" {
+	if queue.Provider == "sqs" {
 		queueProvider = "sqs_smartbus"
 	}
 	osProvider := ""
-	if newOS.Provider == "s3" {
+	if os.Provider == "s3" {
 		osProvider = "sqs_smartbus"
 	}
-
-	if oldQueue.Provider != newQueue.Provider || afterDelete {
-		output = append(output, []string{"remote_queue.type", queueProvider})
-	}
-	if !reflect.DeepEqual(oldQueue.SQS.VolList, newQueue.SQS.VolList) || afterDelete {
-		if s3AccessKey != "" && s3SecretKey != "" {
-			output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey})
-			output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey})
-		}
-	}
-	if oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete {
-		output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion})
-	}
-	if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) {
-		output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint})
-	}
-	if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) {
-		output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint})
-	}
-	if oldOS.S3.Path != newOS.S3.Path || afterDelete {
-		output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path})
-	}
-	if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete {
-		output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ})
-	}
-
-	output = append(output,
+	config = append(config,
+		[]string{"remote_queue.type", queueProvider},
+		[]string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion},
+		[]string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path},
+		[]string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ},
 		[]string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"},
[]string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + ) - return output + // TODO: Handle credentials change + if s3AccessKey != "" && s3SecretKey != "" { + config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) + config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + } + + return } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 995e52ff8..e79bbaa94 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -86,7 +86,7 @@ func TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, queue) - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -103,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &os) + c.Create(ctx, os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -276,7 +276,7 @@ func TestApplyIngestorCluster(t *testing.T) { } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, queue, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -403,7 +403,7 @@ func TestGetIngestorStatefulSet(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"proto
col":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedQueueFieldsForIngestor(t *testing.T) { +func 
TestGetQueueAndPipelineInputsForIngestorConfFiles(t *testing.T) { provider := "sqs_smartbus" queue := enterpriseApi.Queue{ @@ -445,30 +445,14 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { }, } - newCR := &enterpriseApi.IngestorCluster{ - Spec: enterpriseApi.IngestorClusterSpec{ - QueueRef: corev1.ObjectReference{ - Name: queue.Name, - }, - ObjectStorageRef: corev1.ObjectReference{ - Name: os.Name, - }, - }, - Status: enterpriseApi.IngestorClusterStatus{ - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, - }, - } - key := "key" secret := "secret" - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) - assert.Equal(t, 12, len(queueChangedFields)) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(&queue.Spec, &os.Spec, key, secret) + + assert.Equal(t, 12, len(queueInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, - {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, @@ -478,9 +462,11 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, - }, queueChangedFields) + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, + }, queueInputs) - assert.Equal(t, 6, len(pipelineChangedFields)) + assert.Equal(t, 6, len(pipelineInputs)) assert.Equal(t, [][]string{ {"pipeline:remotequeueruleset", "disabled", "false"}, {"pipeline:ruleset", "disabled", "true"}, @@ -488,14 +474,17 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { {"pipeline:remotequeueoutput", "disabled", "false"}, {"pipeline:typing", "disabled", "true"}, {"pipeline:indexerPipe", "disabled", "true"}, - }, pipelineChangedFields) + }, pipelineInputs) } -func TestHandlePushQueueChange(t *testing.T) { +func TestUpdateIngestorConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions provider := "sqs_smartbus" - queue := enterpriseApi.Queue{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", @@ -514,7 +503,7 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -531,7 +520,7 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - newCR := &enterpriseApi.IngestorCluster{ + cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", }, @@ -595,6 +584,10 @@ func TestHandlePushQueueChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-ingestor-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -605,17 +598,10 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - // Mock pods - c := 
spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // Mock secret @@ -624,9 +610,9 @@ func TestHandlePushQueueChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // outputs.conf @@ -643,12 +629,12 @@ func TestHandlePushQueueChange(t *testing.T) { } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &queue, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // default-mode.conf @@ -661,9 +647,9 @@ func TestHandlePushQueueChange(t *testing.T) { {"pipeline:indexerPipe", "disabled", "true"}, } - for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-ingestor-%d", i) - baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, newCR.GetName(), newCR.GetNamespace()) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace()) for _, field := range propertyKVList { req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0]))) @@ -675,32 +661,32 @@ func TestHandlePushQueueChange(t *testing.T) { } } - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.Queue, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, 
strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestIngestorQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -709,6 +695,6 @@ func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *in } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushQueuePipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 8aac52220..3e18b669c 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -85,36 +85,6 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateQueue = enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "index-ingest-separation-test-q-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "index-ingest-separation-test-dlq-updated", - }, - } - - updatedInputs = []string{ - "[remote_queue:index-ingest-separation-test-q-updated]", - "remote_queue.type = sqs_smartbus", - "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq-updated", - "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", - "remote_queue.sqs_smartbus.retry_policy = max", - "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} - updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") - updatedDefaultsAll = []string{ - "[pipeline:remotequeueruleset]\ndisabled = false", - "[pipeline:ruleset]\ndisabled = false", - "[pipeline:remotequeuetyping]\ndisabled = false", - "[pipeline:remotequeueoutput]\ndisabled = false", - "[pipeline:typing]\ndisabled = true", - } - updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") - inputsShouldNotContain = []string{ "[remote_queue:index-ingest-separation-test-q]", "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 6fe07597a..4314124cc 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ 
b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -83,7 +83,6 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec - updateQueue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -162,7 +161,6 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec - updateQueue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -374,183 +372,4 @@ var _ = Describe("indingsep test", func() { } }) }) - - Context("Ingestor and Indexer deployment", func() { - It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { - // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ - // Create Service Account - // testcaseEnvInst.Log.Info("Create Service Account") - // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - - // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} - queue.SQS.VolList = volumeSpec - updateQueue.SQS.VolList = volumeSpec - - // Deploy Queue - testcaseEnvInst.Log.Info("Deploy Queue") - q, err := deployment.DeployQueue(ctx, "queue", queue) - Expect(err).To(Succeed(), "Unable to deploy Queue") - - // Deploy ObjectStorage - testcaseEnvInst.Log.Info("Deploy ObjectStorage") - objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) - Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") - - // Deploy Ingestor Cluster - testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") - - // Deploy Cluster Manager - testcaseEnvInst.Log.Info("Deploy Cluster Manager") - _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) - Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") - - // Deploy Indexer Cluster - testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") - - // Ensure that Ingestor Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Cluster Manager is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") - testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Queue CR with latest config - testcaseEnvInst.Log.Info("Get 
instance of current Queue CR with latest config") - queue := &enterpriseApi.Queue{} - err = deployment.GetInstance(ctx, q.Name, queue) - Expect(err).To(Succeed(), "Failed to get instance of Queue") - - // Update instance of Queue CR with new queue - testcaseEnvInst.Log.Info("Update instance of Queue CR with new queue") - queue.Spec = updateQueue - err = deployment.UpdateCR(ctx, queue) - Expect(err).To(Succeed(), "Unable to deploy Queue with updated CR") - - // Ensure that Ingestor Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Ingestor Cluster CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") - ingest := &enterpriseApi.IngestorCluster{} - err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) - Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") - - // Verify Ingestor Cluster Status - testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(*ingest.Status.Queue).To(Equal(updateQueue), "Ingestor queue status is not the same as provided as input") - - // Ensure that Indexer Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Indexer Cluster CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") - index := &enterpriseApi.IndexerCluster{} - err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index) - Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") - - // Verify Indexer Cluster Status - testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(*index.Status.Queue).To(Equal(updateQueue), "Indexer queue status is not the same as provided as input") - - // Verify conf files - testcaseEnvInst.Log.Info("Verify conf files") - pods := testenv.DumpGetPods(deployment.GetName()) - for _, pod := range pods { - defaultsConf := "" - - if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { - // Verify outputs.conf - testcaseEnvInst.Log.Info("Verify outputs.conf") - outputsPath := "opt/splunk/etc/system/local/outputs.conf" - outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - testenv.ValidateContent(outputsConf, updatedOutputs, true) - testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) - - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" - defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") - testenv.ValidateContent(defaultsConf, defaultsAll, true) - - // Verify AWS env variables - testcaseEnvInst.Log.Info("Verify AWS env variables") - envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") - testenv.ValidateContent(envVars, awsEnvVars, true) - } - - if strings.Contains(pod, "ingest") { - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - testenv.ValidateContent(defaultsConf, defaultsIngest, true) - } else if 
strings.Contains(pod, "idxc") { - // Verify inputs.conf - testcaseEnvInst.Log.Info("Verify inputs.conf") - inputsPath := "opt/splunk/etc/system/local/inputs.conf" - inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, updatedInputs, true) - testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) - } - } - - // Verify conf files - testcaseEnvInst.Log.Info("Verify conf files") - pods = testenv.DumpGetPods(deployment.GetName()) - for _, pod := range pods { - defaultsConf := "" - - if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { - // Verify outputs.conf - testcaseEnvInst.Log.Info("Verify outputs.conf") - outputsPath := "opt/splunk/etc/system/local/outputs.conf" - outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - testenv.ValidateContent(outputsConf, updatedOutputs, true) - testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) - - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" - defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") - testenv.ValidateContent(defaultsConf, updatedDefaultsAll, true) - - // Verify AWS env variables - testcaseEnvInst.Log.Info("Verify AWS env variables") - envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") - testenv.ValidateContent(envVars, awsEnvVars, true) - } - - if strings.Contains(pod, "ingest") { - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - testenv.ValidateContent(defaultsConf, updatedDefaultsIngest, true) - } else if strings.Contains(pod, "idxc") { - // Verify inputs.conf - testcaseEnvInst.Log.Info("Verify inputs.conf") - inputsPath := "opt/splunk/etc/system/local/inputs.conf" - inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, updatedInputs, true) - testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) - } - } - }) - }) }) From 5c6e7867024169603458cee81d99b1ef6958fd59 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 13:04:14 +0100 Subject: [PATCH 13/15] CSPL-4360 Addressing secret value change and removing redundant controllers --- api/v4/indexercluster_types.go | 7 +- api/v4/ingestorcluster_types.go | 7 +- api/v4/objectstorage_types.go | 30 -- api/v4/queue_types.go | 30 -- api/v4/zz_generated.deepcopy.go | 20 -- cmd/main.go | 14 - ...enterprise.splunk.com_indexerclusters.yaml | 112 +------- ...nterprise.splunk.com_ingestorclusters.yaml | 112 +------- config/rbac/role.yaml | 6 - .../controller/objectstorage_controller.go | 120 -------- .../objectstorage_controller_test.go | 260 ----------------- internal/controller/queue_controller.go | 120 -------- internal/controller/queue_controller_test.go | 269 ------------------ internal/controller/suite_test.go | 12 - pkg/splunk/enterprise/indexercluster.go | 82 +++--- pkg/splunk/enterprise/indexercluster_test.go | 18 +- pkg/splunk/enterprise/ingestorcluster.go | 57 ++-- 
pkg/splunk/enterprise/ingestorcluster_test.go | 18 +- pkg/splunk/enterprise/util.go | 12 +- .../index_and_ingestion_separation_test.go | 6 +- 20 files changed, 117 insertions(+), 1195 deletions(-) delete mode 100644 internal/controller/objectstorage_controller.go delete mode 100644 internal/controller/objectstorage_controller_test.go delete mode 100644 internal/controller/queue_controller.go delete mode 100644 internal/controller/queue_controller_test.go diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 34eb0ba3e..f1332d8c4 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -123,11 +123,8 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue - Queue *QueueSpec `json:"queue,omitempty"` - - // Object Storage - ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` + // Queue and bucket access secret version + QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 15dc47640..9ce919809 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -76,11 +76,8 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue - Queue *QueueSpec `json:"queue,omitempty"` - - // Object Storage - ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` + // Queue and bucket access secret version + QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 587738d20..7712e81d6 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -17,7 +17,6 @@ limitations under the License. package v4 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -109,32 +108,3 @@ type ObjectStorageList struct { func init() { SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) } - -// NewEvent creates a new event associated with the object and ready -// to be published to Kubernetes API -func (os *ObjectStorage) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: os.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "ObjectStorage", - Namespace: os.Namespace, - Name: os.Name, - UID: os.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-object-storage-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/object-storage-controller", - } -} diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index d689a4acd..999eaccc8 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -17,7 +17,6 @@ limitations under the License. 
package v4 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -123,32 +122,3 @@ type QueueList struct { func init() { SchemeBuilder.Register(&Queue{}, &QueueList{}) } - -// NewEvent creates a new event associated with the object and ready -// to be published to Kubernetes API -func (os *Queue) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: os.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "Queue", - Namespace: os.Namespace, - Name: os.Name, - UID: os.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-queue-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/queue-controller", - } -} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 1f2215a9a..c7759fa58 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -545,16 +545,6 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - if in.Queue != nil { - in, out := &in.Queue, &out.Queue - *out = new(QueueSpec) - (*in).DeepCopyInto(*out) - } - if in.ObjectStorage != nil { - in, out := &in.ObjectStorage, &out.ObjectStorage - *out = new(ObjectStorageSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -648,16 +638,6 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - if in.Queue != nil { - in, out := &in.Queue, &out.Queue - *out = new(QueueSpec) - (*in).DeepCopyInto(*out) - } - if in.ObjectStorage != nil { - in, out := &in.ObjectStorage, &out.ObjectStorage - *out = new(ObjectStorageSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. 
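Note on the status change in this patch: dropping the copied Queue/ObjectStorage specs from the CR status in favor of a single QueueBucketAccessSecretVersion string lets the reconciler detect credential rotation by comparing resource versions instead of deep-comparing specs. A minimal sketch of the idea, assuming a hypothetical helper (needsConfRewrite is illustrative and not part of this patch; the enterpriseApi import path is assumed from the repo layout):

package enterprise

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
)

// needsConfRewrite reports whether the queue/bucket access secret has changed
// since the version recorded in the CR status, in which case fresh credentials
// should be pushed into outputs.conf/inputs.conf on the pods.
func needsConfRewrite(ctx context.Context, c client.Client, cr *enterpriseApi.IngestorCluster, secretName string) (bool, error) {
	var s corev1.Secret
	if err := c.Get(ctx, types.NamespacedName{Namespace: cr.GetNamespace(), Name: secretName}, &s); err != nil {
		return false, err
	}
	// An empty recorded version means the conf files were never written.
	return cr.Status.QueueBucketAccessSecretVersion == "" ||
		cr.Status.QueueBucketAccessSecretVersion != s.ResourceVersion, nil
}

After a successful conf update, the reconciler would store s.ResourceVersion back into cr.Status.QueueBucketAccessSecretVersion, so subsequent reconciles are no-ops until the secret changes again.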
diff --git a/cmd/main.go b/cmd/main.go index dfb9c87e1..a037f87b1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,20 +230,6 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.QueueReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Queue") - os.Exit(1) - } - if err := (&controller.ObjectStorageReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ObjectStorage") - os.Exit(1) - } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 2d01798e3..9b3f50bc8 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8383,39 +8383,6 @@ spec: namespace_scoped_secret_resource_version: description: Indicates resource version of namespace scoped secret type: string - objectStorage: - description: Object Storage - properties: - provider: - description: Provider of queue resources - enum: - - s3 - type: string - s3: - description: s3 specific inputs - properties: - endpoint: - description: S3-compatible Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - path: - description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ - type: string - required: - - path - type: object - required: - - provider - - s3 - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: s3 is immutable once created - rule: self.s3 == oldSelf.s3 - - message: s3 must be provided when provider is s3 - rule: self.provider != 's3' || has(self.s3) peers: description: status of each indexer cluster peer items: @@ -8457,82 +8424,9 @@ spec: - Terminating - Error type: string - queue: - description: Queue - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - authRegion: - description: Auth Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - volumes: - description: List of remote storage volumes - items: - description: VolumeSpec defines remote volume config - properties: - endpoint: - description: Remote volume URI - type: string - name: - description: Remote volume name - type: string - path: - description: Remote volume path - type: string - provider: - description: 'App Package Remote Store provider. Supported - values: aws, minio, azure, gcp.' - type: string - region: - description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. - type: string - secretRef: - description: Secret object name - type: string - storageType: - description: 'Remote Storage type. Supported values: - s3, blob, gcs. 
s3 works with aws or minio providers, - whereas blob works with azure provider, gcs works - for gcp.' - type: string - type: object - type: array - required: - - dlq - - name - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: sqs is immutable once created - rule: self.sqs == oldSelf.sqs - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) + queueBucketAccessSecretVersion: + description: Queue and bucket access secret version + type: string readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 194fdac86..e04e1a021 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4594,39 +4594,6 @@ spec: message: description: Auxillary message describing CR status type: string - objectStorage: - description: Object Storage - properties: - provider: - description: Provider of queue resources - enum: - - s3 - type: string - s3: - description: s3 specific inputs - properties: - endpoint: - description: S3-compatible Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - path: - description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ - type: string - required: - - path - type: object - required: - - provider - - s3 - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: s3 is immutable once created - rule: self.s3 == oldSelf.s3 - - message: s3 must be provided when provider is s3 - rule: self.provider != 's3' || has(self.s3) phase: description: Phase of the ingestor pods enum: @@ -4638,82 +4605,9 @@ spec: - Terminating - Error type: string - queue: - description: Queue - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - authRegion: - description: Auth Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - volumes: - description: List of remote storage volumes - items: - description: VolumeSpec defines remote volume config - properties: - endpoint: - description: Remote volume URI - type: string - name: - description: Remote volume name - type: string - path: - description: Remote volume path - type: string - provider: - description: 'App Package Remote Store provider. Supported - values: aws, minio, azure, gcp.' - type: string - region: - description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. - type: string - secretRef: - description: Secret object name - type: string - storageType: - description: 'Remote Storage type. Supported values: - s3, blob, gcs. s3 works with aws or minio providers, - whereas blob works with azure provider, gcs works - for gcp.' 
- type: string - type: object - type: array - required: - - dlq - - name - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: sqs is immutable once created - rule: self.sqs == oldSelf.sqs - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) + queueBucketAccessSecretVersion: + description: Queue and bucket access secret version + type: string readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 973105d16..fc8513023 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -54,8 +54,6 @@ rules: - licensemanagers - licensemasters - monitoringconsoles - - objectstorages - - queues - searchheadclusters - standalones verbs: @@ -76,8 +74,6 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers - - objectstorages/finalizers - - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -92,8 +88,6 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status - - objectstorages/status - - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/internal/controller/objectstorage_controller.go b/internal/controller/objectstorage_controller.go deleted file mode 100644 index 4ae36b1a2..000000000 --- a/internal/controller/objectstorage_controller.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "time" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/pkg/errors" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/common" - metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" - enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// ObjectStorageReconciler reconciles a ObjectStorage object -type ObjectStorageReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// TODO(user): Modify the Reconcile function to compare the state specified by -// the ObjectStorage object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *ObjectStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "ObjectStorage")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "ObjectStorage") - - reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("objectstorage", req.NamespacedName) - - // Fetch the ObjectStorage - instance := &enterpriseApi.ObjectStorage{} - err := r.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8serrors.IsNotFound(err) { - // Request object not found, could have been deleted after - // reconcile request. Owned objects are automatically - // garbage collected. For additional cleanup logic use - // finalizers. Return and don't requeue - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrap(err, "could not load objectstorage data") - } - - // If the reconciliation is paused, requeue - annotations := instance.GetAnnotations() - if annotations != nil { - if _, ok := annotations[enterpriseApi.ObjectStoragePausedAnnotation]; ok { - return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil - } - } - - reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - - result, err := ApplyObjectStorage(ctx, r.Client, instance) - if result.Requeue && result.RequeueAfter != 0 { - reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) - } - - return result, err -} - -var ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return enterprise.ApplyObjectStorage(ctx, client, instance) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *ObjectStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.ObjectStorage{}). - WithEventFilter(predicate.Or( - common.GenerationChangedPredicate(), - common.AnnotationChangedPredicate(), - common.LabelChangedPredicate(), - common.SecretChangedPredicate(), - common.ConfigMapChangedPredicate(), - common.StatefulsetChangedPredicate(), - common.PodChangedPredicate(), - common.CrdChangedPredicate(), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: enterpriseApi.TotalWorker, - }). - Complete(r) -} diff --git a/internal/controller/objectstorage_controller_test.go b/internal/controller/objectstorage_controller_test.go deleted file mode 100644 index 6d7dec87a..000000000 --- a/internal/controller/objectstorage_controller_test.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/testutils" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var _ = Describe("ObjectStorage Controller", func() { - BeforeEach(func() { - time.Sleep(2 * time.Second) - }) - - AfterEach(func() { - - }) - - Context("ObjectStorage Management", func() { - - It("Get ObjectStorage custom resource should fail", func() { - namespace := "ns-splunk-objectstorage-1" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - _, err := GetObjectStorage("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("objectstorages.enterprise.splunk.com \"test\" not found")) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create ObjectStorage custom resource with annotations should pause", func() { - namespace := "ns-splunk-objectstorage-2" - annotations := make(map[string]string) - annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - spec := enterpriseApi.ObjectStorageSpec{ - Provider: "s3", - S3: enterpriseApi.S3Spec{ - Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", - }, - } - CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - osSpec, _ := GetObjectStorage("test", nsSpecs.Name) - annotations = map[string]string{} - osSpec.Annotations = annotations - osSpec.Status.Phase = "Ready" - UpdateObjectStorage(osSpec, enterpriseApi.PhaseReady, spec) - DeleteObjectStorage("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create ObjectStorage custom resource should succeeded", func() { - namespace := "ns-splunk-objectstorage-3" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - annotations := make(map[string]string) - spec := enterpriseApi.ObjectStorageSpec{ - Provider: "s3", - S3: enterpriseApi.S3Spec{ - Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", - }, - } - CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteObjectStorage("test", nsSpecs.Name) - 
Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Cover Unused methods", func() { - namespace := "ns-splunk-objectstorage-4" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - ctx := context.TODO() - builder := fake.NewClientBuilder() - c := builder.Build() - instance := ObjectStorageReconciler{ - Client: c, - Scheme: scheme.Scheme, - } - request := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "test", - Namespace: namespace, - }, - } - _, err := instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - spec := enterpriseApi.ObjectStorageSpec{ - Provider: "s3", - S3: enterpriseApi.S3Spec{ - Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", - }, - } - osSpec := testutils.NewObjectStorage("test", namespace, spec) - Expect(c.Create(ctx, osSpec)).Should(Succeed()) - - annotations := make(map[string]string) - annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" - osSpec.Annotations = annotations - Expect(c.Update(ctx, osSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - annotations = map[string]string{} - osSpec.Annotations = annotations - Expect(c.Update(ctx, osSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - osSpec.DeletionTimestamp = &metav1.Time{} - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -func GetObjectStorage(name string, namespace string) (*enterpriseApi.ObjectStorage, error) { - By("Expecting ObjectStorage custom resource to be retrieved successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - os := &enterpriseApi.ObjectStorage{} - - err := k8sClient.Get(context.Background(), key, os) - if err != nil { - return nil, err - } - - return os, err -} - -func CreateObjectStorage(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { - By("Expecting ObjectStorage custom resource to be created successfully") - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - osSpec := &enterpriseApi.ObjectStorage{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: spec, - } - - Expect(k8sClient.Create(context.Background(), osSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - os := &enterpriseApi.ObjectStorage{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, os) - if status != "" { - fmt.Printf("status is set to %v", status) - os.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return os -} - -func UpdateObjectStorage(instance *enterpriseApi.ObjectStorage, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { - By("Expecting ObjectStorage custom resource to be updated successfully") - key := types.NamespacedName{ - Name: instance.Name, - Namespace: instance.Namespace, - } - - osSpec := 
testutils.NewObjectStorage(instance.Name, instance.Namespace, spec) - osSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), osSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - os := &enterpriseApi.ObjectStorage{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, os) - if status != "" { - fmt.Printf("status is set to %v", status) - os.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return os -} - -func DeleteObjectStorage(name string, namespace string) { - By("Expecting ObjectStorage custom resource to be deleted successfully") - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - - Eventually(func() error { - os := &enterpriseApi.ObjectStorage{} - _ = k8sClient.Get(context.Background(), key, os) - err := k8sClient.Delete(context.Background(), os) - return err - }, timeout, interval).Should(Succeed()) -} diff --git a/internal/controller/queue_controller.go b/internal/controller/queue_controller.go deleted file mode 100644 index 6fff662b9..000000000 --- a/internal/controller/queue_controller.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "time" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/pkg/errors" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/common" - metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" - enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// QueueReconciler reconciles a Queue object -type QueueReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Queue object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *QueueReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Queue")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "Queue") - - reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("queue", req.NamespacedName) - - // Fetch the Queue - instance := &enterpriseApi.Queue{} - err := r.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8serrors.IsNotFound(err) { - // Request object not found, could have been deleted after - // reconcile request. Owned objects are automatically - // garbage collected. For additional cleanup logic use - // finalizers. Return and don't requeue - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrap(err, "could not load queue data") - } - - // If the reconciliation is paused, requeue - annotations := instance.GetAnnotations() - if annotations != nil { - if _, ok := annotations[enterpriseApi.QueuePausedAnnotation]; ok { - return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil - } - } - - reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - - result, err := ApplyQueue(ctx, r.Client, instance) - if result.Requeue && result.RequeueAfter != 0 { - reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) - } - - return result, err -} - -var ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return enterprise.ApplyQueue(ctx, client, instance) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *QueueReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.Queue{}). - WithEventFilter(predicate.Or( - common.GenerationChangedPredicate(), - common.AnnotationChangedPredicate(), - common.LabelChangedPredicate(), - common.SecretChangedPredicate(), - common.ConfigMapChangedPredicate(), - common.StatefulsetChangedPredicate(), - common.PodChangedPredicate(), - common.CrdChangedPredicate(), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: enterpriseApi.TotalWorker, - }). - Complete(r) -} diff --git a/internal/controller/queue_controller_test.go b/internal/controller/queue_controller_test.go deleted file mode 100644 index b04a5d4b3..000000000 --- a/internal/controller/queue_controller_test.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/testutils" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var _ = Describe("Queue Controller", func() { - BeforeEach(func() { - time.Sleep(2 * time.Second) - }) - - AfterEach(func() { - - }) - - Context("Queue Management", func() { - - It("Get Queue custom resource should fail", func() { - namespace := "ns-splunk-queue-1" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - _, err := GetQueue("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("queues.enterprise.splunk.com \"test\" not found")) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create Queue custom resource with annotations should pause", func() { - namespace := "ns-splunk-queue-2" - annotations := make(map[string]string) - annotations[enterpriseApi.QueuePausedAnnotation] = "" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - spec := enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - AuthRegion: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - }, - } - CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - icSpec, _ := GetQueue("test", nsSpecs.Name) - annotations = map[string]string{} - icSpec.Annotations = annotations - icSpec.Status.Phase = "Ready" - UpdateQueue(icSpec, enterpriseApi.PhaseReady, spec) - DeleteQueue("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create Queue custom resource should succeeded", func() { - namespace := "ns-splunk-queue-3" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - annotations := make(map[string]string) - spec := enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - AuthRegion: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - }, - } - CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteQueue("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Cover Unused methods", func() { - namespace := "ns-splunk-queue-4" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: 
metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - ctx := context.TODO() - builder := fake.NewClientBuilder() - c := builder.Build() - instance := QueueReconciler{ - Client: c, - Scheme: scheme.Scheme, - } - request := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "test", - Namespace: namespace, - }, - } - _, err := instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - spec := enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - AuthRegion: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - }, - } - bcSpec := testutils.NewQueue("test", namespace, spec) - Expect(c.Create(ctx, bcSpec)).Should(Succeed()) - - annotations := make(map[string]string) - annotations[enterpriseApi.QueuePausedAnnotation] = "" - bcSpec.Annotations = annotations - Expect(c.Update(ctx, bcSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - annotations = map[string]string{} - bcSpec.Annotations = annotations - Expect(c.Update(ctx, bcSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - bcSpec.DeletionTimestamp = &metav1.Time{} - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -func GetQueue(name string, namespace string) (*enterpriseApi.Queue, error) { - By("Expecting Queue custom resource to be retrieved successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - b := &enterpriseApi.Queue{} - - err := k8sClient.Get(context.Background(), key, b) - if err != nil { - return nil, err - } - - return b, err -} - -func CreateQueue(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { - By("Expecting Queue custom resource to be created successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - ingSpec := &enterpriseApi.Queue{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: spec, - } - - Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - b := &enterpriseApi.Queue{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, b) - if status != "" { - fmt.Printf("status is set to %v", status) - b.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return b -} - -func UpdateQueue(instance *enterpriseApi.Queue, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { - By("Expecting Queue custom resource to be updated successfully") - - key := types.NamespacedName{ - Name: instance.Name, - Namespace: instance.Namespace, - } - - bSpec := testutils.NewQueue(instance.Name, instance.Namespace, spec) - bSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - b := &enterpriseApi.Queue{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, b) - if status != "" { - fmt.Printf("status is set to %v", status) - b.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) - time.Sleep(2 * 
time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return b -} - -func DeleteQueue(name string, namespace string) { - By("Expecting Queue custom resource to be deleted successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - - Eventually(func() error { - b := &enterpriseApi.Queue{} - _ = k8sClient.Get(context.Background(), key, b) - err := k8sClient.Delete(context.Background(), b) - return err - }, timeout, interval).Should(Succeed()) -} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 8454d15b5..142a8720c 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -98,12 +98,6 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) - if err := (&QueueReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } if err := (&ClusterManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), @@ -128,12 +122,6 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&ObjectStorageReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } if err := (&LicenseManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index af981be2c..42b714924 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -76,8 +76,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Queue = nil - cr.Status.ObjectStorage = nil + cr.Status.QueueBucketAccessSecretVersion = "0" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -286,11 +285,27 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + // If queue is updated if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { + if secretChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline 
config change after pod creation") @@ -306,8 +321,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller scopedLog.Info("Restarted splunk", "indexer", i) } - cr.Status.Queue = &queue.Spec - cr.Status.ObjectStorage = &os.Spec + cr.Status.QueueBucketAccessSecretVersion = version } } @@ -400,8 +414,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Queue = nil - cr.Status.ObjectStorage = nil + cr.Status.QueueBucketAccessSecretVersion = "0" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -613,10 +626,26 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { + if secretChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -632,8 +661,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, scopedLog.Info("Restarted splunk", "indexer", i) } - cr.Status.Queue = &queue.Spec - cr.Status.ObjectStorage = &os.Spec + cr.Status.QueueBucketAccessSecretVersion = version } } @@ -1304,7 +1332,7 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForQueuePipeline = splclient.NewSplunkClient // updateIndexerConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so -func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s rclient.Client) error { +func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("updateIndexerConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -1322,21 +1350,7 @@ func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, } splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - // Secret reference - s3AccessKey, s3SecretKey := 
"", "" - if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { - for _, vol := range queue.SQS.VolList { - if vol.SecretRef != "" { - s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR) - if err != nil { - scopedLog.Error(err, "Failed to get queue remote volume secrets") - return err - } - } - } - } - - queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, accessKey, secretKey) for _, pbVal := range queueOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil { @@ -1361,9 +1375,9 @@ func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, } // getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for indexer pods conf files -func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) { +func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) { // Queue Inputs - queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, accessKey, secretKey) // Pipeline inputs pipelineInputs = getPipelineInputsForConfFile(true) @@ -1383,7 +1397,7 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { } // getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files -func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) { +func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (inputs, outputs [][]string) { queueProvider := "" if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" @@ -1405,9 +1419,9 @@ func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.Queu ) // TODO: Handle credentials change - if s3AccessKey != "" && s3SecretKey != "" { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + if accessKey != "" && secretKey != "" { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) } outputs = inputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 9d1bf0118..ac9e59554 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2139,6 +2139,9 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Object definitions provider := "sqs_smartbus" + accessKey := "accessKey" + secretKey := "secretKey" + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: 
"Queue", @@ -2197,9 +2200,8 @@ func TestUpdateIndexerConfFiles(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + ReadyReplicas: 3, + QueueBucketAccessSecretVersion: "123", }, } c.Create(ctx, cr) @@ -2260,7 +2262,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // Mock secret @@ -2271,7 +2273,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf @@ -2295,7 +2297,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // inputs.conf @@ -2305,7 +2307,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: failure in updating remote queue stanza mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -2333,7 +2335,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 55f0e7d35..fb4c9474a 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -72,8 +72,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Queue = nil - cr.Status.ObjectStorage = nil + cr.Status.QueueBucketAccessSecretVersion = "0" } cr.Status.Replicas = cr.Spec.Replicas @@ -252,10 +251,26 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + // If queue is updated - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { + if secretChanged { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) 
- err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -271,8 +286,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr scopedLog.Info("Restarted splunk", "ingestor", i) } - cr.Status.Queue = &queue.Spec - cr.Status.ObjectStorage = &os.Spec + cr.Status.QueueBucketAccessSecretVersion = version } // Upgrade fron automated MC to MC CRD @@ -367,7 +381,7 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie } // updateIngestorConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so -func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s client.Client) error { +func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("updateIngestorConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -385,21 +399,7 @@ func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Contex } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - // Secret reference - s3AccessKey, s3SecretKey := "", "" - if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { - for _, vol := range queue.SQS.VolList { - if vol.SecretRef != "" { - s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR) - if err != nil { - scopedLog.Error(err, "Failed to get queue remote volume secrets") - return err - } - } - } - } - - queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, accessKey, secretKey) for _, input := range queueInputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil { @@ -418,9 +418,9 @@ func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Contex } // getQueueAndPipelineInputsForIngestorConfFiles returns a list of queue and pipeline inputs for ingestor pods conf files -func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, pipelineInputs [][]string) { +func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, pipelineInputs [][]string) { // Queue Inputs - queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, accessKey, secretKey) // Pipeline inputs pipelineInputs = getPipelineInputsForConfFile(false) @@ 
-464,7 +464,7 @@ func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) { } // getQueueAndObjectStorageInputsForConfFiles returns a list of queue and object storage inputs for conf files -func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (config [][]string) { +func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (config [][]string) { queueProvider := "" if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" @@ -486,10 +486,9 @@ func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.Que []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, ) - // TODO: Handle credentials change - if s3AccessKey != "" && s3SecretKey != "" { - config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) - config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + if accessKey != "" && secretKey != "" { + config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) } return diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index e79bbaa94..f7dd54b39 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -484,6 +484,9 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Object definitions provider := "sqs_smartbus" + accessKey := "accessKey" + secretKey := "secretKey" + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", @@ -537,10 +540,9 @@ func TestUpdateIngestorConfFiles(t *testing.T) { }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + Replicas: 3, + ReadyReplicas: 3, + QueueBucketAccessSecretVersion: "123", }, } @@ -601,7 +603,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // Mock secret @@ -612,7 +614,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf @@ -634,7 +636,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -663,7 +665,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = 
mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 882a96ff3..88a85b448 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -418,22 +418,24 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. } // GetQueueRemoteVolumeSecrets is used to retrieve access key and secrete key for Index & Ingestion separation -func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { +func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, string, error) { namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) if err != nil { - return "", "", err + return "", "", "", err } accessKey := string(namespaceScopedSecret.Data[s3AccessKey]) secretKey := string(namespaceScopedSecret.Data[s3SecretKey]) + version := namespaceScopedSecret.ResourceVersion + if accessKey == "" { - return "", "", errors.New("access Key is missing") + return "", "", "", errors.New("access Key is missing") } else if secretKey == "" { - return "", "", errors.New("secret Key is missing") + return "", "", "", errors.New("secret Key is missing") } - return accessKey, secretKey, nil + return accessKey, secretKey, version, nil } // getLocalAppFileName generates the local app file name diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 4314124cc..17b5bd8da 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -317,7 +317,8 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(*ingest.Status.Queue).To(Equal(queue), "Ingestor queue status is not the same as provided as input") + Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Ingestor queue status queue bucket access secret version is empty") + Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Ingestor queue status queue bucket access secret version is 0") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -327,7 +328,8 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(*index.Status.Queue).To(Equal(queue), "Indexer queue status is not the same as provided as input") + Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Indexer queue status queue bucket access secret version is empty") + Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Indexer queue status queue bucket access secret version is 0") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") From 42dc8e8ab139a21ffe15cafdcbac762e04d87829 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 15:07:27 +0100 Subject: [PATCH 14/15] CSPL-4360 Update of docs, helm tests and validations --- api/v4/indexercluster_types.go | 4 +-- 
api/v4/ingestorcluster_types.go | 4 +-- api/v4/queue_types.go | 5 ++- ...enterprise.splunk.com_indexerclusters.yaml | 4 +++ ...nterprise.splunk.com_ingestorclusters.yaml | 5 +++ .../bases/enterprise.splunk.com_queues.yaml | 10 ++++-- docs/IndexIngestionSeparation.md | 36 ++++++++++--------- .../02-assert.yaml | 24 ------------- .../03-assert.yaml | 12 ------- 9 files changed, 45 insertions(+), 59 deletions(-) diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index f1332d8c4..02cf1562d 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -35,17 +35,17 @@ const ( ) // +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` // +optional - // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +optional - // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 9ce919809..021acd025 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -28,6 +28,8 @@ const ( IngestorClusterPausedAnnotation = "ingestorcluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IngestorClusterSpec defines the spec of Ingestor Cluster type IngestorClusterSpec struct { // Common Splunk spec @@ -40,12 +42,10 @@ type IngestorClusterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` // +kubebuilder:validation:Required - // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required - // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index 999eaccc8..2139f43dd 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -28,7 +28,10 @@ const ( ) // +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" -// +kubebuilder:validation:XValidation:rule="self.sqs == oldSelf.sqs",message="sqs is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.name == oldSelf.sqs.name",message="sqs.name is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.authRegion == oldSelf.sqs.authRegion",message="sqs.authRegion is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.dlq == oldSelf.sqs.dlq",message="sqs.dlq is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.endpoint == oldSelf.sqs.endpoint",message="sqs.endpoint is immutable once 
created" // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" // QueueSpec defines the desired state of Queue type QueueSpec struct { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 9b3f50bc8..3ea073d7d 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8331,6 +8331,10 @@ spec: x-kubernetes-validations: - message: queueRef and objectStorageRef must both be set or both be empty rule: has(self.queueRef) == has(self.objectStorageRef) + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index e04e1a021..703af01e6 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4306,6 +4306,11 @@ spec: - objectStorageRef - queueRef type: object + x-kubernetes-validations: + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IngestorClusterStatus defines the observed state of Ingestor Cluster diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index 454d1700b..e10ee536a 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -122,8 +122,14 @@ spec: x-kubernetes-validations: - message: provider is immutable once created rule: self.provider == oldSelf.provider - - message: sqs is immutable once created - rule: self.sqs == oldSelf.sqs + - message: sqs.name is immutable once created + rule: self.sqs.name == oldSelf.sqs.name + - message: sqs.authRegion is immutable once created + rule: self.sqs.authRegion == oldSelf.sqs.authRegion + - message: sqs.dlq is immutable once created + rule: self.sqs.dlq == oldSelf.sqs.dlq + - message: sqs.endpoint is immutable once created + rule: self.sqs.endpoint == oldSelf.sqs.endpoint - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index c7b05dcae..ab6f789c7 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -43,8 +43,9 @@ SQS message queue inputs can be found in the table below. 
| region | string | [Required] Region where the queue is located |
| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint
| dlq | string | [Required] Name of the dead letter queue |
+| volumes | []VolumeSpec | [Optional] List of remote storage volumes whose secretRef provides the credentials for queue and bucket access (the referenced Secret must contain s3_access_key and s3_secret_key) |

-**First provisioning or update of any of the queue inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.**
+**SOK doesn't support updating any of the Queue inputs except volumes, which may be changed to rotate the access credentials.**

## Example
```
@@ -59,6 +60,9 @@ spec:
    region: us-west-2
    endpoint: https://sqs.us-west-2.amazonaws.com
    dlq: sqs-dlq-test
+    volumes:
+      - name: s3-sqs-volume
+        secretRef: s3-secret
```

# ObjectStorage
@@ -81,7 +85,7 @@ S3 object storage inputs can be found in the table below.
| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size |
| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint

-Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed.
+**SOK doesn't support updating any of the ObjectStorage inputs.**

## Example
```
@@ -110,9 +114,13 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol
| queueRef | corev1.ObjectReference | Message queue reference |
| objectStorageRef | corev1.ObjectReference | Object storage reference |

+**SOK doesn't support updating queueRef and objectStorageRef.**
+
+**First provisioning or scaling up the number of replicas requires an Ingestor Cluster Splunkd restart; SOK performs this restart automatically.**
+
## Example

-The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process.
+The example presented below configures an IngestorCluster named ingestor with the Splunk ${SPLUNK_IMAGE_VERSION} image that resides in the default namespace and is scaled to 3 replicas serving the ingestion traffic. This IngestorCluster custom resource is set up with the s3-secret credentials, allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process.

In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly.
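For reference, the sketch below shows the shape of the credentials Secret that the Queue's volumes[].secretRef (here s3-secret) is expected to point at. The Secret name and key values are placeholders; the only firm requirement stated in the table above is that the Secret carries the s3_access_key and s3_secret_key entries.

```
apiVersion: v1
kind: Secret
metadata:
  name: s3-secret                      # must match volumes[].secretRef in the Queue spec
type: Opaque
stringData:
  s3_access_key: AKIAEXAMPLEACCESSKEY  # placeholder value
  s3_secret_key: exampleSecretKeyValue # placeholder value
```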
@@ -147,9 +155,13 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll
| queueRef | corev1.ObjectReference | Message queue reference |
| objectStorageRef | corev1.ObjectReference | Object storage reference |

+**SOK doesn't support updating queueRef and objectStorageRef.**
+
+**First provisioning or scaling up the number of replicas requires an Indexer Cluster Splunkd restart; SOK performs this restart automatically.**
+
## Example

-The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process.
+The example presented below configures an IndexerCluster named indexer with the Splunk ${SPLUNK_IMAGE_VERSION} image that resides in the default namespace and is scaled to 3 replicas serving the indexing traffic. This IndexerCluster custom resource is set up with the s3-secret credentials, allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process.

In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly.

@@ -204,6 +216,9 @@ queue:
    region: us-west-2
    endpoint: https://sqs.us-west-2.amazonaws.com
    dlq: sqs-dlq-test
+    volumes:
+      - name: s3-sqs-volume
+        secretRef: s3-secret
```

```
@@ -734,18 +749,7 @@ Status:
    Is Deployment In Progress:  false
    Last App Info Check Time:   0
    Version:                    0
-  Queue:
-    Sqs:
-      Region:    us-west-2
-      DLQ:       sqs-dlq-test
-      Endpoint:  https://sqs.us-west-2.amazonaws.com
-      Name:      sqs-test
-    Provider:    sqs
-  Object Storage:
-    S3:
-      Endpoint:  https://s3.us-west-2.amazonaws.com
-      Path:      s3://ingestion/smartbus-test
-    Provider:    s3
+  Queue Bucket Access Secret Version: 33744270
  Message:
  Phase:           Ready
  Ready Replicas:  3
diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
index ca56ca5ef..5848da973 100644
--- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
+++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
@@ -67,18 +67,6 @@ spec:
    name: os
status:
  phase: Ready
-  queue:
-    provider: sqs
-    sqs:
-      name: index-ingest-separation-test-q
-      authRegion: us-west-2
-      endpoint: https://sqs.us-west-2.amazonaws.com
-      dlq: index-ingest-separation-test-dlq
-  objectStorage:
-    provider: s3
-    s3:
-      endpoint: https://s3.us-west-2.amazonaws.com
-      path: s3://index-ingest-separation-test-bucket/smartbus-test

---

# check for stateful set and replicas as configured
@@ -110,18 +98,6 @@ spec:
    name: os
status:
  phase: Ready
-  queue:
-    provider: sqs
-    sqs:
-      name: index-ingest-separation-test-q
-      authRegion: us-west-2
-      endpoint: https://sqs.us-west-2.amazonaws.com
-      dlq: index-ingest-separation-test-dlq
-  objectStorage:
-    provider: s3
-    s3:
-      endpoint: https://s3.us-west-2.amazonaws.com
-      path: s3://index-ingest-separation-test-bucket/smartbus-test

---

# check for stateful set and 
replicas as configured diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml index 765a22192..8bf619148 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -12,18 +12,6 @@ spec: name: os status: phase: Ready - queue: - provider: sqs - sqs: - name: index-ingest-separation-test-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: index-ingest-separation-test-dlq - objectStorage: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful sets and replicas updated From f929eb39d1785ad0e517e667580556292d3a6ff1 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 15:56:22 +0100 Subject: [PATCH 15/15] CSPL-4360 Add secret watch and fix controller tests --- config/rbac/role.yaml | 6 ++ .../templates/rbac/clusterrole.yaml | 78 +++++++++++++++++++ .../rbac/objectstorage_editor_role.yaml | 55 +++++++++++++ .../rbac/objectstorage_viewer_role.yaml | 47 +++++++++++ .../controller/indexercluster_controller.go | 51 ++++++++++++ .../controller/ingestorcluster_controller.go | 55 +++++++++++++ .../ingestorcluster_controller_test.go | 37 ++++++++- internal/controller/testutils/new.go | 9 ++- .../02-assert.yaml | 4 - 9 files changed, 332 insertions(+), 10 deletions(-) create mode 100644 helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml create mode 100644 helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index fc8513023..973105d16 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -54,6 +54,8 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - objectstorages + - queues - searchheadclusters - standalones verbs: @@ -74,6 +76,8 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - objectstorages/finalizers + - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -88,6 +92,8 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - objectstorages/status + - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml index 2b5d51ec9..a952b174c 100644 --- a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml +++ b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml @@ -222,6 +222,32 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: @@ -300,6 +326,58 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + 
verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml new file mode 100644 index 000000000..d90f7673b --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml new file mode 100644 index 000000000..ec9358b8d --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-objectstorage-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 7efb6e1b8..4f83f5abe 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -148,6 +148,57 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider indexer clusters in the same namespace as the Secret + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index 0d8117bd2..b5aa3d911 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -50,6 +50,10 @@ type IngestorClusterReconciler struct { // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues;objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status;objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers;objectstorages/finalizers,verbs=update + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by @@ -129,6 +133,57 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). 
+ Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider ingestor clusters in the same namespace as the Secret + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 38e7cbb4e..49d59e608 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -104,7 +104,7 @@ var _ = Describe("IngestorCluster Controller", func() { annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady) + UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -161,6 +161,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() @@ -177,7 +206,7 @@ var _ = Describe("IngestorCluster Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - icSpec := testutils.NewIngestorCluster("test", namespace, "image") + icSpec := testutils.NewIngestorCluster("test", namespace, "image", os, queue) Expect(c.Create(ctx, icSpec)).Should(Succeed()) annotations := make(map[string]string) @@ -269,7 +298,7 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string return ic } -func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue 
*enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be updated successfully") key := types.NamespacedName{ @@ -277,7 +306,7 @@ func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enter Namespace: instance.Namespace, } - icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image") + icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image", os, queue) icSpec.ResourceVersion = instance.ResourceVersion Expect(k8sClient.Update(context.Background(), icSpec)).Should(Succeed()) time.Sleep(2 * time.Second) diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index aa47e8092..63a291a1d 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -46,7 +46,7 @@ func NewStandalone(name, ns, image string) *enterpriseApi.Standalone { } // NewIngestorCluster returns new IngestorCluster instance with its config hash -func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { +func NewIngestorCluster(name, ns, image string, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: enterpriseApi.IngestorClusterSpec{ @@ -55,7 +55,12 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { }, Replicas: 3, QueueRef: corev1.ObjectReference{ - Name: "queue", + Name: queue.Name, + Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 5848da973..c6cc343d8 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -11,8 +11,6 @@ spec: authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: index-ingest-separation-test-dlq -status: - phase: Ready --- # assert for object storage custom resource to be ready @@ -25,8 +23,6 @@ spec: s3: endpoint: https://s3.us-west-2.amazonaws.com path: s3://index-ingest-separation-test-bucket/smartbus-test -status: - phase: Ready --- # assert for cluster manager custom resource to be ready
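To exercise the new Secret watch end to end, a rotation step along the lines of the sketch below could be appended to the kuttl suite; the file name and key values are illustrative and not part of this patch. Updating the Secret referenced by the Queue's volumes should cause the watch added above to enqueue a reconcile for every IndexerCluster and IngestorCluster pointing at that Queue, and their QueueBucketAccessSecretVersion status should advance to the Secret's new resourceVersion.

```
# hypothetical 05-rotate-secret.yaml: re-apply the credentials Secret with new values
apiVersion: v1
kind: Secret
metadata:
  name: s3-secret                     # the Secret created at the start of the suite
type: Opaque
stringData:
  s3_access_key: ROTATED-ACCESS-KEY   # placeholder rotated credentials
  s3_secret_key: ROTATED-SECRET-KEY   # placeholder rotated credentials
```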