From c1ad439ee614877e02cafd852bebbb462c6ea8f9 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 12 Dec 2025 12:30:36 +0100 Subject: [PATCH 1/7] CSPL-4358 Splitting BusConfiguration into Bus and LargeMessageStore --- PROJECT | 11 +- ...busconfiguration_types.go => bus_types.go} | 83 +++--- api/v4/indexercluster_types.go | 16 +- api/v4/ingestorcluster_types.go | 14 +- api/v4/largemessagestore.go | 137 +++++++++ api/v4/zz_generated.deepcopy.go | 168 +++++++++-- cmd/main.go | 11 +- .../bases/enterprise.splunk.com_buses.yaml | 123 ++++++++ ...enterprise.splunk.com_indexerclusters.yaml | 118 +++++++- ...nterprise.splunk.com_ingestorclusters.yaml | 114 +++++++- ...rprise.splunk.com_largemessagestores.yaml} | 55 ++-- config/crd/kustomization.yaml | 3 +- ..._editor_role.yaml => bus_editor_role.yaml} | 6 +- ..._viewer_role.yaml => bus_viewer_role.yaml} | 6 +- .../rbac/largemessagestore_editor_role.yaml | 30 ++ .../rbac/largemessagestore_viewer_role.yaml | 26 ++ config/rbac/role.yaml | 9 +- ...figuration.yaml => enterprise_v4_bus.yaml} | 4 +- .../enterprise_v4_largemessagestore.yaml | 8 + config/samples/kustomization.yaml | 3 +- docs/CustomResources.md | 8 +- docs/IndexIngestionSeparation.md | 74 +++-- .../enterprise_v4_busconfigurations.yaml | 40 --- .../templates/enterprise_v4_buses.yaml | 30 ++ .../enterprise_v4_indexercluster.yaml | 10 +- .../enterprise_v4_ingestorcluster.yaml | 17 +- .../enterprise_v4_largemessagestores.yaml | 28 ++ helm-chart/splunk-enterprise/values.yaml | 8 +- ..._editor_role.yaml => bus_editor_role.yaml} | 12 +- ..._viewer_role.yaml => bus_viewer_role.yaml} | 12 +- .../splunk-operator/templates/rbac/role.yaml | 32 ++- ...ration_controller.go => bus_controller.go} | 38 +-- ...troller_test.go => bus_controller_test.go} | 127 +++++---- .../controller/indexercluster_controller.go | 36 ++- .../controller/ingestorcluster_controller.go | 36 ++- .../ingestorcluster_controller_test.go | 71 ++++- .../largemessagestore_controller.go | 120 ++++++++ .../largemessagestore_controller_test.go | 263 ++++++++++++++++++ internal/controller/suite_test.go | 25 +- internal/controller/testutils/new.go | 33 +-- .../01-assert.yaml | 68 +++-- .../02-assert.yaml | 19 +- .../splunk_index_ingest_sep.yaml | 32 ++- pkg/splunk/enterprise/bus.go | 75 +++++ pkg/splunk/enterprise/bus_test.go | 69 +++++ pkg/splunk/enterprise/busconfiguration.go | 140 ---------- .../enterprise/busconfiguration_test.go | 151 ---------- pkg/splunk/enterprise/indexercluster.go | 159 +++++++---- pkg/splunk/enterprise/indexercluster_test.go | 229 ++++++++------- pkg/splunk/enterprise/ingestorcluster.go | 110 +++++--- pkg/splunk/enterprise/ingestorcluster_test.go | 251 ++++++++++------- pkg/splunk/enterprise/largemessagestore.go | 75 +++++ .../enterprise/largemessagestore_test.go | 83 ++++++ pkg/splunk/enterprise/types.go | 13 +- pkg/splunk/enterprise/util.go | 24 +- .../c3/appframework_aws_test.go | 2 +- .../c3/manager_appframework_test.go | 4 +- .../c3/appframework_azure_test.go | 2 +- .../c3/manager_appframework_azure_test.go | 2 +- .../c3/manager_appframework_test.go | 4 +- ...dex_and_ingestion_separation_suite_test.go | 35 +-- .../index_and_ingestion_separation_test.go | 112 +++++--- test/testenv/deployment.go | 79 ++++-- test/testenv/util.go | 37 ++- 64 files changed, 2674 insertions(+), 1066 deletions(-) rename api/v4/{busconfiguration_types.go => bus_types.go} (56%) create mode 100644 api/v4/largemessagestore.go create mode 100644 config/crd/bases/enterprise.splunk.com_buses.yaml rename 
config/crd/bases/{enterprise.splunk.com_busconfigurations.yaml => enterprise.splunk.com_largemessagestores.yaml} (64%) rename config/rbac/{busconfiguration_editor_role.yaml => bus_editor_role.yaml} (88%) rename config/rbac/{busconfiguration_viewer_role.yaml => bus_viewer_role.yaml} (87%) create mode 100644 config/rbac/largemessagestore_editor_role.yaml create mode 100644 config/rbac/largemessagestore_viewer_role.yaml rename config/samples/{enterprise_v4_busconfiguration.yaml => enterprise_v4_bus.yaml} (72%) create mode 100644 config/samples/enterprise_v4_largemessagestore.yaml delete mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml create mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml create mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml rename helm-chart/splunk-operator/templates/rbac/{busconfiguration_editor_role.yaml => bus_editor_role.yaml} (78%) rename helm-chart/splunk-operator/templates/rbac/{busconfiguration_viewer_role.yaml => bus_viewer_role.yaml} (76%) rename internal/controller/{busconfiguration_controller.go => bus_controller.go} (70%) rename internal/controller/{busconfiguration_controller_test.go => bus_controller_test.go} (56%) create mode 100644 internal/controller/largemessagestore_controller.go create mode 100644 internal/controller/largemessagestore_controller_test.go create mode 100644 pkg/splunk/enterprise/bus.go create mode 100644 pkg/splunk/enterprise/bus_test.go delete mode 100644 pkg/splunk/enterprise/busconfiguration.go delete mode 100644 pkg/splunk/enterprise/busconfiguration_test.go create mode 100644 pkg/splunk/enterprise/largemessagestore.go create mode 100644 pkg/splunk/enterprise/largemessagestore_test.go diff --git a/PROJECT b/PROJECT index 983f3418b..aa4aa1078 100644 --- a/PROJECT +++ b/PROJECT @@ -128,7 +128,16 @@ resources: controller: true domain: splunk.com group: enterprise - kind: BusConfiguration + kind: Bus + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: LargeMessageStore path: github.com/splunk/splunk-operator/api/v4 version: v4 version: "3" diff --git a/api/v4/busconfiguration_types.go b/api/v4/bus_types.go similarity index 56% rename from api/v4/busconfiguration_types.go rename to api/v4/bus_types.go index a4b76a00b..10958f56b 100644 --- a/api/v4/busconfiguration_types.go +++ b/api/v4/bus_types.go @@ -23,35 +23,48 @@ import ( ) const ( - // BusConfigurationPausedAnnotation is the annotation that pauses the reconciliation (triggers + // BusPausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - BusConfigurationPausedAnnotation = "busconfiguration.enterprise.splunk.com/paused" + BusPausedAnnotation = "bus.enterprise.splunk.com/paused" ) -// BusConfigurationSpec defines the desired state of BusConfiguration -type BusConfigurationSpec struct { - Type string `json:"type"` +// +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" +// BusSpec defines the desired state of Bus +type BusSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=sqs + // Provider of queue resources + Provider string `json:"provider"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the queue + QueueName string `json:"queueName"` + + // 
+kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` + // Region of the resources + Region string `json:"region"` + // sqs specific inputs SQS SQSSpec `json:"sqs"` } type SQSSpec struct { - QueueName string `json:"queueName"` - - AuthRegion string `json:"authRegion"` - + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the dead letter queue resource + DLQ string `json:"dlq"` + + // +optional + // +kubebuilder:validation:Pattern=`^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` - - LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` - - LargeMessageStorePath string `json:"largeMessageStorePath"` - - DeadLetterQueueName string `json:"deadLetterQueueName"` } -// BusConfigurationStatus defines the observed state of BusConfiguration. -type BusConfigurationStatus struct { - // Phase of the bus configuration +// BusStatus defines the observed state of Bus +type BusStatus struct { + // Phase of the bus Phase Phase `json:"phase"` // Resource revision tracker @@ -64,27 +77,27 @@ type BusConfigurationStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// BusConfiguration is the Schema for a Splunk Enterprise bus configuration +// Bus is the Schema for a Splunk Enterprise bus // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=busconfigurations,scope=Namespaced,shortName=bus -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus configuration" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus configuration resource" +// +kubebuilder:resource:path=buses,scope=Namespaced,shortName=bus +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus resource" // +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// BusConfiguration is the Schema for the busconfigurations API -type BusConfiguration struct { +// Bus is the Schema for the buses API +type Bus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec BusConfigurationSpec `json:"spec"` - Status BusConfigurationStatus `json:"status,omitempty,omitzero"` + Spec BusSpec `json:"spec"` + Status BusStatus `json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *BusConfiguration) DeepCopyObject() runtime.Object { +func (in *Bus) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -93,20 +106,20 @@ func (in *BusConfiguration) DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// BusConfigurationList contains a list of BusConfiguration -type BusConfigurationList struct { +// BusList contains a list of Bus +type BusList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []BusConfiguration `json:"items"` + Items []Bus `json:"items"` } func init() { - 
SchemeBuilder.Register(&BusConfiguration{}, &BusConfigurationList{}) + SchemeBuilder.Register(&Bus{}, &BusList{}) } // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.Event { +func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ @@ -114,7 +127,7 @@ func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.E Namespace: bc.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "BusConfiguration", + Kind: "Bus", Namespace: bc.Namespace, Name: bc.Name, UID: bc.UID, @@ -123,12 +136,12 @@ func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.E Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-busconfiguration-controller", + Component: "splunk-bus-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/busconfiguration-controller", + ReportingController: "enterprise.splunk.com/bus-controller", } } diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 493aeb0f3..0ec425240 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -38,8 +38,13 @@ const ( type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef,omitempty"` + // +optional + // Bus reference + BusRef corev1.ObjectReference `json:"busRef"` + + // +optional + // Large Message Store reference + LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -115,8 +120,11 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Bus + Bus *BusSpec `json:"bus,omitempty"` + + // Large Message Store + LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 364625e97..27fa5d1e0 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -39,8 +39,11 @@ type IngestorClusterSpec struct { // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef"` + // Bus reference + BusRef corev1.ObjectReference `json:"busRef"` + + // Large Message Store reference + LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -69,8 +72,11 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Bus + Bus *BusSpec `json:"bus,omitempty"` + + // Large Message Store + LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` } // 
+kubebuilder:object:root=true diff --git a/api/v4/largemessagestore.go b/api/v4/largemessagestore.go new file mode 100644 index 000000000..3e9f4b62b --- /dev/null +++ b/api/v4/largemessagestore.go @@ -0,0 +1,137 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // LargeMessageStorePausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + LargeMessageStorePausedAnnotation = "largemessagestore.enterprise.splunk.com/paused" +) + +// +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" +// LargeMessageStoreSpec defines the desired state of LargeMessageStore +type LargeMessageStoreSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=s3 + // Provider of queue resources + Provider string `json:"provider"` + + // s3 specific inputs + S3 S3Spec `json:"s3"` +} + +type S3Spec struct { + // +optional + // +kubebuilder:validation:Pattern=`^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // S3-compatible Service endpoint + Endpoint string `json:"endpoint"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$` + // S3 bucket path + Path string `json:"path"` +} + +// LargeMessageStoreStatus defines the observed state of LargeMessageStore. 
+type LargeMessageStoreStatus struct { + // Phase of the large message store + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxillary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LargeMessageStore is the Schema for a Splunk Enterprise large message store +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=largemessagestores,scope=Namespaced,shortName=lms +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of large message store" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of large message store resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" +// +kubebuilder:storageversion + +// LargeMessageStore is the Schema for the largemessagestores API +type LargeMessageStore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec LargeMessageStoreSpec `json:"spec"` + Status LargeMessageStoreStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *LargeMessageStore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// LargeMessageStoreList contains a list of LargeMessageStore +type LargeMessageStoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LargeMessageStore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LargeMessageStore{}, &LargeMessageStoreList{}) +} + +// NewEvent creates a new event associated with the object and ready +// to be published to Kubernetes API +func (bc *LargeMessageStore) NewEvent(eventType, reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: bc.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "LargeMessageStore", + Namespace: bc.Namespace, + Name: bc.Name, + UID: bc.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "splunk-large-message-store-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + ReportingController: "enterprise.splunk.com/large-message-store-controller", + } +} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index fa23c996a..dc19b7f10 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -181,7 +181,7 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { +func (in *Bus) DeepCopyInto(out *Bus) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -189,42 +189,42 @@ func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfiguration. -func (in *BusConfiguration) DeepCopy() *BusConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bus. +func (in *Bus) DeepCopy() *Bus { if in == nil { return nil } - out := new(BusConfiguration) + out := new(Bus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationList) DeepCopyInto(out *BusConfigurationList) { +func (in *BusList) DeepCopyInto(out *BusList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]BusConfiguration, len(*in)) + *out = make([]Bus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationList. -func (in *BusConfigurationList) DeepCopy() *BusConfigurationList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusList. +func (in *BusList) DeepCopy() *BusList { if in == nil { return nil } - out := new(BusConfigurationList) + out := new(BusList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BusConfigurationList) DeepCopyObject() runtime.Object { +func (in *BusList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -232,23 +232,23 @@ func (in *BusConfigurationList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationSpec) DeepCopyInto(out *BusConfigurationSpec) { +func (in *BusSpec) DeepCopyInto(out *BusSpec) { *out = *in out.SQS = in.SQS } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationSpec. -func (in *BusConfigurationSpec) DeepCopy() *BusConfigurationSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusSpec. +func (in *BusSpec) DeepCopy() *BusSpec { if in == nil { return nil } - out := new(BusConfigurationSpec) + out := new(BusSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { +func (in *BusStatus) DeepCopyInto(out *BusStatus) { *out = *in if in.ResourceRevMap != nil { in, out := &in.ResourceRevMap, &out.ResourceRevMap @@ -259,12 +259,12 @@ func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationStatus. -func (in *BusConfigurationStatus) DeepCopy() *BusConfigurationStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusStatus. 
+func (in *BusStatus) DeepCopy() *BusStatus { if in == nil { return nil } - out := new(BusConfigurationStatus) + out := new(BusStatus) in.DeepCopyInto(out) return out } @@ -600,7 +600,8 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.BusConfigurationRef = in.BusConfigurationRef + out.BusRef = in.BusRef + out.LargeMessageStoreRef = in.LargeMessageStoreRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -633,7 +634,16 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - out.BusConfiguration = in.BusConfiguration + if in.Bus != nil { + in, out := &in.Bus, &out.Bus + *out = new(BusSpec) + **out = **in + } + if in.LargeMessageStore != nil { + in, out := &in.LargeMessageStore, &out.LargeMessageStore + *out = new(LargeMessageStoreSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -702,7 +712,8 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.BusConfigurationRef = in.BusConfigurationRef + out.BusRef = in.BusRef + out.LargeMessageStoreRef = in.LargeMessageStoreRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. @@ -726,7 +737,16 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - out.BusConfiguration = in.BusConfiguration + if in.Bus != nil { + in, out := &in.Bus, &out.Bus + *out = new(BusSpec) + **out = **in + } + if in.LargeMessageStore != nil { + in, out := &in.LargeMessageStore, &out.LargeMessageStore + *out = new(LargeMessageStoreSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. @@ -739,6 +759,95 @@ func (in *IngestorClusterStatus) DeepCopy() *IngestorClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStore) DeepCopyInto(out *LargeMessageStore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStore. +func (in *LargeMessageStore) DeepCopy() *LargeMessageStore { + if in == nil { + return nil + } + out := new(LargeMessageStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStoreList) DeepCopyInto(out *LargeMessageStoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LargeMessageStore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreList. 
+func (in *LargeMessageStoreList) DeepCopy() *LargeMessageStoreList { + if in == nil { + return nil + } + out := new(LargeMessageStoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LargeMessageStoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStoreSpec) DeepCopyInto(out *LargeMessageStoreSpec) { + *out = *in + out.S3 = in.S3 +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreSpec. +func (in *LargeMessageStoreSpec) DeepCopy() *LargeMessageStoreSpec { + if in == nil { + return nil + } + out := new(LargeMessageStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStoreStatus) DeepCopyInto(out *LargeMessageStoreStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreStatus. +func (in *LargeMessageStoreStatus) DeepCopy() *LargeMessageStoreStatus { + if in == nil { + return nil + } + out := new(LargeMessageStoreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { *out = *in @@ -977,6 +1086,21 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Spec) DeepCopyInto(out *S3Spec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. +func (in *S3Spec) DeepCopy() *S3Spec { + if in == nil { + return nil + } + out := new(S3Spec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 1984474fa..0d14d691a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,11 +230,18 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.BusConfigurationReconciler{ + if err := (&controller.BusReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BusConfiguration") + setupLog.Error(err, "unable to create controller", "controller", "Bus") + os.Exit(1) + } + if err := (&controller.LargeMessageStoreReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LargeMessageStore") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml new file mode 100644 index 000000000..6a98483a5 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -0,0 +1,123 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: buses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: Bus + listKind: BusList + plural: buses + shortNames: + - bus + singular: bus + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of bus + jsonPath: .status.phase + name: Phase + type: string + - description: Age of bus resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: Bus is the Schema for the buses API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BusSpec defines the desired state of Bus + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + queueName: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + sqs: + description: sqs specific inputs + properties: + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + required: + - dlq + type: object + required: + - provider + - queueName + - region + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) + status: + description: BusStatus defines the observed state of Bus + properties: + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the bus + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index d66e057fb..3563c678f 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,8 +5165,8 @@ spec: x-kubernetes-list-type: atomic type: object type: object - busConfigurationRef: - description: Bus configuration reference + busRef: + description: Bus reference properties: apiVersion: description: API version of the referent. @@ -5480,6 +5480,49 @@ spec: type: object x-kubernetes-map-type: atomic type: array + largeMessageStoreRef: + description: Large Message Store reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -8294,27 +8337,44 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object - busConfiguration: - description: Bus configuration + bus: + description: Bus properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + queueName: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string sqs: + description: sqs specific inputs properties: - authRegion: - type: string - deadLetterQueueName: + dlq: + description: Name of the dead letter queue resource + minLength: 1 type: string endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: - type: string + required: + - dlq type: object - type: - type: string + required: + - provider + - queueName + - region type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8349,6 +8409,34 @@ spec: initialized_flag: description: Indicates if the cluster is initialized. type: boolean + largeMessageStore: + description: Large Message Store + properties: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs + properties: + endpoint: + description: S3-compatible Service endpoint + pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ + type: string + required: + - path + type: object + required: + - provider + type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) maintenance_mode: description: Indicates if the cluster is in maintenance mode. 
type: boolean diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 82f1f868a..8ada99079 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,8 +1141,8 @@ spec: type: object type: array type: object - busConfigurationRef: - description: Bus configuration reference + busRef: + description: Bus reference properties: apiVersion: description: API version of the referent. @@ -1456,6 +1456,49 @@ spec: type: object x-kubernetes-map-type: atomic type: array + largeMessageStoreRef: + description: Large Message Store reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -4545,27 +4588,72 @@ spec: description: App Framework version info for future use type: integer type: object - busConfiguration: - description: Bus configuration + bus: + description: Bus properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + queueName: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string sqs: + description: sqs specific inputs properties: - authRegion: - type: string - deadLetterQueueName: + dlq: + description: Name of the dead letter queue resource + minLength: 1 type: string endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: + required: + - dlq + type: object + required: + - provider + - queueName + - region + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) + largeMessageStore: + description: Large Message Store + properties: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs + properties: + endpoint: + description: S3-compatible Service endpoint + pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - queueName: + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + required: + - path type: object - type: - type: string + required: + - provider type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) message: description: Auxillary message describing CR status type: string diff --git a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml similarity index 64% rename from config/crd/bases/enterprise.splunk.com_busconfigurations.yaml rename to config/crd/bases/enterprise.splunk.com_largemessagestores.yaml index 9f80cdbea..20cd26906 100644 --- a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml +++ b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: busconfigurations.enterprise.splunk.com + name: largemessagestores.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: BusConfiguration - listKind: BusConfigurationList - plural: busconfigurations + kind: LargeMessageStore + listKind: LargeMessageStoreList + plural: largemessagestores shortNames: - - bus - singular: busconfiguration + - lms + singular: largemessagestore scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of bus configuration + - description: Status of large message store jsonPath: .status.phase name: Phase type: string - - description: Age of bus configuration resource + - 
description: Age of large message store resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: BusConfiguration is the Schema for the busconfigurations API + description: LargeMessageStore is the Schema for the largemessagestores API properties: apiVersion: description: |- @@ -52,34 +52,41 @@ spec: metadata: type: object spec: - description: BusConfigurationSpec defines the desired state of BusConfiguration + description: LargeMessageStoreSpec defines the desired state of LargeMessageStore properties: - sqs: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs properties: - authRegion: - type: string - deadLetterQueueName: - type: string endpoint: + description: S3-compatible Service endpoint + pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + required: + - path type: object - type: - type: string + required: + - provider type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) status: - description: BusConfigurationStatus defines the observed state of BusConfiguration. + description: LargeMessageStoreStatus defines the observed state of LargeMessageStore. properties: message: description: Auxillary message describing CR status type: string phase: - description: Phase of the bus configuration + description: Phase of the large message store enum: - Pending - Ready diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 679c1dc72..c8ba16418 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,7 +11,8 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml -- bases/enterprise.splunk.com_busconfigurations.yaml +- bases/enterprise.splunk.com_buses.yaml +- bases/enterprise.splunk.com_largemessagestores.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/busconfiguration_editor_role.yaml b/config/rbac/bus_editor_role.yaml similarity index 88% rename from config/rbac/busconfiguration_editor_role.yaml rename to config/rbac/bus_editor_role.yaml index fde8687f7..c08c2ce39 100644 --- a/config/rbac/busconfiguration_editor_role.yaml +++ b/config/rbac/bus_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-editor-role + name: bus-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get diff --git a/config/rbac/busconfiguration_viewer_role.yaml b/config/rbac/bus_viewer_role.yaml similarity index 87% rename from config/rbac/busconfiguration_viewer_role.yaml rename to config/rbac/bus_viewer_role.yaml index 6230863a9..6f9c42d2a 100644 --- a/config/rbac/busconfiguration_viewer_role.yaml +++ b/config/rbac/bus_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: 
busconfiguration-viewer-role + name: bus-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get diff --git a/config/rbac/largemessagestore_editor_role.yaml b/config/rbac/largemessagestore_editor_role.yaml new file mode 100644 index 000000000..614d09ad2 --- /dev/null +++ b/config/rbac/largemessagestore_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: largemessagestore-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/status + verbs: + - get diff --git a/config/rbac/largemessagestore_viewer_role.yaml b/config/rbac/largemessagestore_viewer_role.yaml new file mode 100644 index 000000000..36cfde351 --- /dev/null +++ b/config/rbac/largemessagestore_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: largemessagestore-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 78231b303..94ed9d59e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,11 +47,12 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses - clustermanagers - clustermasters - indexerclusters - ingestorclusters + - largemessagestores - licensemanagers - licensemasters - monitoringconsoles @@ -68,11 +69,12 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers + - buses/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers - ingestorclusters/finalizers + - largemessagestores/finalizers - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers @@ -83,11 +85,12 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status - clustermanagers/status - clustermasters/status - indexerclusters/status - ingestorclusters/status + - largemessagestores/status - licensemanagers/status - licensemasters/status - monitoringconsoles/status diff --git a/config/samples/enterprise_v4_busconfiguration.yaml b/config/samples/enterprise_v4_bus.yaml similarity index 72% rename from config/samples/enterprise_v4_busconfiguration.yaml rename to config/samples/enterprise_v4_bus.yaml index 0cc1aed31..51af9d05a 100644 --- a/config/samples/enterprise_v4_busconfiguration.yaml +++ b/config/samples/enterprise_v4_bus.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: - name: busconfiguration-sample + name: bus-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/enterprise_v4_largemessagestore.yaml b/config/samples/enterprise_v4_largemessagestore.yaml new file mode 100644 index 000000000..508ba0b77 --- /dev/null +++ b/config/samples/enterprise_v4_largemessagestore.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: largemessagestore-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 88c71025d..1ea90a3ae 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,5 +14,6 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml -- enterprise_v4_busconfiguration.yaml +- enterprise_v4_bus.yaml +- enterprise_v4_largemessagestore.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 6461d4488..384153add 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -338,10 +338,12 @@ metadata: name: ic spec: replicas: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` -Note: `busConfigurationRef` is required field in case of IngestorCluster resource since it will be used to connect the IngestorCluster to BusConfiguration resource. 
+Note: `busRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Bus and LargeMessageStore resources. In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index dd53922ff..3b151cc4d 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -16,13 +16,13 @@ This separation enables: - SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version -# BusConfiguration +# Bus -BusConfiguration is introduced to store message bus configuration to be shared among IngestorCluster and IndexerCluster. +Bus is introduced to store message bus to be shared among IngestorCluster and IndexerCluster. ## Spec -BusConfiguration inputs can be found in the table below. As of now, only SQS type of message bus is supported. +Bus inputs can be found in the table below. As of now, only SQS type of message bus is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -45,9 +45,9 @@ Change of any of the bus inputs does not restart Splunk. It just updates the con ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: - name: bus-config + name: bus spec: type: sqs_smartbus sqs: @@ -70,7 +70,8 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| busRef | corev1.ObjectReference | Message bus reference | +| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example @@ -89,8 +90,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` # IndexerCluster @@ -104,7 +107,8 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| busRef | corev1.ObjectReference | Message bus reference | +| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example @@ -135,8 +139,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` # Common Spec @@ -149,12 +155,12 @@ An IngestorCluster template has been added to the splunk/splunk-enterprise Helm ## Example -Below examples describe how to define values for BusConfiguration, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Bus, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
``` -busConfiguration:: +bus: enabled: true - name: bus-config + name: bus type: sqs_smartbus sqs: queueName: sqs-test @@ -171,8 +177,10 @@ ingestorCluster: name: ingestor replicaCount: 3 serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` ``` @@ -189,8 +197,10 @@ indexerCluster: serviceAccount: ingestor-sa clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` # Service Account @@ -492,12 +502,12 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install BusConfiguration resource. +3. Install Bus resource. ``` $ cat bus.yaml apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: name: bus finalizers: @@ -518,19 +528,19 @@ $ kubectl apply -f bus.yaml ``` ``` -$ kubectl get busconfiguration +$ kubectl get bus NAME PHASE AGE MESSAGE bus Ready 20s ``` ``` -kubectl describe busconfiguration +kubectl describe bus Name: bus Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: BusConfiguration +Kind: Bus Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -568,8 +578,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` ``` @@ -598,8 +610,8 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Bus Configuration Ref: - Name: bus-config + Bus Ref: + Name: bus Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} Replicas: 3 @@ -616,7 +628,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Bus Configuration: + Bus: Sqs: Auth Region: us-west-2 Dead Letter Queue Name: sqs-dlq-test @@ -704,8 +716,10 @@ spec: clusterManagerRef: name: cm serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` ``` diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml deleted file mode 100644 index 2a746968e..000000000 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.busConfiguration }} -{{- if .Values.busConfiguration.enabled }} -apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration -metadata: - name: {{ .Values.busConfiguration.name }} - namespace: {{ default .Release.Namespace .Values.busConfiguration.namespaceOverride }} - {{- with .Values.busConfiguration.additionalLabels }} - labels: -{{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.busConfiguration.additionalAnnotations }} - annotations: -{{ toYaml . 
| nindent 4 }} - {{- end }} -spec: - type: {{ .Values.busConfiguration.type | quote }} - {{- with .Values.busConfiguration.sqs }} - sqs: - {{- if .queueName }} - queueName: {{ .queueName | quote }} - {{- end }} - {{- if .authRegion }} - authRegion: {{ .authRegion | quote }} - {{- end }} - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .largeMessageStoreEndpoint }} - largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} - {{- end }} - {{- if .largeMessageStorePath }} - largeMessageStorePath: {{ .largeMessageStorePath | quote }} - {{- end }} - {{- if .deadLetterQueueName }} - deadLetterQueueName: {{ .deadLetterQueueName | quote }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml new file mode 100644 index 000000000..ce1c1e7a9 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml @@ -0,0 +1,30 @@ +{{- if .Values.bus }} +{{- if .Values.bus.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: Bus +metadata: + name: {{ .Values.bus.name }} + namespace: {{ default .Release.Namespace .Values.bus.namespaceOverride }} + {{- with .Values.bus.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.bus.additionalAnnotations }} + annotations: +{{ toYaml . | nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.bus.provider | quote }} + queueName: {{ .Values.bus.queueName | quote }} + region: {{ .Values.bus.region | quote }} + {{- with .Values.bus.sqs }} + sqs: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .dlq }} + dlq: {{ .dlq | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 77c24d500..0e6a96673 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -163,8 +163,14 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.busConfigurationRef }} - busConfigurationRef: + {{- with $.Values.indexerCluster.busRef }} + busRef: + name: {{ .name }} + {{- if .namespace }} + namespace: {{ .namespace }} + {{- end }} + {{- with $.Values.indexerCluster.largeMessageStoreRef }} + largeMessageStoreRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index fd72da310..b6c1640ec 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,11 +95,18 @@ spec: topologySpreadConstraints: {{- toYaml . 
| nindent 4 }} {{- end }} - {{- with $.Values.ingestorCluster.busConfigurationRef }} - busConfigurationRef: - name: {{ $.Values.ingestorCluster.busConfigurationRef.name }} - {{- if $.Values.ingestorCluster.busConfigurationRef.namespace }} - namespace: {{ $.Values.ingestorCluster.busConfigurationRef.namespace }} + {{- with $.Values.ingestorCluster.busRef }} + busRef: + name: {{ $.Values.ingestorCluster.busRef.name }} + {{- if $.Values.ingestorCluster.busRef.namespace }} + namespace: {{ $.Values.ingestorCluster.busRef.namespace }} + {{- end }} + {{- end }} + {{- with $.Values.ingestorCluster.largeMessageStoreRef }} + largeMessageStoreRef: + name: {{ $.Values.ingestorCluster.largeMessageStoreRef.name }} + {{- if $.Values.ingestorCluster.largeMessageStoreRef.namespace }} + namespace: {{ $.Values.ingestorCluster.largeMessageStoreRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml new file mode 100644 index 000000000..77ef09e69 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml @@ -0,0 +1,28 @@ +{{- if .Values.largemessagestore }} +{{- if .Values.largemessagestore.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: {{ .Values.largemessagestore.name }} + namespace: {{ default .Release.Namespace .Values.largemessagestore.namespaceOverride }} + {{- with .Values.largemessagestore.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.largemessagestore.additionalAnnotations }} + annotations: +{{ toYaml . | nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.largemessagestore.provider | quote }} + {{- with .Values.largemessagestore.s3 }} + s3: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .path }} + path: {{ .path | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index e49073398..a001bbead 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,7 +350,9 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - busConfigurationRef: {} + busRef: {} + + largeMessageStoreRef: {} searchHeadCluster: @@ -899,4 +901,6 @@ ingestorCluster: affinity: {} - busConfigurationRef: {} \ No newline at end of file + busRef: {} + + largeMessageStoreRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml similarity index 78% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml rename to helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml index 1475add32..f285a1ca5 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-bus-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -25,19 +25,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-bus-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -49,7 +49,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml similarity index 76% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml rename to helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml index 500b1d100..c4381a3cc 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - get - list @@ -21,19 +21,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-bus-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - get - list @@ -41,7 +41,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 4eab5275e..61cf4ada9 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -251,7 +251,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -263,13 +263,39 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers + - buses/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/status verbs: - get - patch diff --git a/internal/controller/busconfiguration_controller.go b/internal/controller/bus_controller.go similarity index 70% rename from internal/controller/busconfiguration_controller.go rename to internal/controller/bus_controller.go index c8519c017..b52e91991 100644 --- a/internal/controller/busconfiguration_controller.go +++ b/internal/controller/bus_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// BusConfigurationReconciler reconciles a BusConfiguration object -type BusConfigurationReconciler struct { +// BusReconciler reconciles a Bus object +type BusReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the BusConfiguration object against the actual cluster state, and then +// the Bus object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "BusConfiguration")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "BusConfiguration") +func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Bus")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "Bus") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("busconfiguration", req.NamespacedName) + reqLogger = reqLogger.WithValues("bus", req.NamespacedName) - // Fetch the BusConfiguration - instance := &enterpriseApi.BusConfiguration{} + // Fetch the Bus + instance := &enterpriseApi.Bus{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, nil } // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrap(err, "could not load bus configuration data") + return ctrl.Result{}, errors.Wrap(err, "could not load bus data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.BusConfigurationPausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.BusPausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyBusConfiguration(ctx, r.Client, instance) + result, err := ApplyBus(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Req return result, err } -var ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return enterprise.ApplyBusConfiguration(ctx, client, instance) +var ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + return enterprise.ApplyBus(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *BusConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *BusReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.BusConfiguration{}). + For(&enterpriseApi.Bus{}). 
WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/busconfiguration_controller_test.go b/internal/controller/bus_controller_test.go similarity index 56% rename from internal/controller/busconfiguration_controller_test.go rename to internal/controller/bus_controller_test.go index e08154211..300af1879 100644 --- a/internal/controller/busconfiguration_controller_test.go +++ b/internal/controller/bus_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("BusConfiguration Controller", func() { +var _ = Describe("Bus Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,47 +43,55 @@ var _ = Describe("BusConfiguration Controller", func() { }) - Context("BusConfiguration Management", func() { + Context("Bus Management", func() { - It("Get BusConfiguration custom resource should fail", func() { + It("Get Bus custom resource should fail", func() { namespace := "ns-splunk-bus-1" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetBusConfiguration("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("busconfigurations.enterprise.splunk.com \"test\" not found")) - + _, err := GetBus("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("buses.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create BusConfiguration custom resource with annotations should pause", func() { + It("Create Bus custom resource with annotations should pause", func() { namespace := "ns-splunk-bus-2" annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + annotations[enterpriseApi.BusPausedAnnotation] = "" + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - icSpec, _ := GetBusConfiguration("test", nsSpecs.Name) + spec := enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetBus("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateBusConfiguration(icSpec, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) + UpdateBus(icSpec, enterpriseApi.PhaseReady, spec) + DeleteBus("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create BusConfiguration custom resource 
should succeeded", func() { + It("Create Bus custom resource should succeeded", func() { namespace := "ns-splunk-bus-3" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -91,14 +99,23 @@ var _ = Describe("BusConfiguration Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) + spec := enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteBus("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { namespace := "ns-splunk-bus-4" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -108,7 +125,7 @@ var _ = Describe("BusConfiguration Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - instance := BusConfigurationReconciler{ + instance := BusReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -121,11 +138,20 @@ var _ = Describe("BusConfiguration Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - bcSpec := testutils.NewBusConfiguration("test", namespace, "image") + spec := enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + bcSpec := testutils.NewBus("test", namespace, spec) Expect(c.Create(ctx, bcSpec)).Should(Succeed()) annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + annotations[enterpriseApi.BusPausedAnnotation] = "" bcSpec.Annotations = annotations Expect(c.Update(ctx, bcSpec)).Should(Succeed()) @@ -147,86 +173,87 @@ var _ = Describe("BusConfiguration Controller", func() { }) }) -func GetBusConfiguration(name string, namespace string) (*enterpriseApi.BusConfiguration, error) { - By("Expecting BusConfiguration custom resource to be retrieved successfully") +func GetBus(name string, namespace string) (*enterpriseApi.Bus, error) { + By("Expecting Bus custom resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Bus{} - err := k8sClient.Get(context.Background(), key, bc) + err := k8sClient.Get(context.Background(), key, b) if err != nil { return nil, err } - return bc, err + return b, err } -func CreateBusConfiguration(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) 
*enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be created successfully") +func CreateBus(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { + By("Expecting Bus custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - ingSpec := &enterpriseApi.BusConfiguration{ + ingSpec := &enterpriseApi.Bus{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Annotations: annotations, }, + Spec: spec, } Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Bus{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) + _ = k8sClient.Get(context.Background(), key, b) if status != "" { fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + b.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return bc + return b } -func UpdateBusConfiguration(instance *enterpriseApi.BusConfiguration, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be updated successfully") +func UpdateBus(instance *enterpriseApi.Bus, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { + By("Expecting Bus custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - bcSpec := testutils.NewBusConfiguration(instance.Name, instance.Namespace, "image") - bcSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), bcSpec)).Should(Succeed()) + bSpec := testutils.NewBus(instance.Name, instance.Namespace, spec) + bSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Bus{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) + _ = k8sClient.Get(context.Background(), key, b) if status != "" { fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + b.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return bc + return b } -func DeleteBusConfiguration(name string, namespace string) { - By("Expecting BusConfiguration custom resource to be deleted successfully") +func DeleteBus(name string, namespace string) { + By("Expecting Bus custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, @@ -234,9 +261,9 @@ func DeleteBusConfiguration(name string, namespace string) { } Eventually(func() error { - bc := &enterpriseApi.BusConfiguration{} - _ = k8sClient.Get(context.Background(), key, bc) - err := k8sClient.Delete(context.Background(), bc) + b := &enterpriseApi.Bus{} + _ = k8sClient.Get(context.Background(), key, b) + err := k8sClient.Delete(context.Background(), b) return err }, timeout, interval).Should(Succeed()) } diff --git a/internal/controller/indexercluster_controller.go 
b/internal/controller/indexercluster_controller.go index 3cc840baa..676f81d23 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -172,9 +172,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Bus{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + b, ok := obj.(*enterpriseApi.Bus) if !ok { return nil } @@ -184,11 +184,39 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.BusRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). + Watches(&enterpriseApi.LargeMessageStore{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + lms, ok := obj.(*enterpriseApi.LargeMessageStore) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.LargeMessageStoreRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index a2c5846df..1df81eb78 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -141,9 +141,9 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Bus{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + b, ok := obj.(*enterpriseApi.Bus) if !ok { return nil } @@ -153,11 +153,39 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.BusRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). 
+ Watches(&enterpriseApi.LargeMessageStore{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + lms, ok := obj.(*enterpriseApi.LargeMessageStore) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.LargeMessageStoreRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 5e7ae4b73..811ca930a 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -71,7 +71,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + bus := &enterpriseApi.Bus{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bus", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + lms := &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations @@ -91,7 +119,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + bus := &enterpriseApi.Bus{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bus", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + lms := &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -164,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status 
enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, bus *enterpriseApi.Bus) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -184,8 +240,13 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + BusRef: corev1.ObjectReference{ + Name: bus.Name, + Namespace: bus.Namespace, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, }, }, } diff --git a/internal/controller/largemessagestore_controller.go b/internal/controller/largemessagestore_controller.go new file mode 100644 index 000000000..69a4af131 --- /dev/null +++ b/internal/controller/largemessagestore_controller.go @@ -0,0 +1,120 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "time" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pkg/errors" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/common" + metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" + enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" +) + +// LargeMessageStoreReconciler reconciles a LargeMessageStore object +type LargeMessageStoreReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the LargeMessageStore object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile +func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "LargeMessageStore")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "LargeMessageStore") + + reqLogger := log.FromContext(ctx) + reqLogger = reqLogger.WithValues("largemessagestore", req.NamespacedName) + + // Fetch the LargeMessageStore + instance := &enterpriseApi.LargeMessageStore{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after + // reconcile request. Owned objects are automatically + // garbage collected. For additional cleanup logic use + // finalizers. Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, errors.Wrap(err, "could not load largemessagestore data") + } + + // If the reconciliation is paused, requeue + annotations := instance.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.LargeMessageStorePausedAnnotation]; ok { + return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil + } + } + + reqLogger.Info("start", "CR version", instance.GetResourceVersion()) + + result, err := ApplyLargeMessageStore(ctx, r.Client, instance) + if result.Requeue && result.RequeueAfter != 0 { + reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) + } + + return result, err +} + +var ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return enterprise.ApplyLargeMessageStore(ctx, client, instance) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *LargeMessageStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterpriseApi.LargeMessageStore{}). + WithEventFilter(predicate.Or( + common.GenerationChangedPredicate(), + common.AnnotationChangedPredicate(), + common.LabelChangedPredicate(), + common.SecretChangedPredicate(), + common.ConfigMapChangedPredicate(), + common.StatefulsetChangedPredicate(), + common.PodChangedPredicate(), + common.CrdChangedPredicate(), + )). + WithOptions(controller.Options{ + MaxConcurrentReconciles: enterpriseApi.TotalWorker, + }). + Complete(r) +} diff --git a/internal/controller/largemessagestore_controller_test.go b/internal/controller/largemessagestore_controller_test.go new file mode 100644 index 000000000..5d85d4409 --- /dev/null +++ b/internal/controller/largemessagestore_controller_test.go @@ -0,0 +1,263 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/testutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("LargeMessageStore Controller", func() { + BeforeEach(func() { + time.Sleep(2 * time.Second) + }) + + AfterEach(func() { + + }) + + Context("LargeMessageStore Management", func() { + + It("Get LargeMessageStore custom resource should fail", func() { + namespace := "ns-splunk-largemessagestore-1" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + _, err := GetLargeMessageStore("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("largemessagestores.enterprise.splunk.com \"test\" not found")) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create LargeMessageStore custom resource with annotations should pause", func() { + namespace := "ns-splunk-largemessagestore-2" + annotations := make(map[string]string) + annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + spec := enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetLargeMessageStore("test", nsSpecs.Name) + annotations = map[string]string{} + icSpec.Annotations = annotations + icSpec.Status.Phase = "Ready" + UpdateLargeMessageStore(icSpec, enterpriseApi.PhaseReady, spec) + DeleteLargeMessageStore("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create LargeMessageStore custom resource should succeeded", func() { + namespace := "ns-splunk-largemessagestore-3" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + annotations := make(map[string]string) + spec := enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteLargeMessageStore("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Cover Unused methods", func() { + namespace := 
"ns-splunk-largemessagestore-4" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + ctx := context.TODO() + builder := fake.NewClientBuilder() + c := builder.Build() + instance := LargeMessageStoreReconciler{ + Client: c, + Scheme: scheme.Scheme, + } + request := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + _, err := instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + spec := enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + lmsSpec := testutils.NewLargeMessageStore("test", namespace, spec) + Expect(c.Create(ctx, lmsSpec)).Should(Succeed()) + + annotations := make(map[string]string) + annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" + lmsSpec.Annotations = annotations + Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + annotations = map[string]string{} + lmsSpec.Annotations = annotations + Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + lmsSpec.DeletionTimestamp = &metav1.Time{} + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + }) + + }) +}) + +func GetLargeMessageStore(name string, namespace string) (*enterpriseApi.LargeMessageStore, error) { + By("Expecting LargeMessageStore custom resource to be retrieved successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + lms := &enterpriseApi.LargeMessageStore{} + + err := k8sClient.Get(context.Background(), key, lms) + if err != nil { + return nil, err + } + + return lms, err +} + +func CreateLargeMessageStore(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + By("Expecting LargeMessageStore custom resource to be created successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + lmsSpec := &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: spec, + } + + Expect(k8sClient.Create(context.Background(), lmsSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + lms := &enterpriseApi.LargeMessageStore{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, lms) + if status != "" { + fmt.Printf("status is set to %v", status) + lms.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return lms +} + +func UpdateLargeMessageStore(instance *enterpriseApi.LargeMessageStore, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + By("Expecting LargeMessageStore custom resource to be updated successfully") + + key := types.NamespacedName{ + Name: instance.Name, + Namespace: instance.Namespace, + } + + lmsSpec := 
testutils.NewLargeMessageStore(instance.Name, instance.Namespace, spec) + lmsSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), lmsSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + lms := &enterpriseApi.LargeMessageStore{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, lms) + if status != "" { + fmt.Printf("status is set to %v", status) + lms.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return lms +} + +func DeleteLargeMessageStore(name string, namespace string) { + By("Expecting LargeMessageStore custom resource to be deleted successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + + Eventually(func() error { + lms := &enterpriseApi.LargeMessageStore{} + _ = k8sClient.Get(context.Background(), key, lms) + err := k8sClient.Delete(context.Background(), lms) + return err + }, timeout, interval).Should(Succeed()) +} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 52c4c1a1d..17ce5e760 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -50,7 +50,6 @@ func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") - } var _ = BeforeSuite(func(ctx context.Context) { @@ -99,6 +98,12 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) + if err := (&BusReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } if err := (&ClusterManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), @@ -117,37 +122,43 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseManagerReconciler{ + if err := (&IngestorClusterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseMasterReconciler{ + if err := (&LargeMessageStoreReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&MonitoringConsoleReconciler{ + if err := (&LicenseManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&SearchHeadClusterReconciler{ + if err := (&LicenseMasterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&StandaloneReconciler{ + if err := (&MonitoringConsoleReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&IngestorClusterReconciler{ + if err := (&SearchHeadClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&StandaloneReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git 
a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index 9ca78593c..e3e37efc2 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -54,28 +54,26 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + BusRef: corev1.ObjectReference{ + Name: "bus", }, }, } } -// NewBusConfiguration returns new BusConfiguration instance with its config hash -func NewBusConfiguration(name, ns, image string) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// NewBus returns new Bus instance with its config hash +func NewBus(name, ns string, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { + return &enterpriseApi.Bus{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, + Spec: spec, + } +} + +// NewLargeMessageStore returns new LargeMessageStore instance with its config hash +func NewLargeMessageStore(name, ns string, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + return &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: spec, } } @@ -313,9 +311,6 @@ func NewIndexerCluster(name, ns, image string) *enterpriseApi.IndexerCluster { ad.Spec = enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: *cs, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", - }, } return ad } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 5ac9b4a7a..001a78ee4 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,21 +1,33 @@ --- -# assert for bus configurtion custom resource to be ready +# assert for bus custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: - name: bus-config + name: bus spec: - type: sqs_smartbus + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test status: phase: Ready +--- +# assert for large message store custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +status: + phase: Ready + --- # assert for cluster manager custom resource to be ready apiVersion: enterprise.splunk.com/v4 @@ -49,20 +61,23 @@ metadata: name: indexer spec: replicas: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus status: phase: Ready - busConfiguration: - type: sqs_smartbus + bus: + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - 
largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test - + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + --- # check for stateful set and replicas as configured apiVersion: apps/v1 @@ -87,19 +102,22 @@ metadata: name: ingestor spec: replicas: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus status: phase: Ready - busConfiguration: - type: sqs_smartbus + bus: + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test --- # check for stateful set and replicas as configured diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index daa1ab4ab..86a2df8a8 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -6,19 +6,22 @@ metadata: name: ingestor spec: replicas: 4 - busConfigurationRef: - name: bus-config + busRef: + name: bus status: phase: Ready - busConfiguration: - type: sqs_smartbus + bus: + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test --- # check for stateful sets and replicas updated diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 6e87733cc..d832c5253 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -5,24 +5,32 @@ splunk-operator: persistentVolumeClaim: storageClassName: gp2 -busConfiguration: +bus: enabled: true - name: bus-config - type: sqs_smartbus + name: bus + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test +largeMessageStore: + enabled: true + name: lms + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + ingestorCluster: enabled: true name: ingestor replicaCount: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms clusterManager: enabled: true @@ -35,5 +43,7 @@ indexerCluster: replicaCount: 3 clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms diff --git a/pkg/splunk/enterprise/bus.go b/pkg/splunk/enterprise/bus.go new file mode 100644 index 000000000..b6e8318ed --- /dev/null +++ b/pkg/splunk/enterprise/bus.go @@ 
-0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyBus reconciles the state of an IngestorCluster custom resource +func ApplyBus(ctx context.Context, client client.Client, cr *enterpriseApi.Bus) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "Bus" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/bus_test.go b/pkg/splunk/enterprise/bus_test.go new file mode 100644 index 000000000..ac8ce8a8e --- /dev/null +++ b/pkg/splunk/enterprise/bus_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "os" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestApplyBus(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + bus := &enterpriseApi.Bus{ + TypeMeta: metav1.TypeMeta{ + Kind: "Bus", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bus", + Namespace: "test", + }, + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, bus) + + // ApplyBus + result, err := ApplyBus(ctx, c, bus) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, bus.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, bus.Status.Phase) +} diff --git a/pkg/splunk/enterprise/busconfiguration.go b/pkg/splunk/enterprise/busconfiguration.go deleted file mode 100644 index 43fd35f68..000000000 --- a/pkg/splunk/enterprise/busconfiguration.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package enterprise - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" - splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// ApplyBusConfiguration reconciles the state of an IngestorCluster custom resource -func ApplyBusConfiguration(ctx context.Context, client client.Client, cr *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - var err error - - // Unless modified, reconcile for this object will be requeued after 5 seconds - result := reconcile.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, - } - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("ApplyBusConfiguration") - - if cr.Status.ResourceRevMap == nil { - cr.Status.ResourceRevMap = make(map[string]string) - } - - eventPublisher, _ := newK8EventPublisher(client, cr) - ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - - cr.Kind = "BusConfiguration" - - // Initialize phase - cr.Status.Phase = enterpriseApi.PhaseError - - // Update the CR Status - defer updateCRStatus(ctx, client, cr, &err) - - // Validate and updates defaults for CR - err = validateBusConfigurationSpec(ctx, client, cr) - if err != nil { - eventPublisher.Warning(ctx, "validateBusConfigurationSpec", fmt.Sprintf("validate bus configuration spec failed %s", err.Error())) - scopedLog.Error(err, "Failed to validate bus configuration spec") - return result, err - } - - // Check if deletion has been requested - if cr.ObjectMeta.DeletionTimestamp != nil { - terminating, err := splctrl.CheckForDeletion(ctx, cr, client) - if terminating && err != nil { - cr.Status.Phase = enterpriseApi.PhaseTerminating - } else { - result.Requeue = false - } - return result, err - } - - cr.Status.Phase = enterpriseApi.PhaseReady - - // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. - // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
- if !result.Requeue { - result.RequeueAfter = 0 - } - - return result, nil -} - -// validateBusConfigurationSpec checks validity and makes default updates to a BusConfigurationSpec and returns error if something is wrong -func validateBusConfigurationSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.BusConfiguration) error { - return validateBusConfigurationInputs(cr) -} - -func validateBusConfigurationInputs(cr *enterpriseApi.BusConfiguration) error { - // sqs_smartbus type is supported for now - if cr.Spec.Type != "sqs_smartbus" { - return errors.New("only sqs_smartbus type is supported in bus configuration") - } - - // Cannot be empty fields check - cannotBeEmptyFields := []string{} - if cr.Spec.SQS.QueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") - } - - if cr.Spec.SQS.AuthRegion == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") - } - - if cr.Spec.SQS.DeadLetterQueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") - } - - if len(cannotBeEmptyFields) > 0 { - return errors.New("bus configuration sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") - } - - // Have to start with https:// or s3:// checks - haveToStartWithHttps := []string{} - if !strings.HasPrefix(cr.Spec.SQS.Endpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStoreEndpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") - } - - if len(haveToStartWithHttps) > 0 { - return errors.New("bus configuration sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStorePath, "s3://") { - return errors.New("bus configuration sqs largeMessageStorePath must start with s3://") - } - - return nil -} diff --git a/pkg/splunk/enterprise/busconfiguration_test.go b/pkg/splunk/enterprise/busconfiguration_test.go deleted file mode 100644 index 45d19bb40..000000000 --- a/pkg/splunk/enterprise/busconfiguration_test.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2025. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package enterprise - -import ( - "context" - "os" - "path/filepath" - "testing" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func init() { - GetReadinessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) - return fileLocation - } - GetLivenessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) - return fileLocation - } - GetStartupScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) - return fileLocation - } -} - -func TestApplyBusConfiguration(t *testing.T) { - os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - - ctx := context.TODO() - - scheme := runtime.NewScheme() - _ = enterpriseApi.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - c := fake.NewClientBuilder().WithScheme(scheme).Build() - - // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - Namespace: "test", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, - } - c.Create(ctx, busConfig) - - // ApplyBusConfiguration - result, err := ApplyBusConfiguration(ctx, c, busConfig) - assert.NoError(t, err) - assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, busConfig.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, busConfig.Status.Phase) -} - -func TestValidateBusConfigurationInputs(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "othertype", - SQS: enterpriseApi.SQSSpec{}, - }, - } - - err := validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "only sqs_smartbus type is supported in bus configuration", err.Error()) - - busConfig.Spec.Type = "sqs_smartbus" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, authRegion, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.QueueName = "test-queue" - busConfig.Spec.SQS.DeadLetterQueueName = "dlq-test" - busConfig.Spec.SQS.AuthRegion = "" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs authRegion cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - 
assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) - - busConfig.Spec.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" - busConfig.Spec.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.Nil(t, err) -} diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 74b1b0a91..7b8009cdd 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -78,7 +78,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Bus = &enterpriseApi.BusSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -245,35 +245,51 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Bus + bus := enterpriseApi.Bus{} + if cr.Spec.BusRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.BusRef.Namespace != "" { + ns = cr.Spec.BusRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.BusRef.Name, Namespace: ns, - }, &busConfig) + }, &bus) if err != nil { return result, err } } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Large Message Store + lms := enterpriseApi.LargeMessageStore{} + if cr.Spec.LargeMessageStoreRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.LargeMessageStoreRef.Namespace != "" { + ns = cr.Spec.LargeMessageStoreRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.LargeMessageStoreRef.Name, + Namespace: ns, + }, &lms) + if err != nil { + return result, err + } + } + + // If bus is updated + if cr.Spec.BusRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") 
return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Bus = &bus.Spec } } @@ -366,7 +382,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Bus = &enterpriseApi.BusSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -536,35 +552,51 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Bus + bus := enterpriseApi.Bus{} + if cr.Spec.BusRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.BusRef.Namespace != "" { + ns = cr.Spec.BusRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.BusRef.Name, Namespace: ns, - }, &busConfig) + }, &bus) if err != nil { return result, err } } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Large Message Store + lms := enterpriseApi.LargeMessageStore{} + if cr.Spec.LargeMessageStoreRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.LargeMessageStoreRef.Namespace != "" { + ns = cr.Spec.LargeMessageStoreRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.LargeMessageStoreRef.Name, + Namespace: ns, + }, &lms) + if err != nil { + return result, err + } + } + + // If bus is updated + if cr.Spec.BusRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Bus = &bus.Spec } } @@ -1234,7 +1266,7 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForBusPipeline = splclient.NewSplunkClient // Checks if only PullBus or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -1253,27 +1285,27 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context,
ne splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { updateErr = err } afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, afterDelete) + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1290,14 +1322,22 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne } // getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(busConfig *enterpriseApi.BusConfiguration, busConfigIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { +func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { // Compare bus fields - oldPB := busConfigIndexerStatus.Status.BusConfiguration - newPB := busConfig.Spec + oldPB := busIndexerStatus.Status.Bus + if oldPB == nil { + oldPB = &enterpriseApi.BusSpec{} + } + newPB := bus.Spec - // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(&oldPB, &newPB, afterDelete) + oldLMS := busIndexerStatus.Status.LargeMessageStore + if oldLMS == nil { + oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + } + newLMS := 
lms.Spec + // Push all bus fields + busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1315,34 +1355,43 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (inputs, outputs [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", newBus.Type}) +func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { + busProvider := "" + if newBus.Provider == "sqs" { + busProvider = "sqs_smartbus" + } + lmsProvider := "" + if newLMS.Provider == "s3" { + lmsProvider = "sqs_smartbus" + } + + if oldBus.Provider != newBus.Provider || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", busProvider}) } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + if oldBus.Region != newBus.Region || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) + if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) } inputs = append(inputs, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, ) outputs = inputs outputs = append(outputs, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}, - 
[]string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, ) return inputs, outputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index e541fc4f6..9df4b2f75 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1344,23 +1344,21 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -1371,8 +1369,8 @@ func TestGetIndexerStatefulSet(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, }, }, } @@ -2048,60 +2046,80 @@ func TestImageUpdatedTo9(t *testing.T) { } func TestGetChangedBusFieldsForIndexer(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } newCR := &enterpriseApi.IndexerCluster{ Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, }, }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, false) + busChangedFieldsInputs, 
busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false) assert.Equal(t, 8, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, }, busChangedFieldsInputs) assert.Equal(t, 10, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, }, busChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) @@ -2116,24 +2134,42 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { func 
TestHandlePullBusChange(t *testing.T) { // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } @@ -2147,12 +2183,18 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, }, }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, + Bus: &enterpriseApi.BusSpec{}, + LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -2209,7 +2251,8 @@ func TestHandlePullBusChange(t *testing.T) { // Mock pods c := spltest.NewMockClient() ctx := context.TODO() - c.Create(ctx, &busConfig) + c.Create(ctx, &bus) + c.Create(ctx, &lms) c.Create(ctx, newCR) c.Create(ctx, pod0) c.Create(ctx, pod1) @@ -2217,7 +2260,7 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err := mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // Mock secret @@ -2228,41 +2271,41 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + 
{fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, } propertyKVListOutputs := propertyKVList - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}) - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -2290,7 +2333,7 @@ func TestHandlePullBusChange(t *testing.T) { mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.Nil(t, err) } @@ -2308,7 +2351,7 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -2316,11 +2359,11 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) 
mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } @@ -2340,7 +2383,7 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inde } } -func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { +func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -2352,28 +2395,26 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, &busConfig) + c.Create(ctx, &bus) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2395,9 +2436,9 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { }, Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + Namespace: bus.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 4f96f05bc..6ca721b6a 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -73,7 +73,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Bus = &enterpriseApi.BusSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -210,34 +210,50 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Bus + bus := enterpriseApi.Bus{} + if cr.Spec.BusRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.BusRef.Namespace != "" { + ns = cr.Spec.BusRef.Namespace } err = client.Get(ctx, types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.BusRef.Name, Namespace: ns, - }, 
&busConfig) + }, &bus) if err != nil { return result, err } } - // If bus config is updated - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Large Message Store + lms := enterpriseApi.LargeMessageStore{} + if cr.Spec.LargeMessageStoreRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.LargeMessageStoreRef.Namespace != "" { + ns = cr.Spec.LargeMessageStoreRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.LargeMessageStoreRef.Name, + Namespace: ns, + }, &lms) + if err != nil { + return result, err + } + } + + // If bus is updated + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busConfig, client) + err = mgr.handlePushBusChange(ctx, cr, bus, lms, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Bus = &bus.Spec } // Upgrade fron automated MC to MC CRD @@ -311,7 +327,7 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie } // Checks if only Bus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -330,18 +346,18 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { updateErr = err } afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, afterDelete) + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), 
[][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } @@ -358,12 +374,21 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(busConfig *enterpriseApi.BusConfiguration, busConfigIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { - oldPB := &busConfigIngestorStatus.Status.BusConfiguration - newPB := &busConfig.Spec +func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { + oldPB := busIngestorStatus.Status.Bus + if oldPB == nil { + oldPB = &enterpriseApi.BusSpec{} + } + newPB := &bus.Spec + + oldLMS := busIngestorStatus.Status.LargeMessageStore + if oldLMS == nil { + oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + } + newLMS := &lms.Spec // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, afterDelete) + busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -402,31 +427,40 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (output [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - output = append(output, []string{"remote_queue.type", newBus.Type}) +func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { + busProvider := "" + if newBus.Provider == "sqs" { + busProvider = "sqs_smartbus" + } + lmsProvider := "" + if newLMS.Provider == "s3" { + lmsProvider = "sqs_smartbus" + } + + if oldBus.Provider != newBus.Provider || afterDelete { + output = append(output, []string{"remote_queue.type", busProvider}) } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + if oldBus.Region != newBus.Region || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) + if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + if 
oldLMS.S3.Path != newLMS.S3.Path || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) } output = append(output, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}) return output } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index bee3df4d6..d7a1604cd 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -63,28 +63,47 @@ func TestApplyIngestorCluster(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := &enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, busConfig) + c.Create(ctx, bus) + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, &lms) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -100,9 +119,13 @@ func TestApplyIngestorCluster(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + Namespace: bus.Namespace, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, }, }, } @@ -261,19 +284,19 @@ func TestApplyIngestorCluster(t *testing.T) 
{ defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, busConfig, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, bus, cr.Status.ReadyReplicas, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -310,23 +333,21 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -341,8 +362,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, }, }, } @@ -396,50 +417,70 @@ func TestGetIngestorStatefulSet(t *testing.T) { } func TestGetChangedBusFieldsForIngestor(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", 
APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } newCR := &enterpriseApi.IngestorCluster{ Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, false) + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false) assert.Equal(t, 10, len(busChangedFields)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, }, busChangedFields) assert.Equal(t, 6, len(pipelineChangedFields)) @@ -455,23 +496,40 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { func TestHandlePushBusChange(t *testing.T) { // Object 
definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } @@ -485,13 +543,18 @@ func TestHandlePushBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, + Replicas: 3, + ReadyReplicas: 3, + Bus: &enterpriseApi.BusSpec{}, + LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -555,7 +618,7 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err := mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // Mock secret @@ -566,29 +629,29 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), 
bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &bus, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -617,11 +680,11 @@ func TestHandlePushBusChange(t *testing.T) { mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, busConfig *enterpriseApi.BusConfiguration, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, bus *enterpriseApi.Bus, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -629,11 +692,11 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } diff --git a/pkg/splunk/enterprise/largemessagestore.go b/pkg/splunk/enterprise/largemessagestore.go new file mode 100644 index 000000000..8e6ff93f5 --- /dev/null +++ b/pkg/splunk/enterprise/largemessagestore.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyLargeMessageStore reconciles the state of an IngestorCluster custom resource +func ApplyLargeMessageStore(ctx context.Context, client client.Client, cr *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "LargeMessageStore" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/largemessagestore_test.go b/pkg/splunk/enterprise/largemessagestore_test.go new file mode 100644 index 000000000..0f627383c --- /dev/null +++ b/pkg/splunk/enterprise/largemessagestore_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "os" + "path/filepath" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + GetReadinessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) + return fileLocation + } + GetLivenessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) + return fileLocation + } + GetStartupScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) + return fileLocation + } +} + +func TestApplyLargeMessageStore(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + lms := &enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, lms) + + // ApplyLargeMessageStore + result, err := ApplyLargeMessageStore(ctx, c, lms) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, lms.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, lms.Status.Phase) +} diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 6ebd3df34..180659498 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,8 +63,11 @@ const ( // SplunkIngestor may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" - // SplunkBusConfiguration is the bus configuration instance - SplunkBusConfiguration InstanceType = "busconfiguration" + // SplunkBus is the bus instance + SplunkBus InstanceType = "bus" + + // SplunkLargeMessageStore is the large message store instance + SplunkLargeMessageStore InstanceType = "large-message-store" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -294,8 +297,10 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() - case "BusConfiguration": - return SplunkBusConfiguration.ToString() + case "Bus": + return SplunkBus.ToString() + case "LargeMessageStore": + return SplunkLargeMessageStore.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 38853aab0..e8f0736b3 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2291,20 +2291,34 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil - case "BusConfiguration": - 
latestBusCR := &enterpriseApi.BusConfiguration{} + case "Bus": + latestBusCR := &enterpriseApi.Bus{} err = client.Get(ctx, namespacedName, latestBusCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.BusConfiguration).Status.Message = "" + origCR.(*enterpriseApi.Bus).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.BusConfiguration).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.Bus).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.BusConfiguration).Status.DeepCopyInto(&latestBusCR.Status) + origCR.(*enterpriseApi.Bus).Status.DeepCopyInto(&latestBusCR.Status) return latestBusCR, nil + case "LargeMessageStore": + latestLmsCR := &enterpriseApi.LargeMessageStore{} + err = client.Get(ctx, namespacedName, latestLmsCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.LargeMessageStore).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.LargeMessageStore).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.LargeMessageStore).Status.DeepCopyInto(&latestLmsCR.Status) + return latestLmsCR, nil + case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} err = client.Get(ctx, namespacedName, latestLmCR) diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go index ba0162ffa..2d150f5ac 100644 --- a/test/appframework_aws/c3/appframework_aws_test.go +++ b/test/appframework_aws/c3/appframework_aws_test.go @@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go index afc7abae6..904433195 100644 --- a/test/appframework_aws/c3/manager_appframework_test.go +++ b/test/appframework_aws/c3/manager_appframework_test.go @@ -355,7 +355,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3324,7 +3324,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", 
corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go index 0622700a4..c7fea6ff3 100644 --- a/test/appframework_az/c3/appframework_azure_test.go +++ b/test/appframework_az/c3/appframework_azure_test.go @@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go index 2a0af0b3b..4412efe43 100644 --- a/test/appframework_az/c3/manager_appframework_azure_test.go +++ b/test/appframework_az/c3/manager_appframework_azure_test.go @@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go index 02ad17cfb..66c553e47 100644 --- a/test/appframework_gcp/c3/manager_appframework_test.go +++ b/test/appframework_gcp/c3/manager_appframework_test.go @@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git 
a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index c040802f8..c99112617 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -39,15 +39,20 @@ var ( testenvInstance *testenv.TestEnv testSuiteName = "indingsep-" + testenv.RandomDNSName(3) - bus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + bus = enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue", + }, + } + lms = enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" @@ -80,15 +85,13 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateBus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + updateBus = enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue-updated", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket-updated/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue-updated", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue-updated", }, } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 8bccddb47..1b3d27c70 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -79,14 +79,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + b, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") 
// Deploy Cluster Manager @@ -96,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -125,12 +130,19 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, ingest) Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) - // Delete the Bus Configuration - busConfiguration := &enterpriseApi.BusConfiguration{} - err = deployment.GetInstance(ctx, "bus-config", busConfiguration) - Expect(err).To(Succeed(), "Unable to get Bus Configuration instance", "Bus Configuration Name", busConfiguration) - err = deployment.DeleteCR(ctx, busConfiguration) - Expect(err).To(Succeed(), "Unable to delete Bus Configuration", "Bus Configuration Name", busConfiguration) + // Delete the Bus + bus := &enterpriseApi.Bus{} + err = deployment.GetInstance(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to get Bus instance", "Bus Name", bus) + err = deployment.DeleteCR(ctx, bus) + Expect(err).To(Succeed(), "Unable to delete Bus", "Bus Name", bus) + + // Delete the LargeMessageStore + lm = &enterpriseApi.LargeMessageStore{} + err = deployment.GetInstance(ctx, "lms", lm) + Expect(err).To(Succeed(), "Unable to get LargeMessageStore instance", "LargeMessageStore Name", lm) + err = deployment.DeleteCR(ctx, lm) + Expect(err).To(Succeed(), "Unable to delete LargeMessageStore", "LargeMessageStore Name", lm) }) }) @@ -140,10 +152,15 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + bc, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Upload apps to S3 testcaseEnvInst.Log.Info("Upload apps to S3") @@ -188,9 +205,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - BusConfigurationRef: v1.ObjectReference{Name: bc.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + BusRef: v1.ObjectReference{Name: bc.Name}, + LargeMessageStoreRef: v1.ObjectReference{Name: lm.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -238,14 +256,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus 
Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + bc, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -255,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -278,7 +301,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(bus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -288,7 +311,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(bus), "Indexer bus configuration status is not the same as provided as input") + Expect(index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -340,14 +363,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + bc, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, 
deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -357,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -372,17 +400,17 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Bus Configuration CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Bus Configuration CR with latest config") - bus := &enterpriseApi.BusConfiguration{} + // Get instance of current Bus CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Bus CR with latest config") + bus := &enterpriseApi.Bus{} err = deployment.GetInstance(ctx, bc.Name, bus) - Expect(err).To(Succeed(), "Failed to get instance of Bus Configuration") + Expect(err).To(Succeed(), "Failed to get instance of Bus") - // Update instance of BusConfiguration CR with new bus configuration - testcaseEnvInst.Log.Info("Update instance of BusConfiguration CR with new bus configuration") + // Update instance of Bus CR with new bus + testcaseEnvInst.Log.Info("Update instance of Bus CR with new bus") bus.Spec = updateBus err = deployment.UpdateCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration with updated CR") + Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") // Ensure that Ingestor Cluster has not been restarted testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") @@ -400,7 +428,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(updateBus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -410,7 +438,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(updateBus), "Indexer bus configuration status is not the same as provided as input") + Expect(index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 2e312c652..3a7ba21d2 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer 
cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busConfig, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, bus, lms, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, busConfig, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, bus, lms, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,20 +460,36 @@ func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } -// DeployBusConfiguration deploys the bus configuration -func (d *Deployment) DeployBusConfiguration(ctx context.Context, name string, busConfig enterpriseApi.BusConfigurationSpec) (*enterpriseApi.BusConfiguration, error) { - d.testenv.Log.Info("Deploying bus configuration", "name", name) +// DeployBus deploys the bus +func (d *Deployment) DeployBus(ctx context.Context, name string, bus enterpriseApi.BusSpec) (*enterpriseApi.Bus, error) { + d.testenv.Log.Info("Deploying bus", "name", name) - busCfg := newBusConfiguration(name, d.testenv.namespace, busConfig) + busCfg := newBus(name, d.testenv.namespace, bus) pdata, _ := json.Marshal(busCfg) - d.testenv.Log.Info("bus configuration spec", "cr", string(pdata)) + d.testenv.Log.Info("bus spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, busCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.BusConfiguration), err + return deployed.(*enterpriseApi.Bus), err +} + +// DeployLargeMessageStore deploys the large message store +func (d *Deployment) DeployLargeMessageStore(ctx context.Context, name string, lms enterpriseApi.LargeMessageStoreSpec) (*enterpriseApi.LargeMessageStore, error) { + d.testenv.Log.Info("Deploying large message store", "name", name) + + lmsCfg := newLargeMessageStore(name, d.testenv.namespace, lms) + pdata, _ := 
json.Marshal(lmsCfg) + + d.testenv.Log.Info("large message store spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, lmsCfg) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.LargeMessageStore), err } // DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration @@ -632,13 +648,22 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IngestorCluster) current.Spec = ucr.Spec cobject = current - case "BusConfiguration": - current := &enterpriseApi.BusConfiguration{} + case "Bus": + current := &enterpriseApi.Bus{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.Bus) + current.Spec = ucr.Spec + cobject = current + case "LargeMessageStore": + current := &enterpriseApi.LargeMessageStore{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.BusConfiguration) + ucr := cr.(*enterpriseApi.LargeMessageStore) current.Spec = ucr.Spec cobject = current case "ClusterMaster": @@ -740,7 +765,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx context.Context, name string, i } // Deploy the indexer cluster - _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -798,7 +823,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -870,7 +895,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -931,7 +956,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1067,7 +1092,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context. 
multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1122,7 +1147,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1227,7 +1252,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1305,7 +1330,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1405,7 +1430,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1509,7 +1534,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1590,7 +1615,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1662,7 +1687,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx } // Deploy the indexer cluster - _, err = 
d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1756,7 +1781,7 @@ func (d *Deployment) DeployMultisiteClusterWithMonitoringConsole(ctx context.Con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1856,7 +1881,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } diff --git a/test/testenv/util.go b/test/testenv/util.go index b779ab3c3..28bd67a13 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -396,8 +396,9 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + BusRef: bus, + LargeMessageStoreRef: lms, }, } @@ -405,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -425,24 +426,38 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, busCo Image: splunkImage, }, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + BusRef: bus, + LargeMessageStoreRef: lms, }, } } -// newBusConfiguration creates and initializes the CR for BusConfiguration Kind -func newBusConfiguration(name, ns 
string, busConfig enterpriseApi.BusConfigurationSpec) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// newBus creates and initializes the CR for Bus Kind +func newBus(name, ns string, bus enterpriseApi.BusSpec) *enterpriseApi.Bus { + return &enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: busConfig, + Spec: bus, + } +} +// newLargeMessageStore creates and initializes the CR for LargeMessageStore Kind +func newLargeMessageStore(name, ns string, lms enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + return &enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: lms, } } From cb8daf2a967460b238cad848093c7962bf731c6d Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 12 Dec 2025 13:00:28 +0100 Subject: [PATCH 2/7] CSPL-4358 Update docs --- docs/CustomResources.md | 69 +++++++++++ docs/IndexIngestionSeparation.md | 195 +++++++++++++++++++++++-------- 2 files changed, 214 insertions(+), 50 deletions(-) diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 384153add..95ca6c1d9 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -18,9 +18,11 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters) - [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) + - [Bus Resource Spec Parameters](#bus-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) + - [LargeMessageStore Resource Spec Parameters](#largemessagestore-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -279,6 +281,41 @@ spec: cpu: "4" ``` +## Bus Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: Bus +metadata: + name: bus +spec: + replicas: 3 + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test +``` + +Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of message bus (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | + +SQS message bus inputs can be found in the table below. 
+ +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| name | string | [Required] Name of the queue | +| region | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | + +Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + ## ClusterManager Resource Spec Parameters ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - ```yaml @@ -353,6 +390,36 @@ the `IngestorCluster` resource provides the following `Spec` configuration param | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of ingestor peers (minimum of 3 which is the default) | +## LargeMessageStore Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + path: s3://ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com +``` + +LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of large message store (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 large message store inputs | + +S3 large message store inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + ## MonitoringConsole Resource Spec Parameters ```yaml @@ -464,10 +531,12 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. 
However, t | Customer Resource Definition | Annotation | | ----------- | --------- | +| bus.enterprise.splunk.com | "bus.enterprise.splunk.com/paused" | | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | | ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | +| largemessagestore.enterprise.splunk.com | "largemessagestore.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index 3b151cc4d..e8c6211d7 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -18,29 +18,27 @@ This separation enables: # Bus -Bus is introduced to store message bus to be shared among IngestorCluster and IndexerCluster. +Bus is introduced to store message bus information to be shared among IngestorCluster and IndexerCluster. ## Spec -Bus inputs can be found in the table below. As of now, only SQS type of message bus is supported. +Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| type | string | Type of message bus (Only sqs_smartbus as of now) | -| sqs | SQS | SQS message bus inputs | +| provider | string | [Required] Provider of message bus (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | SQS message bus inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| queueName | string | Name of the SQS queue | -| authRegion | string | Region where the SQS queue is located | -| endpoint | string | AWS SQS endpoint -| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint | -| largeMessageStorePath | string | S3 path for Large Message Store | -| deadLetterQueueName | string | Name of the SQS dead letter queue | +| name | string | [Required] Name of the queue | +| region | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs does not restart Splunk. It just updates the config values with no disruptions. +Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
## Example ``` @@ -49,14 +47,47 @@ kind: Bus metadata: name: bus spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +``` + +# LargeMessageStore + +LargeMessageStore is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. + +## Spec + +LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of large message store (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 large message store inputs | + +S3 large message store inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + +## Example +``` +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + path: s3://ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com ``` # IngestorCluster @@ -75,7 +106,7 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Push Bus reference allows the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -112,7 +143,7 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. 
This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Pull Bus reference allows the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. @@ -151,24 +182,32 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -An IngestorCluster template has been added to the splunk/splunk-enterprise Helm chart. The IndexerCluster template has also been enhanced to support new inputs. +Bus, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for Bus, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Bus, LargeMessageStore, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. ``` bus: enabled: true name: bus - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +``` + +``` +largeMessageStore: + enabled: true + name: lms + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test ``` ``` @@ -513,14 +552,12 @@ metadata: finalizers: - enterprise.splunk.com/delete-pvc spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test ``` ``` @@ -550,13 +587,11 @@ Metadata: UID: 12345678-1234-5678-1234-012345678911 Spec: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test + Region: us-west-2 + DLQ: sqs-dlq-test Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Name: sqs-test + Provider: sqs Status: Message: Phase: Ready @@ -564,7 +599,61 @@ Status: Events: ``` -4. Install IngestorCluster resource. +4. Install LargeMessageStore resource. 
+ +``` +$ cat lms.yaml +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +``` + +``` +$ kubectl apply -f lms.yaml +``` + +``` +$ kubectl get lms +NAME PHASE AGE MESSAGE +lms Ready 20s +``` + +``` +kubectl describe lms +Name: lms +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: LargeMessageStore +Metadata: + Creation Timestamp: 2025-10-27T10:25:53Z + Finalizers: + enterprise.splunk.com/delete-pvc + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-5678-1234-012345678911 +Spec: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: s3://ingestion/smartbus-test + Provider: s3 +Status: + Message: + Phase: Ready + Resource Rev Map: +Events: +``` + +5. Install IngestorCluster resource. ``` $ cat ingestor.yaml @@ -614,6 +703,9 @@ Spec: Name: bus Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + Large Message Store Ref: + Name: lms + Namespace: default Replicas: 3 Service Account: ingestor-sa Status: @@ -630,13 +722,16 @@ Status: Version: 0 Bus: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test - Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Region: us-west-2 + DLQ: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Name: sqs-test + Provider: sqs + Large Message Store: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: s3://ingestion/smartbus-test + Provider: s3 Message: Phase: Ready Ready Replicas: 3 @@ -690,7 +785,7 @@ remote_queue.sqs_smartbus.send_interval = 5s remote_queue.type = sqs_smartbus ``` -5. Install IndexerCluster resource. +6. Install IndexerCluster resource. ``` $ cat idxc.yaml @@ -791,7 +886,7 @@ disabled = false disabled = true ``` -6. Install Horizontal Pod Autoscaler for IngestorCluster. +7. Install Horizontal Pod Autoscaler for IngestorCluster. ``` $ cat hpa-ing.yaml @@ -874,7 +969,7 @@ NAME REFERENCE TARGETS MINPODS MAXPODS REPLICA ing-hpa IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s ``` -7. Generate fake load. +8. Generate fake load. 
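As a rough illustration (the service hostname below is a placeholder, not a value from this setup; adjust it to the ingestion service in your environment), a single fake event can be posted to the ingestors over HEC using the HEC_TOKEN described below:

```
$ curl -k "https://<ingestor-hec-service>:8088/services/collector/event" \
    -H "Authorization: Splunk ${HEC_TOKEN}" \
    -d '{"event": "fake load test event"}'
```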
- HEC_TOKEN: HEC token for making fake calls From 61c0387ce7de0b81859c24eb32a4b96b3ee029f4 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 12 Dec 2025 13:33:11 +0100 Subject: [PATCH 3/7] CSPL-4358 Addressing comments --- api/v4/bus_types.go | 12 +++--- .../bases/enterprise.splunk.com_buses.yaml | 20 +++++----- ...enterprise.splunk.com_indexerclusters.yaml | 20 +++++----- ...nterprise.splunk.com_ingestorclusters.yaml | 20 +++++----- .../templates/enterprise_v4_buses.yaml | 8 +++- internal/controller/bus_controller_test.go | 18 ++++----- .../ingestorcluster_controller_test.go | 12 +++--- .../01-assert.yaml | 18 ++++----- .../02-assert.yaml | 6 +-- .../splunk_index_ingest_sep.yaml | 6 +-- pkg/splunk/enterprise/bus_test.go | 6 +-- pkg/splunk/enterprise/indexercluster.go | 14 +++---- pkg/splunk/enterprise/indexercluster_test.go | 38 +++++++++---------- pkg/splunk/enterprise/ingestorcluster.go | 10 ++--- pkg/splunk/enterprise/ingestorcluster_test.go | 34 ++++++++--------- ...dex_and_ingestion_separation_suite_test.go | 12 +++--- 16 files changed, 129 insertions(+), 125 deletions(-) diff --git a/api/v4/bus_types.go b/api/v4/bus_types.go index 10958f56b..a4930c1fa 100644 --- a/api/v4/bus_types.go +++ b/api/v4/bus_types.go @@ -36,21 +36,21 @@ type BusSpec struct { // Provider of queue resources Provider string `json:"provider"` + // sqs specific inputs + SQS SQSSpec `json:"sqs"` +} + +type SQSSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // Name of the queue - QueueName string `json:"queueName"` + Name string `json:"name"` // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` // Region of the resources Region string `json:"region"` - // sqs specific inputs - SQS SQSSpec `json:"sqs"` -} - -type SQSSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // Name of the dead letter queue resource diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml index 6a98483a5..6f4f8fac8 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -59,14 +59,6 @@ spec: enum: - sqs type: string - queueName: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string sqs: description: sqs specific inputs properties: @@ -78,13 +70,21 @@ spec: description: Amazon SQS Service endpoint pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string required: - dlq + - name + - region type: object required: - provider - - queueName - - region type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 3563c678f..c9c19edfb 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8345,14 +8345,6 @@ spec: enum: - sqs type: string - queueName: - description: Name of the queue - minLength: 1 - type: 
string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string sqs: description: sqs specific inputs properties: @@ -8364,13 +8356,21 @@ spec: description: Amazon SQS Service endpoint pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string required: - dlq + - name + - region type: object required: - provider - - queueName - - region type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 8ada99079..bdd6fb096 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4596,14 +4596,6 @@ spec: enum: - sqs type: string - queueName: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string sqs: description: sqs specific inputs properties: @@ -4615,13 +4607,21 @@ spec: description: Amazon SQS Service endpoint pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string required: - dlq + - name + - region type: object required: - provider - - queueName - - region type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml index ce1c1e7a9..bbf162332 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml @@ -15,8 +15,6 @@ metadata: {{- end }} spec: provider: {{ .Values.bus.provider | quote }} - queueName: {{ .Values.bus.queueName | quote }} - region: {{ .Values.bus.region | quote }} {{- with .Values.bus.sqs }} sqs: {{- if .endpoint }} @@ -25,6 +23,12 @@ spec: {{- if .dlq }} dlq: {{ .dlq | quote }} {{- end }} + {{- if .name }} + name: {{ .name | quote }} + {{- end }} + {{- if .region }} + region: {{ .region | quote }} + {{- end }} {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/internal/controller/bus_controller_test.go b/internal/controller/bus_controller_test.go index 300af1879..c45c66420 100644 --- a/internal/controller/bus_controller_test.go +++ b/internal/controller/bus_controller_test.go @@ -72,10 +72,10 @@ var _ = Describe("Bus Controller", func() { spec := enterpriseApi.BusSpec{ Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - DLQ: "smartbus-dlq", + Name: "smartbus-queue", + Region: "us-west-2", + DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } @@ -101,10 +101,10 @@ var _ = Describe("Bus Controller", func() { annotations := make(map[string]string) spec := enterpriseApi.BusSpec{ Provider: "sqs", - QueueName: 
"smartbus-queue", - Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - DLQ: "smartbus-dlq", + Name: "smartbus-queue", + Region: "us-west-2", + DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } @@ -140,10 +140,10 @@ var _ = Describe("Bus Controller", func() { spec := enterpriseApi.BusSpec{ Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - DLQ: "smartbus-dlq", + Name: "smartbus-queue", + Region: "us-west-2", + DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 811ca930a..053195d44 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -77,10 +77,10 @@ var _ = Describe("IngestorCluster Controller", func() { Namespace: nsSpecs.Name, }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + Region: "us-west-2", DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, @@ -125,10 +125,10 @@ var _ = Describe("IngestorCluster Controller", func() { Namespace: nsSpecs.Name, }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + Region: "us-west-2", DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 001a78ee4..f34dd2e6c 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -6,11 +6,11 @@ metadata: name: bus spec: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test status: phase: Ready @@ -67,11 +67,11 @@ status: phase: Ready bus: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: provider: s3 s3: @@ -108,11 +108,11 @@ status: phase: Ready bus: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: provider: s3 s3: diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 86a2df8a8..291eddeba 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -12,11 +12,11 @@ status: phase: Ready bus: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: provider: s3 s3: diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index d832c5253..a73c51ac2 100644 --- 
a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -9,11 +9,11 @@ bus: enabled: true name: bus provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: enabled: true diff --git a/pkg/splunk/enterprise/bus_test.go b/pkg/splunk/enterprise/bus_test.go index ac8ce8a8e..6e5bf1aa7 100644 --- a/pkg/splunk/enterprise/bus_test.go +++ b/pkg/splunk/enterprise/bus_test.go @@ -49,10 +49,10 @@ func TestApplyBus(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 7b8009cdd..e71a19efd 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -1285,12 +1285,12 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -1299,13 +1299,13 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1368,8 +1368,8 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { inputs = 
append(inputs, []string{"remote_queue.type", busProvider}) } - if oldBus.Region != newBus.Region || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) + if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 9df4b2f75..ff10e453d 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1353,10 +1353,10 @@ func TestGetIndexerStatefulSet(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -2057,10 +2057,10 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -2099,7 +2099,7 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { assert.Equal(t, 8, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -2111,7 +2111,7 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { assert.Equal(t, 10, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -2146,10 +2146,10 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -2192,8 +2192,8 @@ func TestHandlePullBusChange(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, - Bus: &enterpriseApi.BusSpec{}, + ReadyReplicas: 3, + Bus: &enterpriseApi.BusSpec{}, LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -2276,7 +2276,7 @@ func TestHandlePullBusChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + 
{fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -2359,11 +2359,11 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } @@ -2405,10 +2405,10 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 6ca721b6a..9e6c6ce17 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -346,9 +346,9 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -357,7 +357,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -440,8 +440,8 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { output = append(output, []string{"remote_queue.type", busProvider}) } - if oldBus.Region != newBus.Region || afterDelete { - output = append(output, 
[]string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) + if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index d7a1604cd..75cc14ec5 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -75,10 +75,10 @@ func TestApplyIngestorCluster(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -285,7 +285,7 @@ func TestApplyIngestorCluster(t *testing.T) { propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -342,10 +342,10 @@ func TestGetIngestorStatefulSet(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -428,10 +428,10 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -472,7 +472,7 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { assert.Equal(t, 10, len(busChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -507,10 +507,10 @@ func TestHandlePushBusChange(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -635,7 +635,7 @@ func TestHandlePushBusChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), 
bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -692,11 +692,11 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index c99112617..711580d99 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -40,10 +40,10 @@ var ( testSuiteName = "indingsep-" + testenv.RandomDNSName(3) bus = enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "test-dead-letter-queue", }, @@ -86,10 +86,10 @@ var ( } updateBus = enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue-updated", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue-updated", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "test-dead-letter-queue-updated", }, From 3eb98f747eca7c6e5475f53ff1e4e5c0172a7c4f Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Mon, 15 Dec 2025 10:47:47 +0100 Subject: [PATCH 4/7] CSPL-4358 Adding more validations --- api/v4/bus_types.go | 1 + api/v4/indexercluster_types.go | 1 + api/v4/ingestorcluster_types.go | 2 ++ api/v4/largemessagestore.go | 1 + .../bases/enterprise.splunk.com_buses.yaml | 1 + ...enterprise.splunk.com_indexerclusters.yaml | 6 ++++ ...nterprise.splunk.com_ingestorclusters.yaml | 5 +++ ...erprise.splunk.com_largemessagestores.yaml | 1 + pkg/splunk/enterprise/indexercluster.go | 36 +++++++++++++++++-- pkg/splunk/enterprise/ingestorcluster.go | 20 +++++++++-- 10 files changed, 70 insertions(+), 4 deletions(-) diff --git a/api/v4/bus_types.go b/api/v4/bus_types.go index a4930c1fa..4d9cd3a42 100644 --- a/api/v4/bus_types.go +++ b/api/v4/bus_types.go @@ -36,6 +36,7 @@ type BusSpec struct { // Provider of queue resources Provider string `json:"provider"` + // +kubebuilder:validation:Required // sqs specific inputs SQS SQSSpec `json:"sqs"` } diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 0ec425240..1f096ccdd 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,6 +34,7 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="has(self.busRef) == 
has(self.largeMessageStoreRef)",message="busRef and largeMessageStoreRef must both be set or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 27fa5d1e0..811f780a4 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -39,9 +39,11 @@ type IngestorClusterSpec struct { // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + // +kubebuilder:validation:Required // Bus reference BusRef corev1.ObjectReference `json:"busRef"` + // +kubebuilder:validation:Required // Large Message Store reference LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` } diff --git a/api/v4/largemessagestore.go b/api/v4/largemessagestore.go index 3e9f4b62b..26c986f2d 100644 --- a/api/v4/largemessagestore.go +++ b/api/v4/largemessagestore.go @@ -36,6 +36,7 @@ type LargeMessageStoreSpec struct { // Provider of queue resources Provider string `json:"provider"` + // +kubebuilder:validation:Required // s3 specific inputs S3 S3Spec `json:"s3"` } diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml index 6f4f8fac8..54d498834 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -85,6 +85,7 @@ spec: type: object required: - provider + - sqs type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index c9c19edfb..67e1021f6 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8328,6 +8328,10 @@ spec: type: object type: array type: object + x-kubernetes-validations: + - message: busRef and largeMessageStoreRef must both be set or both be + empty + rule: has(self.busRef) == has(self.largeMessageStoreRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8371,6 +8375,7 @@ spec: type: object required: - provider + - sqs type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs @@ -8433,6 +8438,7 @@ spec: type: object required: - provider + - s3 type: object x-kubernetes-validations: - message: s3 must be provided when provider is s3 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index bdd6fb096..4ecaa8d32 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4302,6 +4302,9 @@ spec: - name type: object type: array + required: + - busRef + - largeMessageStoreRef type: object status: description: IngestorClusterStatus defines the observed state of Ingestor @@ -4622,6 +4625,7 @@ spec: type: object required: - provider + - sqs type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs @@ -4650,6 +4654,7 @@ spec: type: object required: - provider + - s3 type: object x-kubernetes-validations: - message: s3 must be provided when provider is s3 diff --git 
a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml index 20cd26906..562cd773c 100644 --- a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml +++ b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml @@ -75,6 +75,7 @@ spec: type: object required: - provider + - s3 type: object x-kubernetes-validations: - message: s3 must be provided when provider is s3 diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index e71a19efd..2170e914a 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -261,6 +261,14 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } + // Can not override original bus spec due to comparison in the later code + busCopy := bus + if busCopy.Spec.Provider == "sqs" { + if busCopy.Spec.SQS.Endpoint == "" { + busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // Large Message Store lms := enterpriseApi.LargeMessageStore{} if cr.Spec.LargeMessageStoreRef.Name != "" { @@ -277,12 +285,20 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } + // Can not override original large message store spec due to comparison in the later code + lmsCopy := lms + if lmsCopy.Spec.Provider == "s3" { + if lmsCopy.Spec.S3.Endpoint == "" { + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // If bus is updated if cr.Spec.BusRef.Name != "" { if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) + err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") @@ -568,6 +584,14 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } + // Can not override original bus spec due to comparison in the later code + busCopy := bus + if busCopy.Spec.Provider == "sqs" { + if busCopy.Spec.SQS.Endpoint == "" { + busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // Large Message Store lms := enterpriseApi.LargeMessageStore{} if cr.Spec.LargeMessageStoreRef.Name != "" { @@ -584,12 +608,20 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } + // Can not override original bus spec due to comparison in the later code + lmsCopy := lms + if lmsCopy.Spec.Provider == "s3" { + if lmsCopy.Spec.S3.Endpoint == "" { + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // If bus is updated if cr.Spec.BusRef.Name != "" { if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) + err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", 
err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 9e6c6ce17..524f183b5 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -226,6 +226,14 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } + // Can not override original bus spec due to comparison in the later code + busCopy := bus + if busCopy.Spec.Provider == "sqs" { + if busCopy.Spec.SQS.Endpoint == "" { + busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // Large Message Store lms := enterpriseApi.LargeMessageStore{} if cr.Spec.LargeMessageStoreRef.Name != "" { @@ -242,11 +250,19 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } + // Can not override original bus spec due to comparison in the later code + lmsCopy := lms + if lmsCopy.Spec.Provider == "s3" { + if lmsCopy.Spec.S3.Endpoint == "" { + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", bus.Spec.SQS.Region) + } + } + // If bus is updated if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, bus, lms, client) + err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") @@ -377,7 +393,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { oldPB := busIngestorStatus.Status.Bus if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} + oldPB = &enterpriseApi.BusSpec{} } newPB := &bus.Spec From ba73a8779bec65a9230ef2f23a88f5968d8f2501 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 08:47:17 +0100 Subject: [PATCH 5/7] CSPL-4358 Rename Bus to Queue --- PROJECT | 2 +- api/v4/indexercluster_types.go | 10 +- api/v4/ingestorcluster_types.go | 8 +- api/v4/{bus_types.go => queue_types.go} | 48 ++--- api/v4/zz_generated.deepcopy.go | 194 +++++++++--------- cmd/main.go | 4 +- ...enterprise.splunk.com_indexerclusters.yaml | 170 +++++++-------- ...nterprise.splunk.com_ingestorclusters.yaml | 166 +++++++-------- ...yaml => enterprise.splunk.com_queues.yaml} | 24 +-- config/crd/kustomization.yaml | 2 +- ...ditor_role.yaml => queue_editor_role.yaml} | 6 +- ...iewer_role.yaml => queue_viewer_role.yaml} | 6 +- config/rbac/role.yaml | 6 +- ...e_v4_bus.yaml => enterprise_v4_queue.yaml} | 4 +- config/samples/kustomization.yaml | 2 +- docs/CustomResources.md | 28 +-- docs/IndexIngestionSeparation.md | 92 ++++----- .../enterprise_v4_indexercluster.yaml | 4 +- .../enterprise_v4_ingestorcluster.yaml | 10 +- ...4_buses.yaml => enterprise_v4_queues.yaml} | 18 +- helm-chart/splunk-enterprise/values.yaml | 4 +- ...ditor_role.yaml => queue_editor_role.yaml} | 12 +- ...iewer_role.yaml => queue_viewer_role.yaml} | 12 +- .../splunk-operator/templates/rbac/role.yaml | 6 +- 
.../controller/indexercluster_controller.go | 8 +- .../controller/ingestorcluster_controller.go | 8 +- .../ingestorcluster_controller_test.go | 24 +-- ...{bus_controller.go => queue_controller.go} | 38 ++-- ...oller_test.go => queue_controller_test.go} | 84 ++++---- internal/controller/suite_test.go | 2 +- internal/controller/testutils/new.go | 10 +- .../01-assert.yaml | 18 +- .../02-assert.yaml | 6 +- .../splunk_index_ingest_sep.yaml | 12 +- pkg/splunk/enterprise/clustermanager.go | 5 +- pkg/splunk/enterprise/indexercluster.go | 169 ++++++++------- pkg/splunk/enterprise/indexercluster_test.go | 134 ++++++------ pkg/splunk/enterprise/ingestorcluster.go | 108 +++++----- pkg/splunk/enterprise/ingestorcluster_test.go | 112 +++++----- pkg/splunk/enterprise/monitoringconsole.go | 3 +- pkg/splunk/enterprise/{bus.go => queue.go} | 6 +- .../enterprise/{bus_test.go => queue_test.go} | 20 +- pkg/splunk/enterprise/types.go | 8 +- pkg/splunk/enterprise/upgrade.go | 9 +- pkg/splunk/enterprise/util.go | 16 +- ...dex_and_ingestion_separation_suite_test.go | 4 +- .../index_and_ingestion_separation_test.go | 86 ++++---- test/testenv/deployment.go | 30 +-- test/testenv/util.go | 20 +- 49 files changed, 887 insertions(+), 891 deletions(-) rename api/v4/{bus_types.go => queue_types.go} (75%) rename config/crd/bases/{enterprise.splunk.com_buses.yaml => enterprise.splunk.com_queues.yaml} (89%) rename config/rbac/{bus_editor_role.yaml => queue_editor_role.yaml} (92%) rename config/rbac/{bus_viewer_role.yaml => queue_viewer_role.yaml} (91%) rename config/samples/{enterprise_v4_bus.yaml => enterprise_v4_queue.yaml} (81%) rename helm-chart/splunk-enterprise/templates/{enterprise_v4_buses.yaml => enterprise_v4_queues.yaml} (57%) rename helm-chart/splunk-operator/templates/rbac/{bus_editor_role.yaml => queue_editor_role.yaml} (82%) rename helm-chart/splunk-operator/templates/rbac/{bus_viewer_role.yaml => queue_viewer_role.yaml} (81%) rename internal/controller/{bus_controller.go => queue_controller.go} (72%) rename internal/controller/{bus_controller_test.go => queue_controller_test.go} (68%) rename pkg/splunk/enterprise/{bus.go => queue.go} (91%) rename pkg/splunk/enterprise/{bus_test.go => queue_test.go} (81%) diff --git a/PROJECT b/PROJECT index aa4aa1078..c2f3680d3 100644 --- a/PROJECT +++ b/PROJECT @@ -128,7 +128,7 @@ resources: controller: true domain: splunk.com group: enterprise - kind: Bus + kind: Queue path: github.com/splunk/splunk-operator/api/v4 version: v4 - api: diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 1f096ccdd..5e76d3e57 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,14 +34,14 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) -// +kubebuilder:validation:XValidation:rule="has(self.busRef) == has(self.largeMessageStoreRef)",message="busRef and largeMessageStoreRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.largeMessageStoreRef)",message="queueRef and largeMessageStoreRef must both be set or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` // +optional - // Bus reference - BusRef corev1.ObjectReference `json:"busRef"` + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` // +optional // Large Message Store reference @@ -121,8 +121,8 @@ type IndexerClusterStatus struct { // 
Auxillary message describing CR status Message string `json:"message"` - // Bus - Bus *BusSpec `json:"bus,omitempty"` + // Queue + Queue *QueueSpec `json:"queue,omitempty"` // Large Message Store LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 811f780a4..aa2281864 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -40,8 +40,8 @@ type IngestorClusterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` // +kubebuilder:validation:Required - // Bus reference - BusRef corev1.ObjectReference `json:"busRef"` + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required // Large Message Store reference @@ -74,8 +74,8 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus - Bus *BusSpec `json:"bus,omitempty"` + // Queue + Queue *QueueSpec `json:"queue,omitempty"` // Large Message Store LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` diff --git a/api/v4/bus_types.go b/api/v4/queue_types.go similarity index 75% rename from api/v4/bus_types.go rename to api/v4/queue_types.go index 4d9cd3a42..a094b76ce 100644 --- a/api/v4/bus_types.go +++ b/api/v4/queue_types.go @@ -23,14 +23,14 @@ import ( ) const ( - // BusPausedAnnotation is the annotation that pauses the reconciliation (triggers + // QueuePausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - BusPausedAnnotation = "bus.enterprise.splunk.com/paused" + QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" ) // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" -// BusSpec defines the desired state of Bus -type BusSpec struct { +// QueueSpec defines the desired state of Queue +type QueueSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=sqs // Provider of queue resources @@ -63,9 +63,9 @@ type SQSSpec struct { Endpoint string `json:"endpoint"` } -// BusStatus defines the observed state of Bus -type BusStatus struct { - // Phase of the bus +// QueueStatus defines the observed state of Queue +type QueueStatus struct { + // Phase of the queue Phase Phase `json:"phase"` // Resource revision tracker @@ -78,27 +78,27 @@ type BusStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// Bus is the Schema for a Splunk Enterprise bus +// Queue is the Schema for a Splunk Enterprise queue // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=buses,scope=Namespaced,shortName=bus -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus resource" +// +kubebuilder:resource:path=queues,scope=Namespaced,shortName=queue +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of queue" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of queue resource" // 
+kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// Bus is the Schema for the buses API -type Bus struct { +// Queue is the Schema for the queues API +type Queue struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec BusSpec `json:"spec"` - Status BusStatus `json:"status,omitempty,omitzero"` + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *Bus) DeepCopyObject() runtime.Object { +func (in *Queue) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -107,20 +107,20 @@ func (in *Bus) DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// BusList contains a list of Bus -type BusList struct { +// QueueList contains a list of Queue +type QueueList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []Bus `json:"items"` + Items []Queue `json:"items"` } func init() { - SchemeBuilder.Register(&Bus{}, &BusList{}) + SchemeBuilder.Register(&Queue{}, &QueueList{}) } // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { +func (bc *Queue) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ @@ -128,7 +128,7 @@ func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { Namespace: bc.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "Bus", + Kind: "Queue", Namespace: bc.Namespace, Name: bc.Name, UID: bc.UID, @@ -137,12 +137,12 @@ func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-bus-controller", + Component: "splunk-queue-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/bus-controller", + ReportingController: "enterprise.splunk.com/queue-controller", } } diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index dc19b7f10..2fb0eebc8 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -180,95 +180,6 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Bus) DeepCopyInto(out *Bus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bus. -func (in *Bus) DeepCopy() *Bus { - if in == nil { - return nil - } - out := new(Bus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BusList) DeepCopyInto(out *BusList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Bus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusList. -func (in *BusList) DeepCopy() *BusList { - if in == nil { - return nil - } - out := new(BusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusSpec) DeepCopyInto(out *BusSpec) { - *out = *in - out.SQS = in.SQS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusSpec. -func (in *BusSpec) DeepCopy() *BusSpec { - if in == nil { - return nil - } - out := new(BusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusStatus) DeepCopyInto(out *BusStatus) { - *out = *in - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusStatus. -func (in *BusStatus) DeepCopy() *BusStatus { - if in == nil { - return nil - } - out := new(BusStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -600,7 +511,7 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.BusRef = in.BusRef + out.QueueRef = in.QueueRef out.LargeMessageStoreRef = in.LargeMessageStoreRef } @@ -634,9 +545,9 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - if in.Bus != nil { - in, out := &in.Bus, &out.Bus - *out = new(BusSpec) + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(QueueSpec) **out = **in } if in.LargeMessageStore != nil { @@ -712,7 +623,7 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.BusRef = in.BusRef + out.QueueRef = in.QueueRef out.LargeMessageStoreRef = in.LargeMessageStoreRef } @@ -737,9 +648,9 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - if in.Bus != nil { - in, out := &in.Bus, &out.Bus - *out = new(BusSpec) + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(QueueSpec) **out = **in } if in.LargeMessageStore != nil { @@ -1086,6 +997,95 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + out.SQS = in.SQS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. +func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S3Spec) DeepCopyInto(out *S3Spec) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 0d14d691a..72a3e38c7 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,11 +230,11 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.BusReconciler{ + if err := (&controller.QueueReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Bus") + setupLog.Error(err, "unable to create controller", "controller", "Queue") os.Exit(1) } if err := (&controller.LargeMessageStoreReconciler{ diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 67e1021f6..90c266230 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,49 +5165,6 @@ spec: x-kubernetes-list-type: atomic type: object type: object - busRef: - description: Bus reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -5690,6 +5647,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -8329,9 +8329,9 @@ spec: type: array type: object x-kubernetes-validations: - - message: busRef and largeMessageStoreRef must both be set or both be - empty - rule: has(self.busRef) == has(self.largeMessageStoreRef) + - message: queueRef and largeMessageStoreRef must both be set or both + be empty + rule: has(self.queueRef) == has(self.largeMessageStoreRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8341,45 +8341,6 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object - bus: - description: Bus - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - required: - - dlq - - name - - region - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8493,6 +8454,45 @@ spec: - Terminating - Error type: string + queue: + description: Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + required: + - dlq + - name + - region + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 4ecaa8d32..37c820c4c 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,49 +1141,6 @@ spec: type: object type: array type: object - busRef: - description: Bus reference - properties: - apiVersion: - description: API version of the referent. 
- type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1666,6 +1623,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -4303,8 +4303,8 @@ spec: type: object type: array required: - - busRef - largeMessageStoreRef + - queueRef type: object status: description: IngestorClusterStatus defines the observed state of Ingestor @@ -4591,45 +4591,6 @@ spec: description: App Framework version info for future use type: integer type: object - bus: - description: Bus - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - required: - - dlq - - name - - region - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) largeMessageStore: description: Large Message Store properties: @@ -4673,6 +4634,45 @@ spec: - Terminating - Error type: string + queue: + description: Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + required: + - dlq + - name + - region + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml similarity index 89% rename from config/crd/bases/enterprise.splunk.com_buses.yaml rename to config/crd/bases/enterprise.splunk.com_queues.yaml index 54d498834..928cd34ce 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: buses.enterprise.splunk.com + name: queues.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: Bus - listKind: BusList - plural: buses + kind: Queue + listKind: QueueList + plural: queues shortNames: - - bus - singular: bus + - queue + singular: queue 
scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of bus + - description: Status of queue jsonPath: .status.phase name: Phase type: string - - description: Age of bus resource + - description: Age of queue resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: Bus is the Schema for the buses API + description: Queue is the Schema for the queues API properties: apiVersion: description: |- @@ -52,7 +52,7 @@ spec: metadata: type: object spec: - description: BusSpec defines the desired state of Bus + description: QueueSpec defines the desired state of Queue properties: provider: description: Provider of queue resources @@ -91,13 +91,13 @@ spec: - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: - description: BusStatus defines the observed state of Bus + description: QueueStatus defines the observed state of Queue properties: message: description: Auxillary message describing CR status type: string phase: - description: Phase of the bus + description: Phase of the queue enum: - Pending - Ready diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c8ba16418..f80dfec5e 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,7 +11,7 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml -- bases/enterprise.splunk.com_buses.yaml +- bases/enterprise.splunk.com_queues.yaml - bases/enterprise.splunk.com_largemessagestores.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/bus_editor_role.yaml b/config/rbac/queue_editor_role.yaml similarity index 92% rename from config/rbac/bus_editor_role.yaml rename to config/rbac/queue_editor_role.yaml index c08c2ce39..bf7e4d890 100644 --- a/config/rbac/bus_editor_role.yaml +++ b/config/rbac/queue_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: bus-editor-role + name: queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get diff --git a/config/rbac/bus_viewer_role.yaml b/config/rbac/queue_viewer_role.yaml similarity index 91% rename from config/rbac/bus_viewer_role.yaml rename to config/rbac/queue_viewer_role.yaml index 6f9c42d2a..b186c8650 100644 --- a/config/rbac/bus_viewer_role.yaml +++ b/config/rbac/queue_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: bus-viewer-role + name: queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 94ed9d59e..295e080c6 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,7 +47,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses - clustermanagers - clustermasters - indexerclusters @@ -56,6 +55,7 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - queues - searchheadclusters - standalones verbs: @@ -69,7 +69,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - 
buses/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers @@ -78,6 +77,7 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -85,7 +85,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status - clustermanagers/status - clustermasters/status - indexerclusters/status @@ -94,6 +93,7 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/config/samples/enterprise_v4_bus.yaml b/config/samples/enterprise_v4_queue.yaml similarity index 81% rename from config/samples/enterprise_v4_bus.yaml rename to config/samples/enterprise_v4_queue.yaml index 51af9d05a..374d4adb2 100644 --- a/config/samples/enterprise_v4_bus.yaml +++ b/config/samples/enterprise_v4_queue.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus-sample + name: queue-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 1ea90a3ae..4de2ec89d 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,6 +14,6 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml -- enterprise_v4_bus.yaml +- enterprise_v4_queue.yaml - enterprise_v4_largemessagestore.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 95ca6c1d9..f69a8fa50 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -18,7 +18,7 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters) - [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) - - [Bus Resource Spec Parameters](#bus-resource-spec-parameters) + - [Queue Resource Spec Parameters](#queue-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) @@ -281,13 +281,13 @@ spec: cpu: "4" ``` -## Bus Resource Spec Parameters +## Queue Resource Spec Parameters ```yaml apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: replicas: 3 provider: sqs @@ -298,14 +298,14 @@ spec: dlq: sqs-dlq-test ``` -Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of message bus (Allowed values: sqs) | -| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | -SQS message bus inputs can be found in the table below. 
+SQS message queue inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -314,7 +314,7 @@ SQS message bus inputs can be found in the table below. | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## ClusterManager Resource Spec Parameters ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - @@ -375,12 +375,12 @@ metadata: name: ic spec: replicas: 3 - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` -Note: `busRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Bus and LargeMessageStore resources. +Note: `queueRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and LargeMessageStore resources. In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), @@ -418,7 +418,7 @@ S3 large message store inputs can be found in the table below. | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## MonitoringConsole Resource Spec Parameters @@ -531,7 +531,7 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. However, t | Customer Resource Definition | Annotation | | ----------- | --------- | -| bus.enterprise.splunk.com | "bus.enterprise.splunk.com/paused" | +| queue.enterprise.splunk.com | "queue.enterprise.splunk.com/paused" | | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index e8c6211d7..257e37400 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -4,7 +4,7 @@ Separation between ingestion and indexing services within Splunk Operator for Ku This separation enables: - Independent scaling: Match resource allocation to ingestion or indexing workload. -- Data durability: Off‑load buffer management and retry logic to a durable message bus. +- Data durability: Off‑load buffer management and retry logic to a durable message queue. - Operational clarity: Separate monitoring dashboards for ingestion throughput vs indexing latency. 
# Important Note @@ -16,20 +16,20 @@ This separation enables: - SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version -# Bus +# Queue -Bus is introduced to store message bus information to be shared among IngestorCluster and IndexerCluster. +Queue is introduced to store message queue information to be shared among IngestorCluster and IndexerCluster. ## Spec -Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of message bus (Allowed values: sqs) | -| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | -SQS message bus inputs can be found in the table below. +SQS message queue inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -38,14 +38,14 @@ SQS message bus inputs can be found in the table below. | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: provider: sqs sqs: @@ -75,7 +75,7 @@ S3 large message store inputs can be found in the table below. | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## Example ``` @@ -92,7 +92,7 @@ spec: # IngestorCluster -IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. +IngestorCluster is introduced for high‑throughput data ingestion into a durable message queue. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message queue. 
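For reference, the wiring between the two resources can also be expressed programmatically. The following is a minimal, hedged Go sketch of creating a Queue and an IngestorCluster that points at it through `queueRef`, using the v4 types introduced in this change. It assumes the enterprise.splunk.com/v4 scheme is registered on the client, and that the Go field names for the SQS settings (`Name`, `Region`, `DLQ`, `Endpoint`) mirror the CRD keys shown above; the helper name `createIngestionPair` and the sample values are illustrative only.

```
// Minimal sketch, not the operator's own code path.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
)

// createIngestionPair (hypothetical helper) creates a Queue and an IngestorCluster
// that references it through queueRef, matching the YAML examples in this document.
func createIngestionPair(ctx context.Context, c client.Client, ns string) error {
	queue := &enterpriseApi.Queue{
		ObjectMeta: metav1.ObjectMeta{Name: "queue", Namespace: ns},
		Spec: enterpriseApi.QueueSpec{
			Provider: "sqs",
			SQS: enterpriseApi.SQSSpec{
				Name:     "sqs-test",
				Region:   "us-west-2",
				DLQ:      "sqs-dlq-test",
				Endpoint: "https://sqs.us-west-2.amazonaws.com",
			},
		},
	}
	if err := c.Create(ctx, queue); err != nil {
		return err
	}

	// The IngestorCluster only records the reference; the operator resolves it on
	// reconcile and renders the queue settings into the pods' configuration.
	ingestor := &enterpriseApi.IngestorCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "ingestor", Namespace: ns},
		Spec: enterpriseApi.IngestorClusterSpec{
			Replicas:             3,
			QueueRef:             corev1.ObjectReference{Name: queue.Name, Namespace: queue.Namespace},
			LargeMessageStoreRef: corev1.ObjectReference{Name: "lms"},
		},
	}
	return c.Create(ctx, ingestor)
}
```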
## Spec @@ -101,12 +101,12 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busRef | corev1.ObjectReference | Message bus reference | +| queueRef | corev1.ObjectReference | Message queue reference | | largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -121,15 +121,15 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` # IndexerCluster -IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the bus (inputs.conf) and index them. +IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the queue (inputs.conf) and index them. ## Spec @@ -138,12 +138,12 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busRef | corev1.ObjectReference | Message bus reference | +| queueRef | corev1.ObjectReference | Message queue reference | | largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. 
+The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in the default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa, allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. @@ -170,8 +170,8 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -182,16 +182,16 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -Bus, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. +Queue, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for Bus, LargeMessageStoe, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +The examples below describe how to define values for Queue, LargeMessageStore, IngestorCluster and IndexerCluster, mirroring the YAML specifications above. ``` -bus: +queue: enabled: true - name: bus + name: queue provider: sqs sqs: name: sqs-test @@ -216,8 +216,8 @@ ingestorCluster: name: ingestor replicaCount: 3 serviceAccount: ingestor-sa - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -236,8 +236,8 @@ indexerCluster: serviceAccount: ingestor-sa clusterManagerRef: name: cm - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -541,14 +541,14 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install Bus resource. +3. Install Queue resource.
``` -$ cat bus.yaml +$ cat queue.yaml apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue finalizers: - enterprise.splunk.com/delete-pvc spec: @@ -561,23 +561,23 @@ spec: ``` ``` -$ kubectl apply -f bus.yaml +$ kubectl apply -f queue.yaml ``` ``` -$ kubectl get bus +$ kubectl get queue NAME PHASE AGE MESSAGE -bus Ready 20s +queue Ready 20s ``` ``` -kubectl describe bus -Name: bus +kubectl describe queue +Name: queue Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: Bus +Kind: Queue Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -667,8 +667,8 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -699,8 +699,8 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Bus Ref: - Name: bus + Queue Ref: + Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} Large Message Store Ref: @@ -720,7 +720,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Bus: + Queue: Sqs: Region: us-west-2 DLQ: sqs-dlq-test @@ -811,8 +811,8 @@ spec: clusterManagerRef: name: cm serviceAccount: ingestor-sa - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 0e6a96673..536be0cd2 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -163,8 +163,8 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.busRef }} - busRef: + {{- with $.Values.indexerCluster.queueRef }} + queueRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index b6c1640ec..b9ec62107 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,11 +95,11 @@ spec: topologySpreadConstraints: {{- toYaml . 
| nindent 4 }} {{- end }} - {{- with $.Values.ingestorCluster.busRef }} - busRef: - name: {{ $.Values.ingestorCluster.busRef.name }} - {{- if $.Values.ingestorCluster.busRef.namespace }} - namespace: {{ $.Values.ingestorCluster.busRef.namespace }} + {{- with $.Values.ingestorCluster.queueRef }} + queueRef: + name: {{ $.Values.ingestorCluster.queueRef.name }} + {{- if $.Values.ingestorCluster.queueRef.namespace }} + namespace: {{ $.Values.ingestorCluster.queueRef.namespace }} {{- end }} {{- end }} {{- with $.Values.ingestorCluster.largeMessageStoreRef }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml similarity index 57% rename from helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml rename to helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml index bbf162332..b586e45da 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -1,21 +1,21 @@ -{{- if .Values.bus }} -{{- if .Values.bus.enabled }} +{{- if .Values.queue }} +{{- if .Values.queue.enabled }} apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: {{ .Values.bus.name }} - namespace: {{ default .Release.Namespace .Values.bus.namespaceOverride }} - {{- with .Values.bus.additionalLabels }} + name: {{ .Values.queue.name }} + namespace: {{ default .Release.Namespace .Values.queue.namespaceOverride }} + {{- with .Values.queue.additionalLabels }} labels: {{ toYaml . | nindent 4 }} {{- end }} - {{- with .Values.bus.additionalAnnotations }} + {{- with .Values.queue.additionalAnnotations }} annotations: {{ toYaml . | nindent 4 }} {{- end }} spec: - provider: {{ .Values.bus.provider | quote }} - {{- with .Values.bus.sqs }} + provider: {{ .Values.queue.provider | quote }} + {{- with .Values.queue.sqs }} sqs: {{- if .endpoint }} endpoint: {{ .endpoint | quote }} diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index a001bbead..ea4921b52 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,7 +350,7 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - busRef: {} + queueRef: {} largeMessageStoreRef: {} @@ -901,6 +901,6 @@ ingestorCluster: affinity: {} - busRef: {} + queueRef: {} largeMessageStoreRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml similarity index 82% rename from helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml rename to helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml index f285a1ca5..6c04be75b 100644 --- a/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-editor-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -25,19 +25,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -49,7 +49,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml similarity index 81% rename from helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml rename to helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml index c4381a3cc..2c81b98fd 100644 --- a/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - get - list @@ -21,19 +21,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - get - list @@ -41,7 +41,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 61cf4ada9..26824528f 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -251,7 +251,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -263,13 +263,13 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/finalizers + - queues/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get - patch diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 676f81d23..2ed4d775e 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -172,9 +172,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). 
- Watches(&enterpriseApi.Bus{}, + Watches(&enterpriseApi.Queue{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - b, ok := obj.(*enterpriseApi.Bus) + b, ok := obj.(*enterpriseApi.Queue) if !ok { return nil } @@ -184,11 +184,11 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusRef.Namespace + ns := ic.Spec.QueueRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + if ic.Spec.QueueRef.Name == b.Name && ns == b.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index 1df81eb78..a46a1dcff 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -141,9 +141,9 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). - Watches(&enterpriseApi.Bus{}, + Watches(&enterpriseApi.Queue{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - b, ok := obj.(*enterpriseApi.Bus) + queue, ok := obj.(*enterpriseApi.Queue) if !ok { return nil } @@ -153,11 +153,11 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusRef.Namespace + ns := ic.Spec.QueueRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + if ic.Spec.QueueRef.Name == queue.Name && ns == queue.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 053195d44..4d140e1d6 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -71,12 +71,12 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -99,7 +99,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations @@ -119,12 +119,12 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -147,7 +147,7 @@ var _ = Describe("IngestorCluster 
Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -220,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, bus *enterpriseApi.Bus) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -240,9 +240,9 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - BusRef: corev1.ObjectReference{ - Name: bus.Name, - Namespace: bus.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, diff --git a/internal/controller/bus_controller.go b/internal/controller/queue_controller.go similarity index 72% rename from internal/controller/bus_controller.go rename to internal/controller/queue_controller.go index b52e91991..6fff662b9 100644 --- a/internal/controller/bus_controller.go +++ b/internal/controller/queue_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// BusReconciler reconciles a Bus object -type BusReconciler struct { +// QueueReconciler reconciles a Queue object +type QueueReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the Bus object against the actual cluster state, and then +// the Queue object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Bus")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "Bus") +func (r *QueueReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Queue")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "Queue") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("bus", req.NamespacedName) + reqLogger = reqLogger.WithValues("queue", req.NamespacedName) - // Fetch the Bus - instance := &enterpriseApi.Bus{} + // Fetch the Queue + instance := &enterpriseApi.Queue{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R return ctrl.Result{}, nil } // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrap(err, "could not load bus data") + return ctrl.Result{}, errors.Wrap(err, "could not load queue data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.BusPausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.QueuePausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyBus(ctx, r.Client, instance) + result, err := ApplyQueue(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R return result, err } -var ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { - return enterprise.ApplyBus(ctx, client, instance) +var ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { + return enterprise.ApplyQueue(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *BusReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *QueueReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.Bus{}). + For(&enterpriseApi.Queue{}). 
WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/bus_controller_test.go b/internal/controller/queue_controller_test.go similarity index 68% rename from internal/controller/bus_controller_test.go rename to internal/controller/queue_controller_test.go index c45c66420..23d40ae4c 100644 --- a/internal/controller/bus_controller_test.go +++ b/internal/controller/queue_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("Bus Controller", func() { +var _ = Describe("Queue Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,34 +43,34 @@ var _ = Describe("Bus Controller", func() { }) - Context("Bus Management", func() { + Context("Queue Management", func() { - It("Get Bus custom resource should fail", func() { - namespace := "ns-splunk-bus-1" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + It("Get Queue custom resource should fail", func() { + namespace := "ns-splunk-queue-1" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetBus("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("buses.enterprise.splunk.com \"test\" not found")) + _, err := GetQueue("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("queues.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create Bus custom resource with annotations should pause", func() { - namespace := "ns-splunk-bus-2" + It("Create Queue custom resource with annotations should pause", func() { + namespace := "ns-splunk-queue-2" annotations := make(map[string]string) - annotations[enterpriseApi.BusPausedAnnotation] = "" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + annotations[enterpriseApi.QueuePausedAnnotation] = "" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - spec := enterpriseApi.BusSpec{ + spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -79,19 +79,19 @@ var _ = Describe("Bus Controller", func() { Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } - CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - icSpec, _ := GetBus("test", nsSpecs.Name) + CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetQueue("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateBus(icSpec, enterpriseApi.PhaseReady, spec) - DeleteBus("test", nsSpecs.Name) + UpdateQueue(icSpec, enterpriseApi.PhaseReady, spec) + DeleteQueue("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create Bus custom resource should succeeded", func() { - namespace := "ns-splunk-bus-3" - ApplyBus = func(ctx context.Context, client 
client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + It("Create Queue custom resource should succeeded", func() { + namespace := "ns-splunk-queue-3" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -99,7 +99,7 @@ var _ = Describe("Bus Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - spec := enterpriseApi.BusSpec{ + spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -108,14 +108,14 @@ var _ = Describe("Bus Controller", func() { Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } - CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteBus("test", nsSpecs.Name) + CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteQueue("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { - namespace := "ns-splunk-bus-4" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + namespace := "ns-splunk-queue-4" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -125,7 +125,7 @@ var _ = Describe("Bus Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - instance := BusReconciler{ + instance := QueueReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -138,7 +138,7 @@ var _ = Describe("Bus Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - spec := enterpriseApi.BusSpec{ + spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -147,11 +147,11 @@ var _ = Describe("Bus Controller", func() { Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } - bcSpec := testutils.NewBus("test", namespace, spec) + bcSpec := testutils.NewQueue("test", namespace, spec) Expect(c.Create(ctx, bcSpec)).Should(Succeed()) annotations := make(map[string]string) - annotations[enterpriseApi.BusPausedAnnotation] = "" + annotations[enterpriseApi.QueuePausedAnnotation] = "" bcSpec.Annotations = annotations Expect(c.Update(ctx, bcSpec)).Should(Succeed()) @@ -173,14 +173,14 @@ var _ = Describe("Bus Controller", func() { }) }) -func GetBus(name string, namespace string) (*enterpriseApi.Bus, error) { - By("Expecting Bus custom resource to be retrieved successfully") +func GetQueue(name string, namespace string) (*enterpriseApi.Queue, error) { + By("Expecting Queue custom resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} err := k8sClient.Get(context.Background(), key, b) if err != nil { @@ -190,14 +190,14 @@ func GetBus(name string, namespace string) (*enterpriseApi.Bus, error) { return b, err } -func CreateBus(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { - By("Expecting Bus custom resource to be created successfully") +func CreateQueue(name string, namespace string, 
annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + By("Expecting Queue custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - ingSpec := &enterpriseApi.Bus{ + ingSpec := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -209,7 +209,7 @@ func CreateBus(name string, namespace string, annotations map[string]string, sta Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, b) if status != "" { @@ -224,20 +224,20 @@ func CreateBus(name string, namespace string, annotations map[string]string, sta return b } -func UpdateBus(instance *enterpriseApi.Bus, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { - By("Expecting Bus custom resource to be updated successfully") +func UpdateQueue(instance *enterpriseApi.Queue, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + By("Expecting Queue custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - bSpec := testutils.NewBus(instance.Name, instance.Namespace, spec) + bSpec := testutils.NewQueue(instance.Name, instance.Namespace, spec) bSpec.ResourceVersion = instance.ResourceVersion Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, b) if status != "" { @@ -252,8 +252,8 @@ func UpdateBus(instance *enterpriseApi.Bus, status enterpriseApi.Phase, spec ent return b } -func DeleteBus(name string, namespace string) { - By("Expecting Bus custom resource to be deleted successfully") +func DeleteQueue(name string, namespace string) { + By("Expecting Queue custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, @@ -261,7 +261,7 @@ func DeleteBus(name string, namespace string) { } Eventually(func() error { - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} _ = k8sClient.Get(context.Background(), key, b) err := k8sClient.Delete(context.Background(), b) return err diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 17ce5e760..eda9f320d 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -98,7 +98,7 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) - if err := (&BusReconciler{ + if err := (&QueueReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index e3e37efc2..b5b620337 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -54,16 +54,16 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - BusRef: corev1.ObjectReference{ - Name: "bus", + QueueRef: corev1.ObjectReference{ + Name: "queue", }, }, } } -// NewBus returns new Bus instance with its config hash -func NewBus(name, ns string, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { - return 
&enterpriseApi.Bus{ +// NewQueue returns new Queue instance with its config hash +func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: spec, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index f34dd2e6c..2b0596fdd 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,9 +1,9 @@ --- -# assert for bus custom resource to be ready +# assert for queue custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: provider: sqs sqs: @@ -61,11 +61,11 @@ metadata: name: indexer spec: replicas: 3 - busRef: - name: bus + queueRef: + name: queue status: phase: Ready - bus: + queue: provider: sqs sqs: name: sqs-test @@ -102,11 +102,11 @@ metadata: name: ingestor spec: replicas: 3 - busRef: - name: bus + queueRef: + name: queue status: phase: Ready - bus: + queue: provider: sqs sqs: name: sqs-test diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 291eddeba..57e6c4c68 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -6,11 +6,11 @@ metadata: name: ingestor spec: replicas: 4 - busRef: - name: bus + queueRef: + name: queue status: phase: Ready - bus: + queue: provider: sqs sqs: name: sqs-test diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index a73c51ac2..1e8af1663 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -5,9 +5,9 @@ splunk-operator: persistentVolumeClaim: storageClassName: gp2 -bus: +queue: enabled: true - name: bus + name: queue provider: sqs sqs: name: sqs-test @@ -27,8 +27,8 @@ ingestorCluster: enabled: true name: ingestor replicaCount: 3 - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms @@ -43,7 +43,7 @@ indexerCluster: replicaCount: 3 clusterManagerRef: name: cm - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 269753c5c..150dfdbbe 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -22,7 +22,6 @@ import ( "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" @@ -427,9 +426,9 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr return splunkClient.BundlePush(true) } - + // helper function to get the list of ClusterManager types in the current namespace -func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (int, error) { +func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (int, error) { reqLogger := log.FromContext(ctx) scopedLog := 
reqLogger.WithName("getClusterManagerList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 2170e914a..5e468196c 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -37,7 +37,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -78,7 +77,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -245,27 +244,27 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus - bus := enterpriseApi.Bus{} - if cr.Spec.BusRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusRef.Namespace != "" { - ns = cr.Spec.BusRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code - busCopy := bus - if busCopy.Spec.Provider == "sqs" { - if busCopy.Spec.SQS.Endpoint == "" { - busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -289,23 +288,23 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller lmsCopy := lms if lmsCopy.Spec.Provider == "s3" { if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } - // If bus is updated - if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, 
"ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.Bus = &bus.Spec + cr.Status.Queue = &queue.Spec } } @@ -398,7 +397,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -568,27 +567,27 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus - bus := enterpriseApi.Bus{} - if cr.Spec.BusRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusRef.Namespace != "" { - ns = cr.Spec.BusRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code - busCopy := bus - if busCopy.Spec.Provider == "sqs" { - if busCopy.Spec.SQS.Endpoint == "" { - busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -602,33 +601,33 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, err = client.Get(context.Background(), types.NamespacedName{ Name: cr.Spec.LargeMessageStoreRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code + // Can not override original queue spec due to comparison in the later code lmsCopy := lms if lmsCopy.Spec.Provider == "s3" { if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } - // If bus is updated - if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + 
eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.Bus = &bus.Spec + cr.Status.Queue = &queue.Spec } } @@ -1218,7 +1217,7 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien } // helper function to get the list of IndexerCluster types in the current namespace -func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.IndexerClusterList, error) { +func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.IndexerClusterList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getIndexerClusterList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) @@ -1295,12 +1294,12 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri return extractedValue } -var newSplunkClientForBusPipeline = splclient.NewSplunkClient +var newSplunkClientForQueuePipeline = splclient.NewSplunkClient -// Checks if only PullBus or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { +// Checks if only PullQueue or Pipeline config changed, and updates the conf file if so +func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.ReadyReplicas @@ -1314,30 +1313,30 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne if err != nil { return err } - splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || - (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || + (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", 
fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, afterDelete) - for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFieldsOutputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFieldsInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1353,23 +1352,23 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne return updateErr } -// getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare bus fields - oldPB := busIndexerStatus.Status.Bus +// getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { + // Compare queue fields + oldPB := queueIndexerStatus.Status.Queue if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} + oldPB = &enterpriseApi.QueueSpec{} } - newPB := bus.Spec + newPB := queue.Spec - oldLMS := busIndexerStatus.Status.LargeMessageStore + oldLMS := queueIndexerStatus.Status.LargeMessageStore if oldLMS == nil { oldLMS = &enterpriseApi.LargeMessageStoreSpec{} } newLMS := lms.Spec - // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) + // Push all queue fields + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1387,24 +1386,24 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { - busProvider := "" - if newBus.Provider == "sqs" { - busProvider = "sqs_smartbus" +func pullQueueChanged(oldQueue, newQueue 
*enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { + queueProvider := "" + if newQueue.Provider == "sqs" { + queueProvider = "sqs_smartbus" } lmsProvider := "" if newLMS.Provider == "s3" { lmsProvider = "sqs_smartbus" } - if oldBus.Provider != newBus.Provider || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", busProvider}) + if oldQueue.Provider != newQueue.Provider || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", queueProvider}) } - if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) + if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) + if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) @@ -1412,18 +1411,18 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) + if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) } inputs = append(inputs, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, ) outputs = inputs outputs = append(outputs, - []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, ) return inputs, outputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index ff10e453d..4c166c8e0 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1344,15 +1344,15 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: 
enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -1369,8 +1369,8 @@ func TestGetIndexerStatefulSet(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -2045,18 +2045,18 @@ func TestImageUpdatedTo9(t *testing.T) { } } -func TestGetChangedBusFieldsForIndexer(t *testing.T) { +func TestGetChangedQueueFieldsForIndexer(t *testing.T) { provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -2086,8 +2086,8 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { newCR := &enterpriseApi.IndexerCluster{ Spec: enterpriseApi.IndexerClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -2095,32 +2095,32 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false) - assert.Equal(t, 8, len(busChangedFieldsInputs)) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, false) + assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, - }, busChangedFieldsInputs) + }, queueChangedFieldsInputs) - assert.Equal(t, 10, len(busChangedFieldsOutputs)) + assert.Equal(t, 10, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, 
{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - }, busChangedFieldsOutputs) + }, queueChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -2132,20 +2132,20 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePullBusChange(t *testing.T) { +func TestHandlePullQueueChange(t *testing.T) { // Object definitions provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -2183,8 +2183,8 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -2193,7 +2193,7 @@ func TestHandlePullBusChange(t *testing.T) { }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, - Bus: &enterpriseApi.BusSpec{}, + Queue: &enterpriseApi.QueueSpec{}, LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -2251,7 +2251,7 @@ func TestHandlePullBusChange(t *testing.T) { // Mock pods c := spltest.NewMockClient() ctx := context.TODO() - c.Create(ctx, &bus) + c.Create(ctx, &queue) c.Create(ctx, &lms) c.Create(ctx, newCR) c.Create(ctx, pod0) @@ -2260,7 +2260,7 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err := mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // Mock secret @@ -2269,18 +2269,18 @@ func TestHandlePullBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, } @@ -2290,22 +2290,22 @@ func TestHandlePullBusChange(t *testing.T) { propertyKVListOutputs = 
append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -2331,9 +2331,9 @@ func TestHandlePullBusChange(t *testing.T) { } } - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.Nil(t, err) } @@ -2351,7 +2351,7 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -2359,18 +2359,18 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { - newSplunkClientForBusPipeline = func(uri, user, pass string) *splclient.SplunkClient { +func newTestPullQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { + newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -2379,11 +2379,11 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) 
*inde } } return &indexerClusterPodManager{ - newSplunkClient: newSplunkClientForBusPipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } -func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { +func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -2395,16 +2395,16 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -2414,7 +2414,7 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { }, }, } - c.Create(ctx, &bus) + c.Create(ctx, &queue) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2436,9 +2436,9 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { }, Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - BusRef: corev1.ObjectReference{ - Name: bus.Name, - Namespace: bus.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ @@ -2552,14 +2552,14 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} base := "https://splunk-test-indexer-0.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs" - queue := "remote_queue:test-queue" + q := "remote_queue:test-queue" - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, q), ""), 200, "", nil) // inputs.conf - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, q), ""), 200, "", nil) // default-mode.conf pipelineFields := []string{ diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 524f183b5..299aa8d0c 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -73,7 +73,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -210,27 +210,27 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady 
{ - // Bus - bus := enterpriseApi.Bus{} - if cr.Spec.BusRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusRef.Namespace != "" { - ns = cr.Spec.BusRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(ctx, types.NamespacedName{ - Name: cr.Spec.BusRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code - busCopy := bus - if busCopy.Spec.Provider == "sqs" { - if busCopy.Spec.SQS.Endpoint == "" { - busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -250,26 +250,26 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - // Can not override original bus spec due to comparison in the later code + // Can not override original queue spec due to comparison in the later code lmsCopy := lms if lmsCopy.Spec.Provider == "s3" { if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", bus.Spec.SQS.Region) + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) } } - // If bus is updated - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + // If queue is updated + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) + err = mgr.handlePushQueueChange(ctx, cr, queueCopy, lmsCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.Bus = &bus.Spec + cr.Status.Queue = &queue.Spec } // Upgrade fron automated MC to MC CRD @@ -342,10 +342,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only Bus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { +// Checks if only Queue or Pipeline config changed, and updates the conf file if so +func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := 
reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.Replicas @@ -362,18 +362,18 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || - (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || + (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, afterDelete) - for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFields { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -389,22 +389,22 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n return updateErr } -// getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { - oldPB := busIngestorStatus.Status.Bus +// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { + oldPB := queueIngestorStatus.Status.Queue if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} + oldPB = &enterpriseApi.QueueSpec{} } - newPB := &bus.Spec + newPB := &queue.Spec - oldLMS := busIngestorStatus.Status.LargeMessageStore + oldLMS := queueIngestorStatus.Status.LargeMessageStore if oldLMS == nil { oldLMS = &enterpriseApi.LargeMessageStoreSpec{} } newLMS := &lms.Spec - // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) + // Push changed queue fields + queueChangedFields = pushQueueChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -443,24 +443,24 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS 
*enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { - busProvider := "" - if newBus.Provider == "sqs" { - busProvider = "sqs_smartbus" +func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { + queueProvider := "" + if newQueue.Provider == "sqs" { + queueProvider = "sqs_smartbus" } lmsProvider := "" if newLMS.Provider == "s3" { lmsProvider = "sqs_smartbus" } - if oldBus.Provider != newBus.Provider || afterDelete { - output = append(output, []string{"remote_queue.type", busProvider}) + if oldQueue.Provider != newQueue.Provider || afterDelete { + output = append(output, []string{"remote_queue.type", queueProvider}) } - if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) + if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) + if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) @@ -468,15 +468,15 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) + if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) } output = append(output, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}) return output } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 75cc14ec5..424806846 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -65,16 +65,16 @@ func TestApplyIngestorCluster(t *testing.T) { // Object definitions provider := "sqs_smartbus" - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: 
metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -84,7 +84,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, bus) + c.Create(ctx, queue) lms := enterpriseApi.LargeMessageStore{ TypeMeta: metav1.TypeMeta{ @@ -119,9 +119,9 @@ func TestApplyIngestorCluster(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, - BusRef: corev1.ObjectReference{ - Name: bus.Name, - Namespace: bus.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -285,18 +285,18 @@ func TestApplyIngestorCluster(t *testing.T) { propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, bus, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, queue, cr.Status.ReadyReplicas, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -333,15 +333,15 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -362,8 +362,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -416,18 +416,18 @@ func TestGetIngestorStatefulSet(t *testing.T) { 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivileg
eEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedBusFieldsForIngestor(t *testing.T) { +func TestGetChangedQueueFieldsForIngestor(t *testing.T) { provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -457,8 +457,8 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { newCR := &enterpriseApi.IngestorCluster{ Spec: enterpriseApi.IngestorClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -467,21 +467,21 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, false) - assert.Equal(t, 10, len(busChangedFields)) + assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", 
provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, - }, busChangedFields) + }, queueChangedFields) assert.Equal(t, 6, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -494,19 +494,19 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePushBusChange(t *testing.T) { +func TestHandlePushQueueChange(t *testing.T) { // Object definitions provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -543,8 +543,8 @@ func TestHandlePushBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -553,7 +553,7 @@ func TestHandlePushBusChange(t *testing.T) { Status: enterpriseApi.IngestorClusterStatus{ Replicas: 3, ReadyReplicas: 3, - Bus: &enterpriseApi.BusSpec{}, + Queue: &enterpriseApi.QueueSpec{}, LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -618,7 +618,7 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err := mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // Mock secret @@ -627,31 +627,31 @@ func TestHandlePushBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &bus, 
newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &queue, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -678,13 +678,13 @@ func TestHandlePushBusChange(t *testing.T) { } } - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, bus *enterpriseApi.Bus, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.Queue, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -692,18 +692,18 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushBusPipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForPushQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -712,6 +712,6 @@ func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inge } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushBusPipeline, + newSplunkClient: newSplunkClientForPushQueuePipeline, } } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 64de4a2de..77c58c328 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -33,7 +33,6 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -207,7 +206,7 @@ func getMonitoringConsoleStatefulSet(ctx context.Context, client splcommon.Contr } // helper function to get the list of 
MonitoringConsole types in the current namespace -func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.MonitoringConsoleList, error) { +func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.MonitoringConsoleList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getMonitoringConsoleList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/bus.go b/pkg/splunk/enterprise/queue.go similarity index 91% rename from pkg/splunk/enterprise/bus.go rename to pkg/splunk/enterprise/queue.go index b6e8318ed..1f36f6bad 100644 --- a/pkg/splunk/enterprise/bus.go +++ b/pkg/splunk/enterprise/queue.go @@ -27,8 +27,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -// ApplyBus reconciles the state of an IngestorCluster custom resource -func ApplyBus(ctx context.Context, client client.Client, cr *enterpriseApi.Bus) (reconcile.Result, error) { +// ApplyQueue reconciles the state of a Queue custom resource +func ApplyQueue(ctx context.Context, client client.Client, cr *enterpriseApi.Queue) (reconcile.Result, error) { var err error // Unless modified, reconcile for this object will be requeued after 5 seconds @@ -44,7 +44,7 @@ func ApplyBus(ctx context.Context, client client.Client, cr *enterpriseApi.Bus) eventPublisher, _ := newK8EventPublisher(client, cr) ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - cr.Kind = "Bus" + cr.Kind = "Queue" // Initialize phase cr.Status.Phase = enterpriseApi.PhaseError diff --git a/pkg/splunk/enterprise/bus_test.go b/pkg/splunk/enterprise/queue_test.go similarity index 81% rename from pkg/splunk/enterprise/bus_test.go rename to pkg/splunk/enterprise/queue_test.go index 6e5bf1aa7..45a813282 100644 --- a/pkg/splunk/enterprise/bus_test.go +++ b/pkg/splunk/enterprise/queue_test.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func TestApplyBus(t *testing.T) { +func TestApplyQueue(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -39,16 +39,16 @@ c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -58,12 +58,12 @@ }, }, } - c.Create(ctx, bus) + c.Create(ctx, queue) - // ApplyBus - result, err := ApplyBus(ctx, c, bus) + // ApplyQueue + result, err := ApplyQueue(ctx, c, queue) assert.NoError(t, err) assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, bus.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, bus.Status.Phase) + assert.NotEqual(t, enterpriseApi.PhaseError, queue.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, queue.Status.Phase) } diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 180659498..b7b691415 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,8 +63,8 @@ const ( // SplunkIngestor
may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" - // SplunkBus is the bus instance - SplunkBus InstanceType = "bus" + // SplunkQueue is the queue instance + SplunkQueue InstanceType = "queue" // SplunkLargeMessageStore is the large message store instance SplunkLargeMessageStore InstanceType = "large-message-store" @@ -297,8 +297,8 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() - case "Bus": - return SplunkBus.ToString() + case "Queue": + return SplunkQueue.ToString() case "LargeMessageStore": return SplunkLargeMessageStore.ToString() case "LicenseManager": diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go index 5d50e8cec..71fc017da 100644 --- a/pkg/splunk/enterprise/upgrade.go +++ b/pkg/splunk/enterprise/upgrade.go @@ -10,7 +10,6 @@ import ( appsv1 "k8s.io/api/apps/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - rclient "sigs.k8s.io/controller-runtime/pkg/client" runtime "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -161,8 +160,8 @@ IndexerCluster: } // check if cluster is multisite if clusterInfo.MultiSite == "true" { - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } indexerList, err := getIndexerClusterList(ctx, c, cr, opts) if err != nil { @@ -220,8 +219,8 @@ SearchHeadCluster: // check if a search head cluster exists with the same ClusterManager instance attached searchHeadClusterInstance := enterpriseApi.SearchHeadCluster{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } searchHeadList, err := getSearchHeadClusterList(ctx, c, cr, opts) if err != nil { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index e8f0736b3..01b304c12 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2291,19 +2291,19 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil - case "Bus": - latestBusCR := &enterpriseApi.Bus{} - err = client.Get(ctx, namespacedName, latestBusCR) + case "Queue": + latestQueueCR := &enterpriseApi.Queue{} + err = client.Get(ctx, namespacedName, latestQueueCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.Bus).Status.Message = "" + origCR.(*enterpriseApi.Queue).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.Bus).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.Queue).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.Bus).Status.DeepCopyInto(&latestBusCR.Status) - return latestBusCR, nil + origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status) + return latestQueueCR, nil case "LargeMessageStore": latestLmsCR := &enterpriseApi.LargeMessageStore{} @@ -2547,7 +2547,7 @@ func loadFixture(t *testing.T, filename string) string { if err != nil { t.Fatalf("Failed to load fixture %s: %v", filename, err) } - + // Compact the JSON to match the output from json.Marshal var compactJSON bytes.Buffer if err := json.Compact(&compactJSON, data); err != nil { diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go 
b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 711580d99..687473bc0 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -39,7 +39,7 @@ var ( testenvInstance *testenv.TestEnv testSuiteName = "indingsep-" + testenv.RandomDNSName(3) - bus = enterpriseApi.BusSpec{ + queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -85,7 +85,7 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateBus = enterpriseApi.BusSpec{ + updateQueue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue-updated", diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 1b3d27c70..a27269889 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -79,10 +79,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - b, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -91,7 +91,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -101,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -130,12 +130,12 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, ingest) Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) - // Delete the Bus - bus := &enterpriseApi.Bus{} - err = deployment.GetInstance(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to get Bus instance", "Bus Name", bus) - err = deployment.DeleteCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to delete Bus", "Bus Name", bus) + // Delete the Queue + queue := &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", queue) + Expect(err).To(Succeed(), 
"Unable to get Queue instance", "Queue Name", queue) + err = deployment.DeleteCR(ctx, queue) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) // Delete the LargeMessageStore lm = &enterpriseApi.LargeMessageStore{} @@ -152,10 +152,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - bc, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -205,7 +205,7 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - BusRef: v1.ObjectReference{Name: bc.Name}, + QueueRef: v1.ObjectReference{Name: q.Name}, LargeMessageStoreRef: v1.ObjectReference{Name: lm.Name}, Replicas: 3, AppFrameworkConfig: appFrameworkSpec, @@ -256,10 +256,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - bc, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -268,7 +268,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -278,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -301,7 +301,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") + Expect(ingest.Status.Queue).To(Equal(queue), "Ingestor queue status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -311,7 +311,7 @@ var _ = Describe("indingsep test", func() { // Verify 
Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") + Expect(index.Status.Queue).To(Equal(queue), "Indexer queue status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -363,10 +363,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - bc, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -375,7 +375,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -385,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -400,17 +400,17 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Bus CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Bus CR with latest config") - bus := &enterpriseApi.Bus{} - err = deployment.GetInstance(ctx, bc.Name, bus) - Expect(err).To(Succeed(), "Failed to get instance of Bus") + // Get instance of current Queue CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Queue CR with latest config") + queue := &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, q.Name, queue) + Expect(err).To(Succeed(), "Failed to get instance of Queue") - // Update instance of Bus CR with new bus - testcaseEnvInst.Log.Info("Update instance of Bus CR with new bus") - bus.Spec = updateBus - err = deployment.UpdateCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") + // Update instance of Queue CR with new queue + testcaseEnvInst.Log.Info("Update instance of Queue CR with new queue") + queue.Spec = updateQueue + err = deployment.UpdateCR(ctx, queue) + Expect(err).To(Succeed(), "Unable to deploy Queue with updated CR") // Ensure that Ingestor Cluster has not been restarted testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster 
has not been restarted") @@ -428,7 +428,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") + Expect(ingest.Status.Queue).To(Equal(updateQueue), "Ingestor queue status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -438,7 +438,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") + Expect(index.Status.Queue).To(Equal(updateQueue), "Indexer queue status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 3a7ba21d2..00d8f1e95 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, bus, lms, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, lms, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, bus, lms, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, lms, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,20 +460,20 @@ func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } -// DeployBus deploys the 
bus -func (d *Deployment) DeployBus(ctx context.Context, name string, bus enterpriseApi.BusSpec) (*enterpriseApi.Bus, error) { - d.testenv.Log.Info("Deploying bus", "name", name) +// DeployQueue deploys the queue +func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpriseApi.QueueSpec) (*enterpriseApi.Queue, error) { + d.testenv.Log.Info("Deploying queue", "name", name) - busCfg := newBus(name, d.testenv.namespace, bus) - pdata, _ := json.Marshal(busCfg) + queueCfg := newQueue(name, d.testenv.namespace, queue) + pdata, _ := json.Marshal(queueCfg) - d.testenv.Log.Info("bus spec", "cr", string(pdata)) - deployed, err := d.deployCR(ctx, name, busCfg) + d.testenv.Log.Info("queue spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, queueCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.Bus), err + return deployed.(*enterpriseApi.Queue), err } // DeployLargeMessageStore deploys the large message store @@ -648,13 +648,13 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IngestorCluster) current.Spec = ucr.Spec cobject = current - case "Bus": - current := &enterpriseApi.Bus{} + case "Queue": + current := &enterpriseApi.Queue{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.Bus) + ucr := cr.(*enterpriseApi.Queue) current.Spec = ucr.Spec cobject = current case "LargeMessageStore": diff --git a/test/testenv/util.go b/test/testenv/util.go index 28bd67a13..f71cc31f3 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -396,8 +396,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - BusRef: bus, + Replicas: int32(replicas), + QueueRef: queue, LargeMessageStoreRef: lms, }, } @@ -406,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -427,23 +427,23 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, bus, }, }, Replicas: int32(replicas), - BusRef: bus, + QueueRef: queue, LargeMessageStoreRef: lms, }, } } -// newBus creates and initializes the CR for Bus Kind -func 
newBus(name, ns string, bus enterpriseApi.BusSpec) *enterpriseApi.Bus { - return &enterpriseApi.Bus{ +// newQueue creates and initializes the CR for Queue Kind +func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: bus, + Spec: queue, } } From b6f5b0bda26fad02b530955bfb8e3dab40cb9380 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 09:35:30 +0100 Subject: [PATCH 6/7] CSPL-4358 Rename LargeMessageStore to ObjectStorage --- PROJECT | 2 +- api/v4/indexercluster_types.go | 10 +- api/v4/ingestorcluster_types.go | 8 +- ...messagestore.go => objectstorage_types.go} | 54 +++--- api/v4/queue_types.go | 10 +- api/v4/zz_generated.deepcopy.go | 170 +++++++++--------- cmd/main.go | 4 +- ...enterprise.splunk.com_indexerclusters.yaml | 113 ++++++------ ...nterprise.splunk.com_ingestorclusters.yaml | 98 +++++----- ...enterprise.splunk.com_objectstorages.yaml} | 22 +-- config/crd/kustomization.yaml | 2 +- ...le.yaml => objectstorage_editor_role.yaml} | 6 +- ...le.yaml => objectstorage_viewer_role.yaml} | 6 +- config/rbac/role.yaml | 6 +- ....yaml => enterprise_v4_objectstorage.yaml} | 4 +- config/samples/kustomization.yaml | 2 +- docs/CustomResources.md | 18 +- docs/IndexIngestionSeparation.md | 72 ++++---- .../enterprise_v4_indexercluster.yaml | 4 +- .../enterprise_v4_ingestorcluster.yaml | 10 +- .../enterprise_v4_largemessagestores.yaml | 28 --- .../enterprise_v4_objectstorages.yaml | 28 +++ helm-chart/splunk-enterprise/values.yaml | 4 +- .../splunk-operator/templates/rbac/role.yaml | 6 +- .../controller/indexercluster_controller.go | 8 +- .../controller/ingestorcluster_controller.go | 10 +- .../ingestorcluster_controller_test.go | 24 +-- ...troller.go => objectstorage_controller.go} | 38 ++-- ...st.go => objectstorage_controller_test.go} | 133 +++++++------- internal/controller/suite_test.go | 2 +- internal/controller/testutils/new.go | 6 +- .../01-assert.yaml | 8 +- .../02-assert.yaml | 2 +- .../splunk_index_ingest_sep.yaml | 12 +- pkg/splunk/enterprise/indexercluster.go | 82 ++++----- pkg/splunk/enterprise/indexercluster_test.go | 54 +++--- pkg/splunk/enterprise/ingestorcluster.go | 63 ++++--- pkg/splunk/enterprise/ingestorcluster_test.go | 64 +++---- ...{largemessagestore.go => objectstorage.go} | 6 +- ...agestore_test.go => objectstorage_test.go} | 20 +-- pkg/splunk/enterprise/types.go | 8 +- pkg/splunk/enterprise/util.go | 14 +- ...dex_and_ingestion_separation_suite_test.go | 2 +- .../index_and_ingestion_separation_test.go | 58 +++--- test/testenv/deployment.go | 30 ++-- test/testenv/util.go | 18 +- 46 files changed, 672 insertions(+), 677 deletions(-) rename api/v4/{largemessagestore.go => objectstorage_types.go} (66%) rename config/crd/bases/{enterprise.splunk.com_largemessagestores.yaml => enterprise.splunk.com_objectstorages.yaml} (86%) rename config/rbac/{largemessagestore_editor_role.yaml => objectstorage_editor_role.yaml} (87%) rename config/rbac/{largemessagestore_viewer_role.yaml => objectstorage_viewer_role.yaml} (87%) rename config/samples/{enterprise_v4_largemessagestore.yaml => enterprise_v4_objectstorage.yaml} (71%) delete mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml create mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml rename internal/controller/{largemessagestore_controller.go => 
objectstorage_controller.go} (68%) rename internal/controller/{largemessagestore_controller_test.go => objectstorage_controller_test.go} (51%) rename pkg/splunk/enterprise/{largemessagestore.go => objectstorage.go} (89%) rename pkg/splunk/enterprise/{largemessagestore_test.go => objectstorage_test.go} (82%) diff --git a/PROJECT b/PROJECT index c2f3680d3..e87979069 100644 --- a/PROJECT +++ b/PROJECT @@ -137,7 +137,7 @@ resources: controller: true domain: splunk.com group: enterprise - kind: LargeMessageStore + kind: ObjectStorage path: github.com/splunk/splunk-operator/api/v4 version: v4 version: "3" diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 5e76d3e57..e74f900a7 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,7 +34,7 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) -// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.largeMessageStoreRef)",message="queueRef and largeMessageStoreRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` @@ -44,8 +44,8 @@ type IndexerClusterSpec struct { QueueRef corev1.ObjectReference `json:"queueRef"` // +optional - // Large Message Store reference - LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -124,8 +124,8 @@ type IndexerClusterStatus struct { // Queue Queue *QueueSpec `json:"queue,omitempty"` - // Large Message Store - LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` + // Object Storage + ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index aa2281864..f2e061284 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -44,8 +44,8 @@ type IngestorClusterSpec struct { QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required - // Large Message Store reference - LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -77,8 +77,8 @@ type IngestorClusterStatus struct { // Queue Queue *QueueSpec `json:"queue,omitempty"` - // Large Message Store - LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` + // Object Storage + ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/largemessagestore.go b/api/v4/objectstorage_types.go similarity index 66% rename from api/v4/largemessagestore.go rename to api/v4/objectstorage_types.go index 26c986f2d..80fcd45cf 100644 --- a/api/v4/largemessagestore.go +++ b/api/v4/objectstorage_types.go @@ -23,14 +23,14 @@ import ( ) const ( - // LargeMessageStorePausedAnnotation is the annotation that pauses the reconciliation 
(triggers + // ObjectStoragePausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - LargeMessageStorePausedAnnotation = "largemessagestore.enterprise.splunk.com/paused" + ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" ) // +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" -// LargeMessageStoreSpec defines the desired state of LargeMessageStore -type LargeMessageStoreSpec struct { +// ObjectStorageSpec defines the desired state of ObjectStorage +type ObjectStorageSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=s3 // Provider of queue resources @@ -53,8 +53,8 @@ type S3Spec struct { Path string `json:"path"` } -// LargeMessageStoreStatus defines the observed state of LargeMessageStore. -type LargeMessageStoreStatus struct { +// ObjectStorageStatus defines the observed state of ObjectStorage. +type ObjectStorageStatus struct { // Phase of the large message store Phase Phase `json:"phase"` @@ -68,27 +68,27 @@ type LargeMessageStoreStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// LargeMessageStore is the Schema for a Splunk Enterprise large message store +// ObjectStorage is the Schema for a Splunk Enterprise object storage // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=largemessagestores,scope=Namespaced,shortName=lms -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of large message store" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of large message store resource" +// +kubebuilder:resource:path=objectstorages,scope=Namespaced,shortName=os +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of object storage" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of object storage resource" // +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// LargeMessageStore is the Schema for the largemessagestores API -type LargeMessageStore struct { +// ObjectStorage is the Schema for the objectstorages API +type ObjectStorage struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec LargeMessageStoreSpec `json:"spec"` - Status LargeMessageStoreStatus `json:"status,omitempty,omitzero"` + Spec ObjectStorageSpec `json:"spec"` + Status ObjectStorageStatus `json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *LargeMessageStore) DeepCopyObject() runtime.Object { +func (in *ObjectStorage) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -97,42 +97,42 @@ func (in *LargeMessageStore) DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// LargeMessageStoreList contains a list of LargeMessageStore -type LargeMessageStoreList struct { +// ObjectStorageList contains a list of ObjectStorage +type ObjectStorageList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []LargeMessageStore `json:"items"` + Items []ObjectStorage 
`json:"items"` } func init() { - SchemeBuilder.Register(&LargeMessageStore{}, &LargeMessageStoreList{}) + SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) } // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *LargeMessageStore) NewEvent(eventType, reason, message string) corev1.Event { +func (os *ObjectStorage) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: bc.ObjectMeta.Namespace, + Namespace: os.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "LargeMessageStore", - Namespace: bc.Namespace, - Name: bc.Name, - UID: bc.UID, + Kind: "ObjectStorage", + Namespace: os.Namespace, + Name: os.Name, + UID: os.UID, APIVersion: GroupVersion.String(), }, Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-large-message-store-controller", + Component: "splunk-object-storage-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/large-message-store-controller", + ReportingController: "enterprise.splunk.com/object-storage-controller", } } diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index a094b76ce..06703ac95 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -120,18 +120,18 @@ func init() { // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *Queue) NewEvent(eventType, reason, message string) corev1.Event { +func (os *Queue) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: bc.ObjectMeta.Namespace, + Namespace: os.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ Kind: "Queue", - Namespace: bc.Namespace, - Name: bc.Name, - UID: bc.UID, + Namespace: os.Namespace, + Name: os.Name, + UID: os.UID, APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 2fb0eebc8..dd9b2f347 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -512,7 +512,7 @@ func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) out.QueueRef = in.QueueRef - out.LargeMessageStoreRef = in.LargeMessageStoreRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -550,9 +550,9 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = new(QueueSpec) **out = **in } - if in.LargeMessageStore != nil { - in, out := &in.LargeMessageStore, &out.LargeMessageStore - *out = new(LargeMessageStoreSpec) + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageSpec) **out = **in } } @@ -624,7 +624,7 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) out.QueueRef = in.QueueRef - out.LargeMessageStoreRef = in.LargeMessageStoreRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. 
@@ -653,9 +653,9 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { *out = new(QueueSpec) **out = **in } - if in.LargeMessageStore != nil { - in, out := &in.LargeMessageStore, &out.LargeMessageStore - *out = new(LargeMessageStoreSpec) + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageSpec) **out = **in } } @@ -671,50 +671,58 @@ func (in *IngestorClusterStatus) DeepCopy() *IngestorClusterStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStore) DeepCopyInto(out *LargeMessageStore) { +func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStore. -func (in *LargeMessageStore) DeepCopy() *LargeMessageStore { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManager. +func (in *LicenseManager) DeepCopy() *LicenseManager { if in == nil { return nil } - out := new(LargeMessageStore) + out := new(LicenseManager) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LicenseManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStoreList) DeepCopyInto(out *LargeMessageStoreList) { +func (in *LicenseManagerList) DeepCopyInto(out *LicenseManagerList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]LargeMessageStore, len(*in)) + *out = make([]LicenseManager, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreList. -func (in *LargeMessageStoreList) DeepCopy() *LargeMessageStoreList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerList. +func (in *LicenseManagerList) DeepCopy() *LicenseManagerList { if in == nil { return nil } - out := new(LargeMessageStoreList) + out := new(LicenseManagerList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LargeMessageStoreList) DeepCopyObject() runtime.Object { +func (in *LicenseManagerList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -722,45 +730,40 @@ func (in *LargeMessageStoreList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStoreSpec) DeepCopyInto(out *LargeMessageStoreSpec) { +func (in *LicenseManagerSpec) DeepCopyInto(out *LicenseManagerSpec) { *out = *in - out.S3 = in.S3 + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreSpec. 
-func (in *LargeMessageStoreSpec) DeepCopy() *LargeMessageStoreSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerSpec. +func (in *LicenseManagerSpec) DeepCopy() *LicenseManagerSpec { if in == nil { return nil } - out := new(LargeMessageStoreSpec) + out := new(LicenseManagerSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStoreStatus) DeepCopyInto(out *LargeMessageStoreStatus) { +func (in *LicenseManagerStatus) DeepCopyInto(out *LicenseManagerStatus) { *out = *in - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } + in.AppContext.DeepCopyInto(&out.AppContext) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreStatus. -func (in *LargeMessageStoreStatus) DeepCopy() *LargeMessageStoreStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerStatus. +func (in *LicenseManagerStatus) DeepCopy() *LicenseManagerStatus { if in == nil { return nil } - out := new(LargeMessageStoreStatus) + out := new(LicenseManagerStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { +func (in *MonitoringConsole) DeepCopyInto(out *MonitoringConsole) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -768,18 +771,18 @@ func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManager. -func (in *LicenseManager) DeepCopy() *LicenseManager { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsole. +func (in *MonitoringConsole) DeepCopy() *MonitoringConsole { if in == nil { return nil } - out := new(LicenseManager) + out := new(MonitoringConsole) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LicenseManager) DeepCopyObject() runtime.Object { +func (in *MonitoringConsole) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -787,31 +790,31 @@ func (in *LicenseManager) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManagerList) DeepCopyInto(out *LicenseManagerList) { +func (in *MonitoringConsoleList) DeepCopyInto(out *MonitoringConsoleList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]LicenseManager, len(*in)) + *out = make([]MonitoringConsole, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerList. -func (in *LicenseManagerList) DeepCopy() *LicenseManagerList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleList. 
+func (in *MonitoringConsoleList) DeepCopy() *MonitoringConsoleList { if in == nil { return nil } - out := new(LicenseManagerList) + out := new(MonitoringConsoleList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LicenseManagerList) DeepCopyObject() runtime.Object { +func (in *MonitoringConsoleList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -819,91 +822,91 @@ func (in *LicenseManagerList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManagerSpec) DeepCopyInto(out *LicenseManagerSpec) { +func (in *MonitoringConsoleSpec) DeepCopyInto(out *MonitoringConsoleSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerSpec. -func (in *LicenseManagerSpec) DeepCopy() *LicenseManagerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleSpec. +func (in *MonitoringConsoleSpec) DeepCopy() *MonitoringConsoleSpec { if in == nil { return nil } - out := new(LicenseManagerSpec) + out := new(MonitoringConsoleSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManagerStatus) DeepCopyInto(out *LicenseManagerStatus) { +func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { *out = *in + out.BundlePushTracker = in.BundlePushTracker + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } in.AppContext.DeepCopyInto(&out.AppContext) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerStatus. -func (in *LicenseManagerStatus) DeepCopy() *LicenseManagerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleStatus. +func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus { if in == nil { return nil } - out := new(LicenseManagerStatus) + out := new(MonitoringConsoleStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsole) DeepCopyInto(out *MonitoringConsole) { +func (in *ObjectStorage) DeepCopyInto(out *ObjectStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsole. -func (in *MonitoringConsole) DeepCopy() *MonitoringConsole { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorage. +func (in *ObjectStorage) DeepCopy() *ObjectStorage { if in == nil { return nil } - out := new(MonitoringConsole) + out := new(ObjectStorage) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *MonitoringConsole) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsoleList) DeepCopyInto(out *MonitoringConsoleList) { +func (in *ObjectStorageList) DeepCopyInto(out *ObjectStorageList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]MonitoringConsole, len(*in)) + *out = make([]ObjectStorage, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleList. -func (in *MonitoringConsoleList) DeepCopy() *MonitoringConsoleList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageList. +func (in *ObjectStorageList) DeepCopy() *ObjectStorageList { if in == nil { return nil } - out := new(MonitoringConsoleList) + out := new(ObjectStorageList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MonitoringConsoleList) DeepCopyObject() runtime.Object { +func (in *ObjectStorageList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -911,26 +914,24 @@ func (in *MonitoringConsoleList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsoleSpec) DeepCopyInto(out *MonitoringConsoleSpec) { +func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) { *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + out.S3 = in.S3 } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleSpec. -func (in *MonitoringConsoleSpec) DeepCopy() *MonitoringConsoleSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec. +func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { if in == nil { return nil } - out := new(MonitoringConsoleSpec) + out := new(ObjectStorageSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { +func (in *ObjectStorageStatus) DeepCopyInto(out *ObjectStorageStatus) { *out = *in - out.BundlePushTracker = in.BundlePushTracker if in.ResourceRevMap != nil { in, out := &in.ResourceRevMap, &out.ResourceRevMap *out = make(map[string]string, len(*in)) @@ -938,15 +939,14 @@ func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { (*out)[key] = val } } - in.AppContext.DeepCopyInto(&out.AppContext) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleStatus. -func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageStatus. 
+func (in *ObjectStorageStatus) DeepCopy() *ObjectStorageStatus { if in == nil { return nil } - out := new(MonitoringConsoleStatus) + out := new(ObjectStorageStatus) in.DeepCopyInto(out) return out } diff --git a/cmd/main.go b/cmd/main.go index 72a3e38c7..dfb9c87e1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -237,11 +237,11 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Queue") os.Exit(1) } - if err := (&controller.LargeMessageStoreReconciler{ + if err := (&controller.ObjectStorageReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "LargeMessageStore") + setupLog.Error(err, "unable to create controller", "controller", "ObjectStorage") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 90c266230..a9fc2d811 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5437,49 +5437,6 @@ spec: type: object x-kubernetes-map-type: atomic type: array - largeMessageStoreRef: - description: Large Message Store reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -5647,6 +5604,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic queueRef: description: Queue reference properties: @@ -8329,9 +8329,8 @@ spec: type: array type: object x-kubernetes-validations: - - message: queueRef and largeMessageStoreRef must both be set or both - be empty - rule: has(self.queueRef) == has(self.largeMessageStoreRef) + - message: queueRef and objectStorageRef must both be set or both be empty + rule: has(self.queueRef) == has(self.objectStorageRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8375,8 +8374,17 @@ spec: initialized_flag: description: Indicates if the cluster is initialized. type: boolean - largeMessageStore: - description: Large Message Store + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean + message: + description: Auxillary message describing CR status + type: string + namespace_scoped_secret_resource_version: + description: Indicates resource version of namespace scoped secret + type: string + objectStorage: + description: Object Storage properties: provider: description: Provider of queue resources @@ -8404,15 +8412,6 @@ spec: x-kubernetes-validations: - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) - maintenance_mode: - description: Indicates if the cluster is in maintenance mode. - type: boolean - message: - description: Auxillary message describing CR status - type: string - namespace_scoped_secret_resource_version: - description: Indicates resource version of namespace scoped secret - type: string peers: description: status of each indexer cluster peer items: diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 37c820c4c..46a142719 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1413,49 +1413,6 @@ spec: type: object x-kubernetes-map-type: atomic type: array - largeMessageStoreRef: - description: Large Message Store reference - properties: - apiVersion: - description: API version of the referent. 
- type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -1623,6 +1580,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic queueRef: description: Queue reference properties: @@ -4303,7 +4303,7 @@ spec: type: object type: array required: - - largeMessageStoreRef + - objectStorageRef - queueRef type: object status: @@ -4591,8 +4591,11 @@ spec: description: App Framework version info for future use type: integer type: object - largeMessageStore: - description: Large Message Store + message: + description: Auxillary message describing CR status + type: string + objectStorage: + description: Object Storage properties: provider: description: Provider of queue resources @@ -4620,9 +4623,6 @@ spec: x-kubernetes-validations: - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) - message: - description: Auxillary message describing CR status - type: string phase: description: Phase of the ingestor pods enum: diff --git a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml similarity index 86% rename from config/crd/bases/enterprise.splunk.com_largemessagestores.yaml rename to config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 562cd773c..1456234c6 100644 --- a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: largemessagestores.enterprise.splunk.com + name: objectstorages.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: LargeMessageStore - listKind: LargeMessageStoreList - plural: largemessagestores + kind: ObjectStorage + listKind: ObjectStorageList + plural: objectstorages shortNames: - - lms - singular: largemessagestore + - os + singular: objectstorage scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of large message store + - description: Status of object storage jsonPath: .status.phase name: Phase type: string - - description: Age of large message store resource + - description: Age of object storage resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: LargeMessageStore is the Schema for the largemessagestores API + description: ObjectStorage is the Schema for the objectstorages API properties: apiVersion: description: |- @@ -52,7 +52,7 @@ spec: metadata: type: object spec: - description: LargeMessageStoreSpec defines the desired state of LargeMessageStore + description: ObjectStorageSpec defines the desired state of ObjectStorage properties: provider: description: Provider of queue resources @@ -81,7 +81,7 @@ spec: - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) status: - description: LargeMessageStoreStatus defines the observed state of LargeMessageStore. + description: ObjectStorageStatus defines the observed state of ObjectStorage. 
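For reference, a minimal manifest that satisfies the renamed CRD's schema, including the "s3 must be provided when provider is s3" rule. This is a sketch only; the endpoint and path values are borrowed from the samples elsewhere in this change:

```yaml
apiVersion: enterprise.splunk.com/v4
kind: ObjectStorage
metadata:
  name: os
spec:
  provider: s3
  s3:
    endpoint: https://s3.us-west-2.amazonaws.com
    path: s3://ingestion/smartbus-test
```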
properties: message: description: Auxillary message describing CR status diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index f80dfec5e..0304146cd 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -12,7 +12,7 @@ resources: - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml - bases/enterprise.splunk.com_queues.yaml -- bases/enterprise.splunk.com_largemessagestores.yaml +- bases/enterprise.splunk.com_objectstorages.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/largemessagestore_editor_role.yaml b/config/rbac/objectstorage_editor_role.yaml similarity index 87% rename from config/rbac/largemessagestore_editor_role.yaml rename to config/rbac/objectstorage_editor_role.yaml index 614d09ad2..70323227f 100644 --- a/config/rbac/largemessagestore_editor_role.yaml +++ b/config/rbac/objectstorage_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: largemessagestore-editor-role + name: objectstorage-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores + - objectstorages verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/status + - objectstorages/status verbs: - get diff --git a/config/rbac/largemessagestore_viewer_role.yaml b/config/rbac/objectstorage_viewer_role.yaml similarity index 87% rename from config/rbac/largemessagestore_viewer_role.yaml rename to config/rbac/objectstorage_viewer_role.yaml index 36cfde351..9764699bc 100644 --- a/config/rbac/largemessagestore_viewer_role.yaml +++ b/config/rbac/objectstorage_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: largemessagestore-viewer-role + name: objectstorage-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores + - objectstorages verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/status + - objectstorages/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 295e080c6..973105d16 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -51,10 +51,10 @@ rules: - clustermasters - indexerclusters - ingestorclusters - - largemessagestores - licensemanagers - licensemasters - monitoringconsoles + - objectstorages - queues - searchheadclusters - standalones @@ -73,10 +73,10 @@ rules: - clustermasters/finalizers - indexerclusters/finalizers - ingestorclusters/finalizers - - largemessagestores/finalizers - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - objectstorages/finalizers - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers @@ -89,10 +89,10 @@ rules: - clustermasters/status - indexerclusters/status - ingestorclusters/status - - largemessagestores/status - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - objectstorages/status - queues/status - searchheadclusters/status - standalones/status diff --git a/config/samples/enterprise_v4_largemessagestore.yaml b/config/samples/enterprise_v4_objectstorage.yaml similarity index 71% rename from config/samples/enterprise_v4_largemessagestore.yaml rename to config/samples/enterprise_v4_objectstorage.yaml index 508ba0b77..b693a14e0 100644 --- a/config/samples/enterprise_v4_largemessagestore.yaml +++ 
b/config/samples/enterprise_v4_objectstorage.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: largemessagestore-sample + name: objectstorage-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 4de2ec89d..34c05ab05 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -15,5 +15,5 @@ resources: - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml - enterprise_v4_queue.yaml -- enterprise_v4_largemessagestore.yaml +- enterprise_v4_objectstorage.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index f69a8fa50..157a9b123 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -22,7 +22,7 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) - - [LargeMessageStore Resource Spec Parameters](#largemessagestore-resource-spec-parameters) + - [ObjectStorage Resource Spec Parameters](#objectstorage-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -377,10 +377,10 @@ spec: replicas: 3 queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` -Note: `queueRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and LargeMessageStore resources. +Note: `queueRef` and `objectStorageRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and ObjectStorage resources. In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), @@ -390,13 +390,13 @@ the `IngestorCluster` resource provides the following `Spec` configuration param | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of ingestor peers (minimum of 3 which is the default) | -## LargeMessageStore Resource Spec Parameters +## ObjectStorage Resource Spec Parameters ```yaml apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: @@ -404,7 +404,7 @@ spec: endpoint: https://s3.us-west-2.amazonaws.com ``` -LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -536,7 +536,7 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. 
However, t | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | | ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | -| largemessagestore.enterprise.splunk.com | "largemessagestore.enterprise.splunk.com/paused" | +| objectstorage.enterprise.splunk.com | "objectstorage.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index 257e37400..bd5d97579 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -55,13 +55,13 @@ spec: dlq: sqs-dlq-test ``` -# LargeMessageStore +# ObjectStorage -LargeMessageStore is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. +ObjectStorage is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. ## Spec -LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -80,9 +80,9 @@ Change of any of the large message queue inputs triggers the restart of Splunk s ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: @@ -102,11 +102,11 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. 
In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -123,8 +123,8 @@ spec: image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` # IndexerCluster @@ -139,11 +139,11 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. @@ -172,8 +172,8 @@ spec: image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` # Common Spec @@ -182,11 +182,11 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -Queue, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. +Queue, ObjectStorage and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for Queue, LargeMessageStoe, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Queue, ObjectStorage, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
``` queue: @@ -201,9 +201,9 @@ queue: ``` ``` -largeMessageStore: +objectStorage: enabled: true - name: lms + name: os provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com @@ -218,8 +218,8 @@ ingestorCluster: serviceAccount: ingestor-sa queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` ``` @@ -238,8 +238,8 @@ indexerCluster: name: cm queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` # Service Account @@ -599,14 +599,14 @@ Status: Events: ``` -4. Install LargeMessageStore resource. +4. Install ObjectStorage resource. ``` -$ cat lms.yaml +$ cat os.yaml apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os finalizers: - enterprise.splunk.com/delete-pvc spec: @@ -617,23 +617,23 @@ spec: ``` ``` -$ kubectl apply -f lms.yaml +$ kubectl apply -f os.yaml ``` ``` -$ kubectl get lms +$ kubectl get os NAME PHASE AGE MESSAGE -lms Ready 20s +os Ready 20s ``` ``` -kubectl describe lms -Name: lms +kubectl describe os +Name: os Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: LargeMessageStore +Kind: ObjectStorage Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -669,8 +669,8 @@ spec: image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` ``` @@ -704,7 +704,7 @@ Spec: Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} Large Message Store Ref: - Name: lms + Name: os Namespace: default Replicas: 3 Service Account: ingestor-sa @@ -813,8 +813,8 @@ spec: serviceAccount: ingestor-sa queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` ``` diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 536be0cd2..833f162aa 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -169,8 +169,8 @@ items: {{- if .namespace }} namespace: {{ .namespace }} {{- end }} - {{- with $.Values.indexerCluster.largeMessageStoreRef }} - largeMessageStoreRef: + {{- with $.Values.indexerCluster.objectStorageRef }} + objectStorageRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index b9ec62107..e5ab1258c 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -102,11 +102,11 @@ spec: namespace: {{ $.Values.ingestorCluster.queueRef.namespace }} {{- end }} {{- end }} - {{- with $.Values.ingestorCluster.largeMessageStoreRef }} - largeMessageStoreRef: - name: {{ $.Values.ingestorCluster.largeMessageStoreRef.name }} - {{- if $.Values.ingestorCluster.largeMessageStoreRef.namespace }} - namespace: {{ $.Values.ingestorCluster.largeMessageStoreRef.namespace }} + {{- with $.Values.ingestorCluster.objectStorageRef }} + objectStorageRef: + name: {{ $.Values.ingestorCluster.objectStorageRef.name }} + {{- if $.Values.ingestorCluster.objectStorageRef.namespace }} + namespace: {{ $.Values.ingestorCluster.objectStorageRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} 
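With value blocks like the ones above in place, the chart can be installed or upgraded in the usual way. A sketch only, assuming the chart repository is registered as `splunk` and reusing the kuttl test values file added in this change; adjust the release name, namespace, and values file to your environment:

```
helm upgrade --install splunk-enterprise splunk/splunk-enterprise \
  -n splunk --create-namespace \
  -f kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml
```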
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml deleted file mode 100644 index 77ef09e69..000000000 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.largemessagestore }} -{{- if .Values.largemessagestore.enabled }} -apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore -metadata: - name: {{ .Values.largemessagestore.name }} - namespace: {{ default .Release.Namespace .Values.largemessagestore.namespaceOverride }} - {{- with .Values.largemessagestore.additionalLabels }} - labels: -{{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.largemessagestore.additionalAnnotations }} - annotations: -{{ toYaml . | nindent 4 }} - {{- end }} -spec: - provider: {{ .Values.largemessagestore.provider | quote }} - {{- with .Values.largemessagestore.s3 }} - s3: - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .path }} - path: {{ .path | quote }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml new file mode 100644 index 000000000..7cd5bdca0 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml @@ -0,0 +1,28 @@ +{{- if .Values.objectStorage.enabled }} +{{- if .Values.objectStorage.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: {{ .Values.objectStorage.name }} + namespace: {{ default .Release.Namespace .Values.objectStorage.namespaceOverride }} + {{- with .Values.objectStorage.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.objectStorage.additionalAnnotations }} + annotations: +{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.objectStorage.provider | quote }} + {{- with .Values.objectStorage.s3 }} + s3: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .path }} + path: {{ .path | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index ea4921b52..6643728fa 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -352,7 +352,7 @@ indexerCluster: queueRef: {} - largeMessageStoreRef: {} + objectStorageRef: {} searchHeadCluster: @@ -903,4 +903,4 @@ ingestorCluster: queueRef: {} - largeMessageStoreRef: {} \ No newline at end of file + objectStorageRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 26824528f..77be54727 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -277,7 +277,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores + - objectstorages verbs: - create - delete @@ -289,13 +289,13 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/finalizers + - objectstorages/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/status + - objectstorages/status verbs: - get - patch diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 2ed4d775e..7efb6e1b8 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -200,9 +200,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return reqs }), ). - Watches(&enterpriseApi.LargeMessageStore{}, + Watches(&enterpriseApi.ObjectStorage{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - lms, ok := obj.(*enterpriseApi.LargeMessageStore) + os, ok := obj.(*enterpriseApi.ObjectStorage) if !ok { return nil } @@ -212,11 +212,11 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.LargeMessageStoreRef.Namespace + ns := ic.Spec.ObjectStorageRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index a46a1dcff..0d8117bd2 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -169,23 +169,23 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return reqs }), ). 
- Watches(&enterpriseApi.LargeMessageStore{}, + Watches(&enterpriseApi.ObjectStorage{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - lms, ok := obj.(*enterpriseApi.LargeMessageStore) + os, ok := obj.(*enterpriseApi.ObjectStorage) if !ok { return nil } - var list enterpriseApi.IndexerClusterList + var list enterpriseApi.IngestorClusterList if err := r.Client.List(ctx, &list); err != nil { return nil } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.LargeMessageStoreRef.Namespace + ns := ic.Spec.ObjectStorageRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 4d140e1d6..d035d1037 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -86,12 +86,12 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - lms := &enterpriseApi.LargeMessageStore{ + os := &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -99,7 +99,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations @@ -134,12 +134,12 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - lms := &enterpriseApi.LargeMessageStore{ + os := &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -147,7 +147,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -220,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -244,9 +244,9 @@ func CreateIngestorCluster(name string, namespace string, 
annotations map[string Name: queue.Name, Namespace: queue.Namespace, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, - Namespace: lms.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } diff --git a/internal/controller/largemessagestore_controller.go b/internal/controller/objectstorage_controller.go similarity index 68% rename from internal/controller/largemessagestore_controller.go rename to internal/controller/objectstorage_controller.go index 69a4af131..4ae36b1a2 100644 --- a/internal/controller/largemessagestore_controller.go +++ b/internal/controller/objectstorage_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// LargeMessageStoreReconciler reconciles a LargeMessageStore object -type LargeMessageStoreReconciler struct { +// ObjectStorageReconciler reconciles a ObjectStorage object +type ObjectStorageReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the LargeMessageStore object against the actual cluster state, and then +// the ObjectStorage object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "LargeMessageStore")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "LargeMessageStore") +func (r *ObjectStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "ObjectStorage")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "ObjectStorage") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("largemessagestore", req.NamespacedName) + reqLogger = reqLogger.WithValues("objectstorage", req.NamespacedName) - // Fetch the LargeMessageStore - instance := &enterpriseApi.LargeMessageStore{} + // Fetch the ObjectStorage + instance := &enterpriseApi.ObjectStorage{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrap(err, "could not load largemessagestore data") + return ctrl.Result{}, errors.Wrap(err, "could not load objectstorage data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.LargeMessageStorePausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.ObjectStoragePausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyLargeMessageStore(ctx, r.Client, instance) + result, err := ApplyObjectStorage(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Re return result, err } -var ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { - return enterprise.ApplyLargeMessageStore(ctx, client, instance) +var ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return enterprise.ApplyObjectStorage(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *LargeMessageStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *ObjectStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.LargeMessageStore{}). + For(&enterpriseApi.ObjectStorage{}). WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/largemessagestore_controller_test.go b/internal/controller/objectstorage_controller_test.go similarity index 51% rename from internal/controller/largemessagestore_controller_test.go rename to internal/controller/objectstorage_controller_test.go index 5d85d4409..6d7dec87a 100644 --- a/internal/controller/largemessagestore_controller_test.go +++ b/internal/controller/objectstorage_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("LargeMessageStore Controller", func() { +var _ = Describe("ObjectStorage Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,53 +43,53 @@ var _ = Describe("LargeMessageStore Controller", func() { }) - Context("LargeMessageStore Management", func() { + Context("ObjectStorage Management", func() { - It("Get LargeMessageStore custom resource should fail", func() { - namespace := "ns-splunk-largemessagestore-1" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + It("Get ObjectStorage custom resource should fail", func() { + namespace := "ns-splunk-objectstorage-1" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetLargeMessageStore("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("largemessagestores.enterprise.splunk.com \"test\" not found")) + _, err := GetObjectStorage("test", 
nsSpecs.Name) + Expect(err.Error()).Should(Equal("objectstorages.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create LargeMessageStore custom resource with annotations should pause", func() { - namespace := "ns-splunk-largemessagestore-2" + It("Create ObjectStorage custom resource with annotations should pause", func() { + namespace := "ns-splunk-objectstorage-2" annotations := make(map[string]string) - annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - spec := enterpriseApi.LargeMessageStoreSpec{ + spec := enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", Path: "s3://ingestion/smartbus-test", }, } - CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - icSpec, _ := GetLargeMessageStore("test", nsSpecs.Name) + CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + osSpec, _ := GetObjectStorage("test", nsSpecs.Name) annotations = map[string]string{} - icSpec.Annotations = annotations - icSpec.Status.Phase = "Ready" - UpdateLargeMessageStore(icSpec, enterpriseApi.PhaseReady, spec) - DeleteLargeMessageStore("test", nsSpecs.Name) + osSpec.Annotations = annotations + osSpec.Status.Phase = "Ready" + UpdateObjectStorage(osSpec, enterpriseApi.PhaseReady, spec) + DeleteObjectStorage("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create LargeMessageStore custom resource should succeeded", func() { - namespace := "ns-splunk-largemessagestore-3" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + It("Create ObjectStorage custom resource should succeeded", func() { + namespace := "ns-splunk-objectstorage-3" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -97,21 +97,21 @@ var _ = Describe("LargeMessageStore Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - spec := enterpriseApi.LargeMessageStoreSpec{ + spec := enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", Path: "s3://ingestion/smartbus-test", }, } - CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteLargeMessageStore("test", nsSpecs.Name) + CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteObjectStorage("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { - namespace := "ns-splunk-largemessagestore-4" - 
ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + namespace := "ns-splunk-objectstorage-4" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -121,7 +121,7 @@ var _ = Describe("LargeMessageStore Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - instance := LargeMessageStoreReconciler{ + instance := ObjectStorageReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -134,32 +134,32 @@ var _ = Describe("LargeMessageStore Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - spec := enterpriseApi.LargeMessageStoreSpec{ + spec := enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", Path: "s3://ingestion/smartbus-test", }, } - lmsSpec := testutils.NewLargeMessageStore("test", namespace, spec) - Expect(c.Create(ctx, lmsSpec)).Should(Succeed()) + osSpec := testutils.NewObjectStorage("test", namespace, spec) + Expect(c.Create(ctx, osSpec)).Should(Succeed()) annotations := make(map[string]string) - annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" - lmsSpec.Annotations = annotations - Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" + osSpec.Annotations = annotations + Expect(c.Update(ctx, osSpec)).Should(Succeed()) _, err = instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) annotations = map[string]string{} - lmsSpec.Annotations = annotations - Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + osSpec.Annotations = annotations + Expect(c.Update(ctx, osSpec)).Should(Succeed()) _, err = instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - lmsSpec.DeletionTimestamp = &metav1.Time{} + osSpec.DeletionTimestamp = &metav1.Time{} _, err = instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) }) @@ -167,31 +167,30 @@ var _ = Describe("LargeMessageStore Controller", func() { }) }) -func GetLargeMessageStore(name string, namespace string) (*enterpriseApi.LargeMessageStore, error) { - By("Expecting LargeMessageStore custom resource to be retrieved successfully") +func GetObjectStorage(name string, namespace string) (*enterpriseApi.ObjectStorage, error) { + By("Expecting ObjectStorage custom resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - lms := &enterpriseApi.LargeMessageStore{} + os := &enterpriseApi.ObjectStorage{} - err := k8sClient.Get(context.Background(), key, lms) + err := k8sClient.Get(context.Background(), key, os) if err != nil { return nil, err } - return lms, err + return os, err } -func CreateLargeMessageStore(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - By("Expecting LargeMessageStore custom resource to be created successfully") - +func CreateObjectStorage(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + By("Expecting ObjectStorage custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: 
namespace, } - lmsSpec := &enterpriseApi.LargeMessageStore{ + osSpec := &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -200,64 +199,62 @@ func CreateLargeMessageStore(name string, namespace string, annotations map[stri Spec: spec, } - Expect(k8sClient.Create(context.Background(), lmsSpec)).Should(Succeed()) + Expect(k8sClient.Create(context.Background(), osSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - lms := &enterpriseApi.LargeMessageStore{} + os := &enterpriseApi.ObjectStorage{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, lms) + _ = k8sClient.Get(context.Background(), key, os) if status != "" { fmt.Printf("status is set to %v", status) - lms.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + os.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return lms + return os } -func UpdateLargeMessageStore(instance *enterpriseApi.LargeMessageStore, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - By("Expecting LargeMessageStore custom resource to be updated successfully") - +func UpdateObjectStorage(instance *enterpriseApi.ObjectStorage, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + By("Expecting ObjectStorage custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - lmsSpec := testutils.NewLargeMessageStore(instance.Name, instance.Namespace, spec) - lmsSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), lmsSpec)).Should(Succeed()) + osSpec := testutils.NewObjectStorage(instance.Name, instance.Namespace, spec) + osSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), osSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - lms := &enterpriseApi.LargeMessageStore{} + os := &enterpriseApi.ObjectStorage{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, lms) + _ = k8sClient.Get(context.Background(), key, os) if status != "" { fmt.Printf("status is set to %v", status) - lms.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + os.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return lms + return os } -func DeleteLargeMessageStore(name string, namespace string) { - By("Expecting LargeMessageStore custom resource to be deleted successfully") - +func DeleteObjectStorage(name string, namespace string) { + By("Expecting ObjectStorage custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } Eventually(func() error { - lms := &enterpriseApi.LargeMessageStore{} - _ = k8sClient.Get(context.Background(), key, lms) - err := k8sClient.Delete(context.Background(), lms) + os := &enterpriseApi.ObjectStorage{} + _ = k8sClient.Get(context.Background(), key, os) + err := k8sClient.Delete(context.Background(), os) return err }, timeout, interval).Should(Succeed()) } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index eda9f320d..8454d15b5 100644 --- a/internal/controller/suite_test.go +++ 
b/internal/controller/suite_test.go @@ -128,7 +128,7 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LargeMessageStoreReconciler{ + if err := (&ObjectStorageReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index b5b620337..aa47e8092 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -69,9 +69,9 @@ func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queu } } -// NewLargeMessageStore returns new LargeMessageStore instance with its config hash -func NewLargeMessageStore(name, ns string, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - return &enterpriseApi.LargeMessageStore{ +// NewObjectStorage returns new ObjectStorage instance with its config hash +func NewObjectStorage(name, ns string, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: spec, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 2b0596fdd..41f4ea2aa 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -17,9 +17,9 @@ status: --- # assert for large message store custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: @@ -72,7 +72,7 @@ status: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test - largeMessageStore: + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com @@ -113,7 +113,7 @@ status: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test - largeMessageStore: + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 57e6c4c68..00ff26a56 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -17,7 +17,7 @@ status: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test - largeMessageStore: + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 1e8af1663..d05cb5bcf 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -15,9 +15,9 @@ queue: endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test -largeMessageStore: +objectStorage: enabled: true - name: lms + name: os provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com @@ -29,8 +29,8 @@ ingestorCluster: replicaCount: 3 queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os clusterManager: enabled: true @@ -45,5 +45,5 @@ indexerCluster: name: cm queueRef: name: queue - largeMessageStoreRef: - name: lms + 
objectStorageRef: + name: os diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 5e468196c..f6bcd046d 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -269,26 +269,26 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } // Large Message Store - lms := enterpriseApi.LargeMessageStore{} - if cr.Spec.LargeMessageStoreRef.Name != "" { + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.LargeMessageStoreRef.Namespace != "" { - ns = cr.Spec.LargeMessageStoreRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.LargeMessageStoreRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &lms) + }, &os) if err != nil { return result, err } } // Can not override original large message store spec due to comparison in the later code - lmsCopy := lms - if lmsCopy.Spec.Provider == "s3" { - if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -297,7 +297,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -592,14 +592,14 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } // Large Message Store - lms := enterpriseApi.LargeMessageStore{} - if cr.Spec.LargeMessageStoreRef.Name != "" { + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.LargeMessageStoreRef.Namespace != "" { - ns = cr.Spec.LargeMessageStoreRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.LargeMessageStoreRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, }, &queue) if err != nil { @@ -608,10 +608,10 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } // Can not override original queue spec due to comparison in the later code - lmsCopy := lms - if lmsCopy.Spec.Provider == "s3" { - if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -620,7 +620,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr 
:= newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -1297,7 +1297,7 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForQueuePipeline = splclient.NewSplunkClient // Checks if only PullQueue or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s rclient.Client) error { +func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -1327,7 +1327,7 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, afterDelete = true } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, afterDelete) for _, pbVal := range queueChangedFieldsOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -1353,22 +1353,22 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } // getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods -func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { // Compare queue fields - oldPB := queueIndexerStatus.Status.Queue - if oldPB == nil { - oldPB = &enterpriseApi.QueueSpec{} + oldQueue := queueIndexerStatus.Status.Queue + if oldQueue == nil { + oldQueue = &enterpriseApi.QueueSpec{} } - newPB := queue.Spec + newQueue := queue.Spec - oldLMS := queueIndexerStatus.Status.LargeMessageStore - if oldLMS == nil { - oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + oldOS := queueIndexerStatus.Status.ObjectStorage + if oldOS == nil { + oldOS = &enterpriseApi.ObjectStorageSpec{} } - newLMS := lms.Spec + newOS := os.Spec // Push all queue fields - queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldQueue, &newQueue, oldOS, &newOS, afterDelete) // Always set all 
pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1386,14 +1386,14 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { +func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (inputs, outputs [][]string) { queueProvider := "" if newQueue.Provider == "sqs" { queueProvider = "sqs_smartbus" } - lmsProvider := "" - if newLMS.Provider == "s3" { - lmsProvider = "sqs_smartbus" + osProvider := "" + if newOS.Provider == "s3" { + osProvider = "sqs_smartbus" } if oldQueue.Provider != newQueue.Provider || afterDelete { @@ -1405,11 +1405,11 @@ func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLM if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) + if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } - if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) + if oldOS.S3.Path != newOS.S3.Path || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) } if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 4c166c8e0..c2b3a8063 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2067,15 +2067,15 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -2089,20 +2089,20 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, false) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, false) assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, 
{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2113,8 +2113,8 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2156,16 +2156,16 @@ func TestHandlePullQueueChange(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: "test", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -2186,15 +2186,15 @@ func TestHandlePullQueueChange(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, - Namespace: lms.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, Queue: &enterpriseApi.QueueSpec{}, - LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -2252,7 +2252,7 @@ func TestHandlePullQueueChange(t *testing.T) { c := spltest.NewMockClient() ctx := context.TODO() c.Create(ctx, &queue) - c.Create(ctx, &lms) + c.Create(ctx, &os) c.Create(ctx, newCR) c.Create(ctx, pod0) c.Create(ctx, pod1) @@ -2260,7 +2260,7 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err := mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // Mock secret @@ -2271,15 +2271,15 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, 
queue, os, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2295,7 +2295,7 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // inputs.conf @@ -2305,7 +2305,7 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: failure in updating remote queue stanza mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // default-mode.conf @@ -2333,7 +2333,7 @@ func TestHandlePullQueueChange(t *testing.T) { mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 299aa8d0c..17cd14a44 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -235,26 +235,26 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } // Large Message Store - lms := enterpriseApi.LargeMessageStore{} - if cr.Spec.LargeMessageStoreRef.Name != "" { + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.LargeMessageStoreRef.Namespace != "" { - ns = cr.Spec.LargeMessageStoreRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.LargeMessageStoreRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &lms) + }, &os) if err != nil { return result, err } } // Can not override original queue spec due to comparison in the later code - lmsCopy := lms - if lmsCopy.Spec.Provider == "s3" { - if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) } } @@ -262,7 +262,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushQueueChange(ctx, cr, queueCopy, lmsCopy, client) + err = 
mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -343,7 +343,7 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie } // Checks if only Queue or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { +func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -370,7 +370,7 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, afterDelete = true } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, afterDelete) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, afterDelete) for _, pbVal := range queueChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -390,21 +390,20 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } // getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods -func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { - oldPB := queueIngestorStatus.Status.Queue - if oldPB == nil { - oldPB = &enterpriseApi.QueueSpec{} +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { + oldQueue := queueIngestorStatus.Status.Queue + if oldQueue == nil { + oldQueue = &enterpriseApi.QueueSpec{} } - newPB := &queue.Spec + newQueue := &queue.Spec - oldLMS := queueIngestorStatus.Status.LargeMessageStore - if oldLMS == nil { - oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + oldOS := queueIngestorStatus.Status.ObjectStorage + if oldOS == nil { + oldOS = &enterpriseApi.ObjectStorageSpec{} } - newLMS := &lms.Spec - + newOS := &os.Spec // Push changed queue fields - queueChangedFields = pushQueueChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) + queueChangedFields = pushQueueChanged(oldQueue, newQueue, oldOS, newOS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -443,14 +442,14 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { +func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (output 
[][]string) { queueProvider := "" if newQueue.Provider == "sqs" { queueProvider = "sqs_smartbus" } - lmsProvider := "" - if newLMS.Provider == "s3" { - lmsProvider = "sqs_smartbus" + osProvider := "" + if newOS.Provider == "s3" { + osProvider = "sqs_smartbus" } if oldQueue.Provider != newQueue.Provider || afterDelete { @@ -462,11 +461,11 @@ func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLM if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) + if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } - if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) + if oldOS.S3.Path != newOS.S3.Path || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) } if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 424806846..7bf69ac84 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -86,16 +86,16 @@ func TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, queue) - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: "test", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -103,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &lms) + c.Create(ctx, &os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -123,9 +123,9 @@ func TestApplyIngestorCluster(t *testing.T) { Name: queue.Name, Namespace: queue.Namespace, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, - Namespace: lms.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } @@ -287,8 +287,8 @@ func TestApplyIngestorCluster(t *testing.T) { {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, 
{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -438,15 +438,15 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -460,22 +460,22 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{}, } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, false) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, false) assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, @@ -517,15 +517,15 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -546,15 +546,15 @@ func TestHandlePushQueueChange(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{ Replicas: 3, ReadyReplicas: 3, Queue: &enterpriseApi.QueueSpec{}, - LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -618,7 +618,7 @@ func TestHandlePushQueueChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) + err := mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // Mock secret @@ -629,7 +629,7 @@ func TestHandlePushQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = 
newTestPushQueuePipelineManager(mockHTTPClient)
-	err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c)
+	err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c)
 	assert.NotNil(t, err)
 
 	// outputs.conf
@@ -637,8 +637,8 @@ func TestHandlePushQueueChange(t *testing.T) {
 		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
 		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region},
 		{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint},
-		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint},
-		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint},
+		{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path},
 		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
 		{fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"},
 		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
@@ -651,7 +651,7 @@ func TestHandlePushQueueChange(t *testing.T) {
 
 	// Negative test case: failure in creating remote queue stanza
 	mgr = newTestPushQueuePipelineManager(mockHTTPClient)
-	err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c)
+	err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c)
 	assert.NotNil(t, err)
 
 	// default-mode.conf
@@ -680,7 +680,7 @@ func TestHandlePushQueueChange(t *testing.T) {
 
 	mgr = newTestPushQueuePipelineManager(mockHTTPClient)
 
-	err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c)
+	err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c)
 	assert.Nil(t, err)
 }
diff --git a/pkg/splunk/enterprise/largemessagestore.go b/pkg/splunk/enterprise/objectstorage.go
similarity index 89%
rename from pkg/splunk/enterprise/largemessagestore.go
rename to pkg/splunk/enterprise/objectstorage.go
index 8e6ff93f5..4db3dcaee 100644
--- a/pkg/splunk/enterprise/largemessagestore.go
+++ b/pkg/splunk/enterprise/objectstorage.go
@@ -27,8 +27,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
-// ApplyLargeMessageStore reconciles the state of an IngestorCluster custom resource
-func ApplyLargeMessageStore(ctx context.Context, client client.Client, cr *enterpriseApi.LargeMessageStore) (reconcile.Result, error) {
+// ApplyObjectStorage reconciles the state of an ObjectStorage custom resource
+func ApplyObjectStorage(ctx context.Context, client client.Client, cr *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
 	var err error
 
 	// Unless modified, reconcile for this object will be requeued after 5 seconds
@@ -44,7 +44,7 @@ func ApplyLargeMessageStore(ctx context.Context, client client.Client, cr *enter
 	eventPublisher, _ := newK8EventPublisher(client, cr)
 	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
 
-	cr.Kind = "LargeMessageStore"
+	cr.Kind = "ObjectStorage"
 
 	// Initialize phase
 	cr.Status.Phase = enterpriseApi.PhaseError
diff --git a/pkg/splunk/enterprise/largemessagestore_test.go b/pkg/splunk/enterprise/objectstorage_test.go
similarity index 82%
rename from pkg/splunk/enterprise/largemessagestore_test.go
rename to pkg/splunk/enterprise/objectstorage_test.go
index 0f627383c..a3511af69 100644
--- a/pkg/splunk/enterprise/largemessagestore_test.go
+++ b/pkg/splunk/enterprise/objectstorage_test.go
@@ -43,7 +43,7 @@ func init() {
 	}
 }
 
-func TestApplyLargeMessageStore(t *testing.T) {
+func TestApplyObjectStorage(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -55,16 +55,16 @@ func TestApplyLargeMessageStore(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - lms := &enterpriseApi.LargeMessageStore{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: "test", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -72,12 +72,12 @@ func TestApplyLargeMessageStore(t *testing.T) { }, }, } - c.Create(ctx, lms) + c.Create(ctx, os) - // ApplyLargeMessageStore - result, err := ApplyLargeMessageStore(ctx, c, lms) + // ApplyObjectStorage + result, err := ApplyObjectStorage(ctx, c, os) assert.NoError(t, err) assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, lms.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, lms.Status.Phase) + assert.NotEqual(t, enterpriseApi.PhaseError, os.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, os.Status.Phase) } diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index b7b691415..fe96430e4 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -66,8 +66,8 @@ const ( // SplunkQueue is the queue instance SplunkQueue InstanceType = "queue" - // SplunkLargeMessageStore is the large message store instance - SplunkLargeMessageStore InstanceType = "large-message-store" + // SplunkObjectStorage is the large message store instance + SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -299,8 +299,8 @@ func KindToInstanceString(kind string) string { return SplunkIngestor.ToString() case "Queue": return SplunkQueue.ToString() - case "LargeMessageStore": - return SplunkLargeMessageStore.ToString() + case "ObjectStorage": + return SplunkObjectStorage.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 01b304c12..afafa6ede 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2305,19 +2305,19 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status) return latestQueueCR, nil - case "LargeMessageStore": - latestLmsCR := &enterpriseApi.LargeMessageStore{} - err = client.Get(ctx, namespacedName, latestLmsCR) + case "ObjectStorage": + latestOsCR := &enterpriseApi.ObjectStorage{} + err = client.Get(ctx, namespacedName, latestOsCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.LargeMessageStore).Status.Message = "" + origCR.(*enterpriseApi.ObjectStorage).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.LargeMessageStore).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.ObjectStorage).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.LargeMessageStore).Status.DeepCopyInto(&latestLmsCR.Status) - return latestLmsCR, nil + origCR.(*enterpriseApi.ObjectStorage).Status.DeepCopyInto(&latestOsCR.Status) + return latestOsCR, nil case 
"LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 687473bc0..e2e27d268 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -48,7 +48,7 @@ var ( DLQ: "test-dead-letter-queue", }, } - lms = enterpriseApi.LargeMessageStoreSpec{ + objectStorage = enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index a27269889..41beae4bc 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -84,14 +84,14 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -101,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -137,12 +137,12 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, queue) Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) - // Delete the LargeMessageStore - lm = &enterpriseApi.LargeMessageStore{} - err = deployment.GetInstance(ctx, "lms", lm) - Expect(err).To(Succeed(), "Unable to get LargeMessageStore instance", "LargeMessageStore Name", lm) - err = deployment.DeleteCR(ctx, lm) - Expect(err).To(Succeed(), "Unable to delete LargeMessageStore", "LargeMessageStore Name", lm) + // Delete the ObjectStorage + objStorage = &enterpriseApi.ObjectStorage{} + err = deployment.GetInstance(ctx, "os", objStorage) + Expect(err).To(Succeed(), "Unable to get 
ObjectStorage instance", "ObjectStorage Name", objStorage) + err = deployment.DeleteCR(ctx, objStorage) + Expect(err).To(Succeed(), "Unable to delete ObjectStorage", "ObjectStorage Name", objStorage) }) }) @@ -157,10 +157,10 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Upload apps to S3 testcaseEnvInst.Log.Info("Upload apps to S3") @@ -206,7 +206,7 @@ var _ = Describe("indingsep test", func() { }, }, QueueRef: v1.ObjectReference{Name: q.Name}, - LargeMessageStoreRef: v1.ObjectReference{Name: lm.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, Replicas: 3, AppFrameworkConfig: appFrameworkSpec, }, @@ -261,14 +261,14 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -278,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -368,14 +368,14 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy 
ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -385,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 00d8f1e95..781e5b6f0 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, lms, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, lms, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", 
string(pdata)) @@ -476,20 +476,20 @@ func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpr return deployed.(*enterpriseApi.Queue), err } -// DeployLargeMessageStore deploys the large message store -func (d *Deployment) DeployLargeMessageStore(ctx context.Context, name string, lms enterpriseApi.LargeMessageStoreSpec) (*enterpriseApi.LargeMessageStore, error) { - d.testenv.Log.Info("Deploying large message store", "name", name) +// DeployObjectStorage deploys the object storage +func (d *Deployment) DeployObjectStorage(ctx context.Context, name string, objStorage enterpriseApi.ObjectStorageSpec) (*enterpriseApi.ObjectStorage, error) { + d.testenv.Log.Info("Deploying object storage", "name", name) - lmsCfg := newLargeMessageStore(name, d.testenv.namespace, lms) - pdata, _ := json.Marshal(lmsCfg) + objStorageCfg := newObjectStorage(name, d.testenv.namespace, objStorage) + pdata, _ := json.Marshal(objStorageCfg) - d.testenv.Log.Info("large message store spec", "cr", string(pdata)) - deployed, err := d.deployCR(ctx, name, lmsCfg) + d.testenv.Log.Info("object storage spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, objStorageCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.LargeMessageStore), err + return deployed.(*enterpriseApi.ObjectStorage), err } // DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration @@ -657,13 +657,13 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.Queue) current.Spec = ucr.Spec cobject = current - case "LargeMessageStore": - current := &enterpriseApi.LargeMessageStore{} + case "ObjectStorage": + current := &enterpriseApi.ObjectStorage{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.LargeMessageStore) + ucr := cr.(*enterpriseApi.ObjectStorage) current.Spec = ucr.Spec cobject = current case "ClusterMaster": diff --git a/test/testenv/util.go b/test/testenv/util.go index f71cc31f3..d9c6d5807 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -398,7 +398,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Replicas: int32(replicas), QueueRef: queue, - LargeMessageStoreRef: lms, + ObjectStorageRef: os, }, } @@ -406,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, 
replicas int, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -428,7 +428,7 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue }, Replicas: int32(replicas), QueueRef: queue, - LargeMessageStoreRef: lms, + ObjectStorageRef: os, }, } } @@ -447,17 +447,17 @@ func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Que } } -// newLargeMessageStore creates and initializes the CR for LargeMessageStore Kind -func newLargeMessageStore(name, ns string, lms enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - return &enterpriseApi.LargeMessageStore{ +// newObjectStorage creates and initializes the CR for ObjectStorage Kind +func newObjectStorage(name, ns string, objStorage enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: lms, + Spec: objStorage, } } From 607632f2c62ea57d9f2e682e4a10c06151135a40 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 10:02:24 +0100 Subject: [PATCH 7/7] CSPL-4358 Making region authRegion and optional, simplifying endpoint --- api/v4/objectstorage_types.go | 2 +- api/v4/queue_types.go | 8 ++-- ...enterprise.splunk.com_indexerclusters.yaml | 13 +++--- ...nterprise.splunk.com_ingestorclusters.yaml | 13 +++--- .../enterprise.splunk.com_objectstorages.yaml | 2 +- .../bases/enterprise.splunk.com_queues.yaml | 11 +++-- .../ingestorcluster_controller_test.go | 16 +++---- internal/controller/queue_controller_test.go | 24 +++++----- pkg/splunk/enterprise/indexercluster.go | 26 +++++------ pkg/splunk/enterprise/indexercluster_test.go | 42 +++++++++--------- pkg/splunk/enterprise/ingestorcluster.go | 16 +++---- pkg/splunk/enterprise/ingestorcluster_test.go | 44 +++++++++---------- pkg/splunk/enterprise/queue_test.go | 8 ++-- ...dex_and_ingestion_separation_suite_test.go | 16 +++---- 14 files changed, 119 insertions(+), 122 deletions(-) diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 80fcd45cf..9e95392ce 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -43,7 +43,7 @@ type ObjectStorageSpec struct { type S3Spec struct { // +optional - // +kubebuilder:validation:Pattern=`^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` // S3-compatible Service endpoint Endpoint string `json:"endpoint"` diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index 06703ac95..9828f7301 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -47,10 +47,10 @@ type SQSSpec struct { // Name of the queue Name string `json:"name"` - // +kubebuilder:validation:Required + // +optional // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` - // Region of the resources - Region string `json:"region"` + // Auth Region of the resources + AuthRegion string `json:"authRegion"` // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 @@ -58,7 +58,7 @@ type SQSSpec struct { DLQ string `json:"dlq"` // +optional - // 
+kubebuilder:validation:Pattern=`^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` } diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index a9fc2d811..59faab055 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8396,7 +8396,7 @@ spec: properties: endpoint: description: S3-compatible Service endpoint - pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string path: description: S3 bucket path @@ -8464,26 +8464,25 @@ spec: sqs: description: sqs specific inputs properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string dlq: description: Name of the dead letter queue resource minLength: 1 type: string endpoint: description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string name: description: Name of the queue minLength: 1 type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string required: - dlq - name - - region type: object required: - provider diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 46a142719..7432e96b4 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4607,7 +4607,7 @@ spec: properties: endpoint: description: S3-compatible Service endpoint - pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string path: description: S3 bucket path @@ -4645,26 +4645,25 @@ spec: sqs: description: sqs specific inputs properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string dlq: description: Name of the dead letter queue resource minLength: 1 type: string endpoint: description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string name: description: Name of the queue minLength: 1 type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string required: - dlq - name - - region type: object required: - provider diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 1456234c6..2fac45707 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -64,7 +64,7 @@ spec: properties: endpoint: description: S3-compatible Service endpoint - pattern: 
^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string path: description: S3 bucket path diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index 928cd34ce..2ba8d03f5 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -62,26 +62,25 @@ spec: sqs: description: sqs specific inputs properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string dlq: description: Name of the dead letter queue resource minLength: 1 type: string endpoint: description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string name: description: Name of the queue minLength: 1 type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string required: - dlq - name - - region type: object required: - provider diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index d035d1037..38e7cbb4e 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -79,10 +79,10 @@ var _ = Describe("IngestorCluster Controller", func() { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, }, } @@ -127,10 +127,10 @@ var _ = Describe("IngestorCluster Controller", func() { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, }, } diff --git a/internal/controller/queue_controller_test.go b/internal/controller/queue_controller_test.go index 23d40ae4c..b04a5d4b3 100644 --- a/internal/controller/queue_controller_test.go +++ b/internal/controller/queue_controller_test.go @@ -73,10 +73,10 @@ var _ = Describe("Queue Controller", func() { spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) @@ -102,10 +102,10 @@ var _ = Describe("Queue Controller", func() { spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } CreateQueue("test", nsSpecs.Name, annotations, 
enterpriseApi.PhaseReady, spec) @@ -141,10 +141,10 @@ var _ = Describe("Queue Controller", func() { spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } bcSpec := testutils.NewQueue("test", namespace, spec) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index f6bcd046d..60b4d5a9a 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -263,8 +263,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // Can not override original queue spec due to comparison in the later code queueCopy := queue if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -287,8 +287,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // Can not override original large message store spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -586,8 +586,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // Can not override original queue spec due to comparison in the later code queueCopy := queue if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -610,8 +610,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // Can not override original queue spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -1391,7 +1391,7 @@ func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS if newQueue.Provider == "sqs" { queueProvider = "sqs_smartbus" } - osProvider := "" + osProvider := "" if newOS.Provider == "s3" { osProvider = "sqs_smartbus" } @@ -1399,13 +1399,13 @@ func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS if oldQueue.Provider != newQueue.Provider || afterDelete { inputs = append(inputs, []string{"remote_queue.type", queueProvider}) } - if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { - inputs = append(inputs, 
[]string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) + if newQueue.SQS.AuthRegion != "" &&(oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) } - if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } if oldOS.S3.Path != newOS.S3.Path || afterDelete { diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index c2b3a8063..a74ab4acd 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1355,10 +1355,10 @@ func TestGetIndexerStatefulSet(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -2059,10 +2059,10 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -2099,7 +2099,7 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -2111,7 +2111,7 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { assert.Equal(t, 10, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -2148,10 +2148,10 @@ func TestHandlePullQueueChange(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: 
"https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -2192,8 +2192,8 @@ func TestHandlePullQueueChange(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, + ReadyReplicas: 3, + Queue: &enterpriseApi.QueueSpec{}, ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -2276,7 +2276,7 @@ func TestHandlePullQueueChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -2407,10 +2407,10 @@ func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 17cd14a44..0fc94487b 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -229,8 +229,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Can not override original queue spec due to comparison in the later code queueCopy := queue if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -253,8 +253,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Can not override original queue spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -455,13 +455,13 @@ func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS if oldQueue.Provider != newQueue.Provider || afterDelete { output = append(output, []string{"remote_queue.type", queueProvider}) } - if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) + if newQueue.SQS.AuthRegion != "" && (oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) } - if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), 
newQueue.SQS.Endpoint}) } - if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } if oldOS.S3.Path != newOS.S3.Path || afterDelete { diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 7bf69ac84..fac91bbbe 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -77,10 +77,10 @@ func TestApplyIngestorCluster(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -285,7 +285,7 @@ func TestApplyIngestorCluster(t *testing.T) { propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -344,10 +344,10 @@ func TestGetIngestorStatefulSet(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -430,10 +430,10 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -472,7 +472,7 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -509,10 +509,10 @@ func TestHandlePushQueueChange(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -551,9 +551,9 @@ func TestHandlePushQueueChange(t *testing.T) { }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, - Queue: 
&enterpriseApi.QueueSpec{}, + Replicas: 3, + ReadyReplicas: 3, + Queue: &enterpriseApi.QueueSpec{}, ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -635,7 +635,7 @@ func TestHandlePushQueueChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, diff --git a/pkg/splunk/enterprise/queue_test.go b/pkg/splunk/enterprise/queue_test.go index 45a813282..767d33e83 100644 --- a/pkg/splunk/enterprise/queue_test.go +++ b/pkg/splunk/enterprise/queue_test.go @@ -51,10 +51,10 @@ func TestApplyQueue(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index e2e27d268..86231df14 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -42,10 +42,10 @@ var ( queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue", }, } objectStorage = enterpriseApi.ObjectStorageSpec{ @@ -88,10 +88,10 @@ var ( updateQueue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue-updated", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue-updated", + Name: "test-queue-updated", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue-updated", }, }