diff --git a/PROJECT b/PROJECT index 983f3418b..e87979069 100644 --- a/PROJECT +++ b/PROJECT @@ -128,7 +128,16 @@ resources: controller: true domain: splunk.com group: enterprise - kind: BusConfiguration + kind: Queue + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: ObjectStorage path: github.com/splunk/splunk-operator/api/v4 version: v4 version: "3" diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 493aeb0f3..e74f900a7 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,12 +34,18 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef,omitempty"` + // +optional + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` + + // +optional + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -115,8 +121,11 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Queue + Queue *QueueSpec `json:"queue,omitempty"` + + // Object Storage + ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 364625e97..f2e061284 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -39,8 +39,13 @@ type IngestorClusterSpec struct { // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef"` + // +kubebuilder:validation:Required + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` + + // +kubebuilder:validation:Required + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -69,8 +74,11 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Queue + Queue *QueueSpec `json:"queue,omitempty"` + + // Object Storage + ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go new file mode 100644 index 000000000..9e95392ce --- /dev/null +++ b/api/v4/objectstorage_types.go @@ -0,0 +1,138 @@ +/* +Copyright 2025. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // ObjectStoragePausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" +) + +// +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" +// ObjectStorageSpec defines the desired state of ObjectStorage +type ObjectStorageSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=s3 + // Provider of queue resources + Provider string `json:"provider"` + + // +kubebuilder:validation:Required + // s3 specific inputs + S3 S3Spec `json:"s3"` +} + +type S3Spec struct { + // +optional + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` + // S3-compatible Service endpoint + Endpoint string `json:"endpoint"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$` + // S3 bucket path + Path string `json:"path"` +} + +// ObjectStorageStatus defines the observed state of ObjectStorage. 
+type ObjectStorageStatus struct { + // Phase of the large message store + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxillary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ObjectStorage is the Schema for a Splunk Enterprise object storage +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=objectstorages,scope=Namespaced,shortName=os +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of object storage" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of object storage resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" +// +kubebuilder:storageversion + +// ObjectStorage is the Schema for the objectstorages API +type ObjectStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec ObjectStorageSpec `json:"spec"` + Status ObjectStorageStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *ObjectStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// ObjectStorageList contains a list of ObjectStorage +type ObjectStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ObjectStorage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) +} + +// NewEvent creates a new event associated with the object and ready +// to be published to Kubernetes API +func (os *ObjectStorage) NewEvent(eventType, reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: os.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "ObjectStorage", + Namespace: os.Namespace, + Name: os.Name, + UID: os.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "splunk-object-storage-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + ReportingController: "enterprise.splunk.com/object-storage-controller", + } +} diff --git a/api/v4/busconfiguration_types.go b/api/v4/queue_types.go similarity index 53% rename from api/v4/busconfiguration_types.go rename to api/v4/queue_types.go index a4b76a00b..9828f7301 100644 --- a/api/v4/busconfiguration_types.go +++ b/api/v4/queue_types.go @@ -23,35 +23,49 @@ import ( ) const ( - // BusConfigurationPausedAnnotation is the annotation that pauses the reconciliation (triggers + // QueuePausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - BusConfigurationPausedAnnotation = "busconfiguration.enterprise.splunk.com/paused" + QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" ) -// BusConfigurationSpec defines the desired state of BusConfiguration -type BusConfigurationSpec struct { - Type string `json:"type"` +// +kubebuilder:validation:XValidation:rule="self.provider != 
'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" +// QueueSpec defines the desired state of Queue +type QueueSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=sqs + // Provider of queue resources + Provider string `json:"provider"` + // +kubebuilder:validation:Required + // sqs specific inputs SQS SQSSpec `json:"sqs"` } type SQSSpec struct { - QueueName string `json:"queueName"` - + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the queue + Name string `json:"name"` + + // +optional + // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` + // Auth Region of the resources AuthRegion string `json:"authRegion"` - Endpoint string `json:"endpoint"` - - LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the dead letter queue resource + DLQ string `json:"dlq"` - LargeMessageStorePath string `json:"largeMessageStorePath"` - - DeadLetterQueueName string `json:"deadLetterQueueName"` + // +optional + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` + // Amazon SQS Service endpoint + Endpoint string `json:"endpoint"` } -// BusConfigurationStatus defines the observed state of BusConfiguration. -type BusConfigurationStatus struct { - // Phase of the bus configuration +// QueueStatus defines the observed state of Queue +type QueueStatus struct { + // Phase of the queue Phase Phase `json:"phase"` // Resource revision tracker @@ -64,27 +78,27 @@ type BusConfigurationStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// BusConfiguration is the Schema for a Splunk Enterprise bus configuration +// Queue is the Schema for a Splunk Enterprise queue // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=busconfigurations,scope=Namespaced,shortName=bus -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus configuration" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus configuration resource" +// +kubebuilder:resource:path=queues,scope=Namespaced,shortName=queue +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of queue" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of queue resource" // +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// BusConfiguration is the Schema for the busconfigurations API -type BusConfiguration struct { +// Queue is the Schema for the queues API +type Queue struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec BusConfigurationSpec `json:"spec"` - Status BusConfigurationStatus `json:"status,omitempty,omitzero"` + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *BusConfiguration) DeepCopyObject() runtime.Object { +func (in *Queue) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -93,42 +107,42 @@ func (in *BusConfiguration) 
DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// BusConfigurationList contains a list of BusConfiguration -type BusConfigurationList struct { +// QueueList contains a list of Queue +type QueueList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []BusConfiguration `json:"items"` + Items []Queue `json:"items"` } func init() { - SchemeBuilder.Register(&BusConfiguration{}, &BusConfigurationList{}) + SchemeBuilder.Register(&Queue{}, &QueueList{}) } // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.Event { +func (os *Queue) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: bc.ObjectMeta.Namespace, + Namespace: os.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "BusConfiguration", - Namespace: bc.Namespace, - Name: bc.Name, - UID: bc.UID, + Kind: "Queue", + Namespace: os.Namespace, + Name: os.Name, + UID: os.UID, APIVersion: GroupVersion.String(), }, Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-busconfiguration-controller", + Component: "splunk-queue-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/busconfiguration-controller", + ReportingController: "enterprise.splunk.com/queue-controller", } } diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index fa23c996a..dd9b2f347 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -180,95 +180,6 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfiguration. -func (in *BusConfiguration) DeepCopy() *BusConfiguration { - if in == nil { - return nil - } - out := new(BusConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationList) DeepCopyInto(out *BusConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]BusConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationList. -func (in *BusConfigurationList) DeepCopy() *BusConfigurationList { - if in == nil { - return nil - } - out := new(BusConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BusConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *BusConfigurationSpec) DeepCopyInto(out *BusConfigurationSpec) { - *out = *in - out.SQS = in.SQS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationSpec. -func (in *BusConfigurationSpec) DeepCopy() *BusConfigurationSpec { - if in == nil { - return nil - } - out := new(BusConfigurationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { - *out = *in - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationStatus. -func (in *BusConfigurationStatus) DeepCopy() *BusConfigurationStatus { - if in == nil { - return nil - } - out := new(BusConfigurationStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -600,7 +511,8 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.BusConfigurationRef = in.BusConfigurationRef + out.QueueRef = in.QueueRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -633,7 +545,16 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - out.BusConfiguration = in.BusConfiguration + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(QueueSpec) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -702,7 +623,8 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.BusConfigurationRef = in.BusConfigurationRef + out.QueueRef = in.QueueRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. @@ -726,7 +648,16 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - out.BusConfiguration = in.BusConfiguration + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(QueueSpec) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. @@ -931,6 +862,95 @@ func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorage) DeepCopyInto(out *ObjectStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorage. +func (in *ObjectStorage) DeepCopy() *ObjectStorage { + if in == nil { + return nil + } + out := new(ObjectStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageList) DeepCopyInto(out *ObjectStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ObjectStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageList. +func (in *ObjectStorageList) DeepCopy() *ObjectStorageList { + if in == nil { + return nil + } + out := new(ObjectStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) { + *out = *in + out.S3 = in.S3 +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec. +func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { + if in == nil { + return nil + } + out := new(ObjectStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageStatus) DeepCopyInto(out *ObjectStorageStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageStatus. +func (in *ObjectStorageStatus) DeepCopy() *ObjectStorageStatus { + if in == nil { + return nil + } + out := new(ObjectStorageStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PhaseInfo) DeepCopyInto(out *PhaseInfo) { *out = *in @@ -977,6 +997,110 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + out.SQS = in.SQS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. +func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Spec) DeepCopyInto(out *S3Spec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. +func (in *S3Spec) DeepCopy() *S3Spec { + if in == nil { + return nil + } + out := new(S3Spec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 1984474fa..dfb9c87e1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,11 +230,18 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.BusConfigurationReconciler{ + if err := (&controller.QueueReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BusConfiguration") + setupLog.Error(err, "unable to create controller", "controller", "Queue") + os.Exit(1) + } + if err := (&controller.ObjectStorageReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ObjectStorage") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index d66e057fb..59faab055 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,49 +5165,6 @@ spec: x-kubernetes-list-type: atomic type: object type: object - busConfigurationRef: - description: Bus configuration reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -5647,6 +5604,92 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -8285,6 +8328,9 @@ spec: type: object type: array type: object + x-kubernetes-validations: + - message: queueRef and objectStorageRef must both be set or both be empty + rule: has(self.queueRef) == has(self.objectStorageRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8294,27 +8340,6 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object - busConfiguration: - description: Bus configuration - properties: - sqs: - properties: - authRegion: - type: string - deadLetterQueueName: - type: string - endpoint: - type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: - type: string - type: object - type: - type: string - type: object clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8358,6 +8383,35 @@ spec: namespace_scoped_secret_resource_version: description: Indicates resource version of namespace scoped secret type: string + objectStorage: + description: Object Storage + properties: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs + properties: + endpoint: + description: S3-compatible Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ + type: string + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ + type: string + required: + - path + type: object + required: + - provider + - s3 + type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) peers: description: status of each indexer cluster peer items: @@ -8399,6 +8453,44 @@ spec: - Terminating - Error type: string + queue: + description: Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + required: + - dlq + - name + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 82f1f868a..7432e96b4 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,49 +1141,6 @@ spec: type: object type: array type: object - busConfigurationRef: - description: Bus configuration reference - properties: - apiVersion: - 
description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1623,6 +1580,92 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -4259,6 +4302,9 @@ spec: - name type: object type: array + required: + - objectStorageRef + - queueRef type: object status: description: IngestorClusterStatus defines the observed state of Ingestor @@ -4545,30 +4591,38 @@ spec: description: App Framework version info for future use type: integer type: object - busConfiguration: - description: Bus configuration + message: + description: Auxillary message describing CR status + type: string + objectStorage: + description: Object Storage properties: - sqs: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs properties: - authRegion: - type: string - deadLetterQueueName: - type: string endpoint: + description: S3-compatible Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + required: + - path type: object - type: - type: string + required: + - provider + - s3 type: object - message: - description: Auxillary message describing CR status - type: string + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) phase: description: Phase of the ingestor pods enum: @@ -4580,6 +4634,44 @@ spec: - Terminating - Error 
type: string + queue: + description: Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + required: + - dlq + - name + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml similarity index 66% rename from config/crd/bases/enterprise.splunk.com_busconfigurations.yaml rename to config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 9f80cdbea..2fac45707 100644 --- a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: busconfigurations.enterprise.splunk.com + name: objectstorages.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: BusConfiguration - listKind: BusConfigurationList - plural: busconfigurations + kind: ObjectStorage + listKind: ObjectStorageList + plural: objectstorages shortNames: - - bus - singular: busconfiguration + - os + singular: objectstorage scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of bus configuration + - description: Status of object storage jsonPath: .status.phase name: Phase type: string - - description: Age of bus configuration resource + - description: Age of object storage resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: BusConfiguration is the Schema for the busconfigurations API + description: ObjectStorage is the Schema for the objectstorages API properties: apiVersion: description: |- @@ -52,34 +52,42 @@ spec: metadata: type: object spec: - description: BusConfigurationSpec defines the desired state of BusConfiguration + description: ObjectStorageSpec defines the desired state of ObjectStorage properties: - sqs: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs properties: - authRegion: - type: string - deadLetterQueueName: - type: string endpoint: + description: S3-compatible Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + required: + - path type: object - type: - type: string + required: + - provider + - s3 type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) status: - description: BusConfigurationStatus defines the observed state of BusConfiguration. 
+ description: ObjectStorageStatus defines the observed state of ObjectStorage. properties: message: description: Auxillary message describing CR status type: string phase: - description: Phase of the bus configuration + description: Phase of the large message store enum: - Pending - Ready diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml new file mode 100644 index 000000000..2ba8d03f5 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -0,0 +1,123 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: queues.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: Queue + listKind: QueueList + plural: queues + shortNames: + - queue + singular: queue + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of queue + jsonPath: .status.phase + name: Phase + type: string + - description: Age of queue resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: Queue is the Schema for the queues API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QueueSpec defines the desired state of Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + required: + - dlq + - name + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) + status: + description: QueueStatus defines the observed state of Queue + properties: + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the queue + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 679c1dc72..0304146cd 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,7 +11,8 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml -- bases/enterprise.splunk.com_busconfigurations.yaml +- bases/enterprise.splunk.com_queues.yaml +- bases/enterprise.splunk.com_objectstorages.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/busconfiguration_editor_role.yaml b/config/rbac/objectstorage_editor_role.yaml similarity index 88% rename from config/rbac/busconfiguration_editor_role.yaml rename to config/rbac/objectstorage_editor_role.yaml index fde8687f7..70323227f 100644 --- a/config/rbac/busconfiguration_editor_role.yaml +++ b/config/rbac/objectstorage_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-editor-role + name: objectstorage-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get diff --git a/config/rbac/busconfiguration_viewer_role.yaml b/config/rbac/objectstorage_viewer_role.yaml similarity index 87% rename from config/rbac/busconfiguration_viewer_role.yaml rename to config/rbac/objectstorage_viewer_role.yaml index 6230863a9..9764699bc 100644 --- a/config/rbac/busconfiguration_viewer_role.yaml +++ b/config/rbac/objectstorage_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-viewer-role + name: objectstorage-viewer-role rules: - apiGroups: - 
enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get diff --git a/config/rbac/queue_editor_role.yaml b/config/rbac/queue_editor_role.yaml new file mode 100644 index 000000000..bf7e4d890 --- /dev/null +++ b/config/rbac/queue_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: queue-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get diff --git a/config/rbac/queue_viewer_role.yaml b/config/rbac/queue_viewer_role.yaml new file mode 100644 index 000000000..b186c8650 --- /dev/null +++ b/config/rbac/queue_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: queue-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 78231b303..973105d16 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,7 +47,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations - clustermanagers - clustermasters - indexerclusters @@ -55,6 +54,8 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - objectstorages + - queues - searchheadclusters - standalones verbs: @@ -68,7 +69,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers @@ -76,6 +76,8 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - objectstorages/finalizers + - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -83,7 +85,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status - clustermanagers/status - clustermasters/status - indexerclusters/status @@ -91,6 +92,8 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - objectstorages/status + - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/config/samples/enterprise_v4_busconfiguration.yaml b/config/samples/enterprise_v4_objectstorage.yaml similarity index 72% rename from config/samples/enterprise_v4_busconfiguration.yaml rename to 
config/samples/enterprise_v4_objectstorage.yaml index 0cc1aed31..b693a14e0 100644 --- a/config/samples/enterprise_v4_busconfiguration.yaml +++ b/config/samples/enterprise_v4_objectstorage.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: ObjectStorage metadata: - name: busconfiguration-sample + name: objectstorage-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/enterprise_v4_queue.yaml b/config/samples/enterprise_v4_queue.yaml new file mode 100644 index 000000000..374d4adb2 --- /dev/null +++ b/config/samples/enterprise_v4_queue.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: queue-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 88c71025d..34c05ab05 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,5 +14,6 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml -- enterprise_v4_busconfiguration.yaml +- enterprise_v4_queue.yaml +- enterprise_v4_objectstorage.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 6461d4488..157a9b123 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -18,9 +18,11 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters) - [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) + - [Queue Resource Spec Parameters](#queue-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) + - [ObjectStorage Resource Spec Parameters](#objectstorage-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -279,6 +281,41 @@ spec: cpu: "4" ``` +## Queue Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: queue +spec: + replicas: 3 + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test +``` + +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | + +SQS message queue inputs can be found in the table below. 
+ +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| name | string | [Required] Name of the queue | +| region | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | + +Changing any of the queue inputs triggers a restart of Splunk so that the appropriate .conf files are refreshed and consumed correctly. + ## ClusterManager Resource Spec Parameters ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - ```yaml @@ -338,10 +375,12 @@ metadata: name: ic spec: replicas: 3 - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` -Note: `busConfigurationRef` is required field in case of IngestorCluster resource since it will be used to connect the IngestorCluster to BusConfiguration resource. +Note: `queueRef` and `objectStorageRef` are required fields for the IngestorCluster resource since they are used to connect the IngestorCluster to the Queue and ObjectStorage resources. In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), @@ -351,6 +390,36 @@ the `IngestorCluster` resource provides the following `Spec` configuration param | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of ingestor peers (minimum of 3 which is the default) | +## ObjectStorage Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os +spec: + provider: s3 + s3: + path: s3://ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com +``` + +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of large message store (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 large message store inputs | + +S3 large message store inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Changing any of the large message store inputs triggers a restart of Splunk so that the appropriate .conf files are refreshed and consumed correctly. + ## MonitoringConsole Resource Spec Parameters ```yaml @@ -462,10 +531,12 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. 
However, t | Customer Resource Definition | Annotation | | ----------- | --------- | +| queue.enterprise.splunk.com | "queue.enterprise.splunk.com/paused" | | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | | ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | +| objectstorage.enterprise.splunk.com | "objectstorage.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index dd53922ff..bd5d97579 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -4,7 +4,7 @@ Separation between ingestion and indexing services within Splunk Operator for Ku This separation enables: - Independent scaling: Match resource allocation to ingestion or indexing workload. -- Data durability: Off‑load buffer management and retry logic to a durable message bus. +- Data durability: Off‑load buffer management and retry logic to a durable message queue. - Operational clarity: Separate monitoring dashboards for ingestion throughput vs indexing latency. # Important Note @@ -16,52 +16,83 @@ This separation enables: - SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version -# BusConfiguration +# Queue -BusConfiguration is introduced to store message bus configuration to be shared among IngestorCluster and IndexerCluster. +Queue is introduced to store message queue information to be shared among IngestorCluster and IndexerCluster. ## Spec -BusConfiguration inputs can be found in the table below. As of now, only SQS type of message bus is supported. +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| type | string | Type of message bus (Only sqs_smartbus as of now) | -| sqs | SQS | SQS message bus inputs | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | -SQS message bus inputs can be found in the table below. +SQS message queue inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| queueName | string | Name of the SQS queue | -| authRegion | string | Region where the SQS queue is located | -| endpoint | string | AWS SQS endpoint -| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint | -| largeMessageStorePath | string | S3 path for Large Message Store | -| deadLetterQueueName | string | Name of the SQS dead letter queue | +| name | string | [Required] Name of the queue | +| region | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs does not restart Splunk. It just updates the config values with no disruptions. 
+Changing any of the queue inputs triggers a restart of Splunk so that the appropriate .conf files are refreshed and consumed correctly. ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Queue metadata: - name: bus-config + name: queue spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +``` + +# ObjectStorage + +ObjectStorage is introduced to store large message store information (for messages that exceed the maximum size that can be stored in SQS) to be shared among IngestorCluster and IndexerCluster. + +## Spec + +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of large message store (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 large message store inputs | + +S3 large message store inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Changing any of the large message store inputs triggers a restart of Splunk so that the appropriate .conf files are refreshed and consumed correctly. + +## Example +``` +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os +spec: + provider: s3 + s3: + path: s3://ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com ``` # IngestorCluster -IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. +IngestorCluster is introduced for high‑throughput data ingestion into a durable message queue. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message queue. ## Spec @@ -70,11 +101,12 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| queueRef | corev1.ObjectReference | Message queue reference | +| objectStorageRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Push Bus reference allows the user to specify queue and bucket settings for the ingestion process. 
+The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -89,13 +121,15 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` # IndexerCluster -IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the bus (inputs.conf) and index them. +IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the queue (inputs.conf) and index them. ## Spec @@ -104,11 +138,12 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| queueRef | corev1.ObjectReference | Message queue reference | +| objectStorageRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Pull Bus reference allows the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. 
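Once the example below is applied and the CR reports Ready, the resolved queue and large message store settings can also be read back from the IndexerCluster status as a quick spot check. This is a hypothetical verification step, not part of this change, and the jsonpath field names assume the status fields are serialized with the same JSON tags as the spec (in particular, `queue.sqs.name` is an assumption).

```
# Hypothetical spot check: read the resolved queue and large message store back from status
kubectl get indexercluster indexer -o jsonpath='{.status.queue.sqs.name}'
kubectl get indexercluster indexer -o jsonpath='{.status.objectStorage.s3.path}'
```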
@@ -135,8 +170,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` # Common Spec @@ -145,24 +182,32 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -An IngestorCluster template has been added to the splunk/splunk-enterprise Helm chart. The IndexerCluster template has also been enhanced to support new inputs. +Queue, ObjectStorage and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for BusConfiguration, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Queue, ObjectStorage, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. ``` -busConfiguration:: +queue: enabled: true - name: bus-config - type: sqs_smartbus + name: queue + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +``` + +``` +objectStorage: + enabled: true + name: os + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test ``` ``` @@ -171,8 +216,10 @@ ingestorCluster: name: ingestor replicaCount: 3 serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` ``` @@ -189,8 +236,10 @@ indexerCluster: serviceAccount: ingestor-sa clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` # Service Account @@ -492,45 +541,43 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install BusConfiguration resource. +3. Install Queue resource. 
``` -$ cat bus.yaml +$ cat queue.yaml apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Queue metadata: - name: bus + name: queue finalizers: - enterprise.splunk.com/delete-pvc spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test ``` ``` -$ kubectl apply -f bus.yaml +$ kubectl apply -f queue.yaml ``` ``` -$ kubectl get busconfiguration +$ kubectl get queue NAME PHASE AGE MESSAGE -bus Ready 20s +queue Ready 20s ``` ``` -kubectl describe busconfiguration -Name: bus +kubectl describe queue +Name: queue Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: BusConfiguration +Kind: Queue Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -540,13 +587,11 @@ Metadata: UID: 12345678-1234-5678-1234-012345678911 Spec: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test + Region: us-west-2 + DLQ: sqs-dlq-test Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Name: sqs-test + Provider: sqs Status: Message: Phase: Ready @@ -554,7 +599,61 @@ Status: Events: ``` -4. Install IngestorCluster resource. +4. Install ObjectStorage resource. + +``` +$ cat os.yaml +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +``` + +``` +$ kubectl apply -f os.yaml +``` + +``` +$ kubectl get os +NAME PHASE AGE MESSAGE +os Ready 20s +``` + +``` +kubectl describe os +Name: os +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: ObjectStorage +Metadata: + Creation Timestamp: 2025-10-27T10:25:53Z + Finalizers: + enterprise.splunk.com/delete-pvc + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-5678-1234-012345678911 +Spec: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: s3://ingestion/smartbus-test + Provider: s3 +Status: + Message: + Phase: Ready + Resource Rev Map: +Events: +``` + +5. Install IngestorCluster resource. 
``` $ cat ingestor.yaml @@ -568,8 +667,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` ``` @@ -598,10 +699,13 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Bus Configuration Ref: - Name: bus-config + Queue Ref: + Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + Large Message Store Ref: + Name: os + Namespace: default Replicas: 3 Service Account: ingestor-sa Status: @@ -616,15 +720,18 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Bus Configuration: + Queue: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test - Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Region: us-west-2 + DLQ: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Name: sqs-test + Provider: sqs + Large Message Store: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: s3://ingestion/smartbus-test + Provider: s3 Message: Phase: Ready Ready Replicas: 3 @@ -678,7 +785,7 @@ remote_queue.sqs_smartbus.send_interval = 5s remote_queue.type = sqs_smartbus ``` -5. Install IndexerCluster resource. +6. Install IndexerCluster resource. ``` $ cat idxc.yaml @@ -704,8 +811,10 @@ spec: clusterManagerRef: name: cm serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` ``` @@ -777,7 +886,7 @@ disabled = false disabled = true ``` -6. Install Horizontal Pod Autoscaler for IngestorCluster. +7. Install Horizontal Pod Autoscaler for IngestorCluster. ``` $ cat hpa-ing.yaml @@ -860,7 +969,7 @@ NAME REFERENCE TARGETS MINPODS MAXPODS REPLICA ing-hpa IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s ``` -7. Generate fake load. +8. Generate fake load. - HEC_TOKEN: HEC token for making fake calls diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml deleted file mode 100644 index 2a746968e..000000000 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.busConfiguration }} -{{- if .Values.busConfiguration.enabled }} -apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration -metadata: - name: {{ .Values.busConfiguration.name }} - namespace: {{ default .Release.Namespace .Values.busConfiguration.namespaceOverride }} - {{- with .Values.busConfiguration.additionalLabels }} - labels: -{{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.busConfiguration.additionalAnnotations }} - annotations: -{{ toYaml . 
| nindent 4 }} - {{- end }} -spec: - type: {{ .Values.busConfiguration.type | quote }} - {{- with .Values.busConfiguration.sqs }} - sqs: - {{- if .queueName }} - queueName: {{ .queueName | quote }} - {{- end }} - {{- if .authRegion }} - authRegion: {{ .authRegion | quote }} - {{- end }} - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .largeMessageStoreEndpoint }} - largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} - {{- end }} - {{- if .largeMessageStorePath }} - largeMessageStorePath: {{ .largeMessageStorePath | quote }} - {{- end }} - {{- if .deadLetterQueueName }} - deadLetterQueueName: {{ .deadLetterQueueName | quote }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 77c24d500..833f162aa 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -163,8 +163,15 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.busConfigurationRef }} - busConfigurationRef: + {{- with $.Values.indexerCluster.queueRef }} + queueRef: + name: {{ .name }} + {{- if .namespace }} + namespace: {{ .namespace }} + {{- end }} + {{- end }} + {{- with $.Values.indexerCluster.objectStorageRef }} + objectStorageRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index fd72da310..e5ab1258c 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,11 +95,18 @@ spec: topologySpreadConstraints: {{- toYaml . 
| nindent 4 }} {{- end }} - {{- with $.Values.ingestorCluster.busConfigurationRef }} - busConfigurationRef: - name: {{ $.Values.ingestorCluster.busConfigurationRef.name }} - {{- if $.Values.ingestorCluster.busConfigurationRef.namespace }} - namespace: {{ $.Values.ingestorCluster.busConfigurationRef.namespace }} + {{- with $.Values.ingestorCluster.queueRef }} + queueRef: + name: {{ $.Values.ingestorCluster.queueRef.name }} + {{- if $.Values.ingestorCluster.queueRef.namespace }} + namespace: {{ $.Values.ingestorCluster.queueRef.namespace }} + {{- end }} + {{- end }} + {{- with $.Values.ingestorCluster.objectStorageRef }} + objectStorageRef: + name: {{ $.Values.ingestorCluster.objectStorageRef.name }} + {{- if $.Values.ingestorCluster.objectStorageRef.namespace }} + namespace: {{ $.Values.ingestorCluster.objectStorageRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml new file mode 100644 index 000000000..7cd5bdca0 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml @@ -0,0 +1,28 @@ +{{- if .Values.objectStorage }} +{{- if .Values.objectStorage.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: {{ .Values.objectStorage.name }} + namespace: {{ default .Release.Namespace .Values.objectStorage.namespaceOverride }} + {{- with .Values.objectStorage.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.objectStorage.additionalAnnotations }} + annotations: +{{ toYaml . | nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.objectStorage.provider | quote }} + {{- with .Values.objectStorage.s3 }} + s3: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .path }} + path: {{ .path | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml new file mode 100644 index 000000000..b586e45da --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -0,0 +1,34 @@ +{{- if .Values.queue }} +{{- if .Values.queue.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: {{ .Values.queue.name }} + namespace: {{ default .Release.Namespace .Values.queue.namespaceOverride }} + {{- with .Values.queue.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.queue.additionalAnnotations }} + annotations: +{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.queue.provider | quote }} + {{- with .Values.queue.sqs }} + sqs: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .dlq }} + dlq: {{ .dlq | quote }} + {{- end }} + {{- if .name }} + name: {{ .name | quote }} + {{- end }} + {{- if .region }} + region: {{ .region | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index e49073398..6643728fa 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,7 +350,9 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - busConfigurationRef: {} + queueRef: {} + + objectStorageRef: {} searchHeadCluster: @@ -899,4 +901,6 @@ ingestorCluster: affinity: {} - busConfigurationRef: {} \ No newline at end of file + queueRef: {} + + objectStorageRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml similarity index 78% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml rename to helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml index 1475add32..6c04be75b 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - queues verbs: - create - delete @@ -25,19 +25,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - queues/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - queues verbs: - create - delete @@ -49,7 +49,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - queues/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml similarity index 76% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml rename to helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml index 500b1d100..2c81b98fd 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - queues verbs: - get - list @@ -21,19 +21,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - queues/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - queues verbs: - get - list @@ -41,7 +41,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - queues/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 4eab5275e..77be54727 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -251,7 +251,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - queues verbs: - create - delete @@ -263,13 +263,39 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers + - queues/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - queues/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status verbs: - get - patch diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 3cc840baa..7efb6e1b8 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -172,9 +172,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Queue{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + b, ok := obj.(*enterpriseApi.Queue) if !ok { return nil } @@ -184,11 +184,39 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.QueueRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.QueueRef.Name == b.Name && ns == b.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). 
+ Watches(&enterpriseApi.ObjectStorage{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + os, ok := obj.(*enterpriseApi.ObjectStorage) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.ObjectStorageRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index a2c5846df..0d8117bd2 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -141,9 +141,9 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Queue{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + queue, ok := obj.(*enterpriseApi.Queue) if !ok { return nil } @@ -153,11 +153,39 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.QueueRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.QueueRef.Name == queue.Name && ns == queue.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). 
+ Watches(&enterpriseApi.ObjectStorage{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + os, ok := obj.(*enterpriseApi.ObjectStorage) + if !ok { + return nil + } + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.ObjectStorageRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 5e7ae4b73..38e7cbb4e 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -71,7 +71,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations @@ -91,7 +119,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -164,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, os 
*enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -184,8 +240,13 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } diff --git a/internal/controller/busconfiguration_controller.go b/internal/controller/objectstorage_controller.go similarity index 69% rename from internal/controller/busconfiguration_controller.go rename to internal/controller/objectstorage_controller.go index c8519c017..4ae36b1a2 100644 --- a/internal/controller/busconfiguration_controller.go +++ b/internal/controller/objectstorage_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// BusConfigurationReconciler reconciles a BusConfiguration object -type BusConfigurationReconciler struct { +// ObjectStorageReconciler reconciles a ObjectStorage object +type ObjectStorageReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the BusConfiguration object against the actual cluster state, and then +// the ObjectStorage object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "BusConfiguration")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "BusConfiguration") +func (r *ObjectStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "ObjectStorage")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "ObjectStorage") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("busconfiguration", req.NamespacedName) + reqLogger = reqLogger.WithValues("objectstorage", req.NamespacedName) - // Fetch the BusConfiguration - instance := &enterpriseApi.BusConfiguration{} + // Fetch the ObjectStorage + instance := &enterpriseApi.ObjectStorage{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, nil } // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrap(err, "could not load bus configuration data") + return ctrl.Result{}, errors.Wrap(err, "could not load objectstorage data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.BusConfigurationPausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.ObjectStoragePausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyBusConfiguration(ctx, r.Client, instance) + result, err := ApplyObjectStorage(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Req return result, err } -var ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return enterprise.ApplyBusConfiguration(ctx, client, instance) +var ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return enterprise.ApplyObjectStorage(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *BusConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *ObjectStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.BusConfiguration{}). + For(&enterpriseApi.ObjectStorage{}). WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/objectstorage_controller_test.go b/internal/controller/objectstorage_controller_test.go new file mode 100644 index 000000000..6d7dec87a --- /dev/null +++ b/internal/controller/objectstorage_controller_test.go @@ -0,0 +1,260 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/testutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ObjectStorage Controller", func() { + BeforeEach(func() { + time.Sleep(2 * time.Second) + }) + + AfterEach(func() { + + }) + + Context("ObjectStorage Management", func() { + + It("Get ObjectStorage custom resource should fail", func() { + namespace := "ns-splunk-objectstorage-1" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + _, err := GetObjectStorage("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("objectstorages.enterprise.splunk.com \"test\" not found")) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create ObjectStorage custom resource with annotations should pause", func() { + namespace := "ns-splunk-objectstorage-2" + annotations := make(map[string]string) + annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + spec := enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + osSpec, _ := GetObjectStorage("test", nsSpecs.Name) + annotations = map[string]string{} + osSpec.Annotations = annotations + osSpec.Status.Phase = "Ready" + UpdateObjectStorage(osSpec, enterpriseApi.PhaseReady, spec) + DeleteObjectStorage("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create ObjectStorage custom resource should succeeded", func() { + namespace := "ns-splunk-objectstorage-3" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + annotations := make(map[string]string) + spec := enterpriseApi.ObjectStorageSpec{ + Provider: 
"s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteObjectStorage("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Cover Unused methods", func() { + namespace := "ns-splunk-objectstorage-4" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + ctx := context.TODO() + builder := fake.NewClientBuilder() + c := builder.Build() + instance := ObjectStorageReconciler{ + Client: c, + Scheme: scheme.Scheme, + } + request := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + _, err := instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + spec := enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + osSpec := testutils.NewObjectStorage("test", namespace, spec) + Expect(c.Create(ctx, osSpec)).Should(Succeed()) + + annotations := make(map[string]string) + annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" + osSpec.Annotations = annotations + Expect(c.Update(ctx, osSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + annotations = map[string]string{} + osSpec.Annotations = annotations + Expect(c.Update(ctx, osSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + osSpec.DeletionTimestamp = &metav1.Time{} + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + }) + + }) +}) + +func GetObjectStorage(name string, namespace string) (*enterpriseApi.ObjectStorage, error) { + By("Expecting ObjectStorage custom resource to be retrieved successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + os := &enterpriseApi.ObjectStorage{} + + err := k8sClient.Get(context.Background(), key, os) + if err != nil { + return nil, err + } + + return os, err +} + +func CreateObjectStorage(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + By("Expecting ObjectStorage custom resource to be created successfully") + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + osSpec := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: spec, + } + + Expect(k8sClient.Create(context.Background(), osSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + os := &enterpriseApi.ObjectStorage{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, os) + if status != "" { + fmt.Printf("status is set to %v", status) + os.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return os +} + +func UpdateObjectStorage(instance *enterpriseApi.ObjectStorage, status 
enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + By("Expecting ObjectStorage custom resource to be updated successfully") + key := types.NamespacedName{ + Name: instance.Name, + Namespace: instance.Namespace, + } + + osSpec := testutils.NewObjectStorage(instance.Name, instance.Namespace, spec) + osSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), osSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + os := &enterpriseApi.ObjectStorage{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, os) + if status != "" { + fmt.Printf("status is set to %v", status) + os.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return os +} + +func DeleteObjectStorage(name string, namespace string) { + By("Expecting ObjectStorage custom resource to be deleted successfully") + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + + Eventually(func() error { + os := &enterpriseApi.ObjectStorage{} + _ = k8sClient.Get(context.Background(), key, os) + err := k8sClient.Delete(context.Background(), os) + return err + }, timeout, interval).Should(Succeed()) +} diff --git a/internal/controller/queue_controller.go b/internal/controller/queue_controller.go new file mode 100644 index 000000000..6fff662b9 --- /dev/null +++ b/internal/controller/queue_controller.go @@ -0,0 +1,120 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "time" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pkg/errors" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/common" + metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" + enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" +) + +// QueueReconciler reconciles a Queue object +type QueueReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the Queue object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile +func (r *QueueReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Queue")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "Queue") + + reqLogger := log.FromContext(ctx) + reqLogger = reqLogger.WithValues("queue", req.NamespacedName) + + // Fetch the Queue + instance := &enterpriseApi.Queue{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after + // reconcile request. Owned objects are automatically + // garbage collected. For additional cleanup logic use + // finalizers. Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, errors.Wrap(err, "could not load queue data") + } + + // If the reconciliation is paused, requeue + annotations := instance.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.QueuePausedAnnotation]; ok { + return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil + } + } + + reqLogger.Info("start", "CR version", instance.GetResourceVersion()) + + result, err := ApplyQueue(ctx, r.Client, instance) + if result.Requeue && result.RequeueAfter != 0 { + reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) + } + + return result, err +} + +var ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { + return enterprise.ApplyQueue(ctx, client, instance) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *QueueReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterpriseApi.Queue{}). + WithEventFilter(predicate.Or( + common.GenerationChangedPredicate(), + common.AnnotationChangedPredicate(), + common.LabelChangedPredicate(), + common.SecretChangedPredicate(), + common.ConfigMapChangedPredicate(), + common.StatefulsetChangedPredicate(), + common.PodChangedPredicate(), + common.CrdChangedPredicate(), + )). + WithOptions(controller.Options{ + MaxConcurrentReconciles: enterpriseApi.TotalWorker, + }). 
+ Complete(r) +} diff --git a/internal/controller/busconfiguration_controller_test.go b/internal/controller/queue_controller_test.go similarity index 53% rename from internal/controller/busconfiguration_controller_test.go rename to internal/controller/queue_controller_test.go index e08154211..b04a5d4b3 100644 --- a/internal/controller/busconfiguration_controller_test.go +++ b/internal/controller/queue_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("BusConfiguration Controller", func() { +var _ = Describe("Queue Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,47 +43,55 @@ var _ = Describe("BusConfiguration Controller", func() { }) - Context("BusConfiguration Management", func() { + Context("Queue Management", func() { - It("Get BusConfiguration custom resource should fail", func() { - namespace := "ns-splunk-bus-1" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + It("Get Queue custom resource should fail", func() { + namespace := "ns-splunk-queue-1" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetBusConfiguration("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("busconfigurations.enterprise.splunk.com \"test\" not found")) - + _, err := GetQueue("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("queues.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create BusConfiguration custom resource with annotations should pause", func() { - namespace := "ns-splunk-bus-2" + It("Create Queue custom resource with annotations should pause", func() { + namespace := "ns-splunk-queue-2" annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + annotations[enterpriseApi.QueuePausedAnnotation] = "" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - icSpec, _ := GetBusConfiguration("test", nsSpecs.Name) + spec := enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetQueue("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateBusConfiguration(icSpec, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) + UpdateQueue(icSpec, enterpriseApi.PhaseReady, spec) + DeleteQueue("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create 
BusConfiguration custom resource should succeeded", func() { - namespace := "ns-splunk-bus-3" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + It("Create Queue custom resource should succeeded", func() { + namespace := "ns-splunk-queue-3" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -91,14 +99,23 @@ var _ = Describe("BusConfiguration Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) + spec := enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteQueue("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { - namespace := "ns-splunk-bus-4" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + namespace := "ns-splunk-queue-4" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -108,7 +125,7 @@ var _ = Describe("BusConfiguration Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - instance := BusConfigurationReconciler{ + instance := QueueReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -121,11 +138,20 @@ var _ = Describe("BusConfiguration Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - bcSpec := testutils.NewBusConfiguration("test", namespace, "image") + spec := enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + bcSpec := testutils.NewQueue("test", namespace, spec) Expect(c.Create(ctx, bcSpec)).Should(Succeed()) annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + annotations[enterpriseApi.QueuePausedAnnotation] = "" bcSpec.Annotations = annotations Expect(c.Update(ctx, bcSpec)).Should(Succeed()) @@ -147,86 +173,87 @@ var _ = Describe("BusConfiguration Controller", func() { }) }) -func GetBusConfiguration(name string, namespace string) (*enterpriseApi.BusConfiguration, error) { - By("Expecting BusConfiguration custom resource to be retrieved successfully") +func GetQueue(name string, namespace string) (*enterpriseApi.Queue, error) { + By("Expecting Queue custom resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Queue{} - err := k8sClient.Get(context.Background(), key, bc) + err := k8sClient.Get(context.Background(), key, b) if err != nil { return nil, err } - return bc, err + 
return b, err } -func CreateBusConfiguration(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be created successfully") +func CreateQueue(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + By("Expecting Queue custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - ingSpec := &enterpriseApi.BusConfiguration{ + ingSpec := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Annotations: annotations, }, + Spec: spec, } Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Queue{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) + _ = k8sClient.Get(context.Background(), key, b) if status != "" { fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + b.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return bc + return b } -func UpdateBusConfiguration(instance *enterpriseApi.BusConfiguration, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be updated successfully") +func UpdateQueue(instance *enterpriseApi.Queue, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + By("Expecting Queue custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - bcSpec := testutils.NewBusConfiguration(instance.Name, instance.Namespace, "image") - bcSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), bcSpec)).Should(Succeed()) + bSpec := testutils.NewQueue(instance.Name, instance.Namespace, spec) + bSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Queue{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) + _ = k8sClient.Get(context.Background(), key, b) if status != "" { fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + b.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return bc + return b } -func DeleteBusConfiguration(name string, namespace string) { - By("Expecting BusConfiguration custom resource to be deleted successfully") +func DeleteQueue(name string, namespace string) { + By("Expecting Queue custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, @@ -234,9 +261,9 @@ func DeleteBusConfiguration(name string, namespace string) { } Eventually(func() error { - bc := &enterpriseApi.BusConfiguration{} - _ = k8sClient.Get(context.Background(), key, bc) - err := k8sClient.Delete(context.Background(), bc) + b := &enterpriseApi.Queue{} + _ = k8sClient.Get(context.Background(), key, b) + err := 
k8sClient.Delete(context.Background(), b) return err }, timeout, interval).Should(Succeed()) } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 52c4c1a1d..8454d15b5 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -50,7 +50,6 @@ func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") - } var _ = BeforeSuite(func(ctx context.Context) { @@ -99,6 +98,12 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) + if err := (&QueueReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } if err := (&ClusterManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), @@ -117,37 +122,43 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseManagerReconciler{ + if err := (&IngestorClusterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseMasterReconciler{ + if err := (&ObjectStorageReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&MonitoringConsoleReconciler{ + if err := (&LicenseManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&SearchHeadClusterReconciler{ + if err := (&LicenseMasterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&StandaloneReconciler{ + if err := (&MonitoringConsoleReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&IngestorClusterReconciler{ + if err := (&SearchHeadClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&StandaloneReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index 9ca78593c..aa47e8092 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -54,28 +54,26 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + QueueRef: corev1.ObjectReference{ + Name: "queue", }, }, } } -// NewBusConfiguration returns new BusConfiguration instance with its config hash -func NewBusConfiguration(name, ns, image string) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// NewQueue returns new Queue instance with its config hash +func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, - Spec: enterpriseApi.BusConfigurationSpec{ - 
Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, + Spec: spec, + } +} + +// NewObjectStorage returns new ObjectStorage instance with its config hash +func NewObjectStorage(name, ns string, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: spec, } } @@ -313,9 +311,6 @@ func NewIndexerCluster(name, ns, image string) *enterpriseApi.IndexerCluster { ad.Spec = enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: *cs, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", - }, } return ad } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 5ac9b4a7a..41f4ea2aa 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,18 +1,30 @@ --- -# assert for bus configurtion custom resource to be ready +# assert for queue custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Queue metadata: - name: bus-config + name: queue spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +status: + phase: Ready + +--- +# assert for large message store custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test status: phase: Ready @@ -49,20 +61,23 @@ metadata: name: indexer spec: replicas: 3 - busConfigurationRef: - name: bus-config + queueRef: + name: queue status: phase: Ready - busConfiguration: - type: sqs_smartbus + queue: + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test - + dlq: sqs-dlq-test + objectStorage: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + --- # check for stateful set and replicas as configured apiVersion: apps/v1 @@ -87,19 +102,22 @@ metadata: name: ingestor spec: replicas: 3 - busConfigurationRef: - name: bus-config + queueRef: + name: queue status: phase: Ready - busConfiguration: - type: sqs_smartbus + queue: + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test + objectStorage: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test --- # check for stateful set and replicas as configured diff 
--git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index daa1ab4ab..00ff26a56 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -6,19 +6,22 @@ metadata: name: ingestor spec: replicas: 4 - busConfigurationRef: - name: bus-config + queueRef: + name: queue status: phase: Ready - busConfiguration: - type: sqs_smartbus + queue: + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test + objectStorage: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test --- # check for stateful sets and replicas updated diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 6e87733cc..d05cb5bcf 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -5,24 +5,32 @@ splunk-operator: persistentVolumeClaim: storageClassName: gp2 -busConfiguration: +queue: enabled: true - name: bus-config - type: sqs_smartbus + name: queue + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test + +objectStorage: + enabled: true + name: os + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test ingestorCluster: enabled: true name: ingestor replicaCount: 3 - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os clusterManager: enabled: true @@ -35,5 +43,7 @@ indexerCluster: replicaCount: 3 clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os diff --git a/pkg/splunk/enterprise/busconfiguration.go b/pkg/splunk/enterprise/busconfiguration.go deleted file mode 100644 index 43fd35f68..000000000 --- a/pkg/splunk/enterprise/busconfiguration.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package enterprise - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" - splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// ApplyBusConfiguration reconciles the state of an IngestorCluster custom resource -func ApplyBusConfiguration(ctx context.Context, client client.Client, cr *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - var err error - - // Unless modified, reconcile for this object will be requeued after 5 seconds - result := reconcile.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, - } - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("ApplyBusConfiguration") - - if cr.Status.ResourceRevMap == nil { - cr.Status.ResourceRevMap = make(map[string]string) - } - - eventPublisher, _ := newK8EventPublisher(client, cr) - ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - - cr.Kind = "BusConfiguration" - - // Initialize phase - cr.Status.Phase = enterpriseApi.PhaseError - - // Update the CR Status - defer updateCRStatus(ctx, client, cr, &err) - - // Validate and updates defaults for CR - err = validateBusConfigurationSpec(ctx, client, cr) - if err != nil { - eventPublisher.Warning(ctx, "validateBusConfigurationSpec", fmt.Sprintf("validate bus configuration spec failed %s", err.Error())) - scopedLog.Error(err, "Failed to validate bus configuration spec") - return result, err - } - - // Check if deletion has been requested - if cr.ObjectMeta.DeletionTimestamp != nil { - terminating, err := splctrl.CheckForDeletion(ctx, cr, client) - if terminating && err != nil { - cr.Status.Phase = enterpriseApi.PhaseTerminating - } else { - result.Requeue = false - } - return result, err - } - - cr.Status.Phase = enterpriseApi.PhaseReady - - // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. - // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
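The requeue behavior the two comments above describe is standard controller-runtime semantics, and presumably the same pattern the new ApplyQueue path relies on; as a minimal, hedged sketch (function names are illustrative only, not taken from the repository):

    import (
        "time"

        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // requeueIn asks the controller to reconcile again after d. A non-zero
    // RequeueAfter is sufficient by itself; Requeue does not also need to be set.
    func requeueIn(d time.Duration) reconcile.Result {
        return reconcile.Result{RequeueAfter: d}
    }

    // done returns an empty Result, i.e. no further requeue once the CR is Ready.
    func done() reconcile.Result {
        return reconcile.Result{}
    }
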
- if !result.Requeue { - result.RequeueAfter = 0 - } - - return result, nil -} - -// validateBusConfigurationSpec checks validity and makes default updates to a BusConfigurationSpec and returns error if something is wrong -func validateBusConfigurationSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.BusConfiguration) error { - return validateBusConfigurationInputs(cr) -} - -func validateBusConfigurationInputs(cr *enterpriseApi.BusConfiguration) error { - // sqs_smartbus type is supported for now - if cr.Spec.Type != "sqs_smartbus" { - return errors.New("only sqs_smartbus type is supported in bus configuration") - } - - // Cannot be empty fields check - cannotBeEmptyFields := []string{} - if cr.Spec.SQS.QueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") - } - - if cr.Spec.SQS.AuthRegion == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") - } - - if cr.Spec.SQS.DeadLetterQueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") - } - - if len(cannotBeEmptyFields) > 0 { - return errors.New("bus configuration sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") - } - - // Have to start with https:// or s3:// checks - haveToStartWithHttps := []string{} - if !strings.HasPrefix(cr.Spec.SQS.Endpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStoreEndpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") - } - - if len(haveToStartWithHttps) > 0 { - return errors.New("bus configuration sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStorePath, "s3://") { - return errors.New("bus configuration sqs largeMessageStorePath must start with s3://") - } - - return nil -} diff --git a/pkg/splunk/enterprise/busconfiguration_test.go b/pkg/splunk/enterprise/busconfiguration_test.go deleted file mode 100644 index 45d19bb40..000000000 --- a/pkg/splunk/enterprise/busconfiguration_test.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2025. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package enterprise - -import ( - "context" - "os" - "path/filepath" - "testing" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func init() { - GetReadinessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) - return fileLocation - } - GetLivenessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) - return fileLocation - } - GetStartupScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) - return fileLocation - } -} - -func TestApplyBusConfiguration(t *testing.T) { - os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - - ctx := context.TODO() - - scheme := runtime.NewScheme() - _ = enterpriseApi.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - c := fake.NewClientBuilder().WithScheme(scheme).Build() - - // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - Namespace: "test", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, - } - c.Create(ctx, busConfig) - - // ApplyBusConfiguration - result, err := ApplyBusConfiguration(ctx, c, busConfig) - assert.NoError(t, err) - assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, busConfig.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, busConfig.Status.Phase) -} - -func TestValidateBusConfigurationInputs(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "othertype", - SQS: enterpriseApi.SQSSpec{}, - }, - } - - err := validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "only sqs_smartbus type is supported in bus configuration", err.Error()) - - busConfig.Spec.Type = "sqs_smartbus" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, authRegion, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.QueueName = "test-queue" - busConfig.Spec.SQS.DeadLetterQueueName = "dlq-test" - busConfig.Spec.SQS.AuthRegion = "" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs authRegion cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - 
assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) - - busConfig.Spec.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" - busConfig.Spec.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.Nil(t, err) -} diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 269753c5c..150dfdbbe 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -22,7 +22,6 @@ import ( "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" @@ -427,9 +426,9 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr return splunkClient.BundlePush(true) } - + // helper function to get the list of ClusterManager types in the current namespace -func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (int, error) { +func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (int, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getClusterManagerList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 74b1b0a91..60b4d5a9a 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -37,7 +37,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -78,7 +77,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -245,35 +244,67 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.QueueRef.Namespace != 
"" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &busConfig) + }, &queue) if err != nil { return result, err } } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + } + } + + // Large Message Store + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.ObjectStorageRef.Name, + Namespace: ns, + }, &os) + if err != nil { + return result, err + } + } + + // Can not override original large message store spec due to comparison in the later code + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + } + } + + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Queue = &queue.Spec } } @@ -366,7 +397,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -536,35 +567,67 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = 
cr.Spec.QueueRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &busConfig) + }, &queue) if err != nil { return result, err } } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + } + } + + // Large Message Store + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.ObjectStorageRef.Name, + Namespace: ns, + }, &queue) + if err != nil { + return result, err + } + } + + // Can not override original queue spec due to comparison in the later code + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + } + } + + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Queue = &queue.Spec } } @@ -1154,7 +1217,7 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien } // helper function to get the list of IndexerCluster types in the current namespace -func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.IndexerClusterList, error) { +func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.IndexerClusterList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getIndexerClusterList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) @@ -1231,12 +1294,12 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri return extractedValue } -var newSplunkClientForBusPipeline = splclient.NewSplunkClient +var newSplunkClientForQueuePipeline = splclient.NewSplunkClient -// Checks if only PullBus or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) 
handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +// Checks if only PullQueue or Pipeline config changed, and updates the conf file if so +func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.ReadyReplicas @@ -1250,30 +1313,30 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne if err != nil { return err } - splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || + (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, afterDelete) - for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFieldsOutputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFieldsInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { 
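			// A failed update is recorded but the loop keeps going, so the
			// remaining remote_queue properties are still applied to this pod;
			// the last error seen is returned once every update has been attempted.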
updateErr = err } } @@ -1289,15 +1352,23 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne return updateErr } -// getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(busConfig *enterpriseApi.BusConfiguration, busConfigIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare bus fields - oldPB := busConfigIndexerStatus.Status.BusConfiguration - newPB := busConfig.Spec +// getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { + // Compare queue fields + oldQueue := queueIndexerStatus.Status.Queue + if oldQueue == nil { + oldQueue = &enterpriseApi.QueueSpec{} + } + newQueue := queue.Spec - // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(&oldPB, &newPB, afterDelete) + oldOS := queueIndexerStatus.Status.ObjectStorage + if oldOS == nil { + oldOS = &enterpriseApi.ObjectStorageSpec{} + } + newOS := os.Spec + // Push all queue fields + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldQueue, &newQueue, oldOS, &newOS, afterDelete) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1315,34 +1386,43 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (inputs, outputs [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", newBus.Type}) +func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (inputs, outputs [][]string) { + queueProvider := "" + if newQueue.Provider == "sqs" { + queueProvider = "sqs_smartbus" + } + osProvider := "" + if newOS.Provider == "s3" { + osProvider = "sqs_smartbus" + } + + if oldQueue.Provider != newQueue.Provider || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", queueProvider}) } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + if newQueue.SQS.AuthRegion != "" &&(oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), 
newBus.SQS.LargeMessageStoreEndpoint}) + if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + if oldOS.S3.Path != newOS.S3.Path || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) } inputs = append(inputs, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, ) outputs = inputs outputs = append(outputs, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, ) return inputs, outputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index e541fc4f6..a74ab4acd 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1344,23 +1344,21 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -1371,8 +1369,8 @@ func TestGetIndexerStatefulSet(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -2047,62 +2045,82 @@ func TestImageUpdatedTo9(t *testing.T) { } } -func TestGetChangedBusFieldsForIndexer(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ +func 
TestGetChangedQueueFieldsForIndexer(t *testing.T) { + provider := "sqs_smartbus" + + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } newCR := &enterpriseApi.IndexerCluster{ Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, false) - assert.Equal(t, 8, len(busChangedFieldsInputs)) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, false) + assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - }, busChangedFieldsInputs) - - assert.Equal(t, 10, len(busChangedFieldsOutputs)) + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + }, queueChangedFieldsInputs) + + assert.Equal(t, 10, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", 
busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - }, busChangedFieldsOutputs) + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + }, queueChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -2114,26 +2132,44 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePullBusChange(t *testing.T) { +func TestHandlePullQueueChange(t *testing.T) { // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } @@ -2147,12 +2183,18 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: 
queue.Name, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, + Queue: &enterpriseApi.QueueSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -2209,7 +2251,8 @@ func TestHandlePullBusChange(t *testing.T) { // Mock pods c := spltest.NewMockClient() ctx := context.TODO() - c.Create(ctx, &busConfig) + c.Create(ctx, &queue) + c.Create(ctx, &os) c.Create(ctx, newCR) c.Create(ctx, pod0) c.Create(ctx, pod1) @@ -2217,7 +2260,7 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err := mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // Mock secret @@ -2226,43 +2269,43 @@ func TestHandlePullBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, } propertyKVListOutputs := propertyKVList - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}) - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote 
queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // default-mode.conf @@ -2288,9 +2331,9 @@ func TestHandlePullBusChange(t *testing.T) { } } - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.Nil(t, err) } @@ -2308,7 +2351,7 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -2316,18 +2359,18 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { - newSplunkClientForBusPipeline = func(uri, user, pass string) *splclient.SplunkClient { +func newTestPullQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { + newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -2336,11 +2379,11 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inde } } return &indexerClusterPodManager{ - newSplunkClient: newSplunkClientForBusPipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } -func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { +func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := 
context.TODO() @@ -2352,28 +2395,26 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, &busConfig) + c.Create(ctx, &queue) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2395,9 +2436,9 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { }, Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ @@ -2511,14 +2552,14 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} base := "https://splunk-test-indexer-0.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs" - queue := "remote_queue:test-queue" + q := "remote_queue:test-queue" - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, q), ""), 200, "", nil) // inputs.conf - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, q), ""), 200, "", nil) // default-mode.conf pipelineFields := []string{ diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 4f96f05bc..0fc94487b 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -73,7 +73,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -210,34 +210,66 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to 
requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(ctx, types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &busConfig) + }, &queue) if err != nil { return result, err } } - // If bus config is updated - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Work on a copy so the original queue spec stays unmodified for the status comparison below + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + } + } + + // Large Message Store + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace + } + err = client.Get(ctx, types.NamespacedName{ + Name: cr.Spec.ObjectStorageRef.Name, + Namespace: ns, + }, &os) + if err != nil { + return result, err + } + } + + // Work on a copy so the original object storage spec stays unmodified + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } + + // If queue is updated + if !reflect.DeepEqual(cr.Status.Queue, &queue.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busConfig, client) + err = mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Queue = &queue.Spec } // Upgrade fron automated MC to MC CRD @@ -310,10 +342,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only Bus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +// Checks if only Queue or Pipeline config changed, and updates the conf file if so +func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s client.Client) error { reqLogger := log.FromContext(ctx) - scopedLog :=
reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.Replicas @@ -330,18 +362,18 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || + (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, afterDelete) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, afterDelete) - for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFields { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -357,13 +389,21 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n return updateErr } -// getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(busConfig *enterpriseApi.BusConfiguration, busConfigIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { - oldPB := &busConfigIngestorStatus.Status.BusConfiguration - newPB := &busConfig.Spec +// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { + oldQueue := queueIngestorStatus.Status.Queue + if oldQueue == nil { + oldQueue = &enterpriseApi.QueueSpec{} + } + newQueue := &queue.Spec - // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, afterDelete) + oldOS := queueIngestorStatus.Status.ObjectStorage + if oldOS == nil { + oldOS = &enterpriseApi.ObjectStorageSpec{} + } + newOS := &os.Spec + // Push changed queue fields + queueChangedFields = pushQueueChanged(oldQueue, newQueue, oldOS, newOS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -402,31 +442,40 @@ func 
pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (output [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - output = append(output, []string{"remote_queue.type", newBus.Type}) +func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (output [][]string) { + queueProvider := "" + if newQueue.Provider == "sqs" { + queueProvider = "sqs_smartbus" + } + osProvider := "" + if newOS.Provider == "s3" { + osProvider = "sqs_smartbus" + } + + if oldQueue.Provider != newQueue.Provider || afterDelete { + output = append(output, []string{"remote_queue.type", queueProvider}) } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + if newQueue.SQS.AuthRegion != "" && (oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) + if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + if oldOS.S3.Path != newOS.S3.Path || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) } output = append(output, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), 
"5s"}) return output } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index bee3df4d6..fac91bbbe 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -63,28 +63,47 @@ func TestApplyIngestorCluster(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, busConfig) + c.Create(ctx, queue) + + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, &os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -100,9 +119,13 @@ func TestApplyIngestorCluster(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } @@ -261,19 +284,19 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + 
{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, busConfig, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, queue, cr.Status.ReadyReplicas, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -310,23 +333,21 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -341,8 +362,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -395,52 +416,72 @@ func TestGetIngestorStatefulSet(t *testing.T) { 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivileg
eEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedBusFieldsForIngestor(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ +func TestGetChangedQueueFieldsForIngestor(t *testing.T) { + provider := "sqs_smartbus" + + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } newCR := &enterpriseApi.IngestorCluster{ Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, false) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, false) - 
assert.Equal(t, 10, len(busChangedFields)) + assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, - }, busChangedFields) + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, + }, queueChangedFields) assert.Equal(t, 6, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -453,25 +494,42 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePushBusChange(t *testing.T) { +func TestHandlePushQueueChange(t *testing.T) { // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } @@ -485,13 +543,18 @@ func TestHandlePushBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: 
corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{ Replicas: 3, ReadyReplicas: 3, + Queue: &enterpriseApi.QueueSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -555,7 +618,7 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err := mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // Mock secret @@ -564,31 +627,31 @@ func TestHandlePushBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &queue, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // default-mode.conf @@ -615,13 +678,13 @@ func TestHandlePushBusChange(t *testing.T) { } } - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = 
mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, busConfig *enterpriseApi.BusConfiguration, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.Queue, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -629,18 +692,18 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushBusPipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForPushQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -649,6 +712,6 @@ func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inge } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushBusPipeline, + newSplunkClient: newSplunkClientForPushQueuePipeline, } } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 64de4a2de..77c58c328 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -33,7 +33,6 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -207,7 +206,7 @@ func getMonitoringConsoleStatefulSet(ctx context.Context, client splcommon.Contr } // helper function to get the list of MonitoringConsole types in the current namespace -func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.MonitoringConsoleList, error) { +func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.MonitoringConsoleList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getMonitoringConsoleList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/objectstorage.go 
b/pkg/splunk/enterprise/objectstorage.go new file mode 100644 index 000000000..4db3dcaee --- /dev/null +++ b/pkg/splunk/enterprise/objectstorage.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyObjectStorage reconciles the state of an ObjectStorage custom resource +func ApplyObjectStorage(ctx context.Context, client client.Client, cr *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "ObjectStorage" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/objectstorage_test.go b/pkg/splunk/enterprise/objectstorage_test.go new file mode 100644 index 000000000..a3511af69 --- /dev/null +++ b/pkg/splunk/enterprise/objectstorage_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package enterprise + +import ( + "context" + "os" + "path/filepath" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + GetReadinessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) + return fileLocation + } + GetLivenessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) + return fileLocation + } + GetStartupScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) + return fileLocation + } +} + +func TestApplyObjectStorage(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + os := &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, os) + + // ApplyObjectStorage + result, err := ApplyObjectStorage(ctx, c, os) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, os.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, os.Status.Phase) +} diff --git a/pkg/splunk/enterprise/queue.go b/pkg/splunk/enterprise/queue.go new file mode 100644 index 000000000..1f36f6bad --- /dev/null +++ b/pkg/splunk/enterprise/queue.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyQueue reconciles the state of a Queue custom resource +func ApplyQueue(ctx context.Context, client client.Client, cr *enterpriseApi.Queue) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "Queue" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/queue_test.go b/pkg/splunk/enterprise/queue_test.go new file mode 100644 index 000000000..767d33e83 --- /dev/null +++ b/pkg/splunk/enterprise/queue_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package enterprise + +import ( + "context" + "os" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestApplyQueue(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + queue := &enterpriseApi.Queue{ + TypeMeta: metav1.TypeMeta{ + Kind: "Queue", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: "test", + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, queue) + + // ApplyQueue + result, err := ApplyQueue(ctx, c, queue) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, queue.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, queue.Status.Phase) +} diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 6ebd3df34..fe96430e4 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,8 +63,11 @@ const ( // SplunkIngestor may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" - // SplunkBusConfiguration is the bus configuration instance - SplunkBusConfiguration InstanceType = "busconfiguration" + // SplunkQueue is the queue instance + SplunkQueue InstanceType = "queue" + + // SplunkObjectStorage is the large message store instance + SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -294,8 +297,10 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() - case "BusConfiguration": - return SplunkBusConfiguration.ToString() + case "Queue": + return SplunkQueue.ToString() + case "ObjectStorage": + return SplunkObjectStorage.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go index 5d50e8cec..71fc017da 100644 --- a/pkg/splunk/enterprise/upgrade.go +++ b/pkg/splunk/enterprise/upgrade.go @@ -10,7 +10,6 @@ import ( appsv1 "k8s.io/api/apps/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - rclient "sigs.k8s.io/controller-runtime/pkg/client" runtime "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -161,8 +160,8 @@ IndexerCluster: } // check if cluster is multisite if clusterInfo.MultiSite == "true" { - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } indexerList, err := getIndexerClusterList(ctx, c, cr, opts) if err != nil { @@ -220,8 +219,8 @@ SearchHeadCluster: // check if a search head cluster exists with the same 
ClusterManager instance attached searchHeadClusterInstance := enterpriseApi.SearchHeadCluster{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } searchHeadList, err := getSearchHeadClusterList(ctx, c, cr, opts) if err != nil { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 38853aab0..afafa6ede 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2291,19 +2291,33 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil - case "BusConfiguration": - latestBusCR := &enterpriseApi.BusConfiguration{} - err = client.Get(ctx, namespacedName, latestBusCR) + case "Queue": + latestQueueCR := &enterpriseApi.Queue{} + err = client.Get(ctx, namespacedName, latestQueueCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.BusConfiguration).Status.Message = "" + origCR.(*enterpriseApi.Queue).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.BusConfiguration).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.Queue).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.BusConfiguration).Status.DeepCopyInto(&latestBusCR.Status) - return latestBusCR, nil + origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status) + return latestQueueCR, nil + + case "ObjectStorage": + latestOsCR := &enterpriseApi.ObjectStorage{} + err = client.Get(ctx, namespacedName, latestOsCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.ObjectStorage).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.ObjectStorage).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.ObjectStorage).Status.DeepCopyInto(&latestOsCR.Status) + return latestOsCR, nil case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} @@ -2533,7 +2547,7 @@ func loadFixture(t *testing.T, filename string) string { if err != nil { t.Fatalf("Failed to load fixture %s: %v", filename, err) } - + // Compact the JSON to match the output from json.Marshal var compactJSON bytes.Buffer if err := json.Compact(&compactJSON, data); err != nil { diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go index ba0162ffa..2d150f5ac 100644 --- a/test/appframework_aws/c3/appframework_aws_test.go +++ b/test/appframework_aws/c3/appframework_aws_test.go @@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go index afc7abae6..904433195 100644 --- a/test/appframework_aws/c3/manager_appframework_test.go +++ b/test/appframework_aws/c3/manager_appframework_test.go @@ -355,7 +355,7 @@ var _ = Describe("c3appfw 
test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3324,7 +3324,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go index 0622700a4..c7fea6ff3 100644 --- a/test/appframework_az/c3/appframework_azure_test.go +++ b/test/appframework_az/c3/appframework_azure_test.go @@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go index 2a0af0b3b..4412efe43 100644 --- a/test/appframework_az/c3/manager_appframework_azure_test.go +++ b/test/appframework_az/c3/manager_appframework_azure_test.go @@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go index 02ad17cfb..66c553e47 100644 --- a/test/appframework_gcp/c3/manager_appframework_test.go +++ b/test/appframework_gcp/c3/manager_appframework_test.go @@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, 
cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index c040802f8..86231df14 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -39,15 +39,20 @@ var ( testenvInstance *testenv.TestEnv testSuiteName = "indingsep-" + testenv.RandomDNSName(3) - bus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + queue = enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue", + }, + } + objectStorage = enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" @@ -80,15 +85,13 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateBus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + updateQueue = enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket-updated/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue-updated", + Name: "test-queue-updated", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue-updated", }, } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 8bccddb47..41beae4bc 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -79,14 +79,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") 
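The suite fixtures above show how the old single bus fixture is split in two: the SQS settings (queue name, region, endpoint, dead-letter queue) move to QueueSpec, while the large-message store endpoint and path move to ObjectStorageSpec. A sketch of building an equivalent pair programmatically, using the field names from this diff; the helper name and its parameters are illustrative:

    package example

    import enterpriseApi "github.com/splunk/splunk-operator/api/v4"

    // newSmartbusFixtures builds a matched Queue/ObjectStorage pair equivalent to the
    // suite-level fixtures: SQS settings on the Queue, S3 settings on the ObjectStorage.
    func newSmartbusFixtures(region, queueName, dlqName, bucketPath string) (enterpriseApi.QueueSpec, enterpriseApi.ObjectStorageSpec) {
    	queueSpec := enterpriseApi.QueueSpec{
    		Provider: "sqs",
    		SQS: enterpriseApi.SQSSpec{
    			Name:       queueName,
    			AuthRegion: region,
    			Endpoint:   "https://sqs." + region + ".amazonaws.com",
    			DLQ:        dlqName,
    		},
    	}
    	osSpec := enterpriseApi.ObjectStorageSpec{
    		Provider: "s3",
    		S3: enterpriseApi.S3Spec{
    			Endpoint: "https://s3." + region + ".amazonaws.com",
    			Path:     bucketPath, // e.g. "s3://test-bucket/smartbus-test"
    		},
    	}
    	return queueSpec, osSpec
    }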
testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") + + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -96,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -125,12 +130,19 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, ingest) Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) - // Delete the Bus Configuration - busConfiguration := &enterpriseApi.BusConfiguration{} - err = deployment.GetInstance(ctx, "bus-config", busConfiguration) - Expect(err).To(Succeed(), "Unable to get Bus Configuration instance", "Bus Configuration Name", busConfiguration) - err = deployment.DeleteCR(ctx, busConfiguration) - Expect(err).To(Succeed(), "Unable to delete Bus Configuration", "Bus Configuration Name", busConfiguration) + // Delete the Queue + queue := &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", queue) + err = deployment.DeleteCR(ctx, queue) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) + + // Delete the ObjectStorage + objStorage = &enterpriseApi.ObjectStorage{} + err = deployment.GetInstance(ctx, "os", objStorage) + Expect(err).To(Succeed(), "Unable to get ObjectStorage instance", "ObjectStorage Name", objStorage) + err = deployment.DeleteCR(ctx, objStorage) + Expect(err).To(Succeed(), "Unable to delete ObjectStorage", "ObjectStorage Name", objStorage) }) }) @@ -140,10 +152,15 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Queue + 
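Note the teardown order in the spec above: the Indexer and Ingestor clusters are deleted first, and only then the Queue and ObjectStorage CRs they reference. A condensed sketch of that ordering, assuming the testenv Deployment helpers used in this suite; the function name and CR names are illustrative:

    package example

    import (
    	"context"

    	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
    	"github.com/splunk/splunk-operator/test/testenv"
    )

    // teardownSmartbus deletes the consumers of the Queue and ObjectStorage CRs before the
    // CRs themselves, matching the ordering in the spec above.
    func teardownSmartbus(ctx context.Context, d *testenv.Deployment, idxc *enterpriseApi.IndexerCluster, ingest *enterpriseApi.IngestorCluster) error {
    	if err := d.DeleteCR(ctx, idxc); err != nil {
    		return err
    	}
    	if err := d.DeleteCR(ctx, ingest); err != nil {
    		return err
    	}

    	queue := &enterpriseApi.Queue{}
    	if err := d.GetInstance(ctx, "queue", queue); err != nil {
    		return err
    	}
    	if err := d.DeleteCR(ctx, queue); err != nil {
    		return err
    	}

    	objStorage := &enterpriseApi.ObjectStorage{}
    	if err := d.GetInstance(ctx, "os", objStorage); err != nil {
    		return err
    	}
    	return d.DeleteCR(ctx, objStorage)
    }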
testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") + + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Upload apps to S3 testcaseEnvInst.Log.Info("Upload apps to S3") @@ -188,9 +205,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - BusConfigurationRef: v1.ObjectReference{Name: bc.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + QueueRef: v1.ObjectReference{Name: q.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -238,14 +256,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") + + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -255,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -278,7 +301,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(bus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.Queue).To(Equal(queue), "Ingestor queue status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -288,7 +311,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(bus), "Indexer bus configuration status is not the same as provided 
as input") + Expect(index.Status.Queue).To(Equal(queue), "Indexer queue status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -340,14 +363,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") + + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -357,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -372,17 +400,17 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Bus Configuration CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Bus Configuration CR with latest config") - bus := &enterpriseApi.BusConfiguration{} - err = deployment.GetInstance(ctx, bc.Name, bus) - Expect(err).To(Succeed(), "Failed to get instance of Bus Configuration") + // Get instance of current Queue CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Queue CR with latest config") + queue := &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, q.Name, queue) + Expect(err).To(Succeed(), "Failed to get instance of Queue") - // Update instance of BusConfiguration CR with new bus configuration - testcaseEnvInst.Log.Info("Update instance of BusConfiguration CR with new bus configuration") - bus.Spec = updateBus - err = deployment.UpdateCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration with updated CR") + // Update instance of Queue CR with new queue + testcaseEnvInst.Log.Info("Update instance of Queue CR with new queue") + queue.Spec = updateQueue + err = deployment.UpdateCR(ctx, queue) + Expect(err).To(Succeed(), "Unable to deploy Queue with updated CR") // Ensure that Ingestor Cluster has not been restarted 
testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") @@ -400,7 +428,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(updateBus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.Queue).To(Equal(updateQueue), "Ingestor queue status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -410,7 +438,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(updateBus), "Indexer bus configuration status is not the same as provided as input") + Expect(index.Status.Queue).To(Equal(updateQueue), "Indexer queue status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 2e312c652..781e5b6f0 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busConfig, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, busConfig, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,20 +460,36 @@ func (d *Deployment) DeployIngestorCluster(ctx 
context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } -// DeployBusConfiguration deploys the bus configuration -func (d *Deployment) DeployBusConfiguration(ctx context.Context, name string, busConfig enterpriseApi.BusConfigurationSpec) (*enterpriseApi.BusConfiguration, error) { - d.testenv.Log.Info("Deploying bus configuration", "name", name) +// DeployQueue deploys the queue +func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpriseApi.QueueSpec) (*enterpriseApi.Queue, error) { + d.testenv.Log.Info("Deploying queue", "name", name) - busCfg := newBusConfiguration(name, d.testenv.namespace, busConfig) - pdata, _ := json.Marshal(busCfg) + queueCfg := newQueue(name, d.testenv.namespace, queue) + pdata, _ := json.Marshal(queueCfg) - d.testenv.Log.Info("bus configuration spec", "cr", string(pdata)) - deployed, err := d.deployCR(ctx, name, busCfg) + d.testenv.Log.Info("queue spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, queueCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.BusConfiguration), err + return deployed.(*enterpriseApi.Queue), err +} + +// DeployObjectStorage deploys the object storage +func (d *Deployment) DeployObjectStorage(ctx context.Context, name string, objStorage enterpriseApi.ObjectStorageSpec) (*enterpriseApi.ObjectStorage, error) { + d.testenv.Log.Info("Deploying object storage", "name", name) + + objStorageCfg := newObjectStorage(name, d.testenv.namespace, objStorage) + pdata, _ := json.Marshal(objStorageCfg) + + d.testenv.Log.Info("object storage spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, objStorageCfg) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.ObjectStorage), err } // DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration @@ -632,13 +648,22 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IngestorCluster) current.Spec = ucr.Spec cobject = current - case "BusConfiguration": - current := &enterpriseApi.BusConfiguration{} + case "Queue": + current := &enterpriseApi.Queue{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.Queue) + current.Spec = ucr.Spec + cobject = current + case "ObjectStorage": + current := &enterpriseApi.ObjectStorage{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.BusConfiguration) + ucr := cr.(*enterpriseApi.ObjectStorage) current.Spec = ucr.Spec cobject = current case "ClusterMaster": @@ -740,7 +765,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx context.Context, name string, i } // Deploy the indexer cluster - _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -798,7 +823,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, 
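Taken together, DeployQueue and DeployObjectStorage let a test stand up the messaging dependencies first and then pass their names to the cluster deployments as object references, which is what the updated DeployIngestorCluster and DeployIndexerCluster signatures expect. A sketch of that wiring under the new signatures; the wrapper name, CR names, and replica counts are illustrative:

    package example

    import (
    	"context"

    	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
    	"github.com/splunk/splunk-operator/test/testenv"
    	corev1 "k8s.io/api/core/v1"
    )

    // deploySmartbusClusters deploys the Queue and ObjectStorage CRs, then references them
    // from an ingestor cluster and an indexer cluster.
    func deploySmartbusClusters(ctx context.Context, d *testenv.Deployment, name string,
    	queueSpec enterpriseApi.QueueSpec, osSpec enterpriseApi.ObjectStorageSpec, sa string) error {

    	q, err := d.DeployQueue(ctx, "queue", queueSpec)
    	if err != nil {
    		return err
    	}
    	objStorage, err := d.DeployObjectStorage(ctx, "os", osSpec)
    	if err != nil {
    		return err
    	}

    	qRef := corev1.ObjectReference{Name: q.Name}
    	osRef := corev1.ObjectReference{Name: objStorage.Name}

    	if _, err := d.DeployIngestorCluster(ctx, name+"-ingest", 3, qRef, osRef, sa); err != nil {
    		return err
    	}
    	// Indexer clusters take the same pair of references, after the cluster manager ref.
    	_, err = d.DeployIndexerCluster(ctx, name+"-idxc", "", 3, name, "", qRef, osRef, sa)
    	return err
    }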
indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -870,7 +895,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -931,7 +956,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1067,7 +1092,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context. multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1122,7 +1147,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1227,7 +1252,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1305,7 +1330,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1405,7 +1430,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, 
licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1509,7 +1534,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1590,7 +1615,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1662,7 +1687,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1756,7 +1781,7 @@ func (d *Deployment) DeployMultisiteClusterWithMonitoringConsole(ctx context.Con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1856,7 +1881,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } diff --git a/test/testenv/util.go b/test/testenv/util.go index b779ab3c3..d9c6d5807 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, 
clusterManagerRef) @@ -396,8 +396,9 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + QueueRef: queue, + ObjectStorageRef: os, }, } @@ -405,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -425,24 +426,38 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, busCo Image: splunkImage, }, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + QueueRef: queue, + ObjectStorageRef: os, }, } } -// newBusConfiguration creates and initializes the CR for BusConfiguration Kind -func newBusConfiguration(name, ns string, busConfig enterpriseApi.BusConfigurationSpec) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// newQueue creates and initializes the CR for Queue Kind +func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: busConfig, + Spec: queue, + } +} +// newObjectStorage creates and initializes the CR for ObjectStorage Kind +func newObjectStorage(name, ns string, objStorage enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: objStorage, } }
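The newQueue and newObjectStorage constructors only populate Kind, name/namespace, and the spec, and deployCR takes care of submitting the object. For comparison, a sketch of creating an equivalent ObjectStorage directly with a controller-runtime client, assuming the v4 types are registered in the client's scheme; the function name is illustrative:

    package example

    import (
    	"context"

    	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // createObjectStorage builds an ObjectStorage CR the same way newObjectStorage does and
    // submits it to the API server.
    func createObjectStorage(ctx context.Context, c client.Client, name, ns string, spec enterpriseApi.ObjectStorageSpec) error {
    	objStorage := &enterpriseApi.ObjectStorage{
    		TypeMeta: metav1.TypeMeta{Kind: "ObjectStorage"},
    		ObjectMeta: metav1.ObjectMeta{
    			Name:      name,
    			Namespace: ns,
    		},
    		Spec: spec,
    	}
    	return c.Create(ctx, objStorage)
    }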