diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml index 8ccaf2e65..f3a9e38f5 100644 --- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml +++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml index bdd7fe563..9003cb439 100644 --- a/.github/workflows/arm-AL2023-int-test-workflow.yml +++ b/.github/workflows/arm-AL2023-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml index d108005e7..0f473836e 100644 --- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml +++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml index 681491b61..1718b316b 100644 --- a/.github/workflows/arm-RHEL-int-test-workflow.yml +++ b/.github/workflows/arm-RHEL-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml index 356812323..8e0d6aa3d 100644 --- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml +++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml index ebbea6176..3ddeaa82d 100644 --- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml +++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ 
secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index 6c79f58a9..7e8af7d45 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -190,6 +190,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml index c47d72ab7..bb99d1742 100644 --- a/.github/workflows/distroless-build-test-push-workflow.yml +++ b/.github/workflows/distroless-build-test-push-workflow.yml @@ -191,6 +191,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-int-test-workflow.yml b/.github/workflows/distroless-int-test-workflow.yml index da4719183..a73d194c5 100644 --- a/.github/workflows/distroless-int-test-workflow.yml +++ b/.github/workflows/distroless-int-test-workflow.yml @@ -88,6 +88,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 6e83bcc63..d5e58c914 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -65,6 +65,8 @@ jobs: HELM_REPO_PATH: "../../../../helm-chart" INSTALL_OPERATOR: "true" TEST_VPC_ENDPOINT_URL: ${{ secrets.TEST_VPC_ENDPOINT_URL }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - uses: chrisdickinson/setup-yq@3d931309f27270ebbafd53f2daee773a82ea1822 - name: Checking YQ installation diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index e5b12b5dc..c09b6c305 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -84,6 +84,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index b76b3d515..c042347aa 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -45,6 +45,8 @@ jobs: PRIVATE_REGISTRY: ${{ 
secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: ${{ github.event.inputs.CLUSTER_WIDE }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml index b32dcee92..9153bd950 100644 --- a/.github/workflows/namespace-scope-int-workflow.yml +++ b/.github/workflows/namespace-scope-int-workflow.yml @@ -40,6 +40,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "false" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 4bc4c199c..41fbf3d74 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -81,6 +81,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index e74f900a7..02cf1562d 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -35,6 +35,8 @@ const ( ) // +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` @@ -121,11 +123,8 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue - Queue *QueueSpec `json:"queue,omitempty"` - - // Object Storage - ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` + // Queue and bucket access secret version + QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index f2e061284..021acd025 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -28,6 +28,8 @@ const ( IngestorClusterPausedAnnotation = "ingestorcluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IngestorClusterSpec 
defines the spec of Ingestor Cluster type IngestorClusterSpec struct { // Common Splunk spec @@ -74,11 +76,8 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue - Queue *QueueSpec `json:"queue,omitempty"` - - // Object Storage - ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` + // Queue and bucket access secret version + QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 9e95392ce..7712e81d6 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -17,7 +17,6 @@ limitations under the License. package v4 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -28,6 +27,8 @@ const ( ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.s3 == oldSelf.s3",message="s3 is immutable once created" // +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" // ObjectStorageSpec defines the desired state of ObjectStorage type ObjectStorageSpec struct { @@ -55,7 +56,7 @@ type S3Spec struct { // ObjectStorageStatus defines the observed state of ObjectStorage. type ObjectStorageStatus struct { - // Phase of the large message store + // Phase of the object storage Phase Phase `json:"phase"` // Resource revision tracker @@ -107,32 +108,3 @@ type ObjectStorageList struct { func init() { SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) } - -// NewEvent creates a new event associated with the object and ready -// to be published to Kubernetes API -func (os *ObjectStorage) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: os.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "ObjectStorage", - Namespace: os.Namespace, - Name: os.Name, - UID: os.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-object-storage-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/object-storage-controller", - } -} diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index 9828f7301..2139f43dd 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -17,7 +17,6 @@ limitations under the License. 
package v4 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -28,6 +27,11 @@ const ( QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.name == oldSelf.sqs.name",message="sqs.name is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.authRegion == oldSelf.sqs.authRegion",message="sqs.authRegion is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.dlq == oldSelf.sqs.dlq",message="sqs.dlq is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.endpoint == oldSelf.sqs.endpoint",message="sqs.endpoint is immutable once created" // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" // QueueSpec defines the desired state of Queue type QueueSpec struct { @@ -61,6 +65,10 @@ type SQSSpec struct { // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` + + // +optional + // List of remote storage volumes + VolList []VolumeSpec `json:"volumes,omitempty"` } // QueueStatus defines the observed state of Queue @@ -117,32 +125,3 @@ type QueueList struct { func init() { SchemeBuilder.Register(&Queue{}, &QueueList{}) } - -// NewEvent creates a new event associated with the object and ready -// to be published to Kubernetes API -func (os *Queue) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: os.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "Queue", - Namespace: os.Namespace, - Name: os.Name, - UID: os.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-queue-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/queue-controller", - } -} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index dd9b2f347..c7759fa58 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -545,16 +545,6 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - if in.Queue != nil { - in, out := &in.Queue, &out.Queue - *out = new(QueueSpec) - **out = **in - } - if in.ObjectStorage != nil { - in, out := &in.ObjectStorage, &out.ObjectStorage - *out = new(ObjectStorageSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -648,16 +638,6 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - if in.Queue != nil { - in, out := &in.Queue, &out.Queue - *out = new(QueueSpec) - **out = **in - } - if in.ObjectStorage != nil { - in, out := &in.ObjectStorage, &out.ObjectStorage - *out = new(ObjectStorageSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. 
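For reference, the `VolList` field added to `SQSSpec` in `api/v4/queue_types.go` above surfaces as `spec.sqs.volumes` on the Queue CR. Below is a minimal sketch of how the field is wired to a credentials Secret, assuming the field names from this change's CRD schema and docs; per the docs table later in this diff, the Secret must carry the keys `s3_access_key` and `s3_secret_key`, and all object names here are illustrative.

```
# Credentials Secret referenced by the Queue below. The docs in this change
# state it must contain the keys s3_access_key and s3_secret_key.
apiVersion: v1
kind: Secret
metadata:
  name: s3-secret
  namespace: default
type: Opaque
stringData:
  s3_access_key: REPLACE_WITH_ACCESS_KEY_ID
  s3_secret_key: REPLACE_WITH_SECRET_ACCESS_KEY
---
# Queue CR mounting those credentials through the new spec.sqs.volumes field.
apiVersion: enterprise.splunk.com/v4
kind: Queue
metadata:
  name: queue
  namespace: default
spec:
  provider: sqs
  sqs:
    name: sqs-test
    authRegion: us-west-2
    endpoint: https://sqs.us-west-2.amazonaws.com
    dlq: sqs-dlq-test
    volumes:
    - name: s3-sqs-volume
      secretRef: s3-secret
```

Since the new CEL rules above make every other `sqs` field immutable, rotating the data in `s3-secret` is the intended day-two change: the Secret watches added to the IndexerCluster and IngestorCluster controllers later in this diff requeue any cluster whose Queue references the Secret, and the new `queueBucketAccessSecretVersion` status field records the Secret version the cluster was last reconciled against.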
@@ -1002,7 +982,7 @@ func (in *Queue) DeepCopyInto(out *Queue) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -1051,7 +1031,7 @@ func (in *QueueList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { *out = *in - out.SQS = in.SQS + in.SQS.DeepCopyInto(&out.SQS) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. @@ -1104,6 +1084,11 @@ func (in *S3Spec) DeepCopy() *S3Spec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in + if in.VolList != nil { + in, out := &in.VolList, &out.VolList + *out = make([]VolumeSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSSpec. diff --git a/cmd/main.go b/cmd/main.go index dfb9c87e1..a037f87b1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,20 +230,6 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.QueueReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Queue") - os.Exit(1) - } - if err := (&controller.ObjectStorageReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ObjectStorage") - os.Exit(1) - } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 59faab055..3ea073d7d 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8331,6 +8331,10 @@ spec: x-kubernetes-validations: - message: queueRef and objectStorageRef must both be set or both be empty rule: has(self.queueRef) == has(self.objectStorageRef) + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8383,35 +8387,6 @@ spec: namespace_scoped_secret_resource_version: description: Indicates resource version of namespace scoped secret type: string - objectStorage: - description: Object Storage - properties: - provider: - description: Provider of queue resources - enum: - - s3 - type: string - s3: - description: s3 specific inputs - properties: - endpoint: - description: S3-compatible Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - path: - description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ - type: string - required: - - path - type: object - required: - - provider - - s3 - type: object - x-kubernetes-validations: - - message: s3 must be provided when provider is s3 - rule: self.provider != 's3' || has(self.s3) peers: description: status of each indexer cluster peer 
items: @@ -8453,44 +8428,9 @@ spec: - Terminating - Error type: string - queue: - description: Queue - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - authRegion: - description: Auth Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - required: - - dlq - - name - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) + queueBucketAccessSecretVersion: + description: Queue and bucket access secret version + type: string readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 7432e96b4..703af01e6 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4306,6 +4306,11 @@ spec: - objectStorageRef - queueRef type: object + x-kubernetes-validations: + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -4594,35 +4599,6 @@ spec: message: description: Auxillary message describing CR status type: string - objectStorage: - description: Object Storage - properties: - provider: - description: Provider of queue resources - enum: - - s3 - type: string - s3: - description: s3 specific inputs - properties: - endpoint: - description: S3-compatible Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - path: - description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ - type: string - required: - - path - type: object - required: - - provider - - s3 - type: object - x-kubernetes-validations: - - message: s3 must be provided when provider is s3 - rule: self.provider != 's3' || has(self.s3) phase: description: Phase of the ingestor pods enum: @@ -4634,44 +4610,9 @@ spec: - Terminating - Error type: string - queue: - description: Queue - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - authRegion: - description: Auth Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - required: - - dlq - - name - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) + queueBucketAccessSecretVersion: + description: Queue and bucket access secret version + type: string readyReplicas: 
description: Number of ready ingestor pods format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 2fac45707..23d5b437b 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -78,6 +78,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) status: @@ -87,7 +91,7 @@ spec: description: Auxillary message describing CR status type: string phase: - description: Phase of the large message store + description: Phase of the object storage enum: - Pending - Ready diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index 2ba8d03f5..e10ee536a 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -78,6 +78,39 @@ spec: description: Name of the queue minLength: 1 type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where apps + reside. Used for aws, if provided. Not used for minio + and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3, + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' + type: string + type: object + type: array required: - dlq - name @@ -87,6 +120,16 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs.name is immutable once created + rule: self.sqs.name == oldSelf.sqs.name + - message: sqs.authRegion is immutable once created + rule: self.sqs.authRegion == oldSelf.sqs.authRegion + - message: sqs.dlq is immutable once created + rule: self.sqs.dlq == oldSelf.sqs.dlq + - message: sqs.endpoint is immutable once created + rule: self.sqs.endpoint == oldSelf.sqs.endpoint - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 157a9b123..bd85c05ca 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -404,21 +404,21 @@ spec: endpoint: https://s3.us-west-2.amazonaws.com ``` -ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of large message store (Allowed values: s3) | -| s3 | S3 | [Required if provider=s3] S3 large message store inputs | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | -S3 large message store inputs can be found in the table below. +S3 object storage inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## MonitoringConsole Resource Spec Parameters diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index bd5d97579..ab6f789c7 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -1,3 +1,9 @@ +--- +title: Index and Ingestion Separation +parent: Deploy & Configure +nav_order: 6 +--- + # Background Separation between ingestion and indexing services within Splunk Operator for Kubernetes enables the operator to independently manage the ingestion service while maintaining seamless integration with the indexing service. @@ -10,7 +16,7 @@ This separation enables: # Important Note > [!WARNING] -> **As of now, only brand new deployments are supported for Index and Ingestion Separation. No migration path is implemented, described or tested for existing deployments to move from a standard model to Index & Ingestion separation model.** +> **For customers deploying SmartBus on CMP, the Splunk Operator for Kubernetes (SOK) manages the configuration and lifecycle of the ingestor tier. The following SOK guide provides implementation details for setting up ingestion separation and integrating with existing indexers. This reference is primarily intended for CMP users leveraging SOK-managed ingestors.** # Document Variables @@ -37,8 +43,9 @@ SQS message queue inputs can be found in the table below. | region | string | [Required] Region where the queue is located | | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | +| volumes | []VolumeSpec | [Optional] List of remote storage volumes used to mount the credentials for queue and bucket access (must contain s3_access_key and s3_secret_key) | -Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +**SOK doesn't support updates to any of the Queue inputs except for volumes, which allows the referenced secrets to be changed.** ## Example ``` apiVersion: enterprise.splunk.com/v4 kind: Queue metadata: name: queue finalizers: - enterprise.splunk.com/delete-pvc spec: provider: sqs sqs: name: sqs-test region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test + volumes: + - name: s3-sqs-volume + secretRef: s3-secret ``` # ObjectStorage ObjectStorage is introduced to store large message (messages that exceed the siz ## Spec -ObjectStorage inputs can be found in the table below.
As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of large message store (Allowed values: s3) | -| s3 | S3 | [Required if provider=s3] S3 large message store inputs | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | -S3 large message store inputs can be found in the table below. +S3 object storage inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +**SOK doesn't support updates to any of the ObjectStorage inputs.** ## Example ``` @@ -102,13 +112,17 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| objectStorageRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | + +**SOK doesn't support updates to queueRef or objectStorageRef.** + +**First provisioning or scaling up the number of replicas requires an Ingestor Cluster Splunkd restart; SOK performs this restart automatically.** ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory.
Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 kind: IngestorCluster metadata: name: ingestor namespace: default spec: replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue namespace: default objectStorageRef: name: os namespace: default ``` @@ -139,13 +153,17 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| objectStorageRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | + +**SOK doesn't support updates to queueRef or objectStorageRef.** + +**First provisioning or scaling up the number of replicas requires an Indexer Cluster Splunkd restart; SOK performs this restart automatically.** ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -198,6 +216,9 @@ queue: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test + volumes: + - name: s3-sqs-volume + secretRef: s3-secret ``` ``` @@ -425,6 +446,14 @@ In the following example, the dashboard presents ingestion and indexing data in - [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + +# App Installation for Ingestor Cluster Instances + +Application installation is supported for Ingestor Cluster instances. However, as of now, applications are installed using local scope, and if an application requires a Splunk restart, the Splunk Operator has no automated way to detect this and trigger the restart. + +Therefore, to enforce a Splunk restart on each of the Ingestor Cluster pods, it is recommended to add or update annotations/labels on the IngestorCluster CR and apply the new configuration, which triggers a rolling restart of the Ingestor Cluster Splunk pods, as shown in the sketch below.
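A minimal sketch of that restart workflow, reusing the IngestorCluster example from this page; the `example.com/restart-revision` annotation key is an arbitrary placeholder with no special meaning to the operator, and any annotation or label change the controller observes should work the same way.

```
apiVersion: enterprise.splunk.com/v4
kind: IngestorCluster
metadata:
  name: ingestor
  namespace: default
  annotations:
    # Bump this value and re-apply the CR after installing an app that
    # needs a restart; the metadata change triggers a rolling restart
    # of the ingestor pods.
    example.com/restart-revision: "2"
spec:
  replicas: 3
  image: splunk/splunk:${SPLUNK_IMAGE_VERSION}
  queueRef:
    name: queue
    namespace: default
  objectStorageRef:
    name: os
    namespace: default
```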
+ +We are investigating how to make this fully automated. Ideally, updates to annotations and labels should not trigger a pod restart at all, and we are also investigating how to fix this behaviour. + # Example 1. Install CRDs and Splunk Operator for Kubernetes. @@ -703,7 +732,7 @@ Spec: Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - Large Message Store Ref: + Object Storage Ref: Name: os Namespace: default Replicas: 3 @@ -720,18 +749,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Queue: - Sqs: - Region: us-west-2 - DLQ: sqs-dlq-test - Endpoint: https://sqs.us-west-2.amazonaws.com - Name: sqs-test - Provider: sqs - Large Message Store: - S3: - Endpoint: https://s3.us-west-2.amazonaws.com - Path: s3://ingestion/smartbus-test - Provider: s3 + Queue Bucket Access Secret Version: 33744270 Message: Phase: Ready Ready Replicas: 3 diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 833f162aa..e5541e017 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -169,6 +169,7 @@ items: {{- if .namespace }} namespace: {{ .namespace }} {{- end }} + {{- end }} {{- with $.Values.indexerCluster.objectStorageRef }} objectStorageRef: name: {{ .name }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml index 7cd5bdca0..033aed904 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml @@ -1,4 +1,4 @@ -{{- if .Values.objectStorage.enabled }} +{{- if .Values.objectStorage }} {{- if .Values.objectStorage.enabled }} apiVersion: enterprise.splunk.com/v4 kind: ObjectStorage diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml index b586e45da..06a3c5dbd 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -26,8 +26,12 @@ spec: {{- if .name }} name: {{ .name | quote }} {{- end }} - {{- if .region }} - region: {{ .region | quote }} + {{- if .authRegion }} + authRegion: {{ .authRegion | quote }} + {{- end }} + {{- if .volumes }} + volumes: + {{ toYaml .volumes
| indent 4 }} {{- end }} {{- end }} {{- end }} diff --git a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml index 2b5d51ec9..a952b174c 100644 --- a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml +++ b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml @@ -222,6 +222,32 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: @@ -300,6 +326,58 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml new file mode 100644 index 000000000..d90f7673b --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com API group. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" .
}}-objectstorage-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml new file mode 100644 index 000000000..ec9358b8d --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 7efb6e1b8..4f83f5abe 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -148,6 +148,57 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider indexer clusters in the same namespace as the Secret + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). 
Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index 0d8117bd2..b5aa3d911 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -50,6 +50,10 @@ type IngestorClusterReconciler struct { // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues;objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status;objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers;objectstorages/finalizers,verbs=update + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by @@ -129,6 +133,57 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider ingestor clusters in the same namespace as the Secret + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). 
Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 38e7cbb4e..49d59e608 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -104,7 +104,7 @@ var _ = Describe("IngestorCluster Controller", func() { annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady) + UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -161,6 +161,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() @@ -177,7 +206,7 @@ var _ = Describe("IngestorCluster Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - icSpec := testutils.NewIngestorCluster("test", namespace, "image") + icSpec := testutils.NewIngestorCluster("test", namespace, "image", os, queue) Expect(c.Create(ctx, icSpec)).Should(Succeed()) annotations := make(map[string]string) @@ -269,7 +298,7 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string return ic } -func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be updated successfully") key := types.NamespacedName{ @@ -277,7 +306,7 @@ func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enter Namespace: instance.Namespace, } - icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image") + icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image", os, queue) icSpec.ResourceVersion = instance.ResourceVersion Expect(k8sClient.Update(context.Background(), icSpec)).Should(Succeed()) time.Sleep(2 * time.Second) diff --git a/internal/controller/objectstorage_controller.go b/internal/controller/objectstorage_controller.go deleted file mode 100644 index 4ae36b1a2..000000000 --- a/internal/controller/objectstorage_controller.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controller
-
-import (
-	"context"
-	"time"
-
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-
-	"github.com/pkg/errors"
-	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
-	"github.com/splunk/splunk-operator/internal/controller/common"
-	metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
-	enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
-)
-
-// ObjectStorageReconciler reconciles a ObjectStorage object
-type ObjectStorageReconciler struct {
-	client.Client
-	Scheme *runtime.Scheme
-}
-
-// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/finalizers,verbs=update
-
-// Reconcile is part of the main kubernetes reconciliation loop which aims to
-// move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the ObjectStorage object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
-//
-// For more details, check Reconcile and its Result here:
-// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile
-func (r *ObjectStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "ObjectStorage")).Inc()
-	defer recordInstrumentionData(time.Now(), req, "controller", "ObjectStorage")
-
-	reqLogger := log.FromContext(ctx)
-	reqLogger = reqLogger.WithValues("objectstorage", req.NamespacedName)
-
-	// Fetch the ObjectStorage
-	instance := &enterpriseApi.ObjectStorage{}
-	err := r.Get(ctx, req.NamespacedName, instance)
-	if err != nil {
-		if k8serrors.IsNotFound(err) {
-			// Request object not found, could have been deleted after
-			// reconcile request. Owned objects are automatically
-			// garbage collected. For additional cleanup logic use
-			// finalizers. Return and don't requeue
-			return ctrl.Result{}, nil
-		}
-		// Error reading the object - requeue the request.
-		return ctrl.Result{}, errors.Wrap(err, "could not load objectstorage data")
-	}
-
-	// If the reconciliation is paused, requeue
-	annotations := instance.GetAnnotations()
-	if annotations != nil {
-		if _, ok := annotations[enterpriseApi.ObjectStoragePausedAnnotation]; ok {
-			return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil
-		}
-	}
-
-	reqLogger.Info("start", "CR version", instance.GetResourceVersion())
-
-	result, err := ApplyObjectStorage(ctx, r.Client, instance)
-	if result.Requeue && result.RequeueAfter != 0 {
-		reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
-	}
-
-	return result, err
-}
-
-var ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
-	return enterprise.ApplyObjectStorage(ctx, client, instance)
-}
-
-// SetupWithManager sets up the controller with the Manager.
-func (r *ObjectStorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&enterpriseApi.ObjectStorage{}).
-		WithEventFilter(predicate.Or(
-			common.GenerationChangedPredicate(),
-			common.AnnotationChangedPredicate(),
-			common.LabelChangedPredicate(),
-			common.SecretChangedPredicate(),
-			common.ConfigMapChangedPredicate(),
-			common.StatefulsetChangedPredicate(),
-			common.PodChangedPredicate(),
-			common.CrdChangedPredicate(),
-		)).
-		WithOptions(controller.Options{
-			MaxConcurrentReconciles: enterpriseApi.TotalWorker,
-		}).
-		Complete(r)
-}
diff --git a/internal/controller/objectstorage_controller_test.go b/internal/controller/objectstorage_controller_test.go
deleted file mode 100644
index 6d7dec87a..000000000
--- a/internal/controller/objectstorage_controller_test.go
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
-Copyright 2025.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controller
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
-	"github.com/splunk/splunk-operator/internal/controller/testutils"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/kubernetes/scheme"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-var _ = Describe("ObjectStorage Controller", func() {
-	BeforeEach(func() {
-		time.Sleep(2 * time.Second)
-	})
-
-	AfterEach(func() {
-
-	})
-
-	Context("ObjectStorage Management", func() {
-
-		It("Get ObjectStorage custom resource should fail", func() {
-			namespace := "ns-splunk-objectstorage-1"
-			ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			_, err := GetObjectStorage("test", nsSpecs.Name)
-			Expect(err.Error()).Should(Equal("objectstorages.enterprise.splunk.com \"test\" not found"))
-			Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
-		})
-
-		It("Create ObjectStorage custom resource with annotations should pause", func() {
-			namespace := "ns-splunk-objectstorage-2"
-			annotations := make(map[string]string)
-			annotations[enterpriseApi.ObjectStoragePausedAnnotation] = ""
-			ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			spec := enterpriseApi.ObjectStorageSpec{
-				Provider: "s3",
-				S3: enterpriseApi.S3Spec{
-					Endpoint: "https://s3.us-west-2.amazonaws.com",
-					Path:     "s3://ingestion/smartbus-test",
-				},
-			}
-			CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec)
-			osSpec, _ := GetObjectStorage("test", nsSpecs.Name)
-			annotations = map[string]string{}
-			osSpec.Annotations = annotations
-			osSpec.Status.Phase = "Ready"
-			UpdateObjectStorage(osSpec, enterpriseApi.PhaseReady, spec)
-			DeleteObjectStorage("test", nsSpecs.Name)
-			Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
-		})
-
-		It("Create ObjectStorage custom resource should succeeded", func() {
-			namespace := "ns-splunk-objectstorage-3"
-			ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			annotations := make(map[string]string)
-			spec := enterpriseApi.ObjectStorageSpec{
-				Provider: "s3",
-				S3: enterpriseApi.S3Spec{
-					Endpoint: "https://s3.us-west-2.amazonaws.com",
-					Path:     "s3://ingestion/smartbus-test",
-				},
-			}
-			CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec)
-			DeleteObjectStorage("test", nsSpecs.Name)
-			Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
-		})
-
-		It("Cover Unused methods", func() {
-			namespace := "ns-splunk-objectstorage-4"
-			ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			ctx := context.TODO()
-			builder := fake.NewClientBuilder()
-			c := builder.Build()
-			instance := ObjectStorageReconciler{
-				Client: c,
-				Scheme: scheme.Scheme,
-			}
-			request := reconcile.Request{
-				NamespacedName: types.NamespacedName{
-					Name:      "test",
-					Namespace: namespace,
-				},
-			}
-			_, err := instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-
-			spec := enterpriseApi.ObjectStorageSpec{
-				Provider: "s3",
-				S3: enterpriseApi.S3Spec{
-					Endpoint: "https://s3.us-west-2.amazonaws.com",
-					Path:     "s3://ingestion/smartbus-test",
-				},
-			}
-			osSpec := testutils.NewObjectStorage("test", namespace, spec)
-			Expect(c.Create(ctx, osSpec)).Should(Succeed())
-
-			annotations := make(map[string]string)
-			annotations[enterpriseApi.ObjectStoragePausedAnnotation] = ""
-			osSpec.Annotations = annotations
-			Expect(c.Update(ctx, osSpec)).Should(Succeed())
-
-			_, err = instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-
-			annotations = map[string]string{}
-			osSpec.Annotations = annotations
-			Expect(c.Update(ctx, osSpec)).Should(Succeed())
-
-			_, err = instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-
-			osSpec.DeletionTimestamp = &metav1.Time{}
-			_, err = instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-		})
-
-	})
-})
-
-func GetObjectStorage(name string, namespace string) (*enterpriseApi.ObjectStorage, error) {
-	By("Expecting ObjectStorage custom resource to be retrieved successfully")
-
-	key := types.NamespacedName{
-		Name:      name,
-		Namespace: namespace,
-	}
-	os := &enterpriseApi.ObjectStorage{}
-
-	err := k8sClient.Get(context.Background(), key, os)
-	if err != nil {
-		return nil, err
-	}
-
-	return os, err
-}
-
-func CreateObjectStorage(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage {
-	By("Expecting ObjectStorage custom resource to be created successfully")
-	key := types.NamespacedName{
-		Name:      name,
-		Namespace: namespace,
-	}
-	osSpec := &enterpriseApi.ObjectStorage{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        name,
-			Namespace:   namespace,
-			Annotations: annotations,
-		},
-		Spec: spec,
-	}
-
-	Expect(k8sClient.Create(context.Background(), osSpec)).Should(Succeed())
-	time.Sleep(2 * time.Second)
-
-	os := &enterpriseApi.ObjectStorage{}
-	Eventually(func() bool {
-		_ = k8sClient.Get(context.Background(), key, os)
-		if status != "" {
-			fmt.Printf("status is set to %v", status)
-			os.Status.Phase = status
-			Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed())
-			time.Sleep(2 * time.Second)
-		}
-		return true
-	}, timeout, interval).Should(BeTrue())
-
-	return os
-}
-
-func UpdateObjectStorage(instance *enterpriseApi.ObjectStorage, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage {
-	By("Expecting ObjectStorage custom resource to be updated successfully")
-	key := types.NamespacedName{
-		Name:      instance.Name,
-		Namespace: instance.Namespace,
-	}
-
-	osSpec := testutils.NewObjectStorage(instance.Name, instance.Namespace, spec)
-	osSpec.ResourceVersion = instance.ResourceVersion
-	Expect(k8sClient.Update(context.Background(), osSpec)).Should(Succeed())
-	time.Sleep(2 * time.Second)
-
-	os := &enterpriseApi.ObjectStorage{}
-	Eventually(func() bool {
-		_ = k8sClient.Get(context.Background(), key, os)
-		if status != "" {
-			fmt.Printf("status is set to %v", status)
-			os.Status.Phase = status
-			Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed())
-			time.Sleep(2 * time.Second)
-		}
-		return true
-	}, timeout, interval).Should(BeTrue())
-
-	return os
-}
-
-func DeleteObjectStorage(name string, namespace string) {
-	By("Expecting ObjectStorage custom resource to be deleted successfully")
-	key := types.NamespacedName{
-		Name:      name,
-		Namespace: namespace,
-	}
-
-	Eventually(func() error {
-		os := &enterpriseApi.ObjectStorage{}
-		_ = k8sClient.Get(context.Background(), key, os)
-		err := k8sClient.Delete(context.Background(), os)
-		return err
-	}, timeout, interval).Should(Succeed())
-}
diff --git a/internal/controller/queue_controller.go b/internal/controller/queue_controller.go
deleted file mode 100644
index 6fff662b9..000000000
--- a/internal/controller/queue_controller.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright 2025.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controller
-
-import (
-	"context"
-	"time"
-
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-
-	"github.com/pkg/errors"
-	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
-	"github.com/splunk/splunk-operator/internal/controller/common"
-	metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
-	enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
-)
-
-// QueueReconciler reconciles a Queue object
-type QueueReconciler struct {
-	client.Client
-	Scheme *runtime.Scheme
-}
-
-// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers,verbs=update
-
-// Reconcile is part of the main kubernetes reconciliation loop which aims to
-// move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the Queue object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
-//
-// For more details, check Reconcile and its Result here:
-// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile
-func (r *QueueReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Queue")).Inc()
-	defer recordInstrumentionData(time.Now(), req, "controller", "Queue")
-
-	reqLogger := log.FromContext(ctx)
-	reqLogger = reqLogger.WithValues("queue", req.NamespacedName)
-
-	// Fetch the Queue
-	instance := &enterpriseApi.Queue{}
-	err := r.Get(ctx, req.NamespacedName, instance)
-	if err != nil {
-		if k8serrors.IsNotFound(err) {
-			// Request object not found, could have been deleted after
-			// reconcile request. Owned objects are automatically
-			// garbage collected. For additional cleanup logic use
-			// finalizers. Return and don't requeue
-			return ctrl.Result{}, nil
-		}
-		// Error reading the object - requeue the request.
-		return ctrl.Result{}, errors.Wrap(err, "could not load queue data")
-	}
-
-	// If the reconciliation is paused, requeue
-	annotations := instance.GetAnnotations()
-	if annotations != nil {
-		if _, ok := annotations[enterpriseApi.QueuePausedAnnotation]; ok {
-			return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil
-		}
-	}
-
-	reqLogger.Info("start", "CR version", instance.GetResourceVersion())
-
-	result, err := ApplyQueue(ctx, r.Client, instance)
-	if result.Requeue && result.RequeueAfter != 0 {
-		reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
-	}
-
-	return result, err
-}
-
-var ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) {
-	return enterprise.ApplyQueue(ctx, client, instance)
-}
-
-// SetupWithManager sets up the controller with the Manager.
-func (r *QueueReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&enterpriseApi.Queue{}).
-		WithEventFilter(predicate.Or(
-			common.GenerationChangedPredicate(),
-			common.AnnotationChangedPredicate(),
-			common.LabelChangedPredicate(),
-			common.SecretChangedPredicate(),
-			common.ConfigMapChangedPredicate(),
-			common.StatefulsetChangedPredicate(),
-			common.PodChangedPredicate(),
-			common.CrdChangedPredicate(),
-		)).
-		WithOptions(controller.Options{
-			MaxConcurrentReconciles: enterpriseApi.TotalWorker,
-		}).
-		Complete(r)
-}
diff --git a/internal/controller/queue_controller_test.go b/internal/controller/queue_controller_test.go
deleted file mode 100644
index b04a5d4b3..000000000
--- a/internal/controller/queue_controller_test.go
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
-Copyright 2025.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controller
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
-	"github.com/splunk/splunk-operator/internal/controller/testutils"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/kubernetes/scheme"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-var _ = Describe("Queue Controller", func() {
-	BeforeEach(func() {
-		time.Sleep(2 * time.Second)
-	})
-
-	AfterEach(func() {
-
-	})
-
-	Context("Queue Management", func() {
-
-		It("Get Queue custom resource should fail", func() {
-			namespace := "ns-splunk-queue-1"
-			ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			_, err := GetQueue("test", nsSpecs.Name)
-			Expect(err.Error()).Should(Equal("queues.enterprise.splunk.com \"test\" not found"))
-			Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
-		})
-
-		It("Create Queue custom resource with annotations should pause", func() {
-			namespace := "ns-splunk-queue-2"
-			annotations := make(map[string]string)
-			annotations[enterpriseApi.QueuePausedAnnotation] = ""
-			ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			spec := enterpriseApi.QueueSpec{
-				Provider: "sqs",
-				SQS: enterpriseApi.SQSSpec{
-					Name:       "smartbus-queue",
-					AuthRegion: "us-west-2",
-					DLQ:        "smartbus-dlq",
-					Endpoint:   "https://sqs.us-west-2.amazonaws.com",
-				},
-			}
-			CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec)
-			icSpec, _ := GetQueue("test", nsSpecs.Name)
-			annotations = map[string]string{}
-			icSpec.Annotations = annotations
-			icSpec.Status.Phase = "Ready"
-			UpdateQueue(icSpec, enterpriseApi.PhaseReady, spec)
-			DeleteQueue("test", nsSpecs.Name)
-			Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
-		})
-
-		It("Create Queue custom resource should succeeded", func() {
-			namespace := "ns-splunk-queue-3"
-			ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			annotations := make(map[string]string)
-			spec := enterpriseApi.QueueSpec{
-				Provider: "sqs",
-				SQS: enterpriseApi.SQSSpec{
-					Name:       "smartbus-queue",
-					AuthRegion: "us-west-2",
-					DLQ:        "smartbus-dlq",
-					Endpoint:   "https://sqs.us-west-2.amazonaws.com",
-				},
-			}
-			CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec)
-			DeleteQueue("test", nsSpecs.Name)
-			Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed())
-		})
-
-		It("Cover Unused methods", func() {
-			namespace := "ns-splunk-queue-4"
-			ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) {
-				return reconcile.Result{}, nil
-			}
-			nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-
-			Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed())
-
-			ctx := context.TODO()
-			builder := fake.NewClientBuilder()
-			c := builder.Build()
-			instance := QueueReconciler{
-				Client: c,
-				Scheme: scheme.Scheme,
-			}
-			request := reconcile.Request{
-				NamespacedName: types.NamespacedName{
-					Name:      "test",
-					Namespace: namespace,
-				},
-			}
-			_, err := instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-
-			spec := enterpriseApi.QueueSpec{
-				Provider: "sqs",
-				SQS: enterpriseApi.SQSSpec{
-					Name:       "smartbus-queue",
-					AuthRegion: "us-west-2",
-					DLQ:        "smartbus-dlq",
-					Endpoint:   "https://sqs.us-west-2.amazonaws.com",
-				},
-			}
-			bcSpec := testutils.NewQueue("test", namespace, spec)
-			Expect(c.Create(ctx, bcSpec)).Should(Succeed())
-
-			annotations := make(map[string]string)
-			annotations[enterpriseApi.QueuePausedAnnotation] = ""
-			bcSpec.Annotations = annotations
-			Expect(c.Update(ctx, bcSpec)).Should(Succeed())
-
-			_, err = instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-
-			annotations = map[string]string{}
-			bcSpec.Annotations = annotations
-			Expect(c.Update(ctx, bcSpec)).Should(Succeed())
-
-			_, err = instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-
-			bcSpec.DeletionTimestamp = &metav1.Time{}
-			_, err = instance.Reconcile(ctx, request)
-			Expect(err).ToNot(HaveOccurred())
-		})
-
-	})
-})
-
-func GetQueue(name string, namespace string) (*enterpriseApi.Queue, error) {
-	By("Expecting Queue custom resource to be retrieved successfully")
-
-	key := types.NamespacedName{
-		Name:      name,
-		Namespace: namespace,
-	}
-	b := &enterpriseApi.Queue{}
-
-	err := k8sClient.Get(context.Background(), key, b)
-	if err != nil {
-		return nil, err
-	}
-
-	return b, err
-}
-
-func CreateQueue(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue {
-	By("Expecting Queue custom resource to be created successfully")
-
-	key := types.NamespacedName{
-		Name:      name,
-		Namespace: namespace,
-	}
-	ingSpec := &enterpriseApi.Queue{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        name,
-			Namespace:   namespace,
-			Annotations: annotations,
-		},
-		Spec: spec,
-	}
-
-	Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed())
-	time.Sleep(2 * time.Second)
-
-	b := &enterpriseApi.Queue{}
-	Eventually(func() bool {
-		_ = k8sClient.Get(context.Background(), key, b)
-		if status != "" {
-			fmt.Printf("status is set to %v", status)
-			b.Status.Phase = status
-			Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed())
-			time.Sleep(2 * time.Second)
-		}
-		return true
-	}, timeout, interval).Should(BeTrue())
-
-	return b
-}
-
-func UpdateQueue(instance *enterpriseApi.Queue, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue {
-	By("Expecting Queue custom resource to be updated successfully")
-
-	key := types.NamespacedName{
-		Name:      instance.Name,
-		Namespace: instance.Namespace,
-	}
-
-	bSpec := testutils.NewQueue(instance.Name, instance.Namespace, spec)
-	bSpec.ResourceVersion = instance.ResourceVersion
-	Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed())
-	time.Sleep(2 * time.Second)
-
-	b := &enterpriseApi.Queue{}
-	Eventually(func() bool {
-		_ = k8sClient.Get(context.Background(), key, b)
-		if status != "" {
-			fmt.Printf("status is set to %v", status)
-			b.Status.Phase = status
-			Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed())
-			time.Sleep(2 * time.Second)
-		}
-		return true
-	}, timeout, interval).Should(BeTrue())
-
-	return b
-}
-
-func DeleteQueue(name string, namespace string) {
-	By("Expecting Queue custom resource to be deleted successfully")
-
-	key := types.NamespacedName{
-		Name:      name,
-		Namespace: namespace,
-	}
-
-	Eventually(func() error {
-		b := &enterpriseApi.Queue{}
-		_ = k8sClient.Get(context.Background(), key, b)
-		err := k8sClient.Delete(context.Background(), b)
-		return err
-	}, timeout, interval).Should(Succeed())
-}
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 8454d15b5..142a8720c 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -98,12 +98,6 @@ var _ = BeforeSuite(func(ctx context.Context) {
 		Scheme: clientgoscheme.Scheme,
 	})
 	Expect(err).ToNot(HaveOccurred())
-	if err := (&QueueReconciler{
-		Client: k8sManager.GetClient(),
-		Scheme: k8sManager.GetScheme(),
-	}).SetupWithManager(k8sManager); err != nil {
-		Expect(err).NotTo(HaveOccurred())
-	}
 	if err := (&ClusterManagerReconciler{
 		Client: k8sManager.GetClient(),
 		Scheme: k8sManager.GetScheme(),
@@ -128,12 +122,6 @@ var _ = BeforeSuite(func(ctx context.Context) {
 	}).SetupWithManager(k8sManager); err != nil {
 		Expect(err).NotTo(HaveOccurred())
 	}
-	if err := (&ObjectStorageReconciler{
-		Client: k8sManager.GetClient(),
-		Scheme: k8sManager.GetScheme(),
-	}).SetupWithManager(k8sManager); err != nil {
-		Expect(err).NotTo(HaveOccurred())
-	}
 	if err := (&LicenseManagerReconciler{
 		Client: k8sManager.GetClient(),
 		Scheme: k8sManager.GetScheme(),
diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go
index aa47e8092..63a291a1d 100644
--- a/internal/controller/testutils/new.go
+++ b/internal/controller/testutils/new.go
@@ -46,7 +46,7 @@ func NewStandalone(name, ns, image string) *enterpriseApi.Standalone {
 }
 
 // NewIngestorCluster returns new IngestorCluster instance with its config hash
-func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster {
+func NewIngestorCluster(name, ns, image string, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster {
 	return &enterpriseApi.IngestorCluster{
 		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
 		Spec: enterpriseApi.IngestorClusterSpec{
@@ -55,7 +55,12 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster {
 			},
 			Replicas: 3,
 			QueueRef: corev1.ObjectReference{
-				Name: "queue",
+				Name:      queue.Name,
+				Namespace: queue.Namespace,
+			},
+			ObjectStorageRef: corev1.ObjectReference{
+				Name:      os.Name,
+				Namespace: os.Namespace,
 			},
 		},
 	}
diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml
index 41f4ea2aa..a4aaa0824 100644
--- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml
+++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml
@@ -1,136 +1,5 @@
 ---
-# assert for queue custom resource to be ready
-apiVersion: enterprise.splunk.com/v4
-kind: Queue
-metadata:
-  name: queue
-spec:
-  provider: sqs
-  sqs:
-    name: sqs-test
-    region: us-west-2
-    endpoint: https://sqs.us-west-2.amazonaws.com
-    dlq: sqs-dlq-test
-status:
-  phase: Ready
-
----
-# assert for large message store custom resource to be ready
-apiVersion: enterprise.splunk.com/v4
-kind: ObjectStorage
-metadata:
-  name: os
-spec:
-  provider: s3
-  s3:
-    endpoint: https://s3.us-west-2.amazonaws.com
-    path: s3://ingestion/smartbus-test
-status:
-  phase: Ready
-
----
-# assert for cluster manager custom resource to be ready
-apiVersion: enterprise.splunk.com/v4
-kind: ClusterManager
-metadata:
-  name: cm
-status:
-  phase: Ready
-
----
-# check if stateful sets are created
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: splunk-cm-cluster-manager
-status:
-  replicas: 1
-
----
-# check if secret object are created
-apiVersion: v1
-kind: Secret
-metadata:
-  name: splunk-cm-cluster-manager-secret-v1
-
----
-# assert for indexer cluster custom resource to be ready
-apiVersion: enterprise.splunk.com/v4
-kind: IndexerCluster
-metadata:
-  name: indexer
-spec:
-  replicas: 3
-  queueRef:
-    name: queue
-status:
-  phase: Ready
-  queue:
-    provider: sqs
-    sqs:
-      name: sqs-test
-      region: us-west-2
-      endpoint: https://sqs.us-west-2.amazonaws.com
-      dlq: sqs-dlq-test
-  objectStorage:
-    provider: s3
-    s3:
-      endpoint: https://s3.us-west-2.amazonaws.com
-      path: s3://ingestion/smartbus-test
-
----
-# check for stateful set and replicas as configured
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: splunk-indexer-indexer
-status:
-  replicas: 3
-
----
-# check if secret object are created
-apiVersion: v1
-kind: Secret
-metadata:
-  name: splunk-indexer-indexer-secret-v1
-
----
-# assert for indexer cluster custom resource to be ready
-apiVersion: enterprise.splunk.com/v4
-kind: IngestorCluster
-metadata:
-  name: ingestor
-spec:
-  replicas: 3
-  queueRef:
-    name: queue
-status:
-  phase: Ready
-  queue:
-    provider: sqs
-    sqs:
-      name: sqs-test
-      region: us-west-2
-      endpoint: https://sqs.us-west-2.amazonaws.com
-      dlq: sqs-dlq-test
-  objectStorage:
-    provider: s3
-    s3:
-      endpoint: https://s3.us-west-2.amazonaws.com
-      path: s3://ingestion/smartbus-test
-
----
-# check for stateful set and replicas as configured
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: splunk-ingestor-ingestor
-status:
-  replicas: 3
-
----
-# check if secret object are created
 apiVersion: v1
 kind: Secret
 metadata:
-  name: splunk-ingestor-ingestor-secret-v1
\ No newline at end of file
+  name: index-ing-sep-secret
diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml
new file mode 100644
index 000000000..591aa8fd5
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml
@@ -0,0 +1,7 @@
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: kubectl create secret generic index-ing-sep-secret --from-literal=s3_access_key=$AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY --namespace $NAMESPACE
+    background: false
+    skipLogOutput: true
\ No newline at end of file
diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
index 00ff26a56..c6cc343d8 100644
--- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
+++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml
@@ -1,33 +1,112 @@
 ---
-# assert for ingestor cluster custom resource to be ready
+# assert for queue custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: Queue
+metadata:
+  name: queue
+spec:
+  provider: sqs
+  sqs:
+    name: index-ingest-separation-test-q
+    authRegion: us-west-2
+    endpoint: https://sqs.us-west-2.amazonaws.com
+    dlq: index-ingest-separation-test-dlq
+
+---
+# assert for object storage custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+  name: os
+spec:
+  provider: s3
+  s3:
+    endpoint: https://s3.us-west-2.amazonaws.com
+    path: s3://index-ingest-separation-test-bucket/smartbus-test
+
+---
+# assert for cluster manager custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: ClusterManager
+metadata:
+  name: cm
+status:
+  phase: Ready
+
+---
+# check if stateful sets are created
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-cm-cluster-manager
+status:
+  replicas: 1
+
+---
+# check if secret objects are created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-cm-cluster-manager-secret-v1
+
+---
+# assert for indexer cluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: IndexerCluster
+metadata:
+  name: indexer
+spec:
+  replicas: 3
+  queueRef:
+    name: queue
+  objectStorageRef:
+    name: os
+status:
+  phase: Ready
+
+---
+# check for stateful set and replicas as configured
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-indexer-indexer
+status:
+  replicas: 3
+
+---
+# check if secret objects are created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-indexer-indexer-secret-v1
+
+---
+# assert for ingestor cluster custom resource to be ready
 apiVersion: enterprise.splunk.com/v4
 kind: IngestorCluster
 metadata:
   name: ingestor
 spec:
-  replicas: 4
+  replicas: 3
   queueRef:
     name: queue
+  objectStorageRef:
+    name: os
 status:
   phase: Ready
-  queue:
-    provider: sqs
-    sqs:
-      name: sqs-test
-      region: us-west-2
-      endpoint: https://sqs.us-west-2.amazonaws.com
-      dlq: sqs-dlq-test
-  objectStorage:
-    provider: s3
-    s3:
-      endpoint: https://s3.us-west-2.amazonaws.com
-      path: s3://ingestion/smartbus-test
 
 ---
-# check for stateful sets and replicas updated
+# check for stateful set and replicas as configured
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: splunk-ingestor-ingestor
 status:
-  replicas: 4
+  replicas: 3
+
+---
+# check if secret objects are created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-ingestor-ingestor-secret-v1
\ No newline at end of file
diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml
similarity index 100%
rename from kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml
rename to kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml
diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml
new file mode 100644
index 000000000..8bf619148
--- /dev/null
+++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml
@@ -0,0 +1,23 @@
+---
+# assert for ingestor cluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: IngestorCluster
+metadata:
+  name: ingestor
+spec:
+  replicas: 4
+  queueRef:
+    name: queue
+  objectStorageRef:
+    name: os
+status:
+  phase: Ready
+
+---
+# check for stateful sets and replicas updated
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-ingestor-ingestor
+status:
+  replicas: 4
diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml
similarity index 100%
rename from kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml
rename to kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml
diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml
similarity index 100%
rename from kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml
rename to kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml
diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml
index d05cb5bcf..1cdbc33b8 100644
--- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml
+++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml
@@ -10,10 +10,13 @@ queue:
   name: queue
   provider: sqs
   sqs:
-    name: sqs-test
-    region: us-west-2
+    name: index-ingest-separation-test-q
+    authRegion: us-west-2
     endpoint: https://sqs.us-west-2.amazonaws.com
-    dlq: sqs-dlq-test
+    dlq: index-ingest-separation-test-dlq
+    volumes:
+      - name: helm-bus-secret-ref-test
+        secretRef: index-ing-sep-secret
 
 objectStorage:
   enabled: true
@@ -21,7 +24,7 @@ objectStorage:
   provider: s3
   s3:
     endpoint: https://s3.us-west-2.amazonaws.com
-    path: s3://ingestion/smartbus-test
+    path: s3://index-ingest-separation-test-bucket/smartbus-test
 
 ingestorCluster:
   enabled: true
diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go
index 6eb4d2f87..e51688661 100644
--- a/pkg/splunk/client/enterprise.go
+++ b/pkg/splunk/client/enterprise.go
@@ -1015,22 +1015,3 @@ func (c *SplunkClient) UpdateConfFile(scopedLog logr.Logger, fileName, property
 	}
 	return err
 }
-
-// Deletes conf files properties
-func (c *SplunkClient) DeleteConfFileProperty(scopedLog logr.Logger, fileName, property string) error {
-	endpoint := fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property)
-
-	scopedLog.Info("Deleting conf file object", "fileName", fileName, "property", property)
-	request, err := http.NewRequest("DELETE", endpoint, nil)
-	if err != nil {
-		scopedLog.Error(err, "Failed to delete conf file object", "fileName", fileName, "property", property)
-		return err
-	}
-
-	expectedStatus := []int{200, 201, 404}
-	err = c.Do(request, expectedStatus, nil)
-	if err != nil {
-		scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object deletion", expectedStatus), "fileName", fileName, "property", property)
-	}
-	return err
-}
diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go
index 6b97c24d7..4934eedfc 100644
--- a/pkg/splunk/client/enterprise_test.go
+++ b/pkg/splunk/client/enterprise_test.go
@@ -705,35 +705,3 @@ func TestUpdateConfFile(t *testing.T) {
 		t.Errorf("UpdateConfFile expected error on update, got nil")
 	}
 }
-
-func TestDeleteConfFileProperty(t *testing.T) {
-	// Test successful deletion of conf property
-	property := "myproperty"
-	fileName := "outputs"
-
-	reqLogger := log.FromContext(context.TODO())
-	scopedLog := reqLogger.WithName("TestDeleteConfFileProperty")
-
-	wantDeleteRequest, _ := http.NewRequest("DELETE", fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), nil)
-
-	mockSplunkClient := &spltest.MockHTTPClient{}
-	mockSplunkClient.AddHandler(wantDeleteRequest, 200, "", nil)
-
-	c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd")
-	c.Client = mockSplunkClient
-
-	err := c.DeleteConfFileProperty(scopedLog, fileName, property)
-	if err != nil {
-		t.Errorf("DeleteConfFileProperty err = %v", err)
-	}
-	mockSplunkClient.CheckRequests(t, "TestDeleteConfFileProperty")
-
-	// Negative test: error on delete
-	mockSplunkClient = &spltest.MockHTTPClient{}
-	mockSplunkClient.AddHandler(wantDeleteRequest, 500, "", nil)
-	c.Client = mockSplunkClient
-	err = c.DeleteConfFileProperty(scopedLog, fileName, property)
-	if err == nil {
-		t.Errorf("DeleteConfFileProperty expected error on delete, got nil")
-	}
-}
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 60b4d5a9a..42b714924 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -19,7 +19,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"reflect"
 	"regexp"
 	"sort"
 	"strconv"
@@ -77,7 +76,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 	// updates status after function completes
 	cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError
 	if cr.Status.Replicas < cr.Spec.Replicas {
-		cr.Status.Queue = &enterpriseApi.QueueSpec{}
+		cr.Status.QueueBucketAccessSecretVersion = "0"
 	}
 	cr.Status.Replicas = cr.Spec.Replicas
 	cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName())
@@ -118,7 +117,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 		cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError
 	}
 
-	mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient)
+	mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
 	// Check if we have configured enough number(<= RF) of replicas
 	if mgr.cr.Status.ClusterManagerPhase == enterpriseApi.PhaseReady {
 		err = VerifyRFPeers(ctx, mgr, client)
@@ -251,7 +250,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 		if cr.Spec.QueueRef.Namespace != "" {
 			ns = cr.Spec.QueueRef.Namespace
 		}
-		err = client.Get(context.Background(), types.NamespacedName{
+		err = client.Get(ctx, types.NamespacedName{
 			Name:      cr.Spec.QueueRef.Name,
 			Namespace: ns,
 		}, &queue)
@@ -259,23 +258,20 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 			return result, err
 		}
 	}
-
-	// Can not override original queue spec due to comparison in the later code
-	queueCopy := queue
-	if queueCopy.Spec.Provider == "sqs" {
-		if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" {
-			queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion)
+	if queue.Spec.Provider == "sqs" {
+		if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" {
+			queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion)
 		}
 	}
 
-	// Large Message Store
+	// Object Storage
 	os := enterpriseApi.ObjectStorage{}
 	if cr.Spec.ObjectStorageRef.Name != "" {
 		ns := cr.GetNamespace()
 		if cr.Spec.ObjectStorageRef.Namespace != "" {
 			ns = cr.Spec.ObjectStorageRef.Namespace
 		}
-		err = client.Get(context.Background(), types.NamespacedName{
+		err = client.Get(ctx, types.NamespacedName{
 			Name:      cr.Spec.ObjectStorageRef.Name,
 			Namespace: ns,
 		}, &os)
@@ -283,28 +279,49 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller
 		if err != nil {
 			return result, err
 		}
 	}
+	if os.Spec.Provider == "s3" {
+		if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" {
+			os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion)
+		}
+	}
 
-	// Can not override original large message store spec due to comparison in the later code
-	osCopy := os
-	if osCopy.Spec.Provider == "s3" {
-		if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" {
-			osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion)
-		}
-	}
+	// Secret reference
+	accessKey, secretKey, version := "", "", ""
+	if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" {
+		for _, vol := range queue.Spec.SQS.VolList {
+			if vol.SecretRef != "" {
+				accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr)
+				if err != nil {
+					scopedLog.Error(err, "Failed to get queue remote volume secrets")
+					return result, err
+				}
+			}
+		}
+	}
+	secretChanged := cr.Status.QueueBucketAccessSecretVersion != version
 
 	// If queue is updated
 	if cr.Spec.QueueRef.Name != "" {
-		if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) {
-			mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient)
-
-			err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client)
+		if secretChanged {
+			mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
+			err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client)
 			if err != nil {
 				eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error()))
 				scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation")
 				return result, err
 			}
-			cr.Status.Queue = &queue.Spec
+			for i := int32(0); i < cr.Spec.Replicas; i++ {
+				idxcClient := mgr.getClient(ctx, i)
+				err = idxcClient.RestartSplunk()
+				if err != nil {
+					return result, err
+				}
+				scopedLog.Info("Restarted splunk", "indexer", i)
+			}
+
+			cr.Status.QueueBucketAccessSecretVersion = version
 		}
 	}
@@ -397,7 +414,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
 	cr.Status.Phase = enterpriseApi.PhaseError
 	cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError
 	if cr.Status.Replicas < cr.Spec.Replicas {
-		cr.Status.Queue = &enterpriseApi.QueueSpec{}
+		cr.Status.QueueBucketAccessSecretVersion = "0"
 	}
 	cr.Status.Replicas = cr.Spec.Replicas
 	cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName())
@@ -440,7 +457,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
 		cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError
 	}
 
-	mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient)
+	mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
 	// Check if we have configured enough number(<= RF) of replicas
 	if mgr.cr.Status.ClusterMasterPhase == enterpriseApi.PhaseReady {
 		err = VerifyRFPeers(ctx, mgr, client)
@@ -582,16 +599,13 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
 			return result, err
 		}
 	}
-
-	// Can not override original queue spec due to comparison in the later code
-	queueCopy := queue
-	if queueCopy.Spec.Provider == "sqs" {
-		if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" {
-			queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion)
+	if queue.Spec.Provider == "sqs" {
+		if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" {
+			queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion)
 		}
 	}
 
-	// Large Message Store
+	// Object Storage
 	os := enterpriseApi.ObjectStorage{}
 	if cr.Spec.ObjectStorageRef.Name != "" {
 		ns := cr.GetNamespace()
@@ -601,33 +615,53 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient,
 		err = client.Get(context.Background(), types.NamespacedName{
 			Name:      cr.Spec.ObjectStorageRef.Name,
 			Namespace: ns,
-		}, &queue)
+		}, &os)
 		if err != nil {
 			return result, err
 		}
 	}
+	if os.Spec.Provider == "s3" {
+		if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" {
+			os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion)
+		}
+	}
 
-	// Can not override original queue spec due to comparison in the later code
-	osCopy := os
-	if osCopy.Spec.Provider == "s3" {
-		if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" {
-			osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion)
-		}
-	}
+	// Secret reference
+	accessKey, secretKey, version := "", "", ""
+	if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" {
+		for _, vol := range queue.Spec.SQS.VolList {
+			if vol.SecretRef != "" {
+				accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr)
+				if err != nil {
+					scopedLog.Error(err, "Failed to get queue remote volume secrets")
+					return result, err
+				}
+			}
+		}
+	}
 
-	// If queue is updated
-	if cr.Spec.QueueRef.Name != "" {
-		if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) {
-			mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient)
+	secretChanged := cr.Status.QueueBucketAccessSecretVersion != version
 
-			err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client)
+	if cr.Spec.QueueRef.Name != "" {
+		if secretChanged {
+			mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client)
+			err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client)
 			if err != nil {
 				eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error()))
 				scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation")
 				return result, err
 			}
-			cr.Status.Queue = &queue.Spec
+			for i := int32(0); i < cr.Spec.Replicas; i++ {
+				idxcClient := mgr.getClient(ctx, i)
+				err = idxcClient.RestartSplunk()
+				if err != nil {
+					return result, err
+				}
+				scopedLog.Info("Restarted splunk", "indexer", i)
+			}
+
+			cr.Status.QueueBucketAccessSecretVersion = version
 		}
 	}
@@ -710,12 +744,13 @@ type indexerClusterPodManager struct {
 }
 
 // newIndexerClusterPodManager function to create pod manager this is added to write unit test case
-var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager {
+var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager {
 	return indexerClusterPodManager{
 		log:             log,
 		cr:              cr,
 		secrets:         secret,
 		newSplunkClient: newSplunkClient,
+		c:               c,
 	}
 }
@@ -1296,10 +1331,10 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri
 
 var newSplunkClientForQueuePipeline = splclient.NewSplunkClient
 
-// Checks if only PullQueue or Pipeline config changed, and updates the conf file if so
-func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s rclient.Client) error {
+// updateIndexerConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so
+func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s rclient.Client) error {
 	reqLogger := log.FromContext(ctx)
-	scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())
+	scopedLog := reqLogger.WithName("updateIndexerConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())
 
 	// Only update config for pods that exist
 	readyReplicas := newCR.Status.ReadyReplicas
@@ -1315,62 +1350,37 @@ func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context,
 		}
 		splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd))
 
-		afterDelete := false
-		if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) ||
-			(queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) {
-			if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil {
-				updateErr = err
-			}
-			if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil {
-				updateErr = err
-			}
-			afterDelete = true
-		}
-
-		queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, afterDelete)
+		queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, accessKey, secretKey)
 
-		for _, pbVal := range queueChangedFieldsOutputs {
-			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil {
+		for _, pbVal := range queueOutputs {
+			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil {
 				updateErr = err
 			}
 		}
 
-		for _, pbVal := range queueChangedFieldsInputs {
-			if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil {
+		for _, pbVal := range queueInputs {
+			if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil {
 				updateErr = err
 			}
 		}
 
-		for _, field := range pipelineChangedFields {
+		for _, field := range pipelineInputs {
 			if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil {
 				updateErr = err
 			}
 		}
 	}
 
-	// Do NOT restart Splunk
 	return updateErr
 }
 
-// getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods
-func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) {
-	// Compare queue fields
-	oldQueue := queueIndexerStatus.Status.Queue
-	if oldQueue == nil {
-		oldQueue = &enterpriseApi.QueueSpec{}
-	}
-	newQueue := queue.Spec
+// getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for indexer pods conf files
+func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) {
+	// Queue Inputs
+	queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, accessKey, secretKey)
 
-	oldOS := queueIndexerStatus.Status.ObjectStorage
-	if oldOS == nil {
-		oldOS = &enterpriseApi.ObjectStorageSpec{}
-	}
-	newOS := os.Spec
-
-	// Push all queue fields
-	queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldQueue, &newQueue, oldOS, &newOS, afterDelete)
-	// Always set all pipeline fields, not just changed ones
-	pipelineChangedFields = pipelineConfig(true)
+	// Pipeline inputs
+	pipelineInputs = getPipelineInputsForConfFile(true)
 
 	return
 }
@@ -1386,39 +1396,34 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool {
 	return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9")
 }
 
-func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (inputs, outputs [][]string) {
+// getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files
+func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (inputs, outputs [][]string) {
 	queueProvider := ""
-	if newQueue.Provider == "sqs" {
+	if queue.Provider == "sqs" {
 		queueProvider = "sqs_smartbus"
 	}
 	osProvider := ""
-	if newOS.Provider == "s3" {
+	if os.Provider == "s3" {
 		osProvider = "sqs_smartbus"
 	}
 
-	if oldQueue.Provider != newQueue.Provider || afterDelete {
-		inputs = append(inputs, []string{"remote_queue.type", queueProvider})
-	}
-	if newQueue.SQS.AuthRegion != "" &&(oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion})
-	}
-	if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint})
-	}
-	if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint})
-	}
-	if oldOS.S3.Path != newOS.S3.Path || afterDelete {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path})
-	}
-	if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete {
-		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ})
-	}
 	inputs = append(inputs,
+		[]string{"remote_queue.type", queueProvider},
+		[]string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion},
+		[]string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint},
+		[]string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path},
+		[]string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ},
 		[]string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"},
 		[]string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"},
 	)
+	// TODO: Handle credentials change
+	if accessKey != "" && secretKey != "" {
+		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey})
+		inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey})
+	}
+
 	outputs = inputs
 	outputs = append(outputs,
 		[]string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"},
diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index a74ab4acd..ac9e59554 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -1569,7 +1569,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 		return nil
 	}
 
-	newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager {
+	newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager {
 		return indexerClusterPodManager{
 			log:     log,
 			cr:      cr,
@@ -1579,6 +1579,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 				c.Client = mclient
 				return c
 			},
+			c: c,
 		}
 	}
 
@@ -2045,10 +2046,10 @@ func TestImageUpdatedTo9(t *testing.T) {
 	}
 }
 
-func TestGetChangedQueueFieldsForIndexer(t *testing.T) {
+func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) {
 	provider := "sqs_smartbus"
 
-	queue := enterpriseApi.Queue{
+	queue := &enterpriseApi.Queue{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Queue",
 			APIVersion: "enterprise.splunk.com/v4",
@@ -2063,11 +2064,14 @@
 			AuthRegion: "us-west-2",
 			Endpoint:   "https://sqs.us-west-2.amazonaws.com",
 			DLQ:        "sqs-dlq-test",
+			VolList: []enterpriseApi.VolumeSpec{
+				{SecretRef: "secret"},
+			},
 		},
 	},
 }
 
-	os := enterpriseApi.ObjectStorage{
+	os := &enterpriseApi.ObjectStorage{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "ObjectStorage",
 			APIVersion: "enterprise.splunk.com/v4",
@@ -2084,19 +2088,11 @@
 		},
 	}
 
-	newCR := &enterpriseApi.IndexerCluster{
-		Spec: enterpriseApi.IndexerClusterSpec{
-			QueueRef: corev1.ObjectReference{
-				Name: queue.Name,
-			},
-			ObjectStorageRef: corev1.ObjectReference{
-				Name: os.Name,
-			},
-		},
-	}
+	key := "key"
+	secret := "secret"
 
-	queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, false)
-	assert.Equal(t, 8, len(queueChangedFieldsInputs))
+	queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getQueueAndPipelineInputsForIndexerConfFiles(&queue.Spec, &os.Spec, key, secret)
+	assert.Equal(t, 10, len(queueChangedFieldsInputs))
 	assert.Equal(t, [][]string{
 		{"remote_queue.type", provider},
 		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
@@ -2106,9 +2102,11 @@
 		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
 		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
 		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
 	}, queueChangedFieldsInputs)
 
-	assert.Equal(t, 10, len(queueChangedFieldsOutputs))
+	assert.Equal(t, 12, len(queueChangedFieldsOutputs))
 	assert.Equal(t, [][]string{
 		{"remote_queue.type", provider},
 		{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion},
@@ -2118,6 +2116,8 @@
 		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ},
 		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"},
 		{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"},
+		{fmt.Sprintf("remote_queue.%s.access_key", provider), key},
+		{fmt.Sprintf("remote_queue.%s.secret_key", provider), secret},
 		{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"},
 		{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"},
 	}, queueChangedFieldsOutputs)
@@ -2132,11 +2132,17 @@
 	}, pipelineChangedFields)
 }
 
-func TestHandlePullQueueChange(t *testing.T) {
+func TestUpdateIndexerConfFiles(t *testing.T) {
+	c := spltest.NewMockClient()
+	ctx := context.TODO()
+
 	// Object definitions
 	provider := "sqs_smartbus"
 
-	queue := enterpriseApi.Queue{
+	accessKey := "accessKey"
+	secretKey := "secretKey"
+
+	queue := &enterpriseApi.Queue{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Queue",
 			APIVersion: "enterprise.splunk.com/v4",
@@ -2155,6 +2161,7 @@
 			},
 		},
 	}
+	c.Create(ctx, queue)
 
 	os := enterpriseApi.ObjectStorage{
 		TypeMeta: metav1.TypeMeta{
@@ -2173,8 +2180,9 @@
 			},
 		},
 	}
+	c.Create(ctx, &os)
 
-	newCR := &enterpriseApi.IndexerCluster{
+	cr := &enterpriseApi.IndexerCluster{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "IndexerCluster",
 		},
@@ -2192,11 +2200,11 @@
 			},
 		},
 		Status: enterpriseApi.IndexerClusterStatus{
-			ReadyReplicas: 3,
-			Queue:         &enterpriseApi.QueueSpec{},
-			ObjectStorage: &enterpriseApi.ObjectStorageSpec{},
+			ReadyReplicas:                  3,
+			QueueBucketAccessSecretVersion: "123",
 		},
 	}
+	c.Create(ctx, cr)
 
 	pod0 := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -2238,6 +2246,10 @@
 	pod2 := pod0.DeepCopy()
 	pod2.ObjectMeta.Name = "splunk-test-indexer-2"
 
+	c.Create(ctx, pod0)
+	c.Create(ctx, pod1)
+	c.Create(ctx, pod2)
+
 	secret := &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "test-secrets",
@@ -2248,19 +2260,9 @@
 		},
 	}
 
-	// Mock pods
-	c := spltest.NewMockClient()
-	ctx := context.TODO()
-	c.Create(ctx, &queue)
-	c.Create(ctx, &os)
-	c.Create(ctx, newCR)
-	c.Create(ctx, pod0)
-	c.Create(ctx, pod1)
-	c.Create(ctx, pod2)
-
 	// Negative test case: secret not found
 	mgr := &indexerClusterPodManager{}
-	err := mgr.handlePullQueueChange(ctx, newCR, queue, os, c)
+	err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
 	assert.NotNil(t, err)
 
 	// Mock secret
@@ -2269,9 +2271,9 @@
 	mockHTTPClient := &spltest.MockHTTPClient{}
 
 	// Negative test case: failure in creating remote queue stanza
-	mgr = newTestPullQueuePipelineManager(mockHTTPClient)
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
 
-	err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c)
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
 	assert.NotNil(t, err)
 
 	// outputs.conf
@@ -2290,22 +2292,22 @@
 	propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"})
 	body := buildFormBody(propertyKVListOutputs)
 
-	addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-outputs", body)
+	addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body)
 
 	// Negative test case: failure in creating remote queue stanza
-	mgr = newTestPullQueuePipelineManager(mockHTTPClient)
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
 
-	err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c)
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
 	assert.NotNil(t, err)
 
 	// inputs.conf
 	body = buildFormBody(propertyKVList)
-	addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-inputs", body)
+	addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-inputs", body)
 
 	// Negative test case: failure in updating remote queue stanza
-	mgr = newTestPullQueuePipelineManager(mockHTTPClient)
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
 
-	err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c)
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
 	assert.NotNil(t, err)
 
 	// default-mode.conf
@@ -2317,7 +2319,7 @@
 		{"pipeline:typing", "disabled", "true"},
 	}
 
-	for i := 0; i < int(newCR.Status.ReadyReplicas); i++ {
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
 		podName := fmt.Sprintf("splunk-test-indexer-%d", i)
 		baseURL := fmt.Sprintf("https://%s.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName)
@@ -2331,9 +2333,9 @@
 		}
 	}
 
-	mgr = newTestPullQueuePipelineManager(mockHTTPClient)
+	mgr = newTestIndexerQueuePipelineManager(mockHTTPClient)
 
-	err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c)
+	err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c)
 	assert.Nil(t, err)
 }
 
@@ -2351,25 +2353,25 @@
 	return b.String()
 }
 
-func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, replicas int32, confName, body string) {
-	for i := 0; i < int(replicas); i++ {
+func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, confName, body string) {
+	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
 		podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i)
 		baseURL := fmt.Sprintf(
 			"https://%s.splunk-%s-indexer-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s",
 			podName, cr.GetName(), cr.GetNamespace(), confName,
 		)
 
-		createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name))
+		createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name))
 		reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody))
 		mockHTTPClient.AddHandler(reqCreate, 200, "", nil)
 
-		updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name))
+		updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name))
 		reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body))
 		mockHTTPClient.AddHandler(reqUpdate, 200, "", nil)
 	}
 }
 
-func newTestPullQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager {
+func newTestIndexerQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager {
 	newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient {
 		return &splclient.SplunkClient{
 			ManagementURI: uri,
@@ -2395,7 +2397,7 @@ func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) {
 	c := fake.NewClientBuilder().WithScheme(scheme).Build()
 
 	// Object definitions
-	queue := enterpriseApi.Queue{
+	queue := &enterpriseApi.Queue{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Queue",
 			APIVersion: "enterprise.splunk.com/v4",
@@ -2414,7 +2416,26 @@
 		},
 	}
-	c.Create(ctx, &queue)
+	c.Create(ctx, queue)
+
+	os := &enterpriseApi.ObjectStorage{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ObjectStorage",
+			APIVersion: "enterprise.splunk.com/v4",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "os",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ObjectStorageSpec{
+			Provider: "s3",
+			S3: enterpriseApi.S3Spec{
+				Endpoint: "https://s3.us-west-2.amazonaws.com",
+				Path:     "s3://bucket/key",
+			},
+		},
+	}
+	c.Create(ctx, os)
 
 	cm := &enterpriseApi.ClusterManager{
 		TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"},
@@ -2440,6 +2461,10 @@
 			Name:      queue.Name,
 			Namespace: queue.Namespace,
 		},
+		ObjectStorageRef: corev1.ObjectReference{
+			Name:      os.Name,
+			Namespace: os.Namespace,
+		},
 		CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
 			ClusterManagerRef: corev1.ObjectReference{
 				Name: "cm",
diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go
index 0fc94487b..fb4c9474a 100644
--- a/pkg/splunk/enterprise/ingestorcluster.go
+++ b/pkg/splunk/enterprise/ingestorcluster.go
@@ -71,9 +71,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr
 	// Update the CR Status
 	defer updateCRStatus(ctx, client, cr, &err)
-
 	if cr.Status.Replicas < cr.Spec.Replicas {
-		cr.Status.Queue = &enterpriseApi.QueueSpec{}
+		cr.Status.QueueBucketAccessSecretVersion = "0"
 	}
 	cr.Status.Replicas = cr.Spec.Replicas
@@ -225,23 +224,20 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr
 			return result, err
 		}
 	}
-
-	// Can not override original queue spec due to comparison in the later code
-	queueCopy := queue
-	if queueCopy.Spec.Provider == "sqs" {
-		if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" {
-			queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion)
+	if queue.Spec.Provider == "sqs" {
+		if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" {
+			queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion)
 		}
 	}
 
-	// Large Message Store
+	// Object Storage
 	os := enterpriseApi.ObjectStorage{}
 	if cr.Spec.ObjectStorageRef.Name != "" {
 		ns := cr.GetNamespace()
 		if cr.Spec.ObjectStorageRef.Namespace != "" {
 			ns = cr.Spec.ObjectStorageRef.Namespace
 		}
-		err = client.Get(context.Background(), types.NamespacedName{
+		err = client.Get(ctx, types.NamespacedName{
 			Name:      cr.Spec.ObjectStorageRef.Name,
 			Namespace: ns,
 		}, &os)
@@ -249,27 +245,48 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr
 		if err != nil {
 			return result, err
 		}
 	}
+	if os.Spec.Provider == "s3" {
+		if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" {
+			os.Spec.S3.Endpoint = 
fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } - // Can not override original queue spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } } } - // If queue is updated - if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { - mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version - err = mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) + // If queue is updated + if secretChanged { + mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.Queue = &queue.Spec + for i := int32(0); i < cr.Spec.Replicas; i++ { + ingClient := mgr.getClient(ctx, i) + err = ingClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "ingestor", i) + } + + cr.Status.QueueBucketAccessSecretVersion = version } // Upgrade fron automated MC to MC CRD @@ -312,9 +329,30 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, nil } +// getClient for ingestorClusterPodManager returns a SplunkClient for the member n +func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *splclient.SplunkClient { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ingestorClusterPodManager.getClient").WithValues("name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace()) + + // Get Pod Name + memberName := GetSplunkStatefulsetPodName(SplunkIngestor, mgr.cr.GetName(), n) + + // Get Fully Qualified Domain Name + fqdnName := splcommon.GetServiceFQDN(mgr.cr.GetNamespace(), + fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIngestor, mgr.cr.GetName(), true))) + + // Retrieve admin password from Pod + adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, mgr.c, memberName, mgr.cr.GetNamespace(), "password") + if err != nil { + scopedLog.Error(err, "Couldn't retrieve the admin password from pod") + } + + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPwd) +} + // validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error { - // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor 
+ // validateIngestorClusterSpec checks validity and makes default updates to an IngestorClusterSpec and returns error if something is wrong func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error { - // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster + // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in the ingestor cluster if cr.Spec.Replicas < 3 { cr.Spec.Replicas = 3 } @@ -342,10 +380,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only Queue or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s client.Client) error { +// updateIngestorConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf files if so +func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s client.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("updateIngestorConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.Replicas @@ -361,75 +399,57 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - afterDelete := false - if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || - (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { - updateErr = err - } - afterDelete = true - } - - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, afterDelete) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, accessKey, secretKey) - for _, pbVal := range queueChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, input := range queueInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil { updateErr = err } } - for _, field := range pipelineChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil { + for _, input := range pipelineInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", input[0], [][]string{{input[1], input[2]}}); err != nil { updateErr = err } } } - // Do NOT restart Splunk return updateErr } -// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods -func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { - oldQueue := queueIngestorStatus.Status.Queue - if oldQueue == nil { - oldQueue = &enterpriseApi.QueueSpec{} - } - newQueue := &queue.Spec - - oldOS :=
queueIngestorStatus.Status.ObjectStorage - if oldOS == nil { - oldOS = &enterpriseApi.ObjectStorageSpec{} - } - newOS := &os.Spec - // Push changed queue fields - queueChangedFields = pushQueueChanged(oldQueue, newQueue, oldOS, newOS, afterDelete) +// getQueueAndPipelineInputsForIngestorConfFiles returns a list of queue and pipeline inputs for ingestor pod conf files + // Queue Inputs + queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, accessKey, secretKey) - // Always changed pipeline fields - pipelineChangedFields = pipelineConfig(false) + // Pipeline inputs + pipelineInputs = getPipelineInputsForConfFile(false) return } type ingestorClusterPodManager struct { + c splcommon.ControllerClient log logr.Logger cr *enterpriseApi.IngestorCluster secrets *corev1.Secret newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient } -// newIngestorClusterPodManager function to create pod manager this is added to write unit test case -var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) ingestorClusterPodManager { +// newIngestorClusterPodManager creates a pod manager; defined as a variable so unit tests can override it +var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } -func pipelineConfig(isIndexer bool) (output [][]string) { - output = append(output, +// getPipelineInputsForConfFile returns a list of pipeline inputs for the conf file +func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) { + config = append(config, []string{"pipeline:remotequeueruleset", "disabled", "false"}, []string{"pipeline:ruleset", "disabled", "true"}, []string{"pipeline:remotequeuetyping", "disabled", "false"}, @@ -437,45 +457,39 @@ func pipelineConfig(isIndexer bool) (output [][]string) { []string{"pipeline:typing", "disabled", "true"}, ) if !isIndexer { - output = append(output, []string{"pipeline:indexerPipe", "disabled", "true"}) + config = append(config, []string{"pipeline:indexerPipe", "disabled", "true"}) } - return output + + return } -func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (output [][]string) { +// getQueueAndObjectStorageInputsForIngestorConfFiles returns a list of queue and object storage inputs for conf files +func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (config [][]string) { queueProvider := "" - if newQueue.Provider == "sqs" { + if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" } osProvider := "" - if newOS.Provider == "s3" { + if os.Provider == "s3" { osProvider = "sqs_smartbus" } - - if oldQueue.Provider != newQueue.Provider || afterDelete { - output = append(output, []string{"remote_queue.type", queueProvider}) - } - if newQueue.SQS.AuthRegion != "" && (oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider),
newQueue.SQS.AuthRegion}) - } - if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) - } - if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) - } - if oldOS.S3.Path != newOS.S3.Path || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) - } - if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) - } - - output = append(output, + config = append(config, + []string{"remote_queue.type", queueProvider}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ}, []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + ) + + if accessKey != "" && secretKey != "" { + config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) + } - return output + return } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index fac91bbbe..f7dd54b39 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -25,6 +25,7 @@ import ( "github.com/go-logr/logr" enterpriseApi "github.com/splunk/splunk-operator/api/v4" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" "github.com/stretchr/testify/assert" @@ -32,7 +33,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -86,7 +86,7 @@ func TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, queue) - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -103,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &os) + c.Create(ctx, os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -117,7 +117,8 @@ func TestApplyIngestorCluster(t *testing.T) { Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 3, CommonSplunkSpec: 
enterpriseApi.CommonSplunkSpec{ - Mock: true, + Mock: true, + ServiceAccount: "sa", }, QueueRef: corev1.ObjectReference{ Name: queue.Name, @@ -247,34 +248,12 @@ func TestApplyIngestorCluster(t *testing.T) { assert.True(t, result.Requeue) assert.NotEqual(t, enterpriseApi.PhaseError, cr.Status.Phase) - // Ensure stored StatefulSet status reflects readiness after any reconcile modifications - fetched := &appsv1.StatefulSet{} - _ = c.Get(ctx, types.NamespacedName{Name: "splunk-test-ingestor", Namespace: "test"}, fetched) - fetched.Status.Replicas = replicas - fetched.Status.ReadyReplicas = replicas - fetched.Status.UpdatedReplicas = replicas - if fetched.Status.UpdateRevision == "" { - fetched.Status.UpdateRevision = "v1" - } - c.Update(ctx, fetched) - - // Guarantee all pods have matching revision label - for _, pn := range []string{"splunk-test-ingestor-0", "splunk-test-ingestor-1", "splunk-test-ingestor-2"} { - p := &corev1.Pod{} - if err := c.Get(ctx, types.NamespacedName{Name: pn, Namespace: "test"}, p); err == nil { - if p.Labels == nil { - p.Labels = map[string]string{} - } - p.Labels["controller-revision-hash"] = fetched.Status.UpdateRevision - c.Update(ctx, p) - } - } - // outputs.conf origNew := newIngestorClusterPodManager mockHTTPClient := &spltest.MockHTTPClient{} - newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc) ingestorClusterPodManager { + newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ + c: c, log: l, cr: cr, secrets: secret, newSplunkClient: func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ManagementURI: uri, Username: user, Password: pass, Client: mockHTTPClient} @@ -284,6 +263,7 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ + {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, @@ -296,7 +276,7 @@ func TestApplyIngestorCluster(t *testing.T) { } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, queue, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -322,6 +302,13 @@ func TestApplyIngestorCluster(t *testing.T) { } } + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { + podName := fmt.Sprintf("splunk-test-ingestor-%d", i) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/services/server/control/restart", podName, cr.GetName(), cr.GetNamespace()) + req, _ := http.NewRequest("POST", baseURL, nil) + mockHTTPClient.AddHandler(req, 200, "", nil) + } + // Second reconcile should now yield Ready cr.Status.TelAppInstalled = true result, err = ApplyIngestorCluster(ctx, c, cr) @@ -416,7 +403,7 @@ func TestGetIngestorStatefulSet(t *testing.T) { 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivileg
eEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedQueueFieldsForIngestor(t *testing.T) { +func TestGetQueueAndPipelineInputsForIngestorConfFiles(t *testing.T) { provider := "sqs_smartbus" queue := enterpriseApi.Queue{ @@ -434,6 +421,9 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { AuthRegion: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } @@ -455,21 +445,12 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { }, } - newCR := &enterpriseApi.IngestorCluster{ - Spec: enterpriseApi.IngestorClusterSpec{ - QueueRef: corev1.ObjectReference{ - Name: queue.Name, - }, - ObjectStorageRef: corev1.ObjectReference{ - Name: os.Name, - }, - }, - Status: enterpriseApi.IngestorClusterStatus{}, - } + key := "key" + secret := "secret" - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, false) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(&queue.Spec, &os.Spec, key, secret) - assert.Equal(t, 10, len(queueChangedFields)) + assert.Equal(t, 12, len(queueInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, @@ -481,9 +462,11 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, - }, queueChangedFields) + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, + }, queueInputs) - assert.Equal(t, 6, len(pipelineChangedFields)) + assert.Equal(t, 6, len(pipelineInputs)) assert.Equal(t, [][]string{ 
{"pipeline:remotequeueruleset", "disabled", "false"}, {"pipeline:ruleset", "disabled", "true"}, @@ -491,14 +474,20 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { {"pipeline:remotequeueoutput", "disabled", "false"}, {"pipeline:typing", "disabled", "true"}, {"pipeline:indexerPipe", "disabled", "true"}, - }, pipelineChangedFields) + }, pipelineInputs) } -func TestHandlePushQueueChange(t *testing.T) { +func TestUpdateIngestorConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions provider := "sqs_smartbus" - queue := enterpriseApi.Queue{ + accessKey := "accessKey" + secretKey := "secretKey" + + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", @@ -517,7 +506,7 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -534,7 +523,7 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - newCR := &enterpriseApi.IngestorCluster{ + cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", }, @@ -551,10 +540,9 @@ func TestHandlePushQueueChange(t *testing.T) { }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + Replicas: 3, + ReadyReplicas: 3, + QueueBucketAccessSecretVersion: "123", }, } @@ -598,6 +586,10 @@ func TestHandlePushQueueChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-ingestor-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -608,17 +600,10 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - // Mock pods - c := spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // Mock secret @@ -627,9 +612,9 @@ func TestHandlePushQueueChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf @@ -646,12 +631,12 @@ func TestHandlePushQueueChange(t *testing.T) { } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &queue, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -664,9 +649,9 @@ func TestHandlePushQueueChange(t *testing.T) { 
{"pipeline:indexerPipe", "disabled", "true"}, } - for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-ingestor-%d", i) - baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, newCR.GetName(), newCR.GetNamespace()) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace()) for _, field := range propertyKVList { req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0]))) @@ -678,32 +663,32 @@ func TestHandlePushQueueChange(t *testing.T) { } } - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.Queue, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestIngestorQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -712,6 +697,6 @@ func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *in } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushQueuePipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index fe96430e4..4267662d8 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -66,7 +66,7 @@ const ( // SplunkQueue is the queue instance SplunkQueue InstanceType = "queue" - // SplunkObjectStorage is the large message store instance + // SplunkObjectStorage is the object storage instance 
SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index afafa6ede..88a85b448 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -417,6 +417,27 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil } +// GetQueueRemoteVolumeSecrets is used to retrieve the access key and secret key for Index & Ingestion separation +func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, string, error) { + namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) + if err != nil { + return "", "", "", err + } + + accessKey := string(namespaceScopedSecret.Data[s3AccessKey]) + secretKey := string(namespaceScopedSecret.Data[s3SecretKey]) + + version := namespaceScopedSecret.ResourceVersion + + if accessKey == "" { + return "", "", "", errors.New("access key is missing") + } else if secretKey == "" { + return "", "", "", errors.New("secret key is missing") + } + + return accessKey, secretKey, version, nil +} + // getLocalAppFileName generates the local app file name // For e.g., if the app package name is sample_app.tgz // and etag is "abcd1234", then it will be downloaded locally as sample_app.tgz_abcd1234
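A minimal, hypothetical sketch of how a reconciler can consume this helper; resolveQueueCredentials is an invented wrapper name, the VolList scan mirrors the inline loop in ApplyIngestorCluster above, and the snippet assumes the package's existing imports:

// resolveQueueCredentials walks a Queue spec's volume list and resolves the
// first secret reference into a credential triple, as ApplyIngestorCluster
// does inline. The returned version is the secret's ResourceVersion, which
// the controllers compare against Status.QueueBucketAccessSecretVersion to
// detect credential changes.
func resolveQueueCredentials(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, queue *enterpriseApi.QueueSpec) (accessKey, secretKey, version string, err error) {
	for _, vol := range queue.SQS.VolList {
		if vol.SecretRef == "" {
			continue
		}
		accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, c, cr)
		if err != nil {
			return "", "", "", err
		}
	}
	return accessKey, secretKey, version, nil
}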
WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, ¤t) diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 86231df14..3e18b669c 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -42,29 +42,29 @@ var ( queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", + Name: "index-ingest-separation-test-q", AuthRegion: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue", + DLQ: "index-ingest-separation-test-dlq", }, } objectStorage = enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://test-bucket/smartbus-test", + Path: "s3://index-ingest-separation-test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" inputs = []string{ - "[remote_queue:test-queue]", + "[remote_queue:index-ingest-separation-test-q]", "remote_queue.type = sqs_smartbus", "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s") @@ -85,40 +85,10 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateQueue = enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "test-queue-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue-updated", - }, - } - - updatedInputs = []string{ - "[remote_queue:test-queue-updated]", - "remote_queue.type = sqs_smartbus", - "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue-updated", - "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket-updated/smartbus-test", - "remote_queue.sqs_smartbus.retry_policy = max", - "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} - updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") - updatedDefaultsAll = []string{ - "[pipeline:remotequeueruleset]\ndisabled = false", - "[pipeline:ruleset]\ndisabled = false", - "[pipeline:remotequeuetyping]\ndisabled = false", - "[pipeline:remotequeueoutput]\ndisabled = false", - "[pipeline:typing]\ndisabled = true", - } - updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") - 
inputsShouldNotContain = []string{ - "[remote_queue:test-queue]", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "[remote_queue:index-ingest-separation-test-q]", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s") diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 41beae4bc..17b5bd8da 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -75,9 +75,14 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} + queue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -91,7 +96,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -101,7 +106,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -131,11 +136,11 @@ var _ = Describe("indingsep test", func() { Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) // Delete the Queue - queue := &enterpriseApi.Queue{} - err = deployment.GetInstance(ctx, "queue", queue) - Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", queue) - err = 
deployment.DeleteCR(ctx, queue) - Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) + q = &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", q) + Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", q) + err = deployment.DeleteCR(ctx, q) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", q) // Delete the ObjectStorage objStorage = &enterpriseApi.ObjectStorage{} @@ -148,9 +153,14 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} + queue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -162,24 +172,19 @@ var _ = Describe("indingsep test", func() { objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") - // Upload apps to S3 - testcaseEnvInst.Log.Info("Upload apps to S3") - appFileList := testenv.GetAppFileList(appListV1) - _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) - Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") - // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test) appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, s3TestDir, 60) appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5) ic := &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: deployment.GetName() + "-ingest", - Namespace: testcaseEnvInst.GetName(), + Name: deployment.GetName() + "-ingest", + Namespace: testcaseEnvInst.GetName(), + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, Spec: enterpriseApi.IngestorClusterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - ServiceAccount: serviceAccountName, + // ServiceAccount: serviceAccountName, LivenessInitialDelaySeconds: 600, ReadinessInitialDelaySeconds: 50, StartupProbe: &enterpriseApi.Probe{ @@ -205,10 +210,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - QueueRef: v1.ObjectReference{Name: q.Name}, - ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + QueueRef: v1.ObjectReference{Name: q.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -220,6 +225,12 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Upload apps to S3 + testcaseEnvInst.Log.Info("Upload apps to S3") + appFileList := 
testenv.GetAppFileList(appListV1) + _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + // Verify Ingestor Cluster Pods have apps installed testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed") ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)} @@ -252,9 +263,14 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} + queue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -268,7 +284,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -278,7 +294,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -301,7 +317,8 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Queue).To(Equal(queue), "Ingestor queue status is not the same as provided as input") + Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Ingestor queue status queue bucket access secret version is empty") + Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Ingestor queue status queue bucket access secret version is 0") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -311,7 +328,8 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Queue).To(Equal(queue), "Indexer queue status is not the same as provided as 
input") + Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Indexer queue status queue bucket access secret version is empty") + Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Indexer queue status queue bucket access secret version is 0") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -356,177 +374,4 @@ var _ = Describe("indingsep test", func() { } }) }) - - Context("Ingestor and Indexer deployment", func() { - It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { - // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) - - // Deploy Queue - testcaseEnvInst.Log.Info("Deploy Queue") - q, err := deployment.DeployQueue(ctx, "queue", queue) - Expect(err).To(Succeed(), "Unable to deploy Queue") - - // Deploy ObjectStorage - testcaseEnvInst.Log.Info("Deploy ObjectStorage") - objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) - Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") - - // Deploy Ingestor Cluster - testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") - - // Deploy Cluster Manager - testcaseEnvInst.Log.Info("Deploy Cluster Manager") - _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) - Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") - - // Deploy Indexer Cluster - testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") - - // Ensure that Ingestor Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Cluster Manager is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") - testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Queue CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Queue CR with latest config") - queue := &enterpriseApi.Queue{} - err = deployment.GetInstance(ctx, q.Name, queue) - Expect(err).To(Succeed(), "Failed to get instance of Queue") - - // Update instance of Queue CR with new queue - testcaseEnvInst.Log.Info("Update instance of Queue CR with new queue") - queue.Spec = updateQueue - err = deployment.UpdateCR(ctx, queue) - Expect(err).To(Succeed(), "Unable to deploy Queue with updated CR") - - // Ensure that Ingestor Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster has not been restarted") - 
-			testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst)
-
-			// Get instance of current Ingestor Cluster CR with latest config
-			testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config")
-			ingest := &enterpriseApi.IngestorCluster{}
-			err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest)
-			Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster")
-
-			// Verify Ingestor Cluster Status
-			testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status")
-			Expect(ingest.Status.Queue).To(Equal(updateQueue), "Ingestor queue status is not the same as provided as input")
-
-			// Get instance of current Indexer Cluster CR with latest config
-			testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config")
-			index := &enterpriseApi.IndexerCluster{}
-			err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index)
-			Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster")
-
-			// Verify Indexer Cluster Status
-			testcaseEnvInst.Log.Info("Verify Indexer Cluster Status")
-			Expect(index.Status.Queue).To(Equal(updateQueue), "Indexer queue status is not the same as provided as input")
-
-			// Verify conf files
-			testcaseEnvInst.Log.Info("Verify conf files")
-			pods := testenv.DumpGetPods(deployment.GetName())
-			for _, pod := range pods {
-				defaultsConf := ""
-
-				if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") {
-					// Verify outputs.conf
-					testcaseEnvInst.Log.Info("Verify outputs.conf")
-					outputsPath := "opt/splunk/etc/system/local/outputs.conf"
-					outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod")
-					testenv.ValidateContent(outputsConf, updatedOutputs, true)
-					testenv.ValidateContent(outputsConf, outputsShouldNotContain, false)
-
-					// Verify default-mode.conf
-					testcaseEnvInst.Log.Info("Verify default-mode.conf")
-					defaultsPath := "opt/splunk/etc/system/local/default-mode.conf"
-					defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod")
-					testenv.ValidateContent(defaultsConf, defaultsAll, true)
-
-					// Verify AWS env variables
-					testcaseEnvInst.Log.Info("Verify AWS env variables")
-					envVars, err := testenv.GetAWSEnv(pod, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod")
-					testenv.ValidateContent(envVars, awsEnvVars, true)
-				}
-
-				if strings.Contains(pod, "ingest") {
-					// Verify default-mode.conf
-					testcaseEnvInst.Log.Info("Verify default-mode.conf")
-					testenv.ValidateContent(defaultsConf, defaultsIngest, true)
-				} else if strings.Contains(pod, "idxc") {
-					// Verify inputs.conf
-					testcaseEnvInst.Log.Info("Verify inputs.conf")
-					inputsPath := "opt/splunk/etc/system/local/inputs.conf"
-					inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod")
-					testenv.ValidateContent(inputsConf, updatedInputs, true)
-					testenv.ValidateContent(inputsConf, inputsShouldNotContain, false)
-				}
-			}
-
-			// Verify conf files
-			testcaseEnvInst.Log.Info("Verify conf files")
-			pods = testenv.DumpGetPods(deployment.GetName())
-			for _, pod := range pods {
-				defaultsConf := ""
-
-				if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") {
-					// Verify outputs.conf
-					testcaseEnvInst.Log.Info("Verify outputs.conf")
-					outputsPath := "opt/splunk/etc/system/local/outputs.conf"
-					outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod")
-					testenv.ValidateContent(outputsConf, updatedOutputs, true)
-					testenv.ValidateContent(outputsConf, outputsShouldNotContain, false)
-
-					// Verify default-mode.conf
-					testcaseEnvInst.Log.Info("Verify default-mode.conf")
-					defaultsPath := "opt/splunk/etc/system/local/default-mode.conf"
-					defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod")
-					testenv.ValidateContent(defaultsConf, updatedDefaultsAll, true)
-
-					// Verify AWS env variables
-					testcaseEnvInst.Log.Info("Verify AWS env variables")
-					envVars, err := testenv.GetAWSEnv(pod, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod")
-					testenv.ValidateContent(envVars, awsEnvVars, true)
-				}
-
-				if strings.Contains(pod, "ingest") {
-					// Verify default-mode.conf
-					testcaseEnvInst.Log.Info("Verify default-mode.conf")
-					testenv.ValidateContent(defaultsConf, updatedDefaultsIngest, true)
-				} else if strings.Contains(pod, "idxc") {
-					// Verify inputs.conf
-					testcaseEnvInst.Log.Info("Verify inputs.conf")
-					inputsPath := "opt/splunk/etc/system/local/inputs.conf"
-					inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName())
-					Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod")
-					testenv.ValidateContent(inputsConf, updatedInputs, true)
-					testenv.ValidateContent(inputsConf, inputsShouldNotContain, false)
-				}
-			}
-		})
-	})
 })
diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go
index 0eb2b485c..f696a4a17 100644
--- a/test/testenv/remote_index_utils.go
+++ b/test/testenv/remote_index_utils.go
@@ -86,6 +86,14 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string,
 	return true
 }
 
+// GenerateQueueVolumeSpec return VolumeSpec struct with given values
+func GenerateQueueVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec {
+	return enterpriseApi.VolumeSpec{
+		Name:      name,
+		SecretRef: secretRef,
+	}
+}
+
 // GenerateIndexVolumeSpec return VolumeSpec struct with given values
 func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string, provider string, storageType string, region string) enterpriseApi.VolumeSpec {
 	return enterpriseApi.VolumeSpec{
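For context on the helper added above: GenerateQueueVolumeSpec fills only Name and SecretRef, and the indingsep test consumes it by assigning the resulting list to the Queue spec's SQS.VolList (see the test hunk earlier in this diff). A minimal sketch of that call pattern, assuming the QueueSpec type and its SQS.VolList field path match what the test exercises; the helper name attachQueueSecret is hypothetical:

// Sketch only: wires the per-test index/ingest separation secret into a
// Queue spec the same way the indingsep test does. QueueSpec and SQS.VolList
// are assumed from the test code above; everything else is from this diff.
func attachQueueSecret(spec *enterpriseApi.QueueSpec, env *testenv.TestCaseEnv) {
	spec.SQS.VolList = []enterpriseApi.VolumeSpec{
		testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", env.GetIndexIngestSepSecretName()),
	}
}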
diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go
index a1081e0a0..737aaa9a6 100644
--- a/test/testenv/testcaseenv.go
+++ b/test/testenv/testcaseenv.go
@@ -35,24 +35,25 @@ import (
 
 // TestCaseEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run test cases against
 type TestCaseEnv struct {
-	kubeClient          client.Client
-	name                string
-	namespace           string
-	serviceAccountName  string
-	roleName            string
-	roleBindingName     string
-	operatorName        string
-	operatorImage       string
-	splunkImage         string
-	initialized         bool
-	SkipTeardown        bool
-	licenseFilePath     string
-	licenseCMName       string
-	s3IndexSecret       string
-	Log                 logr.Logger
-	cleanupFuncs        []cleanupFunc
-	debug               string
-	clusterWideOperator string
+	kubeClient           client.Client
+	name                 string
+	namespace            string
+	serviceAccountName   string
+	roleName             string
+	roleBindingName      string
+	operatorName         string
+	operatorImage        string
+	splunkImage          string
+	initialized          bool
+	SkipTeardown         bool
+	licenseFilePath      string
+	licenseCMName        string
+	s3IndexSecret        string
+	indexIngestSepSecret string
+	Log                  logr.Logger
+	cleanupFuncs         []cleanupFunc
+	debug                string
+	clusterWideOperator  string
 }
 
 // GetKubeClient returns the kube client to talk to kube-apiserver
@@ -79,21 +80,22 @@ func NewTestCaseEnv(kubeClient client.Client, name string, operatorImage string,
 	}
 
 	testenv := &TestCaseEnv{
-		kubeClient:          kubeClient,
-		name:                name,
-		namespace:           name,
-		serviceAccountName:  name,
-		roleName:            name,
-		roleBindingName:     name,
-		operatorName:        "splunk-op-" + name,
-		operatorImage:       operatorImage,
-		splunkImage:         splunkImage,
-		SkipTeardown:        specifiedSkipTeardown,
-		licenseCMName:       name,
-		licenseFilePath:     licenseFilePath,
-		s3IndexSecret:       "splunk-s3-index-" + name,
-		debug:               os.Getenv("DEBUG"),
-		clusterWideOperator: installOperatorClusterWide,
+		kubeClient:           kubeClient,
+		name:                 name,
+		namespace:            name,
+		serviceAccountName:   name,
+		roleName:             name,
+		roleBindingName:      name,
+		operatorName:         "splunk-op-" + name,
+		operatorImage:        operatorImage,
+		splunkImage:          splunkImage,
+		SkipTeardown:         specifiedSkipTeardown,
+		licenseCMName:        name,
+		licenseFilePath:      licenseFilePath,
+		s3IndexSecret:        "splunk-s3-index-" + name,
+		indexIngestSepSecret: "splunk-index-ingest-sep-" + name,
+		debug:                os.Getenv("DEBUG"),
+		clusterWideOperator:  installOperatorClusterWide,
 	}
 
 	testenv.Log = logf.Log.WithValues("testcaseenv", testenv.name)
@@ -156,6 +158,7 @@ func (testenv *TestCaseEnv) setup() error {
 	switch ClusterProvider {
 	case "eks":
 		testenv.createIndexSecret()
+		testenv.createIndexIngestSepSecret()
 	case "azure":
 		testenv.createIndexSecretAzure()
 	case "gcp":
@@ -588,11 +591,41 @@ func (testenv *TestCaseEnv) createIndexSecretAzure() error {
 	return nil
 }
 
+// createIndexIngestSepSecret creates the index and ingestion separation secret object
+func (testenv *TestCaseEnv) createIndexIngestSepSecret() error {
+	secretName := testenv.indexIngestSepSecret
+	ns := testenv.namespace
+
+	data := map[string][]byte{"s3_access_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID")),
+		"s3_secret_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY"))}
+	secret := newSecretSpec(ns, secretName, data)
+
+	if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+		testenv.Log.Error(err, "Unable to create index and ingestion sep secret object")
+		return err
+	}
+
+	testenv.pushCleanupFunc(func() error {
+		err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+		if err != nil {
+			testenv.Log.Error(err, "Unable to delete index and ingestion sep secret object")
+			return err
+		}
+		return nil
+	})
+	return nil
+}
+
 // GetIndexSecretName return index secret object name
 func (testenv *TestCaseEnv) GetIndexSecretName() string {
 	return testenv.s3IndexSecret
 }
 
+// GetIndexIngestSepSecretName return index and ingestion separation secret object name
+func (testenv *TestCaseEnv) GetIndexIngestSepSecretName() string {
+	return testenv.indexIngestSepSecret
+}
+
 // GetLMConfigMap Return name of license config map
 func (testenv *TestCaseEnv) GetLMConfigMap() string {
 	return testenv.licenseCMName
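For context on createIndexIngestSepSecret above: it reads the two AWS_INDEX_INGEST_SEP_* environment variables, stores them under the s3_access_key / s3_secret_key keys, and registers a cleanup that deletes the secret at teardown. newSecretSpec itself is not part of this diff; assuming it builds a plain Opaque Secret, the created object is roughly the following sketch (the function name indexIngestSepSecretSketch is hypothetical):

// Sketch only: approximate Secret produced by createIndexIngestSepSecret,
// assuming newSecretSpec returns an Opaque corev1.Secret. Key names and env
// vars are verbatim from the hunk above.
package testenv

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func indexIngestSepSecretSketch(ns, secretName string) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: ns},
		Type:       corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"s3_access_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID")),
			"s3_secret_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY")),
		},
	}
}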
diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go
index f82310015..06fe304d4 100644
--- a/test/testenv/testenv.go
+++ b/test/testenv/testenv.go
@@ -160,24 +160,25 @@ type cleanupFunc func() error
 
 // TestEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run tests against
 type TestEnv struct {
-	kubeAPIServer      string
-	name               string
-	namespace          string
-	serviceAccountName string
-	roleName           string
-	roleBindingName    string
-	operatorName       string
-	operatorImage      string
-	splunkImage        string
-	initialized        bool
-	SkipTeardown       bool
-	licenseFilePath    string
-	licenseCMName      string
-	s3IndexSecret      string
-	kubeClient         client.Client
-	Log                logr.Logger
-	cleanupFuncs       []cleanupFunc
-	debug              string
+	kubeAPIServer        string
+	name                 string
+	namespace            string
+	serviceAccountName   string
+	roleName             string
+	roleBindingName      string
+	operatorName         string
+	operatorImage        string
+	splunkImage          string
+	initialized          bool
+	SkipTeardown         bool
+	licenseFilePath      string
+	licenseCMName        string
+	s3IndexSecret        string
+	indexIngestSepSecret string
+	kubeClient           client.Client
+	Log                  logr.Logger
+	cleanupFuncs         []cleanupFunc
+	debug                string
 }
 
 func init() {
@@ -231,19 +232,20 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, licenseFilePath st
 	}
 
 	testenv := &TestEnv{
-		name:               envName,
-		namespace:          envName,
-		serviceAccountName: envName,
-		roleName:           envName,
-		roleBindingName:    envName,
-		operatorName:       "splunk-op-" + envName,
-		operatorImage:      operatorImage,
-		splunkImage:        splunkImage,
-		SkipTeardown:       specifiedSkipTeardown,
-		licenseCMName:      envName,
-		licenseFilePath:    licenseFilePath,
-		s3IndexSecret:      "splunk-s3-index-" + envName,
-		debug:              os.Getenv("DEBUG"),
+		name:                 envName,
+		namespace:            envName,
+		serviceAccountName:   envName,
+		roleName:             envName,
+		roleBindingName:      envName,
+		operatorName:         "splunk-op-" + envName,
+		operatorImage:        operatorImage,
+		splunkImage:          splunkImage,
+		SkipTeardown:         specifiedSkipTeardown,
+		licenseCMName:        envName,
+		licenseFilePath:      licenseFilePath,
+		s3IndexSecret:        "splunk-s3-index-" + envName,
+		indexIngestSepSecret: "splunk-index-ingest-sep-" + envName,
+		debug:                os.Getenv("DEBUG"),
 	}
 
 	testenv.Log = logf.Log.WithValues("testenv", testenv.name)
diff --git a/test/testenv/util.go b/test/testenv/util.go
index d9c6d5807..366ea3668 100644
--- a/test/testenv/util.go
+++ b/test/testenv/util.go
@@ -396,8 +396,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste
 			},
 			Defaults: ansibleConfig,
 		},
-		Replicas: int32(replicas),
-		QueueRef: queue,
+		Replicas:         int32(replicas),
+		QueueRef:         queue,
 		ObjectStorageRef: os,
 	},
 }
@@ -426,8 +426,8 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue
 				Image: splunkImage,
 			},
 		},
-		Replicas: int32(replicas),
-		QueueRef: queue,
+		Replicas:         int32(replicas),
+		QueueRef:         queue,
 		ObjectStorageRef: os,
 	},
 }
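For context on the util.go hunks: the removed/added pairs differ only in whitespace, so the only change is gofmt realignment of Replicas and QueueRef next to the existing ObjectStorageRef field. A sketch of how the two references end up populated; the v1 alias and the CR names "queue" and "os" match the test hunks earlier in this diff, while the spec literal itself is illustrative and assumes IngestorClusterSpec exposes exactly these fields:

// Sketch only: QueueRef/ObjectStorageRef carry the names of the Queue and
// ObjectStorage CRs deployed by the test.
func exampleIngestorSpec() enterpriseApi.IngestorClusterSpec {
	return enterpriseApi.IngestorClusterSpec{
		Replicas:         3,
		QueueRef:         v1.ObjectReference{Name: "queue"},
		ObjectStorageRef: v1.ObjectReference{Name: "os"},
	}
}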