From 0b434d772a3e95c687c4c2ec3a704fc212f8b9b4 Mon Sep 17 00:00:00 2001 From: Luka Skugor Date: Sat, 17 Jan 2026 13:46:49 +0000 Subject: [PATCH 1/4] feat: implement RolloutSchedule and ClusterRolloutSchedule controllers with time-based gating --- api/v1alpha1/clusterrolloutschedule_types.go | 114 +++++ api/v1alpha1/rolloutschedule_types.go | 182 +++++++ api/v1alpha1/zz_generated.deepcopy.go | 309 ++++++++++++ cmd/main.go | 20 + .../kuberik.com_clusterrolloutschedules.yaml | 327 ++++++++++++ .../bases/kuberik.com_rolloutschedules.yaml | 276 ++++++++++ config/rbac/role.yaml | 16 +- .../v1alpha1_clusterrolloutschedule.yaml | 46 ++ config/samples/v1alpha1_rolloutschedule.yaml | 33 ++ go.mod | 2 + .../clusterrolloutschedule_controller.go | 285 +++++++++++ .../controller/rolloutschedule_controller.go | 195 ++++++++ .../rolloutschedule_controller_test.go | 470 ++++++++++++++++++ .../controller/rolloutschedule_helpers.go | 426 ++++++++++++++++ 14 files changed, 2693 insertions(+), 8 deletions(-) create mode 100644 api/v1alpha1/clusterrolloutschedule_types.go create mode 100644 api/v1alpha1/rolloutschedule_types.go create mode 100644 config/crd/bases/kuberik.com_clusterrolloutschedules.yaml create mode 100644 config/crd/bases/kuberik.com_rolloutschedules.yaml create mode 100644 config/samples/v1alpha1_clusterrolloutschedule.yaml create mode 100644 config/samples/v1alpha1_rolloutschedule.yaml create mode 100644 internal/controller/clusterrolloutschedule_controller.go create mode 100644 internal/controller/rolloutschedule_controller.go create mode 100644 internal/controller/rolloutschedule_controller_test.go create mode 100644 internal/controller/rolloutschedule_helpers.go diff --git a/api/v1alpha1/clusterrolloutschedule_types.go b/api/v1alpha1/clusterrolloutschedule_types.go new file mode 100644 index 0000000..8ebb107 --- /dev/null +++ b/api/v1alpha1/clusterrolloutschedule_types.go @@ -0,0 +1,114 @@ +/* +Copyright 2025. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterRolloutScheduleSpec defines the desired state of ClusterRolloutSchedule. +type ClusterRolloutScheduleSpec struct { + // RolloutSelector is a label selector to match Rollouts across namespaces. + // +required + RolloutSelector *metav1.LabelSelector `json:"rolloutSelector"` + + // NamespaceSelector is a label selector to match namespaces. + // If empty, applies to all namespaces. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // Rules is a list of schedule rules. + // The schedule is active if ANY rule matches the current time/date. + // +required + // +kubebuilder:validation:MinItems=1 + Rules []ScheduleRule `json:"rules"` + + // Timezone is the IANA timezone for the schedule (e.g., "America/New_York"). + // Defaults to "UTC" if not specified. + // +kubebuilder:default="UTC" + // +optional + Timezone string `json:"timezone,omitempty"` + + // Action defines what to do when the schedule is active. + // - "Allow": Gate passes when active, blocks when inactive + // - "Deny": Gate blocks when active, passes when inactive + // +kubebuilder:default="Deny" + // +optional + Action RolloutScheduleAction `json:"action,omitempty"` +} + +// ClusterRolloutScheduleStatus defines the observed state of ClusterRolloutSchedule. 
+type ClusterRolloutScheduleStatus struct { + // Active indicates if the schedule is currently active (any rule matches). + // +optional + Active bool `json:"active,omitempty"` + + // ActiveRules is a list of rule names that are currently active. + // +optional + ActiveRules []string `json:"activeRules,omitempty"` + + // NextTransition is the timestamp when the active state will next change. + // +optional + NextTransition *metav1.Time `json:"nextTransition,omitempty"` + + // ManagedGates is a list of RolloutGate names being managed by this schedule. + // Format: "namespace/name" + // +optional + ManagedGates []string `json:"managedGates,omitempty"` + + // MatchingRollouts is the count of rollouts currently matched by the selectors. + // +optional + MatchingRollouts int `json:"matchingRollouts,omitempty"` + + // Conditions represents the current state of the schedule. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:printcolumn:name="Action",type=string,JSONPath=`.spec.action` +// +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active` +// +kubebuilder:printcolumn:name="Matching",type=integer,JSONPath=`.status.matchingRollouts` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// ClusterRolloutSchedule is the Schema for the clusterrolloutschedules API. +type ClusterRolloutSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterRolloutScheduleSpec `json:"spec,omitempty"` + Status ClusterRolloutScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterRolloutScheduleList contains a list of ClusterRolloutSchedule. 
+type ClusterRolloutScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterRolloutSchedule `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterRolloutSchedule{}, &ClusterRolloutScheduleList{}) +} diff --git a/api/v1alpha1/rolloutschedule_types.go b/api/v1alpha1/rolloutschedule_types.go new file mode 100644 index 0000000..3acb56c --- /dev/null +++ b/api/v1alpha1/rolloutschedule_types.go @@ -0,0 +1,182 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// RolloutScheduleAction defines the action to take when the schedule is active. +// +kubebuilder:validation:Enum=Allow;Deny +type RolloutScheduleAction string + +const ( + // RolloutScheduleActionAllow allows rollouts when the schedule is active. + // When inactive, rollouts are blocked. + RolloutScheduleActionAllow RolloutScheduleAction = "Allow" + + // RolloutScheduleActionDeny blocks rollouts when the schedule is active. + // When inactive, rollouts are allowed. + RolloutScheduleActionDeny RolloutScheduleAction = "Deny" +) + +// DayOfWeek represents a day of the week. 
+// +kubebuilder:validation:Enum=Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;Sunday +type DayOfWeek string + +const ( + Monday DayOfWeek = "Monday" + Tuesday DayOfWeek = "Tuesday" + Wednesday DayOfWeek = "Wednesday" + Thursday DayOfWeek = "Thursday" + Friday DayOfWeek = "Friday" + Saturday DayOfWeek = "Saturday" + Sunday DayOfWeek = "Sunday" +) + +// TimeRange represents a time range within a day. +type TimeRange struct { + // Start time in HH:MM format (24-hour) + // +kubebuilder:validation:Pattern=`^([01]\d|2[0-3]):[0-5]\d$` + // +required + Start string `json:"start"` + + // End time in HH:MM format (24-hour) + // +kubebuilder:validation:Pattern=`^([01]\d|2[0-3]):[0-5]\d$` + // +required + End string `json:"end"` +} + +// DateRange represents a date range. +type DateRange struct { + // Start date in YYYY-MM-DD format + // +kubebuilder:validation:Pattern=`^\d{4}-\d{2}-\d{2}$` + // +required + Start string `json:"start"` + + // End date in YYYY-MM-DD format + // +kubebuilder:validation:Pattern=`^\d{4}-\d{2}-\d{2}$` + // +required + End string `json:"end"` +} + +// ScheduleRule defines a time-based rule. +// The schedule is active if the current time/date matches this rule. +type ScheduleRule struct { + // Name is an optional identifier for this rule + // +optional + Name string `json:"name,omitempty"` + + // TimeRange restricts the rule to specific times of day + // +optional + TimeRange *TimeRange `json:"timeRange,omitempty"` + + // DaysOfWeek restricts the rule to specific days of the week + // +optional + DaysOfWeek []DayOfWeek `json:"daysOfWeek,omitempty"` + + // DateRange restricts the rule to specific date range + // +optional + DateRange *DateRange `json:"dateRange,omitempty"` +} + +// RolloutScheduleSpec defines the desired state of RolloutSchedule. +type RolloutScheduleSpec struct { + // RolloutSelector is a label selector to match Rollouts in the same namespace. 
+ // +required + RolloutSelector *metav1.LabelSelector `json:"rolloutSelector"` + + // Rules is a list of schedule rules. + // The schedule is active if ANY rule matches the current time/date. + // +required + // +kubebuilder:validation:MinItems=1 + Rules []ScheduleRule `json:"rules"` + + // Timezone is the IANA timezone for the schedule (e.g., "America/New_York"). + // Defaults to "UTC" if not specified. + // +kubebuilder:default="UTC" + // +optional + Timezone string `json:"timezone,omitempty"` + + // Action defines what to do when the schedule is active. + // - "Allow": Gate passes when active, blocks when inactive + // - "Deny": Gate blocks when active, passes when inactive + // +kubebuilder:default="Deny" + // +optional + Action RolloutScheduleAction `json:"action,omitempty"` +} + +// RolloutScheduleStatus defines the observed state of RolloutSchedule. +type RolloutScheduleStatus struct { + // Active indicates if the schedule is currently active (any rule matches). + // +optional + Active bool `json:"active,omitempty"` + + // ActiveRules is a list of rule names that are currently active. + // +optional + ActiveRules []string `json:"activeRules,omitempty"` + + // NextTransition is the timestamp when the active state will next change. + // +optional + NextTransition *metav1.Time `json:"nextTransition,omitempty"` + + // ManagedGates is a list of RolloutGate names being managed by this schedule. + // +optional + ManagedGates []string `json:"managedGates,omitempty"` + + // MatchingRollouts is the count of rollouts currently matched by the selector. + // +optional + MatchingRollouts int `json:"matchingRollouts,omitempty"` + + // Conditions represents the current state of the schedule. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:printcolumn:name="Action",type=string,JSONPath=`.spec.action` +// +kubebuilder:printcolumn:name="Active",type=boolean,JSONPath=`.status.active` +// +kubebuilder:printcolumn:name="Matching",type=integer,JSONPath=`.status.matchingRollouts` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// RolloutSchedule is the Schema for the rolloutschedules API. +type RolloutSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RolloutScheduleSpec `json:"spec,omitempty"` + Status RolloutScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RolloutScheduleList contains a list of RolloutSchedule. +type RolloutScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RolloutSchedule `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RolloutSchedule{}, &RolloutScheduleList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 140df0d..6df911d 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,148 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterRolloutSchedule) DeepCopyInto(out *ClusterRolloutSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutSchedule. +func (in *ClusterRolloutSchedule) DeepCopy() *ClusterRolloutSchedule { + if in == nil { + return nil + } + out := new(ClusterRolloutSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRolloutSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRolloutScheduleList) DeepCopyInto(out *ClusterRolloutScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRolloutSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutScheduleList. +func (in *ClusterRolloutScheduleList) DeepCopy() *ClusterRolloutScheduleList { + if in == nil { + return nil + } + out := new(ClusterRolloutScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRolloutScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterRolloutScheduleSpec) DeepCopyInto(out *ClusterRolloutScheduleSpec) { + *out = *in + if in.RolloutSelector != nil { + in, out := &in.RolloutSelector, &out.RolloutSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]ScheduleRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutScheduleSpec. +func (in *ClusterRolloutScheduleSpec) DeepCopy() *ClusterRolloutScheduleSpec { + if in == nil { + return nil + } + out := new(ClusterRolloutScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRolloutScheduleStatus) DeepCopyInto(out *ClusterRolloutScheduleStatus) { + *out = *in + if in.ActiveRules != nil { + in, out := &in.ActiveRules, &out.ActiveRules + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NextTransition != nil { + in, out := &in.NextTransition, &out.NextTransition + *out = (*in).DeepCopy() + } + if in.ManagedGates != nil { + in, out := &in.ManagedGates, &out.ManagedGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutScheduleStatus. 
+func (in *ClusterRolloutScheduleStatus) DeepCopy() *ClusterRolloutScheduleStatus { + if in == nil { + return nil + } + out := new(ClusterRolloutScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DateRange) DeepCopyInto(out *DateRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DateRange. +func (in *DateRange) DeepCopy() *DateRange { + if in == nil { + return nil + } + out := new(DateRange) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentHistoryEntry) DeepCopyInto(out *DeploymentHistoryEntry) { *out = *in @@ -447,6 +589,128 @@ func (in *RolloutList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutSchedule) DeepCopyInto(out *RolloutSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutSchedule. +func (in *RolloutSchedule) DeepCopy() *RolloutSchedule { + if in == nil { + return nil + } + out := new(RolloutSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RolloutSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolloutScheduleList) DeepCopyInto(out *RolloutScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RolloutSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutScheduleList. +func (in *RolloutScheduleList) DeepCopy() *RolloutScheduleList { + if in == nil { + return nil + } + out := new(RolloutScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RolloutScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutScheduleSpec) DeepCopyInto(out *RolloutScheduleSpec) { + *out = *in + if in.RolloutSelector != nil { + in, out := &in.RolloutSelector, &out.RolloutSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]ScheduleRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutScheduleSpec. +func (in *RolloutScheduleSpec) DeepCopy() *RolloutScheduleSpec { + if in == nil { + return nil + } + out := new(RolloutScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolloutScheduleStatus) DeepCopyInto(out *RolloutScheduleStatus) { + *out = *in + if in.ActiveRules != nil { + in, out := &in.ActiveRules, &out.ActiveRules + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NextTransition != nil { + in, out := &in.NextTransition, &out.NextTransition + *out = (*in).DeepCopy() + } + if in.ManagedGates != nil { + in, out := &in.ManagedGates, &out.ManagedGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutScheduleStatus. +func (in *RolloutScheduleStatus) DeepCopy() *RolloutScheduleStatus { + if in == nil { + return nil + } + out := new(RolloutScheduleStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RolloutSpec) DeepCopyInto(out *RolloutSpec) { *out = *in @@ -575,6 +839,51 @@ func (in *RolloutStatus) DeepCopy() *RolloutStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleRule) DeepCopyInto(out *ScheduleRule) { + *out = *in + if in.TimeRange != nil { + in, out := &in.TimeRange, &out.TimeRange + *out = new(TimeRange) + **out = **in + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]DayOfWeek, len(*in)) + copy(*out, *in) + } + if in.DateRange != nil { + in, out := &in.DateRange, &out.DateRange + *out = new(DateRange) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRule. 
+func (in *ScheduleRule) DeepCopy() *ScheduleRule { + if in == nil { + return nil + } + out := new(ScheduleRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeRange) DeepCopyInto(out *TimeRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeRange. +func (in *TimeRange) DeepCopy() *TimeRange { + if in == nil { + return nil + } + out := new(TimeRange) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TriggeredByInfo) DeepCopyInto(out *TriggeredByInfo) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 044e766..06da8a0 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -240,6 +240,26 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "KustomizationHealth") os.Exit(1) } + + if err = (&controller.RolloutScheduleReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("rolloutschedule-controller"), + Clock: &controller.RealClock{}, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RolloutSchedule") + os.Exit(1) + } + if err = (&controller.ClusterRolloutScheduleReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("clusterrolloutschedule-controller"), + Clock: &controller.RealClock{}, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterRolloutSchedule") + os.Exit(1) + } + // +kubebuilder:scaffold:builder if metricsCertWatcher != nil { diff --git a/config/crd/bases/kuberik.com_clusterrolloutschedules.yaml b/config/crd/bases/kuberik.com_clusterrolloutschedules.yaml new file mode 100644 index 0000000..903ca3b --- /dev/null +++ 
b/config/crd/bases/kuberik.com_clusterrolloutschedules.yaml @@ -0,0 +1,327 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusterrolloutschedules.kuberik.com +spec: + group: kuberik.com + names: + kind: ClusterRolloutSchedule + listKind: ClusterRolloutScheduleList + plural: clusterrolloutschedules + singular: clusterrolloutschedule + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.action + name: Action + type: string + - jsonPath: .status.active + name: Active + type: boolean + - jsonPath: .status.matchingRollouts + name: Matching + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterRolloutSchedule is the Schema for the clusterrolloutschedules + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterRolloutScheduleSpec defines the desired state of ClusterRolloutSchedule. + properties: + action: + default: Deny + description: |- + Action defines what to do when the schedule is active. 
+ - "Allow": Gate passes when active, blocks when inactive + - "Deny": Gate blocks when active, passes when inactive + enum: + - Allow + - Deny + type: string + namespaceSelector: + description: |- + NamespaceSelector is a label selector to match namespaces. + If empty, applies to all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + rolloutSelector: + description: RolloutSelector is a label selector to match Rollouts + across namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + rules: + description: |- + Rules is a list of schedule rules. + The schedule is active if ANY rule matches the current time/date. + items: + description: |- + ScheduleRule defines a time-based rule. + The schedule is active if the current time/date matches this rule. 
+ properties: + dateRange: + description: DateRange restricts the rule to specific date range + properties: + end: + description: End date in YYYY-MM-DD format + pattern: ^\d{4}-\d{2}-\d{2}$ + type: string + start: + description: Start date in YYYY-MM-DD format + pattern: ^\d{4}-\d{2}-\d{2}$ + type: string + required: + - end + - start + type: object + daysOfWeek: + description: DaysOfWeek restricts the rule to specific days + of the week + items: + description: DayOfWeek represents a day of the week. + enum: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + - Saturday + - Sunday + type: string + type: array + name: + description: Name is an optional identifier for this rule + type: string + timeRange: + description: TimeRange restricts the rule to specific times + of day + properties: + end: + description: End time in HH:MM format (24-hour) + pattern: ^([01]\d|2[0-3]):[0-5]\d$ + type: string + start: + description: Start time in HH:MM format (24-hour) + pattern: ^([01]\d|2[0-3]):[0-5]\d$ + type: string + required: + - end + - start + type: object + type: object + minItems: 1 + type: array + timezone: + default: UTC + description: |- + Timezone is the IANA timezone for the schedule (e.g., "America/New_York"). + Defaults to "UTC" if not specified. + type: string + required: + - rolloutSelector + - rules + type: object + status: + description: ClusterRolloutScheduleStatus defines the observed state of + ClusterRolloutSchedule. + properties: + active: + description: Active indicates if the schedule is currently active + (any rule matches). + type: boolean + activeRules: + description: ActiveRules is a list of rule names that are currently + active. + items: + type: string + type: array + conditions: + description: Conditions represents the current state of the schedule. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + managedGates: + description: |- + ManagedGates is a list of RolloutGate names being managed by this schedule. 
+ Format: "namespace/name" + items: + type: string + type: array + matchingRollouts: + description: MatchingRollouts is the count of rollouts currently matched + by the selectors. + type: integer + nextTransition: + description: NextTransition is the timestamp when the active state + will next change. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/kuberik.com_rolloutschedules.yaml b/config/crd/bases/kuberik.com_rolloutschedules.yaml new file mode 100644 index 0000000..83c7deb --- /dev/null +++ b/config/crd/bases/kuberik.com_rolloutschedules.yaml @@ -0,0 +1,276 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: rolloutschedules.kuberik.com +spec: + group: kuberik.com + names: + kind: RolloutSchedule + listKind: RolloutScheduleList + plural: rolloutschedules + singular: rolloutschedule + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.action + name: Action + type: string + - jsonPath: .status.active + name: Active + type: boolean + - jsonPath: .status.matchingRollouts + name: Matching + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RolloutSchedule is the Schema for the rolloutschedules API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RolloutScheduleSpec defines the desired state of RolloutSchedule. + properties: + action: + default: Deny + description: |- + Action defines what to do when the schedule is active. + - "Allow": Gate passes when active, blocks when inactive + - "Deny": Gate blocks when active, passes when inactive + enum: + - Allow + - Deny + type: string + rolloutSelector: + description: RolloutSelector is a label selector to match Rollouts + in the same namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + rules: + description: |- + Rules is a list of schedule rules. + The schedule is active if ANY rule matches the current time/date. + items: + description: |- + ScheduleRule defines a time-based rule. + The schedule is active if the current time/date matches this rule. + properties: + dateRange: + description: DateRange restricts the rule to specific date range + properties: + end: + description: End date in YYYY-MM-DD format + pattern: ^\d{4}-\d{2}-\d{2}$ + type: string + start: + description: Start date in YYYY-MM-DD format + pattern: ^\d{4}-\d{2}-\d{2}$ + type: string + required: + - end + - start + type: object + daysOfWeek: + description: DaysOfWeek restricts the rule to specific days + of the week + items: + description: DayOfWeek represents a day of the week. + enum: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + - Saturday + - Sunday + type: string + type: array + name: + description: Name is an optional identifier for this rule + type: string + timeRange: + description: TimeRange restricts the rule to specific times + of day + properties: + end: + description: End time in HH:MM format (24-hour) + pattern: ^([01]\d|2[0-3]):[0-5]\d$ + type: string + start: + description: Start time in HH:MM format (24-hour) + pattern: ^([01]\d|2[0-3]):[0-5]\d$ + type: string + required: + - end + - start + type: object + type: object + minItems: 1 + type: array + timezone: + default: UTC + description: |- + Timezone is the IANA timezone for the schedule (e.g., "America/New_York"). + Defaults to "UTC" if not specified. + type: string + required: + - rolloutSelector + - rules + type: object + status: + description: RolloutScheduleStatus defines the observed state of RolloutSchedule. + properties: + active: + description: Active indicates if the schedule is currently active + (any rule matches). 
+ type: boolean + activeRules: + description: ActiveRules is a list of rule names that are currently + active. + items: + type: string + type: array + conditions: + description: Conditions represents the current state of the schedule. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + managedGates: + description: ManagedGates is a list of RolloutGate names being managed + by this schedule. + items: + type: string + type: array + matchingRollouts: + description: MatchingRollouts is the count of rollouts currently matched + by the selector. + type: integer + nextTransition: + description: NextTransition is the timestamp when the active state + will next change. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e44859a..b611cc2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -8,6 +8,7 @@ rules: - "" resources: - '*' + - namespaces - secrets verbs: - get @@ -40,9 +41,12 @@ rules: - apiGroups: - kuberik.com resources: + - clusterrolloutschedules - healthchecks - kustomizationhealths + - rolloutgates - rollouts + - rolloutschedules verbs: - create - delete @@ -54,29 +58,25 @@ rules: - apiGroups: - kuberik.com resources: + - clusterrolloutschedules/finalizers - healthchecks/finalizers - kustomizationhealths/finalizers - rollouts/finalizers + - rolloutschedules/finalizers verbs: - update - apiGroups: - kuberik.com resources: + - clusterrolloutschedules/status - healthchecks/status - kustomizationhealths/status - rollouts/status + - rolloutschedules/status verbs: - get - patch - update -- apiGroups: - - kuberik.com - resources: - - rolloutgates - verbs: - - get - - list - - watch - apiGroups: - kustomize.toolkit.fluxcd.io resources: diff --git a/config/samples/v1alpha1_clusterrolloutschedule.yaml b/config/samples/v1alpha1_clusterrolloutschedule.yaml new file mode 100644 
index 0000000..2b10bab --- /dev/null +++ b/config/samples/v1alpha1_clusterrolloutschedule.yaml @@ -0,0 +1,46 @@ +apiVersion: kuberik.com/v1alpha1 +kind: ClusterRolloutSchedule +metadata: + name: production-peak-hours-deny + labels: + app.kubernetes.io/name: rollout-controller + app.kubernetes.io/managed-by: kustomize +spec: + # Match rollouts with tier=frontend across all namespaces + rolloutSelector: + matchLabels: + tier: frontend + + # Only apply to production namespaces + namespaceSelector: + matchLabels: + environment: production + + # Multiple rules - schedule is active if ANY rule matches + rules: + # Block during weekday business hours + - name: "weekday-peak-hours" + timeRange: + start: "09:00" + end: "17:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + + # Also block Saturday mornings + - name: "saturday-morning" + timeRange: + start: "09:00" + end: "12:00" + daysOfWeek: + - Saturday + + # Use Eastern Time + timezone: "America/New_York" + + # Action: Deny - block rollouts during peak hours + # (allows rollouts outside these times) + action: Deny diff --git a/config/samples/v1alpha1_rolloutschedule.yaml b/config/samples/v1alpha1_rolloutschedule.yaml new file mode 100644 index 0000000..874ea3b --- /dev/null +++ b/config/samples/v1alpha1_rolloutschedule.yaml @@ -0,0 +1,33 @@ +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: business-hours-allow + namespace: default + labels: + app.kubernetes.io/name: rollout-controller + app.kubernetes.io/managed-by: kustomize +spec: + # Match rollouts with the label "schedule=business-hours" + rolloutSelector: + matchLabels: + schedule: business-hours + + # List of schedule rules - schedule is active if ANY rule matches + rules: + - name: "weekday-business-hours" + timeRange: + start: "09:00" + end: "17:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + + # Use Eastern Time + timezone: "America/New_York" + + # Action: Allow - only allow 
rollouts when schedule is active + # (blocks rollouts outside business hours) + action: Allow diff --git a/go.mod b/go.mod index 36c465e..27c45e3 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/google/go-containerregistry v0.20.6 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 + github.com/stretchr/testify v1.10.0 k8s.io/apiextensions-apiserver v0.33.0 k8s.io/apimachinery v0.33.0 k8s.io/client-go v0.33.0 @@ -39,6 +40,7 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/vbatts/tar-split v0.12.1 // indirect github.com/xlab/treeprint v1.2.0 // indirect diff --git a/internal/controller/clusterrolloutschedule_controller.go b/internal/controller/clusterrolloutschedule_controller.go new file mode 100644 index 0000000..d867b45 --- /dev/null +++ b/internal/controller/clusterrolloutschedule_controller.go @@ -0,0 +1,285 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + rolloutv1alpha1 "github.com/kuberik/rollout-controller/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// ClusterRolloutScheduleReconciler reconciles a ClusterRolloutSchedule object +type ClusterRolloutScheduleReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + Clock Clock +} + +//+kubebuilder:rbac:groups=kuberik.com,resources=clusterrolloutschedules,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=kuberik.com,resources=clusterrolloutschedules/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=kuberik.com,resources=clusterrolloutschedules/finalizers,verbs=update +//+kubebuilder:rbac:groups=kuberik.com,resources=rolloutgates,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=kuberik.com,resources=rollouts,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *ClusterRolloutScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + schedule := &rolloutv1alpha1.ClusterRolloutSchedule{} + if err := r.Get(ctx, req.NamespacedName, schedule); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // 1. 
Evaluate Schedule + now := r.Clock.Now() + active, activeRules, nextTransition, err := evaluateScheduleRules(now, schedule.Spec.Rules, schedule.Spec.Timezone) + if err != nil { + logger.Error(err, "Failed to evaluate schedule rules") + return ctrl.Result{}, nil + } + + // 2. Find matching Rollouts (Cross Namespace) + // First list namespaces + namespaceList := &corev1.NamespaceList{} + nsSelector, err := metav1.LabelSelectorAsSelector(schedule.Spec.NamespaceSelector) + if err != nil { + logger.Error(err, "Invalid namespace selector") + return ctrl.Result{}, nil + } + + if err := r.List(ctx, namespaceList, client.MatchingLabelsSelector{Selector: nsSelector}); err != nil { + return ctrl.Result{}, err + } + + rolloutSelector, err := metav1.LabelSelectorAsSelector(schedule.Spec.RolloutSelector) + if err != nil { + logger.Error(err, "Invalid rollout selector") + return ctrl.Result{}, nil + } + + var allMatchingRollouts []rolloutv1alpha1.Rollout + for _, ns := range namespaceList.Items { + ros := &rolloutv1alpha1.RolloutList{} + if err := r.List(ctx, ros, client.InNamespace(ns.Name), client.MatchingLabelsSelector{Selector: rolloutSelector}); err != nil { + logger.Error(err, "Failed to list rollouts in namespace", "namespace", ns.Name) + continue + } + allMatchingRollouts = append(allMatchingRollouts, ros.Items...) + } + + // 3. 
Manage Gates + passing := calculateGateStatus(active, schedule.Spec.Action) + managedGates := []string{} // stored as "namespace/name" + + ownerRef, err := makeOwnerReference(schedule, r.Scheme) + if err != nil { + return ctrl.Result{}, err + } + + // Set of current gates to check against previous for cleanup + currentGatesSet := make(map[string]bool) + + for _, rollout := range allMatchingRollouts { + gateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) + if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef); err != nil { + logger.Error(err, "Failed to sync gate", "rollout", rollout.Name, "namespace", rollout.Namespace) + } else { + key := fmt.Sprintf("%s/%s", rollout.Namespace, gateName) + managedGates = append(managedGates, key) + currentGatesSet[key] = true + } + } + + // 4. Cleanup Orphans + // Check previously managed gates that are no longer in current set + for _, oldKey := range schedule.Status.ManagedGates { + if !currentGatesSet[oldKey] { + // Orphaned - parse and delete + parts := strings.Split(oldKey, "/") + if len(parts) != 2 { + continue + } + ns, name := parts[0], parts[1] + + gate := &rolloutv1alpha1.RolloutGate{} + if err := r.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, gate); err == nil { + if err := r.Delete(ctx, gate); client.IgnoreNotFound(err) != nil { + logger.Error(err, "Failed to delete orphaned gate", "key", oldKey) + } + } + } + } + + // 5. Update Status + schedule.Status.Active = active + schedule.Status.ActiveRules = activeRules + if !nextTransition.IsZero() { + t := metav1.NewTime(nextTransition) + schedule.Status.NextTransition = &t + } else { + schedule.Status.NextTransition = nil + } + schedule.Status.ManagedGates = managedGates + schedule.Status.MatchingRollouts = len(allMatchingRollouts) + + if err := r.Status().Update(ctx, schedule); err != nil { + return ctrl.Result{}, err + } + + // 6. 
Requeue at next transition + if !nextTransition.IsZero() { + sleepDuration := nextTransition.Sub(now) + if sleepDuration < 0 { + sleepDuration = time.Second + } + sleepDuration += 100 * time.Millisecond + return ctrl.Result{RequeueAfter: sleepDuration}, nil + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ClusterRolloutScheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&rolloutv1alpha1.ClusterRolloutSchedule{}). + Owns(&rolloutv1alpha1.RolloutGate{}). + Watches( + &rolloutv1alpha1.Rollout{}, + handler.EnqueueRequestsFromMapFunc(r.findSchedulesForRollout), + ). + Watches( + &corev1.Namespace{}, + handler.EnqueueRequestsFromMapFunc(r.findSchedulesForNamespace), + ). + Complete(r) +} + +func (r *ClusterRolloutScheduleReconciler) findSchedulesForRollout(ctx context.Context, obj client.Object) []reconcile.Request { + rollout, ok := obj.(*rolloutv1alpha1.Rollout) + if !ok { + return nil + } + + // Need to check all Cluster Schedules + scheduleList := &rolloutv1alpha1.ClusterRolloutScheduleList{} + if err := r.List(ctx, scheduleList); err != nil { + return nil + } + + var requests []reconcile.Request + + // Pre-fetch namespace to check labels? Or assume listing is cheap? + // We need rollout's namespace labels to check NamespaceSelector + ns := &corev1.Namespace{} + if err := r.Get(ctx, types.NamespacedName{Name: rollout.Namespace}, ns); err != nil { + // Log? + return nil + } + + for _, schedule := range scheduleList.Items { + match := false + + // 1. Check Namespace Selector + nsSelector, err := metav1.LabelSelectorAsSelector(schedule.Spec.NamespaceSelector) + if err == nil && nsSelector.Matches(labels.Set(ns.Labels)) { + // 2. 
Check Rollout Selector + rolloutSelector, err := metav1.LabelSelectorAsSelector(schedule.Spec.RolloutSelector) + if err == nil && rolloutSelector.Matches(labels.Set(rollout.Labels)) { + match = true + } + } + + // Also check if previously managed + if !match { + expectedKey := fmt.Sprintf("%s/%s-%s", rollout.Namespace, schedule.Name, rollout.Name) + for _, managedKey := range schedule.Status.ManagedGates { + if managedKey == expectedKey { + match = true + break + } + } + } + + if match { + requests = append(requests, reconcile.Request{ + NamespacedName: client.ObjectKey{ + Name: schedule.Name, + // Cluster scoped, no namespace + }, + }) + } + } + return requests +} + +func (r *ClusterRolloutScheduleReconciler) findSchedulesForNamespace(ctx context.Context, obj client.Object) []reconcile.Request { + ns, ok := obj.(*corev1.Namespace) + if !ok { + return nil + } + + scheduleList := &rolloutv1alpha1.ClusterRolloutScheduleList{} + if err := r.List(ctx, scheduleList); err != nil { + return nil + } + + var requests []reconcile.Request + for _, schedule := range scheduleList.Items { + + // Let's check if it matches NOW + nsSelector, err := metav1.LabelSelectorAsSelector(schedule.Spec.NamespaceSelector) + if err == nil && nsSelector.Matches(labels.Set(ns.Labels)) { + requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKey{Name: schedule.Name}}) + continue + } + + // If it DOESN'T match now, we should check if it manages any gates in this namespace. + // This handles the "cleanup" case. 
+ hasGatesInNs := false + prefix := ns.Name + "/" + for _, managedKey := range schedule.Status.ManagedGates { + if strings.HasPrefix(managedKey, prefix) { + hasGatesInNs = true + break + } + } + + if hasGatesInNs { + requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKey{Name: schedule.Name}}) + } + } + return requests +} diff --git a/internal/controller/rolloutschedule_controller.go b/internal/controller/rolloutschedule_controller.go new file mode 100644 index 0000000..17be0ed --- /dev/null +++ b/internal/controller/rolloutschedule_controller.go @@ -0,0 +1,195 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + rolloutv1alpha1 "github.com/kuberik/rollout-controller/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// RolloutScheduleReconciler reconciles a RolloutSchedule object +type RolloutScheduleReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + Clock Clock +} + +//+kubebuilder:rbac:groups=kuberik.com,resources=rolloutschedules,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=kuberik.com,resources=rolloutschedules/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=kuberik.com,resources=rolloutschedules/finalizers,verbs=update +//+kubebuilder:rbac:groups=kuberik.com,resources=rolloutgates,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=kuberik.com,resources=rollouts,verbs=get;list;watch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *RolloutScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + schedule := &rolloutv1alpha1.RolloutSchedule{} + if err := r.Get(ctx, req.NamespacedName, schedule); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // 1. 
Evaluate Schedule + now := r.Clock.Now() + active, activeRules, nextTransition, err := evaluateScheduleRules(now, schedule.Spec.Rules, schedule.Spec.Timezone) + if err != nil { + logger.Error(err, "Failed to evaluate schedule rules") + // Don't requeue immediately on config error + return ctrl.Result{}, nil + } + + // 2. Find matching Rollouts + rolloutList := &rolloutv1alpha1.RolloutList{} + selector, err := metav1.LabelSelectorAsSelector(schedule.Spec.RolloutSelector) + if err != nil { + logger.Error(err, "Invalid rollout selector") + return ctrl.Result{}, nil + } + + if err := r.List(ctx, rolloutList, client.InNamespace(schedule.Namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil { + return ctrl.Result{}, err + } + + // 3. Manage Gates + passing := calculateGateStatus(active, schedule.Spec.Action) + managedGates := []string{} + + ownerRef, err := makeOwnerReference(schedule, r.Scheme) + if err != nil { + return ctrl.Result{}, err + } + + for _, rollout := range rolloutList.Items { + gateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) + if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef); err != nil { + logger.Error(err, "Failed to sync gate", "rollout", rollout.Name, "gate", gateName) + // Continue with other rollouts, but we'll return the error at end if needed? + // Best effort to sync others. + } else { + managedGates = append(managedGates, gateName) + } + } + + // 4. Cleanup Orphans + // Remove gates that are no longer needed (rollout no longer matches) + // We use the previous status.ManagedGates to know what we should check + if err := cleanupOrphanedGates(ctx, r.Client, schedule.Status.ManagedGates, managedGates, schedule.Namespace); err != nil { + logger.Error(err, "Failed to cleanup orphaned gates") + // Don't block status update + } + + // 5. 
Update Status + schedule.Status.Active = active + schedule.Status.ActiveRules = activeRules + if !nextTransition.IsZero() { + t := metav1.NewTime(nextTransition) + schedule.Status.NextTransition = &t + } else { + schedule.Status.NextTransition = nil + } + schedule.Status.ManagedGates = managedGates + schedule.Status.MatchingRollouts = len(rolloutList.Items) + + if err := r.Status().Update(ctx, schedule); err != nil { + return ctrl.Result{}, err + } + + // 6. Requeue at next transition + if !nextTransition.IsZero() { + sleepDuration := nextTransition.Sub(now) + if sleepDuration < 0 { + sleepDuration = time.Second // Should have happened slightly in past, retry soon + } + // Add a small buffer to ensure we are past the transition time + sleepDuration += 100 * time.Millisecond + return ctrl.Result{RequeueAfter: sleepDuration}, nil + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *RolloutScheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&rolloutv1alpha1.RolloutSchedule{}). + Owns(&rolloutv1alpha1.RolloutGate{}). + Watches( + &rolloutv1alpha1.Rollout{}, + handler.EnqueueRequestsFromMapFunc(r.findSchedulesForRollout), + ). 
+ Complete(r) +} + +func (r *RolloutScheduleReconciler) findSchedulesForRollout(ctx context.Context, obj client.Object) []reconcile.Request { + rollout, ok := obj.(*rolloutv1alpha1.Rollout) + if !ok { + return nil + } + + // List all RolloutSchedules in the namespace + scheduleList := &rolloutv1alpha1.RolloutScheduleList{} + if err := r.List(ctx, scheduleList, client.InNamespace(rollout.Namespace)); err != nil { + return nil + } + + var requests []reconcile.Request + for _, schedule := range scheduleList.Items { + match := false + + // Check if matches selector + selector, err := metav1.LabelSelectorAsSelector(schedule.Spec.RolloutSelector) + if err == nil && selector.Matches(labels.Set(rollout.Labels)) { + match = true + } + + // Also check if previously managed (to handle cleanup if no longer matches) + if !match { + expectedGateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) + for _, managedGate := range schedule.Status.ManagedGates { + if managedGate == expectedGateName { + match = true + break + } + } + } + + if match { + requests = append(requests, reconcile.Request{ + NamespacedName: client.ObjectKey{ + Name: schedule.Name, + Namespace: schedule.Namespace, + }, + }) + } + } + return requests +} diff --git a/internal/controller/rolloutschedule_controller_test.go b/internal/controller/rolloutschedule_controller_test.go new file mode 100644 index 0000000..15e9b20 --- /dev/null +++ b/internal/controller/rolloutschedule_controller_test.go @@ -0,0 +1,470 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "testing" + "time" + + rolloutv1alpha1 "github.com/kuberik/rollout-controller/api/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// MockClock implements Clock interface for testing +type MockClock struct { + CurrentTime time.Time +} + +func (m *MockClock) Now() time.Time { + return m.CurrentTime +} + +func TestEvaluateScheduleRules(t *testing.T) { + // Use UTC for simplicity in tests + locUTC, _ := time.LoadLocation("UTC") + + tests := []struct { + name string + now time.Time + rules []rolloutv1alpha1.ScheduleRule + timezone string + expectedActive bool + expectedRules []string + }{ + { + name: "Time range: inside window", + now: time.Date(2025, 1, 1, 10, 0, 0, 0, locUTC), // 10:00 + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "business-hours", + TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "09:00", + End: "17:00", + }, + }, + }, + timezone: "UTC", + expectedActive: true, + expectedRules: []string{"business-hours"}, + }, + { + name: "Time range: outside window (before)", + now: time.Date(2025, 1, 1, 8, 0, 0, 0, locUTC), // 8:00 + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "business-hours", + TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "09:00", + End: "17:00", + }, + }, + }, + timezone: "UTC", + expectedActive: false, + expectedRules: nil, + }, + { + name: "Time range: cross midnight (inside)", + now: time.Date(2025, 1, 1, 23, 0, 0, 0, locUTC), // 23:00 + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "night-shift", + 
TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "22:00", + End: "06:00", + }, + }, + }, + timezone: "UTC", + expectedActive: true, + expectedRules: []string{"night-shift"}, + }, + { + name: "Time range: cross midnight (outside)", + now: time.Date(2025, 1, 1, 12, 0, 0, 0, locUTC), // 12:00 + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "night-shift", + TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "22:00", + End: "06:00", + }, + }, + }, + timezone: "UTC", + expectedActive: false, + expectedRules: nil, + }, + { + name: "Days of week: match", + now: time.Date(2025, 1, 1, 12, 0, 0, 0, locUTC), // Wed Jan 1 2025 + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "wed-only", + DaysOfWeek: []rolloutv1alpha1.DayOfWeek{ + rolloutv1alpha1.Wednesday, + }, + }, + }, + timezone: "UTC", + expectedActive: true, + expectedRules: []string{"wed-only"}, + }, + { + name: "Days of week: mismatch", + now: time.Date(2025, 1, 2, 12, 0, 0, 0, locUTC), // Thu Jan 2 2025 + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "wed-only", + DaysOfWeek: []rolloutv1alpha1.DayOfWeek{ + rolloutv1alpha1.Wednesday, + }, + }, + }, + timezone: "UTC", + expectedActive: false, + expectedRules: nil, + }, + { + name: "Date range: match", + now: time.Date(2025, 12, 25, 12, 0, 0, 0, locUTC), + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "christmas", + DateRange: &rolloutv1alpha1.DateRange{ + Start: "2025-12-24", + End: "2025-12-26", + }, + }, + }, + timezone: "UTC", + expectedActive: true, + expectedRules: []string{"christmas"}, + }, + { + name: "Date range: mismatch", + now: time.Date(2025, 12, 27, 12, 0, 0, 0, locUTC), + rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "christmas", + DateRange: &rolloutv1alpha1.DateRange{ + Start: "2025-12-24", + End: "2025-12-26", + }, + }, + }, + timezone: "UTC", + expectedActive: false, + expectedRules: nil, + }, + { + name: "Multiple rules (OR logic)", + now: time.Date(2025, 1, 1, 10, 0, 0, 0, locUTC), + rules: []rolloutv1alpha1.ScheduleRule{ + { + 
Name: "morning", + TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "09:00", + End: "11:00", + }, + }, + { + Name: "afternoon", + TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "14:00", + End: "16:00", + }, + }, + }, + timezone: "UTC", + expectedActive: true, + expectedRules: []string{"morning"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + active, activeRules, _, err := evaluateScheduleRules(tt.now, tt.rules, tt.timezone) + require.NoError(t, err) + assert.Equal(t, tt.expectedActive, active) + if tt.expectedRules != nil { + assert.Equal(t, tt.expectedRules, activeRules) + } + }) + } +} + +func TestRolloutScheduleReconciler(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, rolloutv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + // Setup basic objects + rollout := &rolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-rollout", + Namespace: "default", + Labels: map[string]string{ + "app": "my-app", + }, + }, + } + + schedule := &rolloutv1alpha1.RolloutSchedule{ + TypeMeta: metav1.TypeMeta{ + APIVersion: rolloutv1alpha1.GroupVersion.String(), + Kind: "RolloutSchedule", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "business-hours", + Namespace: "default", + }, + Spec: rolloutv1alpha1.RolloutScheduleSpec{ + RolloutSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "my-app"}, + }, + Rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "workday", + TimeRange: &rolloutv1alpha1.TimeRange{ + Start: "09:00", + End: "17:00", + }, + }, + }, + Action: rolloutv1alpha1.RolloutScheduleActionAllow, + }, + } + + // Mock clock at 10:00 (inside window) + mockClock := &MockClock{ + CurrentTime: time.Date(2025, 1, 1, 10, 0, 0, 0, time.UTC), + } + + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(rollout, schedule). + WithStatusSubresource(schedule). 
// Add status subresource support + Build() + + // Verify object exists + checkSchedule := &rolloutv1alpha1.RolloutSchedule{} + err := client.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, checkSchedule) + require.NoError(t, err, "Failed to find schedule in fake client during setup") + + r := &RolloutScheduleReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + Clock: mockClock, + } + + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: schedule.Name, + Namespace: schedule.Namespace, + }, + } + + // 1. First Reconciliation - Should create gate passing=true (Allow action + Inside window) + _, err = r.Reconcile(context.Background(), req) + require.NoError(t, err) + + // Verify gate created + gate := &rolloutv1alpha1.RolloutGate{} + gateName := "business-hours-my-rollout" // schedule-rollout + err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + require.NoError(t, err) + require.NotNil(t, gate.Spec.Passing) + assert.True(t, *gate.Spec.Passing, "Gate should be passing (Allow + Inside window)") + assert.Equal(t, rollout.Name, gate.Spec.RolloutRef.Name) + + // Verify status updated + err = client.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, schedule) + require.NoError(t, err) + assert.True(t, schedule.Status.Active) + assert.Contains(t, schedule.Status.ManagedGates, gateName) + + // 2. 
Advance time to 20:00 (outside window) + mockClock.CurrentTime = time.Date(2025, 1, 1, 20, 0, 0, 0, time.UTC) + + _, err = r.Reconcile(context.Background(), req) + require.NoError(t, err) + + // Verify gate updated to passing=false + err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + require.NoError(t, err) + require.NotNil(t, gate.Spec.Passing) + assert.False(t, *gate.Spec.Passing, "Gate should NOT be passing (Allow + Outside window)") + + // 3. Change Action to Deny + err = client.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, schedule) + require.NoError(t, err) + schedule.Spec.Action = rolloutv1alpha1.RolloutScheduleActionDeny + err = client.Update(context.Background(), schedule) + require.NoError(t, err) + + // Reconcile (still outside window at 20:00) + _, err = r.Reconcile(context.Background(), req) + require.NoError(t, err) + + // Outside window + Deny action = Passing (Deny only blocks active periods) + err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + require.NoError(t, err) + assert.True(t, *gate.Spec.Passing, "Gate should be passing (Deny + Outside window)") + + // 4. 
Advance time back to 10:00 (inside window) + mockClock.CurrentTime = time.Date(2025, 1, 2, 10, 0, 0, 0, time.UTC) + + _, err = r.Reconcile(context.Background(), req) + require.NoError(t, err) + + // Inside window + Deny action = Not Passing + err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + require.NoError(t, err) + assert.False(t, *gate.Spec.Passing, "Gate should NOT be passing (Deny + Inside window)") +} + +func TestClusterRolloutScheduleReconciler(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, rolloutv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + // Objects: Rollout in "prod", Rollout in "dev" + prodRollout := &rolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-prod", + Namespace: "prod", + Labels: map[string]string{"app": "foo"}, + }, + } + devRollout := &rolloutv1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-dev", + Namespace: "dev", + Labels: map[string]string{"app": "foo"}, + }, + } + + prodNs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "prod", Labels: map[string]string{"env": "prod"}}} + devNs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "dev", Labels: map[string]string{"env": "dev"}}} + + schedule := &rolloutv1alpha1.ClusterRolloutSchedule{ + TypeMeta: metav1.TypeMeta{ + APIVersion: rolloutv1alpha1.GroupVersion.String(), + Kind: "ClusterRolloutSchedule", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "prod-freeze", + }, + Spec: rolloutv1alpha1.ClusterRolloutScheduleSpec{ + // Start with namespace selector + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"env": "prod"}, + }, + RolloutSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "foo"}, + }, + Rules: []rolloutv1alpha1.ScheduleRule{ + { + Name: "freeze", + // Always active date range for test + DateRange: &rolloutv1alpha1.DateRange{ + Start: "2025-01-01", + End: "2025-01-02", + }, 
+				},
+			},
+			Action: rolloutv1alpha1.RolloutScheduleActionDeny,
+		},
+	}
+
+	mockClock := &MockClock{
+		CurrentTime: time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC),
+	}
+
+	fakeClient := fake.NewClientBuilder().
+		WithScheme(scheme).
+		WithObjects(prodRollout, devRollout, prodNs, devNs, schedule).
+		WithStatusSubresource(schedule).
+		Build()
+
+	// Verify object exists
+	checkSchedule := &rolloutv1alpha1.ClusterRolloutSchedule{}
+	err := fakeClient.Get(context.Background(), types.NamespacedName{Name: schedule.Name}, checkSchedule)
+	require.NoError(t, err, "Failed to find cluster schedule in fake client during setup")
+
+	r := &ClusterRolloutScheduleReconciler{
+		Client:   fakeClient,
+		Scheme:   scheme,
+		Recorder: record.NewFakeRecorder(10),
+		Clock:    mockClock,
+	}
+
+	req := reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Name: schedule.Name,
+		},
+	}
+
+	// 1. Reconcile - Should affect only prod rollout
+	_, err = r.Reconcile(context.Background(), req)
+	require.NoError(t, err)
+
+	// Check prod gate
+	prodGate := &rolloutv1alpha1.RolloutGate{}
+	prodGateName := "prod-freeze-app-prod"
+	err = fakeClient.Get(context.Background(), types.NamespacedName{Name: prodGateName, Namespace: "prod"}, prodGate)
+	require.NoError(t, err)
+	assert.False(t, *prodGate.Spec.Passing, "Prod gate should block")
+
+	// Check dev gate (should not exist)
+	devGate := &rolloutv1alpha1.RolloutGate{}
+	devGateName := "prod-freeze-app-dev"
+	err = fakeClient.Get(context.Background(), types.NamespacedName{Name: devGateName, Namespace: "dev"}, devGate)
+	assert.Error(t, err)
+	assert.True(t, client.IgnoreNotFound(err) == nil)
+
+	// 2. Remove Namespace Selector (matches all)
+	err = fakeClient.Get(context.Background(), types.NamespacedName{Name: schedule.Name}, schedule)
+	require.NoError(t, err)
+	schedule.Spec.NamespaceSelector = &metav1.LabelSelector{} // Empty selector matches all namespaces
+	// An empty LabelSelector is converted by LabelSelectorAsSelector into
+	// labels.Everything(), i.e. it selects every namespace, so after this
+	// update the schedule should also cover the "dev" namespace.
+
+	err = fakeClient.Update(context.Background(), schedule)
+	require.NoError(t, err)
+
+	_, err = r.Reconcile(context.Background(), req)
+	require.NoError(t, err)
+
+	// Now dev gate should exist and block
+	err = fakeClient.Get(context.Background(), types.NamespacedName{Name: devGateName, Namespace: "dev"}, devGate)
+	require.NoError(t, err)
+	assert.False(t, *devGate.Spec.Passing, "Dev gate should block now")
+}
diff --git a/internal/controller/rolloutschedule_helpers.go b/internal/controller/rolloutschedule_helpers.go
new file mode 100644
index 0000000..c69b07f
--- /dev/null
+++ b/internal/controller/rolloutschedule_helpers.go
@@ -0,0 +1,426 @@
+/*
+Copyright 2025.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	rolloutv1alpha1 "github.com/kuberik/rollout-controller/api/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// evaluateScheduleRules determines if any rule in the schedule is currently active.
+// Returns whether we're active, which rules are active, and when the next transition will occur. +func evaluateScheduleRules(now time.Time, rules []rolloutv1alpha1.ScheduleRule, timezone string) (bool, []string, time.Time, error) { + // Load the timezone + loc, err := time.LoadLocation(timezone) + if err != nil { + return false, nil, time.Time{}, fmt.Errorf("invalid timezone %q: %w", timezone, err) + } + + nowInTZ := now.In(loc) + var activeRules []string + var nextTransition time.Time + + // Evaluate each rule + for _, rule := range rules { + active, ruleNextTransition, err := evaluateRule(nowInTZ, rule, loc) + if err != nil { + return false, nil, time.Time{}, fmt.Errorf("failed to evaluate rule %q: %w", rule.Name, err) + } + + if active { + activeRules = append(activeRules, rule.Name) + } + + // Track the earliest next transition + if !ruleNextTransition.IsZero() { + if nextTransition.IsZero() || ruleNextTransition.Before(nextTransition) { + nextTransition = ruleNextTransition + } + } + } + + // Schedule is active if ANY rule is active + isActive := len(activeRules) > 0 + + return isActive, activeRules, nextTransition, nil +} + +// evaluateRule evaluates a single schedule rule. 
+func evaluateRule(now time.Time, rule rolloutv1alpha1.ScheduleRule, loc *time.Location) (bool, time.Time, error) { + // Check date range first (if specified) + if rule.DateRange != nil { + inDateRange, err := isInDateRange(now, rule.DateRange, loc) + if err != nil { + return false, time.Time{}, err + } + if !inDateRange { + // Not in date range, rule doesn't match + // Next transition is at the start of the date range (or end if we're past it) + nextTransition := calculateDateRangeTransition(now, rule.DateRange, loc) + return false, nextTransition, nil + } + } + + // Check day of week (if specified) + if len(rule.DaysOfWeek) > 0 { + currentDay := now.Weekday() + dayMatches := false + for _, allowedDay := range rule.DaysOfWeek { + if dayOfWeekMatches(currentDay, allowedDay) { + dayMatches = true + break + } + } + if !dayMatches { + // Wrong day, rule doesn't match + nextTransition := findNextMatchingDay(now, rule.DaysOfWeek, rule.TimeRange, loc) + return false, nextTransition, nil + } + } + + // Check time range (if specified) + if rule.TimeRange != nil { + return isInTimeRange(now, rule.TimeRange, rule.DaysOfWeek, loc) + } + + // No time range specified but date/day matched - rule is active all day + // Next transition is midnight tomorrow (or next day change) + midnight := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, loc) + nextTransition := midnight.AddDate(0, 0, 1) + return true, nextTransition, nil +} + +// isInDateRange checks if the current date is within the specified date range. 
+func isInDateRange(now time.Time, dateRange *rolloutv1alpha1.DateRange, loc *time.Location) (bool, error) { + startDate, err := time.ParseInLocation("2006-01-02", dateRange.Start, loc) + if err != nil { + return false, fmt.Errorf("invalid start date %q: %w", dateRange.Start, err) + } + + endDate, err := time.ParseInLocation("2006-01-02", dateRange.End, loc) + if err != nil { + return false, fmt.Errorf("invalid end date %q: %w", dateRange.End, err) + } + + // Normalize to midnight for date comparison + currentDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, loc) + startDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, loc) + endDate = time.Date(endDate.Year(), endDate.Month(), endDate.Day(), 0, 0, 0, 0, loc) + + return (currentDate.Equal(startDate) || currentDate.After(startDate)) && + (currentDate.Equal(endDate) || currentDate.Before(endDate)), nil +} + +// calculateDateRangeTransition calculates when the next transition will occur for a date range. +func calculateDateRangeTransition(now time.Time, dateRange *rolloutv1alpha1.DateRange, loc *time.Location) time.Time { + startDate, _ := time.ParseInLocation("2006-01-02", dateRange.Start, loc) + endDate, _ := time.ParseInLocation("2006-01-02", dateRange.End, loc) + + currentDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, loc) + + if currentDate.Before(startDate) { + // Before the range, next transition is start date + return startDate + } + + // After or in the range, next transition is day after end date + return endDate.AddDate(0, 0, 1) +} + +// dayOfWeekMatches checks if a time.Weekday matches a DayOfWeek. 
+func dayOfWeekMatches(weekday time.Weekday, day rolloutv1alpha1.DayOfWeek) bool { + switch day { + case rolloutv1alpha1.Monday: + return weekday == time.Monday + case rolloutv1alpha1.Tuesday: + return weekday == time.Tuesday + case rolloutv1alpha1.Wednesday: + return weekday == time.Wednesday + case rolloutv1alpha1.Thursday: + return weekday == time.Thursday + case rolloutv1alpha1.Friday: + return weekday == time.Friday + case rolloutv1alpha1.Saturday: + return weekday == time.Saturday + case rolloutv1alpha1.Sunday: + return weekday == time.Sunday + default: + return false + } +} + +// isInTimeRange checks if the current time is within the time range. +func isInTimeRange(now time.Time, tr *rolloutv1alpha1.TimeRange, daysOfWeek []rolloutv1alpha1.DayOfWeek, loc *time.Location) (bool, time.Time, error) { + startOffset, err := parseTimeOfDay(tr.Start) + if err != nil { + return false, time.Time{}, fmt.Errorf("invalid start time %q: %w", tr.Start, err) + } + + endOffset, err := parseTimeOfDay(tr.End) + if err != nil { + return false, time.Time{}, fmt.Errorf("invalid end time %q: %w", tr.End, err) + } + + midnight := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, loc) + currentOffset := now.Sub(midnight) + + var inWindow bool + var nextTransition time.Time + + if startOffset < endOffset { + // Normal range (e.g., 09:00-17:00) + inWindow = currentOffset >= startOffset && currentOffset < endOffset + if inWindow { + // Next transition is at end time today + nextTransition = midnight.Add(endOffset) + } else if currentOffset < startOffset { + // Before the window starts today + nextTransition = midnight.Add(startOffset) + } else { + // After the window ends today, next is start time tomorrow (if days match) + nextTransition = findNextTimeRangeStart(now, startOffset, daysOfWeek, loc) + } + } else { + // Crosses midnight (e.g., 22:00-02:00) + inWindow = currentOffset >= startOffset || currentOffset < endOffset + if inWindow { + if currentOffset >= startOffset { + // 
After start time today, ends tomorrow + nextTransition = midnight.AddDate(0, 0, 1).Add(endOffset) + } else { + // Before end time today (started yesterday) + nextTransition = midnight.Add(endOffset) + } + } else { + // Not in window (between end and start) + nextTransition = midnight.Add(startOffset) + } + } + + return inWindow, nextTransition, nil +} + +// parseTimeOfDay parses a time string like "09:00" into a duration from midnight. +func parseTimeOfDay(timeStr string) (time.Duration, error) { + parts := strings.Split(timeStr, ":") + if len(parts) != 2 { + return 0, fmt.Errorf("invalid time format %q, expected HH:MM", timeStr) + } + + hours, err := strconv.Atoi(parts[0]) + if err != nil || hours < 0 || hours > 23 { + return 0, fmt.Errorf("invalid hours %q", parts[0]) + } + + minutes, err := strconv.Atoi(parts[1]) + if err != nil || minutes < 0 || minutes > 59 { + return 0, fmt.Errorf("invalid minutes %q", parts[1]) + } + + return time.Duration(hours)*time.Hour + time.Duration(minutes)*time.Minute, nil +} + +// findNextTimeRangeStart finds the next time the time range will start. +func findNextTimeRangeStart(now time.Time, startOffset time.Duration, daysOfWeek []rolloutv1alpha1.DayOfWeek, loc *time.Location) time.Time { + // If no day restrictions, next window starts tomorrow at start time + if len(daysOfWeek) == 0 { + midnight := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, loc) + return midnight.AddDate(0, 0, 1).Add(startOffset) + } + + // Find the next matching day + return findNextMatchingDay(now, daysOfWeek, &rolloutv1alpha1.TimeRange{Start: formatDuration(startOffset)}, loc) +} + +// findNextMatchingDay finds the next occurrence of a matching day of week. 
+func findNextMatchingDay(now time.Time, allowedDays []rolloutv1alpha1.DayOfWeek, tr *rolloutv1alpha1.TimeRange, loc *time.Location) time.Time { + // Start from tomorrow + checkDate := now.AddDate(0, 0, 1) + + // Check up to 7 days ahead + for i := 0; i < 7; i++ { + dayToCheck := checkDate.AddDate(0, 0, i) + for _, allowedDay := range allowedDays { + if dayOfWeekMatches(dayToCheck.Weekday(), allowedDay) { + midnight := time.Date(dayToCheck.Year(), dayToCheck.Month(), dayToCheck.Day(), 0, 0, 0, 0, loc) + if tr != nil && tr.Start != "" { + offset, _ := parseTimeOfDay(tr.Start) + return midnight.Add(offset) + } + return midnight + } + } + } + + // Shouldn't happen, but default to tomorrow + midnight := time.Date(checkDate.Year(), checkDate.Month(), checkDate.Day(), 0, 0, 0, 0, loc) + return midnight +} + +// formatDuration formats a duration as HH:MM. +func formatDuration(d time.Duration) string { + hours := int(d.Hours()) + minutes := int(d.Minutes()) % 60 + return fmt.Sprintf("%02d:%02d", hours, minutes) +} + +// calculateGateStatus determines if the gate should pass based on active state and action. +func calculateGateStatus(active bool, action rolloutv1alpha1.RolloutScheduleAction) bool { + switch action { + case rolloutv1alpha1.RolloutScheduleActionAllow: + // Allow when active, deny when inactive + return active + case rolloutv1alpha1.RolloutScheduleActionDeny: + // Deny when active, allow when inactive + return !active + default: + // Default to deny behavior + return !active + } +} + +// syncRolloutGate creates or updates a RolloutGate for a rollout. 
+func syncRolloutGate( + ctx context.Context, + c client.Client, + rollout *rolloutv1alpha1.Rollout, + gateName string, + passing bool, + ownerRef metav1.OwnerReference, +) error { + gate := &rolloutv1alpha1.RolloutGate{} + err := c.Get(ctx, types.NamespacedName{ + Namespace: rollout.Namespace, + Name: gateName, + }, gate) + + if errors.IsNotFound(err) { + // Create new gate + gate = &rolloutv1alpha1.RolloutGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: gateName, + Namespace: rollout.Namespace, + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Spec: rolloutv1alpha1.RolloutGateSpec{ + RolloutRef: &corev1.LocalObjectReference{ + Name: rollout.Name, + }, + Passing: &passing, + }, + } + return c.Create(ctx, gate) + } + + if err != nil { + return fmt.Errorf("failed to get gate %s: %w", gateName, err) + } + + // Update existing gate if needed + needsUpdate := false + if gate.Spec.Passing == nil || *gate.Spec.Passing != passing { + gate.Spec.Passing = &passing + needsUpdate = true + } + + // Ensure owner reference is set + hasOwner := false + for _, ref := range gate.OwnerReferences { + if ref.UID == ownerRef.UID { + hasOwner = true + break + } + } + if !hasOwner { + gate.OwnerReferences = append(gate.OwnerReferences, ownerRef) + needsUpdate = true + } + + if needsUpdate { + return c.Update(ctx, gate) + } + + return nil +} + +// cleanupOrphanedGates removes gates that are no longer needed. 
+func cleanupOrphanedGates( + ctx context.Context, + c client.Client, + managedGates []string, + currentGates []string, + namespace string, +) error { + // Convert currentGates to a map for quick lookup + current := make(map[string]bool) + for _, name := range currentGates { + current[name] = true + } + + // Delete gates that are in managedGates but not in currentGates + for _, gateName := range managedGates { + if !current[gateName] { + gate := &rolloutv1alpha1.RolloutGate{} + err := c.Get(ctx, types.NamespacedName{ + Namespace: namespace, + Name: gateName, + }, gate) + + if err == nil { + // Gate exists, delete it + if err := c.Delete(ctx, gate); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned gate %s: %w", gateName, err) + } + } else if !errors.IsNotFound(err) { + return fmt.Errorf("failed to get gate %s for cleanup: %w", gateName, err) + } + // If already not found, that's fine + } + } + + return nil +} + +// makeOwnerReference creates an owner reference for controller-owned objects. 
+func makeOwnerReference(obj client.Object, scheme *runtime.Scheme) (metav1.OwnerReference, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return metav1.OwnerReference{}, err + } + + return metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: obj.GetName(), + UID: obj.GetUID(), + Controller: pointer.Bool(true), + BlockOwnerDeletion: pointer.Bool(true), + }, nil +} From 23bfa3ecfb7a50f90fcf5a0d3ab5ea44d9ea1312 Mon Sep 17 00:00:00 2001 From: Luka Skugor Date: Sat, 31 Jan 2026 10:02:01 +0000 Subject: [PATCH 2/4] samples, fixes, improvements --- CLAUDE.md | 216 ++++++++++++ PROJECT | 16 + README.md | 330 ++++++++++++++++++ config/crd/kustomization.yaml | 2 + config/samples/kustomization.yaml | 10 +- .../v1alpha1_clusterrolloutschedule.yaml | 3 + ...clusterrolloutschedule_holiday_freeze.yaml | 56 +++ ...lusterrolloutschedule_peak_hours_deny.yaml | 63 ++++ config/samples/v1alpha1_rolloutschedule.yaml | 3 + ..._rolloutschedule_combined_constraints.yaml | 59 ++++ ...a1_rolloutschedule_maintenance_window.yaml | 38 ++ ...pha1_rolloutschedule_multiple_windows.yaml | 61 ++++ ...pha1_rolloutschedule_night_deployment.yaml | 42 +++ ...v1alpha1_rolloutschedule_weekend_only.yaml | 36 ++ .../clusterrolloutschedule_controller.go | 2 +- .../controller/rolloutschedule_controller.go | 2 +- .../controller/rolloutschedule_helpers.go | 30 ++ 17 files changed, 966 insertions(+), 3 deletions(-) create mode 100644 CLAUDE.md create mode 100644 config/samples/v1alpha1_clusterrolloutschedule_holiday_freeze.yaml create mode 100644 config/samples/v1alpha1_clusterrolloutschedule_peak_hours_deny.yaml create mode 100644 config/samples/v1alpha1_rolloutschedule_combined_constraints.yaml create mode 100644 config/samples/v1alpha1_rolloutschedule_maintenance_window.yaml create mode 100644 config/samples/v1alpha1_rolloutschedule_multiple_windows.yaml create mode 100644 config/samples/v1alpha1_rolloutschedule_night_deployment.yaml create 
mode 100644 config/samples/v1alpha1_rolloutschedule_weekend_only.yaml diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..1e5ed04 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,216 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## What is Rollout Controller? + +A Kubernetes controller for managing application rollouts with support for health checks, gates, and bake time. It integrates tightly with Flux CD for GitOps workflows, providing progressive delivery capabilities. + +## Key Features + +- **Flux ImagePolicy Integration**: Reads available releases from Flux ImagePolicy +- **Multiple Rollout Support**: Kustomizations can be managed by multiple rollouts using rollout-specific annotations +- **Health Check Integration**: Monitor application health during rollouts +- **Rollout Gates**: Control deployment progression with custom gates +- **Bake Time**: Wait for a specified duration before considering a deployment successful +- **Version History**: Maintains version history with configurable retention policies +- **Gate Bypass**: Emergency deployment support via annotations + +## Common Development Commands + +```bash +# Generate code after modifying CRD types +make manifests # Generate CRD YAML from Go types +make generate # Generate DeepCopy methods + +# Code quality +make fmt # go fmt +make vet # go vet +make lint # gofmt + govet + +# Testing +make test # Run unit tests (Ginkgo/Gomega) +make test-e2e # Run e2e tests on Kind cluster + +# Building +make build # Build binary +make docker-build # Build container image +make docker-push # Push to registry + +# Deployment +make install # Install CRDs to cluster +make deploy # Deploy controller to cluster +make uninstall # Remove CRDs and controller +make run # Run locally (without cluster deployment) +``` + +## Development Workflow + +1. Modify CRD type definitions in `api/v1alpha1/*_types.go` +2. 
Run `make manifests generate` to update CRDs and DeepCopy methods +3. Implement reconciliation logic in `internal/controller/*_controller.go` +4. Write tests in `internal/controller/*_controller_test.go` +5. Run `make test` to verify +6. Deploy with `make docker-build docker-push deploy IMG=registry/image:tag` + +## Flux CD Integration + +### Annotation-Based Resource Patching + +**OCIRepository** - managed by single rollout: +```yaml +metadata: + annotations: + rollout.kuberik.com/rollout: "my-app-rollout" +spec: + ref: + tag: "1.0.0" # Updated by rollout controller +``` + +**Kustomization** - multiple rollouts can manage different variables: +```yaml +metadata: + annotations: + rollout.kuberik.com/substitute.IMAGE_TAG.from: "frontend-rollout" + rollout.kuberik.com/substitute.VERSION.from: "backend-rollout" +spec: + postBuild: + substitute: + IMAGE_TAG: "1.0.0" # Managed by frontend-rollout + VERSION: "2.0.0" # Managed by backend-rollout +``` + +### ImagePolicy Workflow + +1. Flux ImageRepository scans OCI registry +2. Flux ImagePolicy filters releases with semver/regex +3. Rollout Controller reads `latestRef` from ImagePolicy status +4. Controller patches OCIRepository/Kustomization resources +5. 
Flux reconciles changes and deploys
+
+## CRDs Managed
+
+- **Rollout**: Main resource defining deployment strategy
+- **RolloutGate**: Conditions that must pass before deployment
+- **RolloutSchedule**: Scheduled deployment configurations
+- **HealthCheck**: Health status from various sources
+- **ClusterRolloutSchedule**: Cluster-scoped schedules that apply across namespaces
+
+## Rollout Spec Example
+
+```yaml
+apiVersion: kuberik.com/v1alpha1
+kind: Rollout
+metadata:
+  name: my-app-rollout
+spec:
+  releasesImagePolicy:
+    name: my-app-policy
+
+  versionHistoryLimit: 10
+  releaseUpdateInterval: "5m"
+
+  # Bake time for health checks
+  bakeTime: "10m"
+  healthCheckSelector:
+    matchLabels:
+      app: myapp
+```
+
+## Important Annotations
+
+```yaml
+# OCIRepository
+rollout.kuberik.com/rollout: "rollout-name"
+
+# Kustomization
+rollout.kuberik.com/substitute.<VARIABLE>.from: "rollout-name"
+
+# Gate bypass for emergency deployments
+rollout.kuberik.com/bypass-gates: "v1.2.3"
+```
+
+## Common Patterns
+
+### Controller Reconciliation
+
+```go
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	// 1. Get resource (with IgnoreNotFound for deleted resources)
+	rollout := &kuberikv1alpha1.Rollout{}
+	if err := r.Get(ctx, req.NamespacedName, rollout); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	// 2. Add finalizer if needed
+	// 3. Perform business logic
+	// 4. Update resource status with conditions
+	// 5. 
Return Result with optional RequeueAfter + + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil +} +``` + +**Result Options:** +- `ctrl.Result{}` - Stop, don't requeue +- `ctrl.Result{Requeue: true}` - Retry immediately +- `ctrl.Result{RequeueAfter: 5*time.Second}` - Retry after delay + +### Testing with Ginkgo/Gomega + +```go +var _ = Describe("RolloutController", func() { + Context("when rollout is created", func() { + It("should update status", func() { + rollout := &kuberikv1alpha1.Rollout{...} + Expect(k8sClient.Create(ctx, rollout)).To(Succeed()) + + Eventually(func() string { + err := k8sClient.Get(ctx, key, rollout) + if err != nil { + return "" + } + return rollout.Status.Phase + }).Should(Equal("Ready")) + }) + }) +}) +``` + +### Event Recording + +```go +r.Recorder.Event(rollout, corev1.EventTypeNormal, "RolloutSucceeded", "Deployment completed") +r.Recorder.Event(rollout, corev1.EventTypeWarning, "GatesFailing", "Required gates not passing") +``` + +### Status Subresource Pattern + +```go +r.Client.Status().Update(ctx, resource) // Update status +r.Client.Update(ctx, resource) // Update spec/metadata +``` + +## Dependencies + +Key dependencies: +- `github.com/fluxcd/image-reflector-controller/api` - ImagePolicy +- `github.com/fluxcd/kustomize-controller/api` - Kustomization +- `github.com/fluxcd/source-controller/api` - OCIRepository +- `github.com/google/go-containerregistry` - OCI image operations +- `sigs.k8s.io/controller-runtime` - Kubebuilder framework + +## Debugging + +```bash +# Controller logs +kubectl logs -n kuberik-system deployment/rollout-controller -f + +# Events +kubectl get events --sort-by='.lastTimestamp' + +# Describe resources +kubectl describe rollout +kubectl describe rolloutgate +kubectl describe healthcheck +``` diff --git a/PROJECT b/PROJECT index 6210e5f..ebc3547 100644 --- a/PROJECT +++ b/PROJECT @@ -35,4 +35,20 @@ resources: domain: kuberik.com kind: KustomizationHealth version: v1alpha1 +- api: + crdVersion: v1 + 
namespaced: true + controller: true + domain: kuberik.com + kind: RolloutSchedule + path: github.com/kuberik/rollout-controller/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: false + controller: true + domain: kuberik.com + kind: ClusterRolloutSchedule + path: github.com/kuberik/rollout-controller/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/README.md b/README.md index df1f20a..3fe391d 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ A Kubernetes controller for managing application rollouts with support for healt - **Rollout Gates**: Control deployment progression with custom gates - **Bake Time**: Wait for a specified duration before considering a deployment successful - **Multiple Rollout Support**: Kustomizations can be managed by multiple rollouts using rollout-specific annotations +- **Time-Based Scheduling**: Control when deployments can occur using RolloutSchedule resources ## Multiple Rollout Support @@ -66,6 +67,332 @@ spec: Each rollout can manage its own substitute in Kustomizations, while OCIRepositories are managed by a single rollout. +## Time-Based Scheduling + +RolloutSchedule and ClusterRolloutSchedule resources enable time-based control over when rollouts can be deployed. Schedules automatically manage RolloutGate resources based on time windows, days of the week, and date ranges. + +### Key Concepts + +**Schedule Types:** +- **RolloutSchedule**: Namespaced resource that applies to rollouts in the same namespace +- **ClusterRolloutSchedule**: Cluster-scoped resource that can apply across multiple namespaces + +**Actions:** +- **Allow**: Deployments are permitted during the schedule window, blocked outside it +- **Deny**: Deployments are blocked during the schedule window, permitted outside it + +**Rules:** +Multiple rules can be defined in a schedule. The schedule is active if **ANY rule matches** (OR logic). 
+ +Each rule can specify: +- **timeRange**: Specific hours in HH:MM format (24-hour) +- **daysOfWeek**: List of days (Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday) +- **dateRange**: Specific date range in YYYY-MM-DD format +- **timezone**: IANA timezone format (e.g., "America/New_York", "Europe/London") + +### How It Works + +1. The schedule controller evaluates time-based rules using the specified timezone +2. For each rollout matching the `rolloutSelector`, a RolloutGate is created/updated +3. The gate's `passing` status is set based on schedule evaluation: + - **Action: "Allow"** → `passing = active` (allows rollouts when active) + - **Action: "Deny"** → `passing = !active` (blocks rollouts when active) +4. The rollout controller respects gates when deciding whether to deploy new versions + +### RolloutSchedule Example: Business Hours Only + +Allow deployments only during weekday business hours: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: business-hours-allow + namespace: default +spec: + rolloutSelector: + matchLabels: + schedule: business-hours + rules: + - name: "weekday-business-hours" + timeRange: + start: "09:00" + end: "17:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + timezone: "America/New_York" + action: Allow # Only allow rollouts during business hours +``` + +Label your rollout to apply this schedule: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: my-app-rollout + labels: + schedule: business-hours +spec: + # ... 
rollout spec +``` + +### ClusterRolloutSchedule Example: Block Peak Hours + +Prevent deployments during peak traffic hours across production namespaces: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: ClusterRolloutSchedule +metadata: + name: production-peak-hours-deny +spec: + rolloutSelector: + matchLabels: + tier: frontend + namespaceSelector: + matchLabels: + environment: production + rules: + - name: "weekday-peak-hours" + timeRange: + start: "09:00" + end: "17:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + - name: "saturday-morning" + timeRange: + start: "09:00" + end: "12:00" + daysOfWeek: + - Saturday + timezone: "America/New_York" + action: Deny # Block rollouts during peak hours +``` + +### Common Use Cases + +#### Maintenance Windows + +Allow deployments only during scheduled maintenance windows: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: maintenance-window + namespace: production +spec: + rolloutSelector: + matchLabels: + maintenance: "true" + rules: + - name: "sunday-early-morning" + timeRange: + start: "02:00" + end: "06:00" + daysOfWeek: + - Sunday + timezone: "UTC" + action: Allow +``` + +#### Holiday Deployment Freeze + +Block all deployments during holiday periods: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: ClusterRolloutSchedule +metadata: + name: holiday-freeze +spec: + rolloutSelector: + matchLabels: + freeze: "holiday" + rules: + - name: "christmas-freeze" + dateRange: + start: "2026-12-23" + end: "2026-12-26" + - name: "new-year-freeze" + dateRange: + start: "2026-12-31" + end: "2027-01-02" + timezone: "America/New_York" + action: Deny +``` + +#### Weekend-Only Deployments + +Restrict certain rollouts to weekends only: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: weekend-only + namespace: default +spec: + rolloutSelector: + matchLabels: + schedule: weekend + rules: + - name: "weekend-deployment" + daysOfWeek: + 
- Saturday + - Sunday + timezone: "America/Los_Angeles" + action: Allow +``` + +#### Cross-Midnight Time Windows + +Time ranges automatically handle cross-midnight windows: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: night-deployment + namespace: default +spec: + rolloutSelector: + matchLabels: + schedule: night + rules: + - name: "overnight-window" + timeRange: + start: "22:00" # 10 PM + end: "06:00" # 6 AM (next day) + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + timezone: "UTC" + action: Allow +``` + +### Schedule Status + +View the current status of schedules: + +```bash +kubectl get rolloutschedules +kubectl get clusterrolloutschedules +``` + +Output shows: +- **ACTION**: Allow or Deny +- **ACTIVE**: Current state (true/false) +- **MATCHING**: Number of rollouts matched by selectors + +Describe a schedule for detailed information: + +```bash +kubectl describe rolloutschedule business-hours-allow +``` + +Status includes: +- `active`: Whether the schedule is currently active +- `activeRules`: Names of rules currently matching +- `nextTransition`: When the active state will next change +- `managedGates`: List of RolloutGate names managed by this schedule +- `matchingRollouts`: Count of rollouts matched by selectors + +### Multiple Rules (OR Logic) + +A schedule is active if **ANY** rule matches. 
This enables flexible scheduling: + +```yaml +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: flexible-window + namespace: default +spec: + rolloutSelector: + matchLabels: + schedule: flexible + rules: + - name: "morning-window" + timeRange: + start: "09:00" + end: "11:00" + - name: "afternoon-window" + timeRange: + start: "14:00" + end: "16:00" + - name: "weekend-anytime" + daysOfWeek: + - Saturday + - Sunday + timezone: "America/New_York" + action: Allow +``` + +This schedule allows deployments: +- 9-11 AM on any day +- 2-4 PM on any day +- Anytime on Saturday or Sunday + +### Integration with RolloutGate + +Schedules work by automatically creating and managing RolloutGate resources. Each managed gate is named: `{schedule-name}-{rollout-name}`. + +You can view the gates created by schedules: + +```bash +kubectl get rolloutgates -l "app.kubernetes.io/managed-by=rollout-schedule" +``` + +The gates are automatically cleaned up when: +- The schedule is deleted +- A rollout no longer matches the selector +- The rollout is deleted + +### Timezone Support + +All schedules use IANA timezone database names. Common examples: + +- **UTC**: "UTC" +- **US Eastern**: "America/New_York" +- **US Pacific**: "America/Los_Angeles" +- **UK**: "Europe/London" +- **Central European**: "Europe/Paris" +- **Japan**: "Asia/Tokyo" + +If no timezone is specified, UTC is used by default. + +### Troubleshooting + +**Schedule not activating:** +1. Check the schedule status: `kubectl describe rolloutschedule ` +2. Verify the timezone is correct +3. Check that rollouts match the `rolloutSelector` labels +4. Review the time range and day of week settings +5. Check the `activeRules` in status to see which rules are matching + +**Deployments still blocked:** +1. Verify the action is correct (Allow vs Deny) +2. Check if other RolloutGates are blocking the rollout +3. Review rollout events: `kubectl describe rollout ` + +**Cross-namespace schedules not working:** +1. 
Ensure you're using ClusterRolloutSchedule (not RolloutSchedule) +2. Check the `namespaceSelector` matches target namespaces +3. Verify RBAC permissions for the controller + ## Overview The Rollout Controller manages application deployments by: @@ -229,6 +556,9 @@ See the `config/samples/` directory for complete examples including: - `v1alpha1_imagerepository.yaml` - Flux ImageRepository example - `v1alpha1_imagepolicy.yaml` - Flux ImagePolicy example - `v1alpha1_rollout.yaml` - Rollout controller example +- `v1alpha1_rolloutschedule.yaml` - RolloutSchedule example +- `v1alpha1_clusterrolloutschedule.yaml` - ClusterRolloutSchedule example +- `v1alpha1_rolloutschedule_*.yaml` - Additional scheduling scenarios ## Development diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index f980dc0..6aa123d 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,8 @@ resources: - bases/kuberik.com_rollouts.yaml - bases/kuberik.com_rolloutgates.yaml - bases/kuberik.com_healthchecks.yaml + - bases/kuberik.com_rolloutschedules.yaml + - bases/kuberik.com_clusterrolloutschedules.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 8f8220e..908aea9 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,7 +1,6 @@ ## Append samples of your project ## resources: - v1alpha1_rollout.yaml - - v1alpha1_rollout_bypass.yaml - v1alpha1_rollout_advanced_selectors.yaml - v1alpha1_rolloutgate.yaml - v1alpha1_healthcheck.yaml @@ -9,4 +8,13 @@ resources: - v1alpha1_imagerepository.yaml - v1alpha1_ocirepository.yaml - v1alpha1_kustomization.yaml + - v1alpha1_kustomization_healthcheck.yaml + - v1alpha1_rolloutschedule.yaml + - v1alpha1_rolloutschedule_maintenance_window.yaml + - v1alpha1_rolloutschedule_weekend_only.yaml + - v1alpha1_rolloutschedule_night_deployment.yaml + - 
v1alpha1_rolloutschedule_multiple_windows.yaml + - v1alpha1_rolloutschedule_combined_constraints.yaml + - v1alpha1_clusterrolloutschedule_holiday_freeze.yaml + - v1alpha1_clusterrolloutschedule_peak_hours_deny.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/v1alpha1_clusterrolloutschedule.yaml b/config/samples/v1alpha1_clusterrolloutschedule.yaml index 2b10bab..0bacc2e 100644 --- a/config/samples/v1alpha1_clusterrolloutschedule.yaml +++ b/config/samples/v1alpha1_clusterrolloutschedule.yaml @@ -5,6 +5,9 @@ metadata: labels: app.kubernetes.io/name: rollout-controller app.kubernetes.io/managed-by: kustomize + annotations: + gate.kuberik.com/pretty-name: "Peak Hours Protection" + gate.kuberik.com/description: "Blocks deployments during peak traffic hours in production" spec: # Match rollouts with tier=frontend across all namespaces rolloutSelector: diff --git a/config/samples/v1alpha1_clusterrolloutschedule_holiday_freeze.yaml b/config/samples/v1alpha1_clusterrolloutschedule_holiday_freeze.yaml new file mode 100644 index 0000000..ad650c9 --- /dev/null +++ b/config/samples/v1alpha1_clusterrolloutschedule_holiday_freeze.yaml @@ -0,0 +1,56 @@ +apiVersion: kuberik.com/v1alpha1 +kind: ClusterRolloutSchedule +metadata: + name: holiday-freeze + annotations: + gate.kuberik.com/pretty-name: "Holiday Deployment Freeze" + gate.kuberik.com/description: "Blocks all deployments during major holidays (Christmas, New Year, Thanksgiving)" +spec: + # Block all deployments during major holidays + rolloutSelector: + matchLabels: + freeze: "holiday" + namespaceSelector: + matchLabels: + environment: production + rules: + # Christmas freeze + - name: "christmas-freeze" + dateRange: + start: "2026-12-23" + end: "2026-12-26" + # New Year freeze + - name: "new-year-freeze" + dateRange: + start: "2026-12-31" + end: "2027-01-02" + # Thanksgiving freeze + - name: "thanksgiving-freeze" + dateRange: + start: "2026-11-26" + end: "2026-11-27" + timezone: 
"America/New_York" + action: Deny + +--- +# Example namespace that matches the selector +apiVersion: v1 +kind: Namespace +metadata: + name: production-east + labels: + environment: production + +--- +# Example rollout in production namespace +apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: frontend-rollout + namespace: production-east + labels: + freeze: "holiday" # Matches the schedule selector +spec: + releasesImagePolicy: + name: frontend-policy + versionHistoryLimit: 10 diff --git a/config/samples/v1alpha1_clusterrolloutschedule_peak_hours_deny.yaml b/config/samples/v1alpha1_clusterrolloutschedule_peak_hours_deny.yaml new file mode 100644 index 0000000..321289b --- /dev/null +++ b/config/samples/v1alpha1_clusterrolloutschedule_peak_hours_deny.yaml @@ -0,0 +1,63 @@ +apiVersion: kuberik.com/v1alpha1 +kind: ClusterRolloutSchedule +metadata: + name: production-peak-hours-deny + annotations: + gate.kuberik.com/pretty-name: "Peak Hours Protection" + gate.kuberik.com/description: "Blocks deployments during weekday peak hours (9 AM - 5 PM) and Saturday morning (9 AM - 12 PM)" +spec: + # Block deployments during peak traffic hours in production + rolloutSelector: + matchLabels: + tier: frontend + namespaceSelector: + matchLabels: + environment: production + rules: + # Weekday peak hours (9 AM - 5 PM) + - name: "weekday-peak-hours" + timeRange: + start: "09:00" + end: "17:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + # Saturday morning peak (9 AM - 12 PM) + - name: "saturday-morning-peak" + timeRange: + start: "09:00" + end: "12:00" + daysOfWeek: + - Saturday + timezone: "America/New_York" + action: Deny + +--- +# Example namespace with production label +apiVersion: v1 +kind: Namespace +metadata: + name: production-web + labels: + environment: production + +--- +# Example frontend rollout in production +apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: web-frontend-rollout + namespace: production-web 
+ labels: + tier: frontend # Matches the schedule selector +spec: + releasesImagePolicy: + name: web-frontend-policy + versionHistoryLimit: 15 + bakeTime: "20m" + healthCheckSelector: + matchLabels: + app: web-frontend diff --git a/config/samples/v1alpha1_rolloutschedule.yaml b/config/samples/v1alpha1_rolloutschedule.yaml index 874ea3b..92f8720 100644 --- a/config/samples/v1alpha1_rolloutschedule.yaml +++ b/config/samples/v1alpha1_rolloutschedule.yaml @@ -6,6 +6,9 @@ metadata: labels: app.kubernetes.io/name: rollout-controller app.kubernetes.io/managed-by: kustomize + annotations: + gate.kuberik.com/pretty-name: "Business Hours Only" + gate.kuberik.com/description: "Deployments allowed only during weekday business hours (9 AM - 5 PM EST)" spec: # Match rollouts with the label "schedule=business-hours" rolloutSelector: diff --git a/config/samples/v1alpha1_rolloutschedule_combined_constraints.yaml b/config/samples/v1alpha1_rolloutschedule_combined_constraints.yaml new file mode 100644 index 0000000..3ef1756 --- /dev/null +++ b/config/samples/v1alpha1_rolloutschedule_combined_constraints.yaml @@ -0,0 +1,59 @@ +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: restricted-deployment + namespace: default + annotations: + gate.kuberik.com/pretty-name: "Restricted Deployment Schedule" + gate.kuberik.com/description: "Q1: Weekdays 10 AM-4 PM only. 
Q2: Weekdays anytime" +spec: + # Complex schedule with date range, time, and day constraints + rolloutSelector: + matchLabels: + schedule: restricted + rules: + # Q1 2026: Weekdays only, business hours only + - name: "q1-business-hours" + dateRange: + start: "2026-01-01" + end: "2026-03-31" + timeRange: + start: "10:00" + end: "16:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + # Q2 2026: More flexible - weekdays anytime + - name: "q2-weekdays" + dateRange: + start: "2026-04-01" + end: "2026-06-30" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + timezone: "Europe/London" + action: Allow + +--- +# Example rollout with combined constraints +apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: critical-service-rollout + namespace: default + labels: + schedule: restricted # Matches the schedule selector +spec: + releasesImagePolicy: + name: critical-service-policy + versionHistoryLimit: 20 + bakeTime: "1h" + healthCheckSelector: + matchLabels: + app: critical-service diff --git a/config/samples/v1alpha1_rolloutschedule_maintenance_window.yaml b/config/samples/v1alpha1_rolloutschedule_maintenance_window.yaml new file mode 100644 index 0000000..5745b26 --- /dev/null +++ b/config/samples/v1alpha1_rolloutschedule_maintenance_window.yaml @@ -0,0 +1,38 @@ +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: maintenance-window + namespace: default + annotations: + gate.kuberik.com/pretty-name: "Maintenance Window" + gate.kuberik.com/description: "Deployments allowed only during Sunday maintenance window (2-6 AM UTC)" +spec: + # Allow deployments only during scheduled maintenance windows + rolloutSelector: + matchLabels: + maintenance: "true" + rules: + # Sunday early morning maintenance window + - name: "sunday-maintenance" + timeRange: + start: "02:00" + end: "06:00" + daysOfWeek: + - Sunday + timezone: "UTC" + action: Allow + +--- +# Example rollout that uses this schedule 
+apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: database-rollout + namespace: default + labels: + maintenance: "true" # Matches the schedule selector +spec: + releasesImagePolicy: + name: database-policy + versionHistoryLimit: 10 + releaseUpdateInterval: "5m" diff --git a/config/samples/v1alpha1_rolloutschedule_multiple_windows.yaml b/config/samples/v1alpha1_rolloutschedule_multiple_windows.yaml new file mode 100644 index 0000000..0d54b7e --- /dev/null +++ b/config/samples/v1alpha1_rolloutschedule_multiple_windows.yaml @@ -0,0 +1,61 @@ +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: flexible-window + namespace: default + annotations: + gate.kuberik.com/pretty-name: "Flexible Deployment Windows" + gate.kuberik.com/description: "Deployments allowed during morning (9-11 AM), afternoon (2-4 PM), or anytime on weekends" +spec: + # Multiple deployment windows throughout the day + rolloutSelector: + matchLabels: + schedule: flexible + rules: + # Morning window (9-11 AM) + - name: "morning-window" + timeRange: + start: "09:00" + end: "11:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + # Afternoon window (2-4 PM) + - name: "afternoon-window" + timeRange: + start: "14:00" + end: "16:00" + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + # Weekend anytime + - name: "weekend-anytime" + daysOfWeek: + - Saturday + - Sunday + timezone: "America/New_York" + action: Allow + +--- +# Example rollout with flexible deployment windows +apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: api-rollout + namespace: default + labels: + schedule: flexible # Matches the schedule selector +spec: + releasesImagePolicy: + name: api-policy + versionHistoryLimit: 10 + bakeTime: "15m" + healthCheckSelector: + matchLabels: + app: api diff --git a/config/samples/v1alpha1_rolloutschedule_night_deployment.yaml b/config/samples/v1alpha1_rolloutschedule_night_deployment.yaml new file 
mode 100644 index 0000000..dc588f9 --- /dev/null +++ b/config/samples/v1alpha1_rolloutschedule_night_deployment.yaml @@ -0,0 +1,42 @@ +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: night-deployment + namespace: default + annotations: + gate.kuberik.com/pretty-name: "Night Deployment Window" + gate.kuberik.com/description: "Deployments allowed overnight (10 PM - 6 AM UTC)" +spec: + # Allow deployments overnight (cross-midnight window) + rolloutSelector: + matchLabels: + schedule: night + rules: + # Weeknight deployment window (10 PM to 6 AM) + - name: "overnight-window" + timeRange: + start: "22:00" # 10 PM + end: "06:00" # 6 AM (next day) + daysOfWeek: + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + timezone: "UTC" + action: Allow + +--- +# Example rollout that deploys at night +apiVersion: kuberik.com/v1alpha1 +kind: Rollout +metadata: + name: batch-processor-rollout + namespace: default + labels: + schedule: night # Matches the schedule selector +spec: + releasesImagePolicy: + name: batch-processor-policy + versionHistoryLimit: 10 + bakeTime: "30m" diff --git a/config/samples/v1alpha1_rolloutschedule_weekend_only.yaml b/config/samples/v1alpha1_rolloutschedule_weekend_only.yaml new file mode 100644 index 0000000..905bdef --- /dev/null +++ b/config/samples/v1alpha1_rolloutschedule_weekend_only.yaml @@ -0,0 +1,36 @@ +apiVersion: kuberik.com/v1alpha1 +kind: RolloutSchedule +metadata: + name: weekend-only + namespace: default + annotations: + gate.kuberik.com/pretty-name: "Weekend Deployments" + gate.kuberik.com/description: "Deployments allowed only on weekends" +spec: + # Allow deployments only on weekends + rolloutSelector: + matchLabels: + schedule: weekend + rules: + # Saturday and Sunday all day + - name: "weekend-deployment" + daysOfWeek: + - Saturday + - Sunday + timezone: "America/Los_Angeles" + action: Allow + +--- +# Example rollout that deploys on weekends only +apiVersion: kuberik.com/v1alpha1 +kind: Rollout 
+metadata: + name: experimental-rollout + namespace: default + labels: + schedule: weekend # Matches the schedule selector +spec: + releasesImagePolicy: + name: experimental-policy + versionHistoryLimit: 5 + releaseUpdateInterval: "10m" diff --git a/internal/controller/clusterrolloutschedule_controller.go b/internal/controller/clusterrolloutschedule_controller.go index d867b45..435c1ea 100644 --- a/internal/controller/clusterrolloutschedule_controller.go +++ b/internal/controller/clusterrolloutschedule_controller.go @@ -113,7 +113,7 @@ func (r *ClusterRolloutScheduleReconciler) Reconcile(ctx context.Context, req ct for _, rollout := range allMatchingRollouts { gateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) - if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef); err != nil { + if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef, schedule.Annotations); err != nil { logger.Error(err, "Failed to sync gate", "rollout", rollout.Name, "namespace", rollout.Namespace) } else { key := fmt.Sprintf("%s/%s", rollout.Namespace, gateName) diff --git a/internal/controller/rolloutschedule_controller.go b/internal/controller/rolloutschedule_controller.go index 17be0ed..5b183d2 100644 --- a/internal/controller/rolloutschedule_controller.go +++ b/internal/controller/rolloutschedule_controller.go @@ -90,7 +90,7 @@ func (r *RolloutScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Requ for _, rollout := range rolloutList.Items { gateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) - if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef); err != nil { + if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef, schedule.Annotations); err != nil { logger.Error(err, "Failed to sync gate", "rollout", rollout.Name, "gate", gateName) // Continue with other rollouts, but we'll return the error at end if needed? // Best effort to sync others. 
diff --git a/internal/controller/rolloutschedule_helpers.go b/internal/controller/rolloutschedule_helpers.go index c69b07f..ca7f92f 100644 --- a/internal/controller/rolloutschedule_helpers.go +++ b/internal/controller/rolloutschedule_helpers.go @@ -314,6 +314,7 @@ func syncRolloutGate( gateName string, passing bool, ownerRef metav1.OwnerReference, + scheduleAnnotations map[string]string, ) error { gate := &rolloutv1alpha1.RolloutGate{} err := c.Get(ctx, types.NamespacedName{ @@ -323,10 +324,20 @@ func syncRolloutGate( if errors.IsNotFound(err) { // Create new gate + // Copy gate-related annotations from schedule to gate + annotations := make(map[string]string) + if prettyName, ok := scheduleAnnotations["gate.kuberik.com/pretty-name"]; ok { + annotations["gate.kuberik.com/pretty-name"] = prettyName + } + if description, ok := scheduleAnnotations["gate.kuberik.com/description"]; ok { + annotations["gate.kuberik.com/description"] = description + } + gate = &rolloutv1alpha1.RolloutGate{ ObjectMeta: metav1.ObjectMeta{ Name: gateName, Namespace: rollout.Namespace, + Annotations: annotations, OwnerReferences: []metav1.OwnerReference{ownerRef}, }, Spec: rolloutv1alpha1.RolloutGateSpec{ @@ -350,6 +361,25 @@ func syncRolloutGate( needsUpdate = true } + // Update annotations from schedule if needed + if gate.Annotations == nil { + gate.Annotations = make(map[string]string) + } + + if prettyName, ok := scheduleAnnotations["gate.kuberik.com/pretty-name"]; ok { + if gate.Annotations["gate.kuberik.com/pretty-name"] != prettyName { + gate.Annotations["gate.kuberik.com/pretty-name"] = prettyName + needsUpdate = true + } + } + + if description, ok := scheduleAnnotations["gate.kuberik.com/description"]; ok { + if gate.Annotations["gate.kuberik.com/description"] != description { + gate.Annotations["gate.kuberik.com/description"] = description + needsUpdate = true + } + } + // Ensure owner reference is set hasOwner := false for _, ref := range gate.OwnerReferences { From 
71066c89c3a14778e248bbbac62dfe03e539e097 Mon Sep 17 00:00:00 2001 From: Luka Skugor Date: Sat, 31 Jan 2026 18:13:39 +0000 Subject: [PATCH 3/4] Use GenerateName for schedule-generated rollout gates Replace predictable gate names with Kubernetes-generated names using GenerateName prefix "schedule-gate-". Gates are now identified and tracked using labels instead of name patterns: - gate.kuberik.com/schedule-name - gate.kuberik.com/schedule-namespace (for namespaced schedules) - gate.kuberik.com/schedule-kind - gate.kuberik.com/rollout-name This change affects both RolloutSchedule and ClusterRolloutSchedule controllers, updating gate creation, lookup, and cleanup logic to use label-based identification. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy --- .../clusterrolloutschedule_controller.go | 95 +++++++----- .../controller/rolloutschedule_controller.go | 40 +++-- .../rolloutschedule_controller_test.go | 86 +++++++---- .../controller/rolloutschedule_helpers.go | 146 ++++++++++++------ 4 files changed, 240 insertions(+), 127 deletions(-) diff --git a/internal/controller/clusterrolloutschedule_controller.go b/internal/controller/clusterrolloutschedule_controller.go index 435c1ea..299f97a 100644 --- a/internal/controller/clusterrolloutschedule_controller.go +++ b/internal/controller/clusterrolloutschedule_controller.go @@ -19,11 +19,11 @@ package controller import ( "context" "fmt" - "strings" "time" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -34,7 +34,6 @@ import ( rolloutv1alpha1 "github.com/kuberik/rollout-controller/api/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) // ClusterRolloutScheduleReconciler reconciles a 
ClusterRolloutSchedule object @@ -108,37 +107,33 @@ func (r *ClusterRolloutScheduleReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, err } - // Set of current gates to check against previous for cleanup - currentGatesSet := make(map[string]bool) + // Track rollouts per namespace for cleanup + rolloutsByNamespace := make(map[string]map[string]bool) for _, rollout := range allMatchingRollouts { - gateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) - if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef, schedule.Annotations); err != nil { + if rolloutsByNamespace[rollout.Namespace] == nil { + rolloutsByNamespace[rollout.Namespace] = make(map[string]bool) + } + rolloutsByNamespace[rollout.Namespace][rollout.Name] = true + + gateName, err := syncRolloutGate(ctx, r.Client, &rollout, schedule.Name, "", "ClusterRolloutSchedule", passing, ownerRef, schedule.Annotations) + if err != nil { logger.Error(err, "Failed to sync gate", "rollout", rollout.Name, "namespace", rollout.Namespace) } else { key := fmt.Sprintf("%s/%s", rollout.Namespace, gateName) managedGates = append(managedGates, key) - currentGatesSet[key] = true } } // 4. 
Cleanup Orphans - // Check previously managed gates that are no longer in current set - for _, oldKey := range schedule.Status.ManagedGates { - if !currentGatesSet[oldKey] { - // Orphaned - parse and delete - parts := strings.Split(oldKey, "/") - if len(parts) != 2 { - continue - } - ns, name := parts[0], parts[1] - - gate := &rolloutv1alpha1.RolloutGate{} - if err := r.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, gate); err == nil { - if err := r.Delete(ctx, gate); client.IgnoreNotFound(err) != nil { - logger.Error(err, "Failed to delete orphaned gate", "key", oldKey) - } - } + // For each namespace that had gates, check for orphans + for _, ns := range namespaceList.Items { + currentRollouts := rolloutsByNamespace[ns.Name] + if currentRollouts == nil { + currentRollouts = make(map[string]bool) + } + if err := cleanupOrphanedGates(ctx, r.Client, schedule.Name, "", "ClusterRolloutSchedule", currentRollouts, ns.Name); err != nil { + logger.Error(err, "Failed to cleanup orphaned gates", "namespace", ns.Name) } } @@ -209,6 +204,22 @@ func (r *ClusterRolloutScheduleReconciler) findSchedulesForRollout(ctx context.C return nil } + // Also check for gates that reference this rollout (to handle cleanup if no longer matches) + gateList := &rolloutv1alpha1.RolloutGateList{} + _ = r.List(ctx, gateList, + client.InNamespace(rollout.Namespace), + client.MatchingLabels{ + LabelScheduleKind: "ClusterRolloutSchedule", + LabelRolloutName: rollout.Name, + }, + ) + gateScheduleNames := make(map[string]bool) + for _, gate := range gateList.Items { + if scheduleName := gate.Labels[LabelScheduleName]; scheduleName != "" { + gateScheduleNames[scheduleName] = true + } + } + for _, schedule := range scheduleList.Items { match := false @@ -222,15 +233,9 @@ func (r *ClusterRolloutScheduleReconciler) findSchedulesForRollout(ctx context.C } } - // Also check if previously managed - if !match { - expectedKey := fmt.Sprintf("%s/%s-%s", rollout.Namespace, schedule.Name, rollout.Name) 
- for _, managedKey := range schedule.Status.ManagedGates { - if managedKey == expectedKey { - match = true - break - } - } + // Also check if there's an existing gate for this rollout (to handle cleanup if no longer matches) + if !match && gateScheduleNames[schedule.Name] { + match = true } if match { @@ -256,6 +261,21 @@ func (r *ClusterRolloutScheduleReconciler) findSchedulesForNamespace(ctx context return nil } + // Check for gates in this namespace managed by ClusterRolloutSchedules + gateList := &rolloutv1alpha1.RolloutGateList{} + _ = r.List(ctx, gateList, + client.InNamespace(ns.Name), + client.MatchingLabels{ + LabelScheduleKind: "ClusterRolloutSchedule", + }, + ) + gateScheduleNames := make(map[string]bool) + for _, gate := range gateList.Items { + if scheduleName := gate.Labels[LabelScheduleName]; scheduleName != "" { + gateScheduleNames[scheduleName] = true + } + } + var requests []reconcile.Request for _, schedule := range scheduleList.Items { @@ -268,16 +288,7 @@ func (r *ClusterRolloutScheduleReconciler) findSchedulesForNamespace(ctx context // If it DOESN'T match now, we should check if it manages any gates in this namespace. // This handles the "cleanup" case. 
- hasGatesInNs := false - prefix := ns.Name + "/" - for _, managedKey := range schedule.Status.ManagedGates { - if strings.HasPrefix(managedKey, prefix) { - hasGatesInNs = true - break - } - } - - if hasGatesInNs { + if gateScheduleNames[schedule.Name] { requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKey{Name: schedule.Name}}) } } diff --git a/internal/controller/rolloutschedule_controller.go b/internal/controller/rolloutschedule_controller.go index 5b183d2..ab0d513 100644 --- a/internal/controller/rolloutschedule_controller.go +++ b/internal/controller/rolloutschedule_controller.go @@ -18,7 +18,6 @@ package controller import ( "context" - "fmt" "time" "k8s.io/apimachinery/pkg/labels" @@ -88,10 +87,12 @@ func (r *RolloutScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } + currentRollouts := make(map[string]bool) for _, rollout := range rolloutList.Items { - gateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) - if err := syncRolloutGate(ctx, r.Client, &rollout, gateName, passing, ownerRef, schedule.Annotations); err != nil { - logger.Error(err, "Failed to sync gate", "rollout", rollout.Name, "gate", gateName) + currentRollouts[rollout.Name] = true + gateName, err := syncRolloutGate(ctx, r.Client, &rollout, schedule.Name, schedule.Namespace, "RolloutSchedule", passing, ownerRef, schedule.Annotations) + if err != nil { + logger.Error(err, "Failed to sync gate", "rollout", rollout.Name) // Continue with other rollouts, but we'll return the error at end if needed? // Best effort to sync others. } else { @@ -101,8 +102,7 @@ func (r *RolloutScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Requ // 4. 
Cleanup Orphans // Remove gates that are no longer needed (rollout no longer matches) - // We use the previous status.ManagedGates to know what we should check - if err := cleanupOrphanedGates(ctx, r.Client, schedule.Status.ManagedGates, managedGates, schedule.Namespace); err != nil { + if err := cleanupOrphanedGates(ctx, r.Client, schedule.Name, schedule.Namespace, "RolloutSchedule", currentRollouts, schedule.Namespace); err != nil { logger.Error(err, "Failed to cleanup orphaned gates") // Don't block status update } @@ -161,6 +161,22 @@ func (r *RolloutScheduleReconciler) findSchedulesForRollout(ctx context.Context, return nil } + // Also check for gates that reference this rollout (to handle cleanup if no longer matches) + gateList := &rolloutv1alpha1.RolloutGateList{} + _ = r.List(ctx, gateList, + client.InNamespace(rollout.Namespace), + client.MatchingLabels{ + LabelScheduleKind: "RolloutSchedule", + LabelRolloutName: rollout.Name, + }, + ) + gateScheduleNames := make(map[string]bool) + for _, gate := range gateList.Items { + if scheduleName := gate.Labels[LabelScheduleName]; scheduleName != "" { + gateScheduleNames[scheduleName] = true + } + } + var requests []reconcile.Request for _, schedule := range scheduleList.Items { match := false @@ -171,15 +187,9 @@ func (r *RolloutScheduleReconciler) findSchedulesForRollout(ctx context.Context, match = true } - // Also check if previously managed (to handle cleanup if no longer matches) - if !match { - expectedGateName := fmt.Sprintf("%s-%s", schedule.Name, rollout.Name) - for _, managedGate := range schedule.Status.ManagedGates { - if managedGate == expectedGateName { - match = true - break - } - } + // Also check if there's an existing gate for this rollout (to handle cleanup if no longer matches) + if !match && gateScheduleNames[schedule.Name] { + match = true } if match { diff --git a/internal/controller/rolloutschedule_controller_test.go b/internal/controller/rolloutschedule_controller_test.go index 
15e9b20..628a264 100644 --- a/internal/controller/rolloutschedule_controller_test.go +++ b/internal/controller/rolloutschedule_controller_test.go @@ -18,6 +18,7 @@ package controller import ( "context" + "strings" "testing" "time" @@ -29,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" + clientpkg "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -265,7 +266,7 @@ func TestRolloutScheduleReconciler(t *testing.T) { CurrentTime: time.Date(2025, 1, 1, 10, 0, 0, 0, time.UTC), } - client := fake.NewClientBuilder(). + fakeClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(rollout, schedule). WithStatusSubresource(schedule). // Add status subresource support @@ -273,11 +274,11 @@ func TestRolloutScheduleReconciler(t *testing.T) { // Verify object exists checkSchedule := &rolloutv1alpha1.RolloutSchedule{} - err := client.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, checkSchedule) + err := fakeClient.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, checkSchedule) require.NoError(t, err, "Failed to find schedule in fake client during setup") r := &RolloutScheduleReconciler{ - Client: client, + Client: fakeClient, Scheme: scheme, Recorder: record.NewFakeRecorder(10), Clock: mockClock, @@ -294,20 +295,30 @@ func TestRolloutScheduleReconciler(t *testing.T) { _, err = r.Reconcile(context.Background(), req) require.NoError(t, err) - // Verify gate created - gate := &rolloutv1alpha1.RolloutGate{} - gateName := "business-hours-my-rollout" // schedule-rollout - err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + // Verify gate created by finding it via labels + gateList := 
&rolloutv1alpha1.RolloutGateList{} + err = fakeClient.List(context.Background(), gateList, + clientpkg.InNamespace("default"), + clientpkg.MatchingLabels{ + LabelScheduleName: schedule.Name, + LabelScheduleKind: "RolloutSchedule", + LabelRolloutName: rollout.Name, + }, + ) require.NoError(t, err) + require.Len(t, gateList.Items, 1, "Should have one gate created") + + gate := &gateList.Items[0] require.NotNil(t, gate.Spec.Passing) assert.True(t, *gate.Spec.Passing, "Gate should be passing (Allow + Inside window)") assert.Equal(t, rollout.Name, gate.Spec.RolloutRef.Name) + assert.True(t, strings.HasPrefix(gate.Name, "schedule-gate-"), "Gate name should have generated prefix") // Verify status updated - err = client.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, schedule) + err = fakeClient.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, schedule) require.NoError(t, err) assert.True(t, schedule.Status.Active) - assert.Contains(t, schedule.Status.ManagedGates, gateName) + assert.Len(t, schedule.Status.ManagedGates, 1) // 2. Advance time to 20:00 (outside window) mockClock.CurrentTime = time.Date(2025, 1, 1, 20, 0, 0, 0, time.UTC) @@ -315,17 +326,17 @@ func TestRolloutScheduleReconciler(t *testing.T) { _, err = r.Reconcile(context.Background(), req) require.NoError(t, err) - // Verify gate updated to passing=false - err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + // Verify gate updated to passing=false (fetch by name from previous gate) + err = fakeClient.Get(context.Background(), types.NamespacedName{Name: gate.Name, Namespace: "default"}, gate) require.NoError(t, err) require.NotNil(t, gate.Spec.Passing) assert.False(t, *gate.Spec.Passing, "Gate should NOT be passing (Allow + Outside window)") // 3. 
Change Action to Deny - err = client.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, schedule) + err = fakeClient.Get(context.Background(), types.NamespacedName{Name: schedule.Name, Namespace: schedule.Namespace}, schedule) require.NoError(t, err) schedule.Spec.Action = rolloutv1alpha1.RolloutScheduleActionDeny - err = client.Update(context.Background(), schedule) + err = fakeClient.Update(context.Background(), schedule) require.NoError(t, err) // Reconcile (still outside window at 20:00) @@ -333,7 +344,7 @@ func TestRolloutScheduleReconciler(t *testing.T) { require.NoError(t, err) // Outside window + Deny action = Passing (Deny only blocks active periods) - err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + err = fakeClient.Get(context.Background(), types.NamespacedName{Name: gate.Name, Namespace: "default"}, gate) require.NoError(t, err) assert.True(t, *gate.Spec.Passing, "Gate should be passing (Deny + Outside window)") @@ -344,7 +355,7 @@ func TestRolloutScheduleReconciler(t *testing.T) { require.NoError(t, err) // Inside window + Deny action = Not Passing - err = client.Get(context.Background(), types.NamespacedName{Name: gateName, Namespace: "default"}, gate) + err = fakeClient.Get(context.Background(), types.NamespacedName{Name: gate.Name, Namespace: "default"}, gate) require.NoError(t, err) assert.False(t, *gate.Spec.Passing, "Gate should NOT be passing (Deny + Inside window)") } @@ -435,19 +446,33 @@ func TestClusterRolloutScheduleReconciler(t *testing.T) { _, err = r.Reconcile(context.Background(), req) require.NoError(t, err) - // Check prod gate - prodGate := &rolloutv1alpha1.RolloutGate{} - prodGateName := "prod-freeze-app-prod" - err = fakeClient.Get(context.Background(), types.NamespacedName{Name: prodGateName, Namespace: "prod"}, prodGate) + // Check prod gate by labels + prodGateList := &rolloutv1alpha1.RolloutGateList{} + err = 
fakeClient.List(context.Background(), prodGateList, + clientpkg.InNamespace("prod"), + clientpkg.MatchingLabels{ + LabelScheduleName: schedule.Name, + LabelScheduleKind: "ClusterRolloutSchedule", + LabelRolloutName: prodRollout.Name, + }, + ) require.NoError(t, err) + require.Len(t, prodGateList.Items, 1, "Should have one prod gate") + prodGate := &prodGateList.Items[0] assert.False(t, *prodGate.Spec.Passing, "Prod gate should block") // Check dev gate (should not exist) - devGate := &rolloutv1alpha1.RolloutGate{} - devGateName := "prod-freeze-app-dev" - err = fakeClient.Get(context.Background(), types.NamespacedName{Name: devGateName, Namespace: "dev"}, devGate) - assert.Error(t, err) - assert.True(t, client.IgnoreNotFound(err) == nil) + devGateList := &rolloutv1alpha1.RolloutGateList{} + err = fakeClient.List(context.Background(), devGateList, + clientpkg.InNamespace("dev"), + clientpkg.MatchingLabels{ + LabelScheduleName: schedule.Name, + LabelScheduleKind: "ClusterRolloutSchedule", + LabelRolloutName: devRollout.Name, + }, + ) + require.NoError(t, err) + assert.Len(t, devGateList.Items, 0, "Dev gate should not exist yet") // 2. 
Remove Namespace Selector (matches all) err = fakeClient.Get(context.Background(), types.NamespacedName{Name: schedule.Name}, schedule) @@ -464,7 +489,16 @@ func TestClusterRolloutScheduleReconciler(t *testing.T) { require.NoError(t, err) // Now dev gate should exist and block - err = fakeClient.Get(context.Background(), types.NamespacedName{Name: devGateName, Namespace: "dev"}, devGate) + err = fakeClient.List(context.Background(), devGateList, + clientpkg.InNamespace("dev"), + clientpkg.MatchingLabels{ + LabelScheduleName: schedule.Name, + LabelScheduleKind: "ClusterRolloutSchedule", + LabelRolloutName: devRollout.Name, + }, + ) require.NoError(t, err) + require.Len(t, devGateList.Items, 1, "Dev gate should now exist") + devGate := &devGateList.Items[0] assert.False(t, *devGate.Spec.Passing, "Dev gate should block now") } diff --git a/internal/controller/rolloutschedule_helpers.go b/internal/controller/rolloutschedule_helpers.go index ca7f92f..73b0f66 100644 --- a/internal/controller/rolloutschedule_helpers.go +++ b/internal/controller/rolloutschedule_helpers.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -306,24 +305,68 @@ func calculateGateStatus(active bool, action rolloutv1alpha1.RolloutScheduleActi } } +// Label keys for schedule-managed gates +const ( + LabelScheduleName = "gate.kuberik.com/schedule-name" + LabelScheduleNamespace = "gate.kuberik.com/schedule-namespace" + LabelScheduleKind = "gate.kuberik.com/schedule-kind" + LabelRolloutName = "gate.kuberik.com/rollout-name" +) + +// findExistingGate finds an existing gate managed by a schedule for a specific rollout. 
+func findExistingGate( + ctx context.Context, + c client.Client, + rollout *rolloutv1alpha1.Rollout, + scheduleName string, + scheduleNamespace string, + scheduleKind string, +) (*rolloutv1alpha1.RolloutGate, error) { + gateList := &rolloutv1alpha1.RolloutGateList{} + labels := map[string]string{ + LabelScheduleName: scheduleName, + LabelScheduleKind: scheduleKind, + LabelRolloutName: rollout.Name, + } + if scheduleNamespace != "" { + labels[LabelScheduleNamespace] = scheduleNamespace + } + + if err := c.List(ctx, gateList, + client.InNamespace(rollout.Namespace), + client.MatchingLabels(labels), + ); err != nil { + return nil, err + } + + if len(gateList.Items) == 0 { + return nil, nil + } + + return &gateList.Items[0], nil +} + // syncRolloutGate creates or updates a RolloutGate for a rollout. +// Returns the gate name for tracking purposes. func syncRolloutGate( ctx context.Context, c client.Client, rollout *rolloutv1alpha1.Rollout, - gateName string, + scheduleName string, + scheduleNamespace string, + scheduleKind string, passing bool, ownerRef metav1.OwnerReference, scheduleAnnotations map[string]string, -) error { - gate := &rolloutv1alpha1.RolloutGate{} - err := c.Get(ctx, types.NamespacedName{ - Namespace: rollout.Namespace, - Name: gateName, - }, gate) - - if errors.IsNotFound(err) { - // Create new gate +) (string, error) { + // Find existing gate by labels + gate, err := findExistingGate(ctx, c, rollout, scheduleName, scheduleNamespace, scheduleKind) + if err != nil { + return "", fmt.Errorf("failed to find existing gate: %w", err) + } + + if gate == nil { + // Create new gate using GenerateName // Copy gate-related annotations from schedule to gate annotations := make(map[string]string) if prettyName, ok := scheduleAnnotations["gate.kuberik.com/pretty-name"]; ok { @@ -333,10 +376,21 @@ func syncRolloutGate( annotations["gate.kuberik.com/description"] = description } + // Build labels to identify the gate + gateLabels := map[string]string{ + 
LabelScheduleName: scheduleName, + LabelScheduleKind: scheduleKind, + LabelRolloutName: rollout.Name, + } + if scheduleNamespace != "" { + gateLabels[LabelScheduleNamespace] = scheduleNamespace + } + gate = &rolloutv1alpha1.RolloutGate{ ObjectMeta: metav1.ObjectMeta{ - Name: gateName, + GenerateName: "schedule-gate-", Namespace: rollout.Namespace, + Labels: gateLabels, Annotations: annotations, OwnerReferences: []metav1.OwnerReference{ownerRef}, }, @@ -347,11 +401,10 @@ func syncRolloutGate( Passing: &passing, }, } - return c.Create(ctx, gate) - } - - if err != nil { - return fmt.Errorf("failed to get gate %s: %w", gateName, err) + if err := c.Create(ctx, gate); err != nil { + return "", err + } + return gate.Name, nil } // Update existing gate if needed @@ -394,44 +447,49 @@ func syncRolloutGate( } if needsUpdate { - return c.Update(ctx, gate) + if err := c.Update(ctx, gate); err != nil { + return "", err + } } - return nil + return gate.Name, nil } // cleanupOrphanedGates removes gates that are no longer needed. +// currentRollouts contains the names of rollouts that should have gates. 
func cleanupOrphanedGates( ctx context.Context, c client.Client, - managedGates []string, - currentGates []string, + scheduleName string, + scheduleNamespace string, + scheduleKind string, + currentRollouts map[string]bool, namespace string, ) error { - // Convert currentGates to a map for quick lookup - current := make(map[string]bool) - for _, name := range currentGates { - current[name] = true - } - - // Delete gates that are in managedGates but not in currentGates - for _, gateName := range managedGates { - if !current[gateName] { - gate := &rolloutv1alpha1.RolloutGate{} - err := c.Get(ctx, types.NamespacedName{ - Namespace: namespace, - Name: gateName, - }, gate) - - if err == nil { - // Gate exists, delete it - if err := c.Delete(ctx, gate); err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("failed to delete orphaned gate %s: %w", gateName, err) - } - } else if !errors.IsNotFound(err) { - return fmt.Errorf("failed to get gate %s for cleanup: %w", gateName, err) + // List all gates managed by this schedule in this namespace + gateList := &rolloutv1alpha1.RolloutGateList{} + labels := map[string]string{ + LabelScheduleName: scheduleName, + LabelScheduleKind: scheduleKind, + } + if scheduleNamespace != "" { + labels[LabelScheduleNamespace] = scheduleNamespace + } + + if err := c.List(ctx, gateList, + client.InNamespace(namespace), + client.MatchingLabels(labels), + ); err != nil { + return fmt.Errorf("failed to list gates for cleanup: %w", err) + } + + // Delete gates for rollouts that are no longer matched + for _, gate := range gateList.Items { + rolloutName := gate.Labels[LabelRolloutName] + if !currentRollouts[rolloutName] { + if err := c.Delete(ctx, &gate); err != nil && !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned gate %s: %w", gate.Name, err) } - // If already not found, that's fine } } From 53caa0717826bb3983c42966d9287a93370fde32 Mon Sep 17 00:00:00 2001 From: Luka Skugor Date: Sun, 1 Feb 2026 16:17:52 +0000 
Subject: [PATCH 4/4] docs: add kubebuilder CLI usage rule --- CLAUDE.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index 1e5ed04..326163f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -2,6 +2,25 @@ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. +## Rules + +- **Always use kubebuilder CLI to scaffold controllers.** Never manually create controller files. Use: + ```bash + # For controllers with new CRDs: + kubebuilder create api --group <group> --version <version> --kind <Kind> + + # For controllers watching external CRDs: + kubebuilder create api \ + --group <group> \ + --version <version> \ + --kind <ExternalKind> \ + --controller=true \ + --resource=false \ + --external-api-domain <external-domain> \ + --external-api-path <path-to-external-api-types> + ``` + Then implement your logic in the scaffolded `*_controller.go` file. + ## What is Rollout Controller? A Kubernetes controller for managing application rollouts with support for health checks, gates, and bake time. It integrates tightly with Flux CD for GitOps workflows, providing progressive delivery capabilities.