diff --git a/.github/.copilot/breadcrumbs/2025-05-30-1430-add-namespace-scoped-binding-snapshot.md b/.github/.copilot/breadcrumbs/2025-05-30-1430-add-namespace-scoped-binding-snapshot.md new file mode 100644 index 000000000..94d6a20b3 --- /dev/null +++ b/.github/.copilot/breadcrumbs/2025-05-30-1430-add-namespace-scoped-binding-snapshot.md @@ -0,0 +1,247 @@ +# Add Namespace-scoped ResourceBinding and ResourceSnapshot API Types + +## Requirements + +Add namespace-scoped ResourceBinding and ResourceSnapshot API types to the v1beta1 placement API package to complement the existing cluster-scoped types (ClusterResourceBinding and ClusterResourceSnapshot) and match the pattern established with ResourcePlacement. + +### Current State Analysis +- ✅ ClusterResourceBinding exists in `apis/placement/v1beta1/binding_types.go` +- ✅ ClusterResourceSnapshot exists in `apis/placement/v1beta1/resourcesnapshot_types.go` +- ✅ ResourcePlacement exists in `apis/placement/v1beta1/clusterresourceplacement_types.go` +- ❌ Namespace-scoped ResourceBinding is missing +- ❌ Namespace-scoped ResourceSnapshot is missing + +### Required Implementation +1. Add namespace-scoped `ResourceBinding` type following the same pattern as ClusterResourceBinding +2. Add namespace-scoped `ResourceSnapshot` type following the same pattern as ClusterResourceSnapshot +3. Ensure proper kubebuilder annotations for CRD generation +4. Follow existing v1beta1 API patterns and conventions + +## Additional comments from user + +User requested to continue the implementation based on the existing analysis. 
+ +## Plan + +### Phase 1: Add namespace-scoped ResourceBinding type +- **Task 1.1**: Add ResourceBinding type definition to `binding_types.go` - ✅ COMPLETED + - Use the same spec and status structs as ClusterResourceBinding (ResourceBindingSpec, ResourceBindingStatus) - ✅ DONE + - Add appropriate kubebuilder annotations for namespace-scoped resource - ✅ DONE + - Include proper print columns and categories - ✅ DONE + - Add ResourceBindingList type - ✅ DONE + - Success criteria: ResourceBinding type properly defined with correct annotations - ✅ ACHIEVED + +- **Task 1.2**: Add ResourceBinding methods and registration - ✅ COMPLETED + - Add SetConditions, RemoveCondition, GetCondition methods - ✅ DONE + - Register ResourceBinding and ResourceBindingList in init() function - ✅ DONE + - Success criteria: Methods implemented and types registered - ✅ ACHIEVED + +### Phase 2: Add namespace-scoped ResourceSnapshot type +- **Task 2.1**: Add ResourceSnapshot type definition to `resourcesnapshot_types.go` - ✅ COMPLETED + - Use the same spec and status structs as ClusterResourceSnapshot (ResourceSnapshotSpec, ResourceSnapshotStatus) - ✅ DONE + - Add appropriate kubebuilder annotations for namespace-scoped resource - ✅ DONE + - Include proper print columns and categories - ✅ DONE + - Add ResourceSnapshotList type - ✅ DONE + - Success criteria: ResourceSnapshot type properly defined with correct annotations - ✅ ACHIEVED + +- **Task 2.2**: Add ResourceSnapshot methods and registration - ✅ COMPLETED + - Add SetConditions, RemoveCondition, GetCondition methods - ✅ DONE + - Register ResourceSnapshot and ResourceSnapshotList in init() function - ✅ DONE + - Success criteria: Methods implemented and types registered - ✅ ACHIEVED + +### Phase 3: Validate and test +- **Task 3.1**: Check for compilation errors + - Run `go build` to ensure no syntax errors + - Success criteria: Code compiles without errors + +- **Task 3.2**: Verify CRD generation (if possible) + - Check if CRDs can be 
generated properly + - Success criteria: No CRD generation errors + +## Decisions + +1. **Reuse existing spec/status types**: Following the established pattern where cluster-scoped and namespace-scoped resources share the same spec and status definitions (like ClusterResourcePlacement and ResourcePlacement) + +2. **Maintain consistent naming**: Using ResourceBinding and ResourceSnapshot (without "Cluster" prefix) for namespace-scoped variants, following the ResourcePlacement pattern + +3. **Keep same file organization**: Adding namespace-scoped types to the same files as their cluster-scoped counterparts, following the existing pattern in clusterresourceplacement_types.go + +## Implementation Details + +### ResourceBinding Implementation in `binding_types.go` + +Added namespace-scoped ResourceBinding type following the same pattern as ClusterResourceBinding: + +```go +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,categories={fleet,fleet-placement},shortName=rb +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="WorkSynchronized")].status`,name="WorkSynchronized",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="ResourcesApplied",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Available")].status`,name="ResourceAvailable",priority=1,type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date + +// ResourceBinding represents a scheduling decision that binds a group of resources to a cluster. +// It MUST have a label named `CRPTrackingLabel` that points to the resource policy that creates it. +type ResourceBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ResourceBinding. 
+ // +required + Spec ResourceBindingSpec `json:"spec"` + + // The observed status of ResourceBinding. + // +optional + Status ResourceBindingStatus `json:"status,omitempty"` +} +``` + +Key differences from ClusterResourceBinding: +- Scope changed from `Cluster` to `Namespaced` +- Short name changed from `crb` to `rb` +- Comment updated to refer to "resource policy" instead of "cluster resource policy" + +### ResourceSnapshot Implementation in `resourcesnapshot_types.go` + +Added namespace-scoped ResourceSnapshot type following the same pattern as ClusterResourceSnapshot: + +```go +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope="Namespaced",shortName=rs,categories={fleet,fleet-placement} +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:JSONPath=`.metadata.generation`,name="Gen",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceSnapshot is used to store a snapshot of selected resources by a resource placement policy. +type ResourceSnapshot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ResourceSnapshot. + // +required + Spec ResourceSnapshotSpec `json:"spec"` + + // The observed status of ResourceSnapshot. + // +optional + Status ResourceSnapshotStatus `json:"status,omitempty"` +} +``` + +Key differences from ClusterResourceSnapshot: +- Scope changed from `Cluster` to `Namespaced` +- Short name changed from `crs` to `rs` +- Comment updated to refer to "resource placement policy" instead of "ResourcePlacement" +- Removed `+genclient:nonNamespaced` annotation for namespace-scoped resource + +### Common Patterns + +Both implementations follow the established patterns: +1. 
**Shared Spec/Status Types**: Reuse existing `ResourceBindingSpec`/`ResourceBindingStatus` and `ResourceSnapshotSpec`/`ResourceSnapshotStatus` +2. **Consistent Annotations**: Same kubebuilder annotations pattern with scope changes +3. **Helper Methods**: Same SetConditions, GetCondition methods +4. **Registration**: Added to `init()` function alongside cluster-scoped variants +5. **Generated Code**: DeepCopy methods generated automatically by `make generate` + +## Changes Made + +### Files Modified + +1. **`/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1beta1/binding_types.go`** + - Added namespace-scoped `ResourceBinding` type + - Added `ResourceBindingList` type + - Added `SetConditions`, `RemoveCondition`, `GetCondition` methods for ResourceBinding + - Updated `init()` function to register ResourceBinding and ResourceBindingList types + +2. **`/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1beta1/resourcesnapshot_types.go`** + - Added namespace-scoped `ResourceSnapshot` type + - Added `ResourceSnapshotList` type + - Added `SetConditions`, `GetCondition` methods for ResourceSnapshot + - Updated `init()` function to register ResourceSnapshot and ResourceSnapshotList types + +### Generated Files Updated +- DeepCopy methods automatically generated for new types via `make generate` +- New types now implement the required `runtime.Object` interface + +### Compilation Validation +- All files compile successfully with `go build ./apis/placement/v1beta1` +- No syntax or import errors detected + +## Before/After Comparison + +### Before Implementation + +The v1beta1 placement API package was missing namespace-scoped variants of ResourceBinding and ResourceSnapshot: + +**Missing Types:** +- ❌ `ResourceBinding` (namespace-scoped) +- ❌ `ResourceBindingList` (namespace-scoped) +- ❌ `ResourceSnapshot` (namespace-scoped) +- ❌ `ResourceSnapshotList` (namespace-scoped) + +**Existing Types:** +- ✅ `ClusterResourceBinding` (cluster-scoped) +- ✅ 
`ClusterResourceBindingList` (cluster-scoped) +- ✅ `ClusterResourceSnapshot` (cluster-scoped) +- ✅ `ClusterResourceSnapshotList` (cluster-scoped) +- ✅ `ResourcePlacement` (namespace-scoped) +- ✅ `ClusterResourcePlacement` (cluster-scoped) + +This created an inconsistency where ResourcePlacement had both cluster and namespace-scoped variants, but the associated ResourceBinding and ResourceSnapshot types only had cluster-scoped variants. + +### After Implementation + +Now the v1beta1 placement API package has complete symmetry between cluster-scoped and namespace-scoped resources: + +**Cluster-Scoped Resources:** +- ✅ `ClusterResourcePlacement` +- ✅ `ClusterResourceBinding` +- ✅ `ClusterResourceSnapshot` + +**Namespace-Scoped Resources:** +- ✅ `ResourcePlacement` +- ✅ `ResourceBinding` ← **NEW** +- ✅ `ResourceSnapshot` ← **NEW** + +**Benefits:** +1. **API Consistency**: Complete symmetry between cluster and namespace-scoped placement resources +2. **Pattern Adherence**: Follows established kubebuilder annotation patterns +3. **Code Reuse**: Leverages existing spec/status type definitions +4. 
**Future Ready**: Enables namespace-scoped resource management workflows + +## References + +- `/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1beta1/binding_types.go` - Contains ClusterResourceBinding definition +- `/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1beta1/resourcesnapshot_types.go` - Contains ClusterResourceSnapshot definition +- `/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1beta1/clusterresourceplacement_types.go` - Contains both ClusterResourcePlacement and ResourcePlacement definitions (pattern to follow) +- `/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1/binding_types.go` - Reference implementation from v1 API +- `/home/zhangryan/github/kubefleet/kubefleet/apis/placement/v1/resourcesnapshot_types.go` - Reference implementation from v1 API + +## Task Checklist + +### Phase 1: Add namespace-scoped ResourceBinding type +- [x] Task 1.1: Add ResourceBinding type definition to `binding_types.go` +- [x] Task 1.2: Add ResourceBinding methods and registration + +### Phase 2: Add namespace-scoped ResourceSnapshot type +- [x] Task 2.1: Add ResourceSnapshot type definition to `resourcesnapshot_types.go` +- [x] Task 2.2: Add ResourceSnapshot methods and registration + +### Phase 3: Validate and test +- [x] Task 3.1: Check for compilation errors +- [x] Task 3.2: Verify CRD generation (if possible) + +## Success Criteria + +The implementation is complete when: +1. ✅ Namespace-scoped ResourceBinding type is properly defined with correct kubebuilder annotations +2. ✅ Namespace-scoped ResourceSnapshot type is properly defined with correct kubebuilder annotations +3. ✅ Both types follow the established v1beta1 API patterns +4. ✅ Code compiles without errors +5. 
✅ Types are properly registered in the scheme + +**🎉 ALL SUCCESS CRITERIA ACHIEVED - IMPLEMENTATION COMPLETE** diff --git a/apis/placement/v1beta1/binding_types.go b/apis/placement/v1beta1/binding_types.go index e501e7ac6..20c4b955c 100644 --- a/apis/placement/v1beta1/binding_types.go +++ b/apis/placement/v1beta1/binding_types.go @@ -211,6 +211,41 @@ type ClusterResourceBindingList struct { Items []ClusterResourceBinding `json:"items"` } +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,categories={fleet,fleet-placement},shortName=rb +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="WorkSynchronized")].status`,name="WorkSynchronized",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="ResourcesApplied",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Available")].status`,name="ResourceAvailable",priority=1,type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date + +// ResourceBinding represents a scheduling decision that binds a group of resources to a cluster. +// It MUST have a label named `CRPTrackingLabel` that points to the resource placement that creates it. +type ResourceBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ResourceBinding. + // +required + Spec ResourceBindingSpec `json:"spec"` + + // The observed status of ResourceBinding. + // +optional + Status ResourceBindingStatus `json:"status,omitempty"` +} + +// ResourceBindingList is a collection of ResourceBinding. 
+// +kubebuilder:resource:scope="Namespaced" +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ResourceBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + // items is the list of ResourceBindings. + Items []ResourceBinding `json:"items"` +} + // SetConditions set the given conditions on the ClusterResourceBinding. func (b *ClusterResourceBinding) SetConditions(conditions ...metav1.Condition) { for _, c := range conditions { @@ -228,6 +263,23 @@ func (b *ClusterResourceBinding) GetCondition(conditionType string) *metav1.Cond return meta.FindStatusCondition(b.Status.Conditions, conditionType) } +// SetConditions set the given conditions on the ResourceBinding. +func (b *ResourceBinding) SetConditions(conditions ...metav1.Condition) { + for _, c := range conditions { + meta.SetStatusCondition(&b.Status.Conditions, c) + } +} + +// RemoveCondition removes the condition of the given ResourceBinding. +func (b *ResourceBinding) RemoveCondition(conditionType string) { + meta.RemoveStatusCondition(&b.Status.Conditions, conditionType) +} + +// GetCondition returns the condition of the given ResourceBinding. +func (b *ResourceBinding) GetCondition(conditionType string) *metav1.Condition { + return meta.FindStatusCondition(b.Status.Conditions, conditionType) +} + func init() { - SchemeBuilder.Register(&ClusterResourceBinding{}, &ClusterResourceBindingList{}) + SchemeBuilder.Register(&ClusterResourceBinding{}, &ClusterResourceBindingList{}, &ResourceBinding{}, &ResourceBindingList{}) } diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 0a5d82bc1..27bb74138 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -68,21 +68,21 @@ type ClusterResourcePlacement struct { // The desired state of ClusterResourcePlacement. 
// +kubebuilder:validation:Required - Spec ClusterResourcePlacementSpec `json:"spec"` + Spec PlacementSpec `json:"spec"` // The observed status of ClusterResourcePlacement. // +kubebuilder:validation:Optional - Status ClusterResourcePlacementStatus `json:"status,omitempty"` + Status PlacementStatus `json:"status,omitempty"` } -// ClusterResourcePlacementSpec defines the desired state of ClusterResourcePlacement. -type ClusterResourcePlacementSpec struct { - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=100 +// PlacementSpec defines the desired state of ClusterResourcePlacement. +type PlacementSpec struct { // ResourceSelectors is an array of selectors used to select cluster scoped resources. The selectors are `ORed`. // You can have 1-100 selectors. // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 ResourceSelectors []ClusterResourceSelector `json:"resourceSelectors"` // Policy defines how to select member clusters to place the selected resources. @@ -105,8 +105,8 @@ type ClusterResourcePlacementSpec struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } +// TODO: rename this to ResourceSelectorTerm // ClusterResourceSelector is used to select cluster scoped resources as the target resources to be placed. -// If a namespace is selected, ALL the resources under the namespace are selected automatically. // All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. type ClusterResourceSelector struct { // Group name of the cluster-scoped resource. @@ -810,8 +810,8 @@ type RollingUpdateConfig struct { UnavailablePeriodSeconds *int `json:"unavailablePeriodSeconds,omitempty"` } -// ClusterResourcePlacementStatus defines the observed state of the ClusterResourcePlacement object. -type ClusterResourcePlacementStatus struct { +// PlacementStatus defines the observed state of the ClusterResourcePlacement object. 
+type PlacementStatus struct { // SelectedResources contains a list of resources selected by ResourceSelectors. // This field is only meaningful if the `ObservedResourceIndex` is not empty. // +kubebuilder:validation:Optional @@ -1295,6 +1295,67 @@ func (m *ClusterResourcePlacement) GetCondition(conditionType string) *metav1.Co return meta.FindStatusCondition(m.Status.Conditions, conditionType) } +const ( + // PlacementCleanupFinalizer is a finalizer added by the CRP controller to all CRPs, to make sure + // that the CRP controller can react to CRP deletions if necessary. + PlacementCleanupFinalizer = fleetPrefix + "rp-cleanup" +) + +// +genclient +// +genclient:Namespaced +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope="Namespaced",shortName=rp,categories={fleet,fleet-placement} +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:JSONPath=`.metadata.generation`,name="Gen",type=string +// +kubebuilder:printcolumn:JSONPath=`.spec.policy.placementType`,name="Type",priority=1,type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ResourcePlacementScheduled")].status`,name="Scheduled",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ResourcePlacementScheduled")].observedGeneration`,name="Scheduled-Gen",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ResourcePlacementWorkSynchronized")].status`,name="Work-Synchronized",priority=1,type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ResourcePlacementWorkSynchronized")].observedGeneration`,name="Work-Synchronized-Gen",priority=1,type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ResourcePlacementAvailable")].status`,name="Available",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ResourcePlacementAvailable")].observedGeneration`,name="Available-Gen",type=string +// 
+kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourcePlacement is used to select namespace scoped resources, including built-in resources and custom resources, +// and place them onto selected member clusters in a fleet. +// `SchedulingPolicySnapshot` and `ResourceSnapshot` objects are created in the same namespace when there are changes in the +// system to keep the history of the changes affecting a `ResourcePlacement`. We will also create `ResourceBinding` objects in the same namespace. +type ResourcePlacement struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ResourcePlacement. + // +kubebuilder:validation:Required + Spec PlacementSpec `json:"spec"` + + // The observed status of ResourcePlacement. + // +kubebuilder:validation:Optional + Status PlacementStatus `json:"status,omitempty"` +} + +// ResourcePlacementList contains a list of ResourcePlacement. +// +kubebuilder:resource:scope="Namespaced" +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ResourcePlacementList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourcePlacement `json:"items"` +} + +// SetConditions sets the conditions of the ResourcePlacement. +func (m *ResourcePlacement) SetConditions(conditions ...metav1.Condition) { + for _, c := range conditions { + meta.SetStatusCondition(&m.Status.Conditions, c) + } +} + +// GetCondition returns the condition of the ResourcePlacement object. 
+func (m *ResourcePlacement) GetCondition(conditionType string) *metav1.Condition { + return meta.FindStatusCondition(m.Status.Conditions, conditionType) +} + func init() { - SchemeBuilder.Register(&ClusterResourcePlacement{}, &ClusterResourcePlacementList{}) + SchemeBuilder.Register(&ClusterResourcePlacement{}, &ClusterResourcePlacementList{}, &ResourcePlacement{}, &ResourcePlacementList{}) } diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index f6205cec1..9c514b395 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -61,7 +61,7 @@ const ( // cluster. WorkFinalizer = fleetPrefix + "work-cleanup" - // CRPTrackingLabel points to the cluster resource placement that creates this resource binding. + // CRPTrackingLabel points to the placement that creates this resource binding. CRPTrackingLabel = fleetPrefix + "parent-CRP" // IsLatestSnapshotLabel indicates if the snapshot is the latest one. diff --git a/apis/placement/v1beta1/policysnapshot_types.go b/apis/placement/v1beta1/policysnapshot_types.go index 4340794ea..a20676ae4 100644 --- a/apis/placement/v1beta1/policysnapshot_types.go +++ b/apis/placement/v1beta1/policysnapshot_types.go @@ -180,6 +180,66 @@ func (m *ClusterSchedulingPolicySnapshot) GetCondition(conditionType string) *me return meta.FindStatusCondition(m.Status.Conditions, conditionType) } +// +genclient +// +genclient:Namespaced +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope="Namespaced",shortName=sps,categories={fleet,fleet-placement} +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:JSONPath=`.metadata.generation`,name="Gen",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SchedulingPolicySnapshot is used to store a snapshot of cluster placement policy. +// Its spec is immutable. 
+// The naming convention of a SchedulingPolicySnapshot is {RPName}-{PolicySnapshotIndex}. +// PolicySnapshotIndex will begin with 0. +// Each snapshot must have the following labels: +// - `CRPTrackingLabel` which points to its placement owner. +// - `PolicyIndexLabel` which is the index of the policy snapshot. +// - `IsLatestSnapshotLabel` which indicates whether the snapshot is the latest one. +type SchedulingPolicySnapshot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of SchedulingPolicySnapshot. + // +required + Spec SchedulingPolicySnapshotSpec `json:"spec"` + + // The observed status of SchedulingPolicySnapshot. + // +optional + Status SchedulingPolicySnapshotStatus `json:"status,omitempty"` +} + +// SchedulingPolicySnapshotList contains a list of SchedulingPolicySnapshot. +// +kubebuilder:resource:scope="Namespaced" +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type SchedulingPolicySnapshotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SchedulingPolicySnapshot `json:"items"` +} + +// Tolerations returns tolerations for SchedulingPolicySnapshot. +func (m *SchedulingPolicySnapshot) Tolerations() []Toleration { + if m.Spec.Policy != nil { + return m.Spec.Policy.Tolerations + } + return nil +} + +// SetConditions sets the given conditions on the SchedulingPolicySnapshot. +func (m *SchedulingPolicySnapshot) SetConditions(conditions ...metav1.Condition) { + for _, c := range conditions { + meta.SetStatusCondition(&m.Status.Conditions, c) + } +} + +// GetCondition returns the condition of the given type if it exists. 
+func (m *SchedulingPolicySnapshot) GetCondition(conditionType string) *metav1.Condition { + return meta.FindStatusCondition(m.Status.Conditions, conditionType) +} + func init() { - SchemeBuilder.Register(&ClusterSchedulingPolicySnapshot{}, &ClusterSchedulingPolicySnapshotList{}) + SchemeBuilder.Register(&ClusterSchedulingPolicySnapshot{}, &ClusterSchedulingPolicySnapshotList{}, &SchedulingPolicySnapshot{}, &SchedulingPolicySnapshotList{}) } diff --git a/apis/placement/v1beta1/resourcesnapshot_types.go b/apis/placement/v1beta1/resourcesnapshot_types.go index 267d219bc..c3d779860 100644 --- a/apis/placement/v1beta1/resourcesnapshot_types.go +++ b/apis/placement/v1beta1/resourcesnapshot_types.go @@ -123,6 +123,57 @@ type ClusterResourceSnapshotList struct { Items []ClusterResourceSnapshot `json:"items"` } +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope="Namespaced",shortName=rs,categories={fleet,fleet-placement} +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:JSONPath=`.metadata.generation`,name="Gen",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceSnapshot is used to store a snapshot of selected resources by a resource placement policy. +// Its spec is immutable. +// We may need to produce more than one resourceSnapshot for all the resources a ResourcePlacement selected to get around the 1MB size limit of k8s objects. +// We assign an ever-increasing index for each such group of resourceSnapshots. +// The naming convention of a resourceSnapshot is {RPName}-{resourceIndex}-{subindex} +// where the name of the first snapshot of a group has no subindex part so its name is {RPName}-{resourceIndex}-snapshot. +// resourceIndex will begin with 0. 
+// Each snapshot MUST have the following labels: +// - `CRPTrackingLabel` which points to its owner resource placement. +// - `ResourceIndexLabel` which is the index of the snapshot group. +// - `IsLatestSnapshotLabel` which indicates whether the snapshot is the latest one. +// +// All the snapshots within the same index group must have the same ResourceIndexLabel. +// +// The first snapshot of the index group MUST have the following annotations: +// - `NumberOfResourceSnapshotsAnnotation` to store the total number of resource snapshots in the index group. +// - `ResourceGroupHashAnnotation` whose value is the sha-256 hash of all the snapshots belong to the same snapshot index. +// +// Each snapshot (excluding the first snapshot) MUST have the following annotations: +// - `SubindexOfResourceSnapshotAnnotation` to store the subindex of resource snapshot in the group. +type ResourceSnapshot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ResourceSnapshot. + // +required + Spec ResourceSnapshotSpec `json:"spec"` + + // The observed status of ResourceSnapshot. + // +optional + Status ResourceSnapshotStatus `json:"status,omitempty"` +} + +// ResourceSnapshotList contains a list of ResourceSnapshot. +// +kubebuilder:resource:scope="Namespaced" +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ResourceSnapshotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceSnapshot `json:"items"` +} + // SetConditions sets the conditions for a ClusterResourceSnapshot. func (m *ClusterResourceSnapshot) SetConditions(conditions ...metav1.Condition) { for _, c := range conditions { @@ -135,6 +186,19 @@ func (m *ClusterResourceSnapshot) GetCondition(conditionType string) *metav1.Con return meta.FindStatusCondition(m.Status.Conditions, conditionType) } +// SetConditions sets the conditions for a ResourceSnapshot. 
+func (m *ResourceSnapshot) SetConditions(conditions ...metav1.Condition) { + for _, c := range conditions { + meta.SetStatusCondition(&m.Status.Conditions, c) + } +} + +// GetCondition gets the condition for a ResourceSnapshot. +func (m *ResourceSnapshot) GetCondition(conditionType string) *metav1.Condition { + return meta.FindStatusCondition(m.Status.Conditions, conditionType) +} + func init() { SchemeBuilder.Register(&ClusterResourceSnapshot{}, &ClusterResourceSnapshotList{}) + SchemeBuilder.Register(&ResourceSnapshot{}, &ResourceSnapshotList{}) } diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index c786e93a3..1324bfb87 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -49,7 +49,11 @@ func (in *Affinity) DeepCopy() *Affinity { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AfterStageTask) DeepCopyInto(out *AfterStageTask) { *out = *in - out.WaitTime = in.WaitTime + if in.WaitTime != nil { + in, out := &in.WaitTime, &out.WaitTime + *out = new(v1.Duration) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterStageTask. @@ -656,75 +660,6 @@ func (in *ClusterResourcePlacementList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterResourcePlacementSpec) DeepCopyInto(out *ClusterResourcePlacementSpec) { - *out = *in - if in.ResourceSelectors != nil { - in, out := &in.ResourceSelectors, &out.ResourceSelectors - *out = make([]ClusterResourceSelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Policy != nil { - in, out := &in.Policy, &out.Policy - *out = new(PlacementPolicy) - (*in).DeepCopyInto(*out) - } - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourcePlacementSpec. -func (in *ClusterResourcePlacementSpec) DeepCopy() *ClusterResourcePlacementSpec { - if in == nil { - return nil - } - out := new(ClusterResourcePlacementSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterResourcePlacementStatus) DeepCopyInto(out *ClusterResourcePlacementStatus) { - *out = *in - if in.SelectedResources != nil { - in, out := &in.SelectedResources, &out.SelectedResources - *out = make([]ResourceIdentifier, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PlacementStatuses != nil { - in, out := &in.PlacementStatuses, &out.PlacementStatuses - *out = make([]ResourcePlacementStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourcePlacementStatus. 
-func (in *ClusterResourcePlacementStatus) DeepCopy() *ClusterResourcePlacementStatus { - if in == nil { - return nil - } - out := new(ClusterResourcePlacementStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterResourceSelector) DeepCopyInto(out *ClusterResourceSelector) { *out = *in @@ -1404,6 +1339,75 @@ func (in *PlacementPolicy) DeepCopy() *PlacementPolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementSpec) DeepCopyInto(out *PlacementSpec) { + *out = *in + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ClusterResourceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(PlacementPolicy) + (*in).DeepCopyInto(*out) + } + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementSpec. +func (in *PlacementSpec) DeepCopy() *PlacementSpec { + if in == nil { + return nil + } + out := new(PlacementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementStatus) DeepCopyInto(out *PlacementStatus) { + *out = *in + if in.SelectedResources != nil { + in, out := &in.SelectedResources, &out.SelectedResources + *out = make([]ResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlacementStatuses != nil { + in, out := &in.PlacementStatuses, &out.PlacementStatuses + *out = make([]ResourcePlacementStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStatus. +func (in *PlacementStatus) DeepCopy() *PlacementStatus { + if in == nil { + return nil + } + out := new(PlacementStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PreferredClusterSelector) DeepCopyInto(out *PreferredClusterSelector) { *out = *in @@ -1477,6 +1481,65 @@ func (in *PropertySorter) DeepCopy() *PropertySorter { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBinding) DeepCopyInto(out *ResourceBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBinding. +func (in *ResourceBinding) DeepCopy() *ResourceBinding { + if in == nil { + return nil + } + out := new(ResourceBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBindingList) DeepCopyInto(out *ResourceBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBindingList. +func (in *ResourceBindingList) DeepCopy() *ResourceBindingList { + if in == nil { + return nil + } + out := new(ResourceBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) { *out = *in @@ -1651,6 +1714,65 @@ func (in *ResourceIdentifier) DeepCopy() *ResourceIdentifier { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePlacement) DeepCopyInto(out *ResourcePlacement) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePlacement. 
+func (in *ResourcePlacement) DeepCopy() *ResourcePlacement { + if in == nil { + return nil + } + out := new(ResourcePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePlacement) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePlacementList) DeepCopyInto(out *ResourcePlacementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourcePlacement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePlacementList. +func (in *ResourcePlacementList) DeepCopy() *ResourcePlacementList { + if in == nil { + return nil + } + out := new(ResourcePlacementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePlacementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourcePlacementStatus) DeepCopyInto(out *ResourcePlacementStatus) { *out = *in @@ -1704,6 +1826,65 @@ func (in *ResourcePlacementStatus) DeepCopy() *ResourcePlacementStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSnapshot) DeepCopyInto(out *ResourceSnapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSnapshot. +func (in *ResourceSnapshot) DeepCopy() *ResourceSnapshot { + if in == nil { + return nil + } + out := new(ResourceSnapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSnapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSnapshotList) DeepCopyInto(out *ResourceSnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSnapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSnapshotList. +func (in *ResourceSnapshotList) DeepCopy() *ResourceSnapshotList { + if in == nil { + return nil + } + out := new(ResourceSnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceSnapshotSpec) DeepCopyInto(out *ResourceSnapshotSpec) { *out = *in @@ -1803,6 +1984,65 @@ func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicySnapshot) DeepCopyInto(out *SchedulingPolicySnapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicySnapshot. +func (in *SchedulingPolicySnapshot) DeepCopy() *SchedulingPolicySnapshot { + if in == nil { + return nil + } + out := new(SchedulingPolicySnapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulingPolicySnapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingPolicySnapshotList) DeepCopyInto(out *SchedulingPolicySnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SchedulingPolicySnapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingPolicySnapshotList. 
+func (in *SchedulingPolicySnapshotList) DeepCopy() *SchedulingPolicySnapshotList { + if in == nil { + return nil + } + out := new(SchedulingPolicySnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulingPolicySnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchedulingPolicySnapshotSpec) DeepCopyInto(out *SchedulingPolicySnapshotSpec) { *out = *in @@ -1888,7 +2128,9 @@ func (in *StageConfig) DeepCopyInto(out *StageConfig) { if in.AfterStageTasks != nil { in, out := &in.AfterStageTasks, &out.AfterStageTasks *out = make([]AfterStageTask, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml index 812828120..96593c0b3 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverrides.yaml @@ -54,7 +54,6 @@ spec: items: description: |- ClusterResourceSelector is used to select cluster scoped resources as the target resources to be placed. - If a namespace is selected, ALL the resources under the namespace are selected automatically. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. 
properties: group: diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml index 4b881be50..77d3e4d50 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceoverridesnapshots.yaml @@ -68,7 +68,6 @@ spec: items: description: |- ClusterResourceSelector is used to select cluster scoped resources as the target resources to be placed. - If a namespace is selected, ALL the resources under the namespace are selected automatically. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. properties: group: diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml index 360c0528f..608ec863b 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml @@ -1590,7 +1590,6 @@ spec: items: description: |- ClusterResourceSelector is used to select cluster scoped resources as the target resources to be placed. - If a namespace is selected, ALL the resources under the namespace are selected automatically. All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. 
properties: group: diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourcebindings.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourcebindings.yaml new file mode 100644 index 000000000..e9bf763de --- /dev/null +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourcebindings.yaml @@ -0,0 +1,799 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: resourcebindings.placement.kubernetes-fleet.io +spec: + group: placement.kubernetes-fleet.io + names: + categories: + - fleet + - fleet-placement + kind: ResourceBinding + listKind: ResourceBindingList + plural: resourcebindings + shortNames: + - rb + singular: resourcebinding + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="WorkSynchronized")].status + name: WorkSynchronized + type: string + - jsonPath: .status.conditions[?(@.type=="Applied")].status + name: ResourcesApplied + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: ResourceAvailable + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + ResourceBinding represents a scheduling decision that binds a group of resources to a cluster. + It MUST have a label named `CRPTrackingLabel` that points to the resource placement that creates it. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ResourceBinding. + properties: + applyStrategy: + description: |- + ApplyStrategy describes how to resolve the conflict if the resource to be placed already exists in the target cluster + and is owned by other appliers. + properties: + allowCoOwnership: + description: |- + AllowCoOwnership controls whether co-ownership between Fleet and other agents are allowed + on a Fleet-managed resource. If set to false, Fleet will refuse to apply manifests to + a resource that has been owned by one or more non-Fleet agents. + + Note that Fleet does not support the case where one resource is being placed multiple + times by different CRPs on the same member cluster. An apply error will be returned if + Fleet finds that a resource has been owned by another placement attempt by Fleet, even + with the AllowCoOwnership setting set to true. + type: boolean + comparisonOption: + default: PartialComparison + description: |- + ComparisonOption controls how Fleet compares the desired state of a resource, as kept in + a hub cluster manifest, with the current state of the resource (if applicable) in the + member cluster. + + Available options are: + + * PartialComparison: with this option, Fleet will compare only fields that are managed by + Fleet, i.e., the fields that are specified explicitly in the hub cluster manifest. + Unmanaged fields are ignored. This is the default option. + + * FullComparison: with this option, Fleet will compare all fields of the resource, + even if the fields are absent from the hub cluster manifest. 
+ + Consider using the PartialComparison option if you would like to: + + * use the default values for certain fields; or + * let another agent, e.g., HPAs, VPAs, etc., on the member cluster side manage some fields; or + * allow ad-hoc or cluster-specific settings on the member cluster side. + + To use the FullComparison option, it is recommended that you: + + * specify all fields as appropriate in the hub cluster, even if you are OK with using default + values; + * make sure that no fields are managed by agents other than Fleet on the member cluster + side, such as HPAs, VPAs, or other controllers. + + See the Fleet documentation for further explanations and usage examples. + enum: + - PartialComparison + - FullComparison + type: string + serverSideApplyConfig: + description: ServerSideApplyConfig defines the configuration for + server side apply. It is honored only when type is ServerSideApply. + properties: + force: + description: |- + Force represents to force apply to succeed when resolving the conflicts + For any conflicting fields, + - If true, use the values from the resource to be applied to overwrite the values of the existing resource in the + target cluster, as well as take over ownership of such fields. + - If false, apply will fail with the reason ApplyConflictWithOtherApplier. + + For non-conflicting fields, values stay unchanged and ownership are shared between appliers. + type: boolean + type: object + type: + default: ClientSideApply + description: |- + Type is the apply strategy to use; it determines how Fleet applies manifests from the + hub cluster to a member cluster. + + Available options are: + + * ClientSideApply: Fleet uses three-way merge to apply manifests, similar to how kubectl + performs a client-side apply. This is the default option. + + Note that this strategy requires that Fleet keep the last applied configuration in the + annotation of an applied resource. 
If the object gets so large that apply ops can no longer + be executed, Fleet will switch to server-side apply. + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ServerSideApply: Fleet uses server-side apply to apply manifests; Fleet itself will + become the field manager for specified fields in the manifests. Specify + ServerSideApplyConfig as appropriate if you would like Fleet to take over field + ownership upon conflicts. This is the recommended option for most scenarios; it might + help reduce object size and safely resolve conflicts between field values. For more + information, please refer to the Kubernetes documentation + (https://kubernetes.io/docs/reference/using-api/server-side-apply/#comparison-with-client-side-apply). + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster + with its current state (if applicable) on the member cluster side, and report any + differences. No actual apply ops would be executed, and resources will be left alone as they + are on the member clusters. + + If configuration differences are found on a resource, Fleet will consider this as an apply + error, which might block rollout depending on the specified rollout strategy. + + Use ComparisonOption setting to control how the difference is calculated. + + ClientSideApply and ServerSideApply apply strategies only work when Fleet can assume + ownership of a resource (e.g., the resource is created by Fleet, or Fleet has taken over + the resource). See the comments on the WhenToTakeOver field for more information. + ReportDiff apply strategy, however, will function regardless of Fleet's ownership + status. 
One may set up a CRP with the ReportDiff strategy and the Never takeover option, + and this will turn Fleet into a detection tool that reports only configuration differences + but does not touch any resources on the member cluster side. + + For a comparison between the different strategies and usage examples, refer to the + Fleet documentation. + enum: + - ClientSideApply + - ServerSideApply + - ReportDiff + type: string + whenToApply: + default: Always + description: |- + WhenToApply controls when Fleet would apply the manifests on the hub cluster to the member + clusters. + + Available options are: + + * Always: with this option, Fleet will periodically apply hub cluster manifests + on the member cluster side; this will effectively overwrite any change in the fields + managed by Fleet (i.e., specified in the hub cluster manifest). This is the default + option. + + Note that this option would revert any ad-hoc changes made on the member cluster side in + the managed fields; if you would like to make temporary edits on the member cluster side + in the managed fields, switch to IfNotDrifted option. Note that changes in unmanaged + fields will be left alone; if you use the FullDiff compare option, such changes will + be reported as drifts. + + * IfNotDrifted: with this option, Fleet will stop applying hub cluster manifests on + clusters that have drifted from the desired state; apply ops would still continue on + the rest of the clusters. Drifts are calculated using the ComparisonOption, + as explained in the corresponding field. + + Use this option if you would like Fleet to detect drifts in your multi-cluster setup. + A drift occurs when an agent makes an ad-hoc change on the member cluster side that + makes affected resources deviate from its desired state as kept in the hub cluster; + and this option grants you an opportunity to view the drift details and take actions + accordingly. The drift details will be reported in the CRP status.
+ + To fix a drift, you may: + + * revert the changes manually on the member cluster side + * update the hub cluster manifest; this will trigger Fleet to apply the latest revision + of the manifests, which will overwrite the drifted fields + (if they are managed by Fleet) + * switch to the Always option; this will trigger Fleet to apply the current revision + of the manifests, which will overwrite the drifted fields (if they are managed by Fleet). + * if applicable and necessary, delete the drifted resources on the member cluster side; Fleet + will attempt to re-create them using the hub cluster manifests + enum: + - Always + - IfNotDrifted + type: string + whenToTakeOver: + default: Always + description: |- + WhenToTakeOver determines the action to take when Fleet applies resources to a member + cluster for the first time and finds out that the resource already exists in the cluster. + + This setting is most relevant in cases where you would like Fleet to manage pre-existing + resources on a member cluster. + + Available options include: + + * Always: with this action, Fleet will apply the hub cluster manifests to the member + clusters even if the affected resources already exist. This is the default action. + + Note that this might lead to fields being overwritten on the member clusters, if they + are specified in the hub cluster manifests. + + * IfNoDiff: with this action, Fleet will apply the hub cluster manifests to the member + clusters if (and only if) pre-existing resources look the same as the hub cluster manifests. + + This is a safer option as pre-existing resources that are inconsistent with the hub cluster + manifests will not be overwritten; Fleet will ignore them until the inconsistencies + are resolved properly: any change you make to the hub cluster manifests would not be + applied, and if you delete the manifests or even the ClusterResourcePlacement itself + from the hub cluster, these pre-existing resources would not be taken away. 
+ + Fleet will check for inconsistencies in accordance with the ComparisonOption setting. See also + the comments on the ComparisonOption field for more information. + + If a diff has been found in a field that is **managed** by Fleet (i.e., the field + **is specified** in the hub cluster manifest), consider one of the following actions: + * set the field in the member cluster to be of the same value as that in the hub cluster + manifest. + * update the hub cluster manifest so that its field value matches with that in the member + cluster. + * switch to the Always action, which will allow Fleet to overwrite the field with the + value in the hub cluster manifest. + + If a diff has been found in a field that is **not managed** by Fleet (i.e., the field + **is not specified** in the hub cluster manifest), consider one of the following actions: + * remove the field from the member cluster. + * update the hub cluster manifest so that the field is included in the hub cluster manifest. + + If appropriate, you may also delete the object from the member cluster; Fleet will recreate + it using the hub cluster manifest. + + * Never: with this action, Fleet will not apply a hub cluster manifest to the member + clusters if there is a corresponding pre-existing resource. However, if a manifest + has never been applied yet; or it has a corresponding resource which Fleet has assumed + ownership, apply op will still be executed. + + This is the safest option; one will have to remove the pre-existing resources (so that + Fleet can re-create them) or switch to a different + WhenToTakeOver option before Fleet starts processing the corresponding hub cluster + manifests. + + If you prefer Fleet stop processing all manifests, use this option along with the + ReportDiff apply strategy type. This setup would instruct Fleet to touch nothing + on the member cluster side but still report configuration differences between the + hub cluster and member clusters.
Fleet will not give up ownership + that it has already assumed though. + enum: + - Always + - IfNoDiff + - Never + type: string + type: object + clusterDecision: + description: ClusterDecision explains why the scheduler selected this + cluster. + properties: + clusterName: + description: |- + ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all + placement decisions for the Placement. + type: string + clusterScore: + description: ClusterScore represents the score of the cluster + calculated by the scheduler. + properties: + affinityScore: + description: |- + AffinityScore represents the affinity score of the cluster calculated by the last + scheduling decision based on the preferred affinity selector. + An affinity score may not present if the cluster does not meet the required affinity. + format: int32 + type: integer + priorityScore: + description: |- + TopologySpreadScore represents the priority score of the cluster calculated by the last + scheduling decision based on the topology spread applied to the cluster. + A priority score may not present if the cluster does not meet the topology spread. + format: int32 + type: integer + type: object + reason: + description: Reason represents the reason why the cluster is selected + or not. + type: string + selected: + description: Selected indicates if this cluster is selected by + the scheduler. + type: boolean + required: + - clusterName + - reason + - selected + type: object + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names associated with the + selected resources. + items: + type: string + type: array + resourceOverrideSnapshots: + description: ResourceOverrideSnapshots is a list of ResourceOverride + snapshots associated with the selected resources. + items: + description: NamespacedName comprises a resource name, with a mandatory + namespace. 
+ properties: + name: + description: Name is the name of the namespaced scope resource. + type: string + namespace: + description: Namespace is the namespace of the namespaced scope + resource. + type: string + required: + - name + - namespace + type: object + type: array + resourceSnapshotName: + description: |- + ResourceSnapshotName is the name of the resource snapshot that this resource binding points to. + If the resources are divided into multiple snapshots because of the resource size limit, + it points to the name of the leading snapshot of the index group. + type: string + schedulingPolicySnapshotName: + description: |- + SchedulingPolicySnapshotName is the name of the scheduling policy snapshot that this resource binding + points to; more specifically, the scheduler creates this binding in accordance with this + scheduling policy snapshot. + type: string + state: + description: 'The desired state of the binding. Possible values: Scheduled, + Bound, Unscheduled.' + type: string + targetCluster: + description: TargetCluster is the name of the cluster that the scheduler + assigns the resources to. + type: string + required: + - clusterDecision + - resourceSnapshotName + - schedulingPolicySnapshotName + - state + - targetCluster + type: object + status: + description: The observed status of ResourceBinding. + properties: + conditions: + description: Conditions is an array of current observed conditions + for ResourceBinding. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition.
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + diffedPlacements: + description: |- + DiffedPlacements is a list of resources that have configuration differences from their + corresponding hub cluster manifests. 
Fleet will report such differences when: + + * The CRP uses the ReportDiff apply strategy, which instructs Fleet to compare the hub + cluster manifests against the live resources without actually performing any apply op; or + * Fleet finds a pre-existing resource on the member cluster side that does not match its + hub cluster counterpart, and the CRP has been configured to only take over a resource if + no configuration differences are found. + + To control the object size, only the first 100 diffed resources will be included. + This field is only meaningful if the `ClusterName` is not empty. + items: + description: DiffedResourcePlacement contains the details of a resource + with configuration differences. + properties: + envelope: + description: Envelope identifies the envelope object that contains + this resource. + properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster scoped. + type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + firstDiffedObservedTime: + description: |- + FirstDiffedObservedTime is the first time the resource on the target cluster is + observed to have configuration differences. + format: date-time + type: string + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected resources. + type: string + name: + description: Name of the target resource. + type: string + namespace: + description: Namespace is the namespace of the resource. Empty + if the resource is cluster scoped. + type: string + observationTime: + description: ObservationTime is the time when we observe the + configuration differences for the resource. 
+ format: date-time + type: string + observedDiffs: + description: |- + ObservedDiffs are the details about the found configuration differences. Note that + Fleet might truncate the details as appropriate to control the object size. + + Each detail entry specifies how the live state (the state on the member + cluster side) compares against the desired state (the state kept in the hub cluster manifest). + + An event about the details will be emitted as well. + items: + description: |- + PatchDetail describes a patch that explains an observed configuration drift or + difference. + + A patch detail can be transcribed as a JSON patch operation, as specified in RFC 6902. + properties: + path: + description: The JSON path that points to a field that + has drifted or has configuration differences. + type: string + valueInHub: + description: |- + The value at the JSON path from the hub cluster side. + + This field can be empty if the JSON path does not exist on the hub cluster side; i.e., + applying the manifest from the hub cluster side would remove the field. + type: string + valueInMember: + description: |- + The value at the JSON path from the member cluster side. + + This field can be empty if the JSON path does not exist on the member cluster side; i.e., + applying the manifest from the hub cluster side would add a new field. + type: string + required: + - path + type: object + type: array + targetClusterObservedGeneration: + description: |- + TargetClusterObservedGeneration is the generation of the resource on the target cluster + that contains the configuration differences. + + This might be nil if the resource has not been created yet on the target cluster. + format: int64 + type: integer + version: + description: Version is the version of the selected resource. 
+ type: string + required: + - firstDiffedObservedTime + - kind + - name + - observationTime + - version + type: object + maxItems: 100 + type: array + driftedPlacements: + description: |- + DriftedPlacements is a list of resources that have drifted from their desired states + kept in the hub cluster, as found by Fleet using the drift detection mechanism. + + To control the object size, only the first 100 drifted resources will be included. + This field is only meaningful if the `ClusterName` is not empty. + items: + description: DriftedResourcePlacement contains the details of a + resource with configuration drifts. + properties: + envelope: + description: Envelope identifies the envelope object that contains + this resource. + properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster scoped. + type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + firstDriftedObservedTime: + description: |- + FirstDriftedObservedTime is the first time the resource on the target cluster is + observed to have configuration drifts. + format: date-time + type: string + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected resources. + type: string + name: + description: Name of the target resource. + type: string + namespace: + description: Namespace is the namespace of the resource. Empty + if the resource is cluster scoped. + type: string + observationTime: + description: ObservationTime is the time when we observe the + configuration drifts for the resource. 
+ format: date-time + type: string + observedDrifts: + description: |- + ObservedDrifts are the details about the found configuration drifts. Note that + Fleet might truncate the details as appropriate to control the object size. + + Each detail entry specifies how the live state (the state on the member + cluster side) compares against the desired state (the state kept in the hub cluster manifest). + + An event about the details will be emitted as well. + items: + description: |- + PatchDetail describes a patch that explains an observed configuration drift or + difference. + + A patch detail can be transcribed as a JSON patch operation, as specified in RFC 6902. + properties: + path: + description: The JSON path that points to a field that + has drifted or has configuration differences. + type: string + valueInHub: + description: |- + The value at the JSON path from the hub cluster side. + + This field can be empty if the JSON path does not exist on the hub cluster side; i.e., + applying the manifest from the hub cluster side would remove the field. + type: string + valueInMember: + description: |- + The value at the JSON path from the member cluster side. + + This field can be empty if the JSON path does not exist on the member cluster side; i.e., + applying the manifest from the hub cluster side would add a new field. + type: string + required: + - path + type: object + type: array + targetClusterObservedGeneration: + description: |- + TargetClusterObservedGeneration is the generation of the resource on the target cluster + that contains the configuration drifts. + format: int64 + type: integer + version: + description: Version is the version of the selected resource. 
+ type: string + required: + - firstDriftedObservedTime + - kind + - name + - observationTime + - targetClusterObservedGeneration + - version + type: object + maxItems: 100 + type: array + failedPlacements: + description: |- + FailedPlacements is a list of all the resources failed to be placed to the given cluster or the resource is unavailable. + Note that we only include 100 failed resource placements even if there are more than 100. + items: + description: FailedResourcePlacement contains the failure details + of a failed resource placement. + properties: + condition: + description: The failed condition status. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + envelope: + description: Envelope identifies the envelope object that contains + this resource. + properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster scoped. + type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected resources. + type: string + name: + description: Name of the target resource. + type: string + namespace: + description: Namespace is the namespace of the resource. Empty + if the resource is cluster scoped. + type: string + version: + description: Version is the version of the selected resource. 
+ type: string + required: + - condition + - kind + - name + - version + type: object + maxItems: 100 + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml new file mode 100644 index 000000000..5bf28370e --- /dev/null +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourceplacements.yaml @@ -0,0 +1,1515 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: resourceplacements.placement.kubernetes-fleet.io +spec: + group: placement.kubernetes-fleet.io + names: + categories: + - fleet + - fleet-placement + kind: ResourcePlacement + listKind: ResourcePlacementList + plural: resourceplacements + shortNames: + - rp + singular: resourceplacement + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.generation + name: Gen + type: string + - jsonPath: .spec.policy.placementType + name: Type + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ResourcePlacementScheduled")].status + name: Scheduled + type: string + - jsonPath: .status.conditions[?(@.type=="ResourcePlacementScheduled")].observedGeneration + name: Scheduled-Gen + type: string + - jsonPath: .status.conditions[?(@.type=="ResourcePlacementWorkSynchronized")].status + name: Work-Synchronized + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ResourcePlacementWorkSynchronized")].observedGeneration + name: Work-Synchronized-Gen + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ResourcePlacementAvailable")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="ResourcePlacementAvailable")].observedGeneration + name: Available-Gen + type: string + - jsonPath: 
.metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          ResourcePlacement is used to select namespace scoped resources, including built-in resources and custom resources,
+          and place them onto selected member clusters in a fleet.
+          `SchedulingPolicySnapshot` and `ResourceSnapshot` objects are created in the same namespace when there are changes in the
+          system to keep the history of the changes affecting a `ResourcePlacement`. We will also create `ResourceBinding` objects in the same namespace.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: The desired state of ResourcePlacement.
+            properties:
+              policy:
+                description: |-
+                  Policy defines how to select member clusters to place the selected resources.
+                  If unspecified, all the joined member clusters are selected.
+                properties:
+                  affinity:
+                    description: |-
+                      Affinity contains cluster affinity scheduling rules. Defines which member clusters to place the selected resources.
+                      Only valid if the placement type is "PickAll" or "PickN".
+                    properties:
+                      clusterAffinity:
+                        description: ClusterAffinity contains cluster affinity scheduling
+                          rules for the selected resources.
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler computes a score for each cluster at schedule time by iterating + through the elements of this field and adding "weight" to the sum if the cluster + matches the corresponding matchExpression. The scheduler then chooses the first + `N` clusters with the highest sum to satisfy the placement. + This field is ignored if the placement type is "PickAll". + If the cluster score changes at some point after the placement (e.g. due to an update), + the system may or may not try to eventually move the resource from a cluster with a lower score + to a cluster with higher score. + items: + properties: + preference: + description: A cluster selector term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching + the query are selected. + + If you specify both label and property selectors in the same term, the results are AND'd. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + propertySelector: + description: |- + PropertySelector is a property query over all joined member clusters. Clusters matching + the query are selected. + + If you specify both label and property selectors in the same term, the results are AND'd. + + At this moment, PropertySelector can only be used with + `RequiredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + matchExpressions: + description: MatchExpressions is an array + of PropertySelectorRequirements. The requirements + are AND'd. + items: + description: |- + PropertySelectorRequirement is a specific property requirement when picking clusters for + resource placement. + properties: + name: + description: Name is the name of the + property; it should be a Kubernetes + label name. + type: string + operator: + description: |- + Operator specifies the relationship between a cluster's observed value of the specified + property and the values given in the requirement. + type: string + values: + description: |- + Values are a list of values of the specified property which Fleet will compare against + the observed values of individual member clusters in accordance with the given + operator. + + At this moment, each value should be a Kubernetes quantity. 
For more information, see + https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity. + + If the operator is Gt (greater than), Ge (greater than or equal to), Lt (less than), + or `Le` (less than or equal to), Eq (equal to), or Ne (ne), exactly one value must be + specified in the list. + items: + type: string + maxItems: 1 + type: array + required: + - name + - operator + - values + type: object + type: array + required: + - matchExpressions + type: object + propertySorter: + description: |- + PropertySorter sorts all matching clusters by a specific property and assigns different weights + to each cluster based on their observed property values. + + At this moment, PropertySorter can only be used with + `PreferredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + name: + description: Name is the name of the property + which Fleet sorts clusters by. + type: string + sortOrder: + description: |- + SortOrder explains how Fleet should perform the sort; specifically, whether Fleet should + sort in ascending or descending order. + type: string + required: + - name + - sortOrder + type: object + type: object + weight: + description: Weight associated with matching the + corresponding clusterSelectorTerm, in the range + [-100, 100]. + format: int32 + maximum: 100 + minimum: -100 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the resource will not be scheduled onto the cluster. + If the affinity requirements specified by this field cease to be met + at some point after the placement (e.g. due to an update), the system + may or may not try to eventually remove the resource from the cluster. 
+ properties: + clusterSelectorTerms: + description: ClusterSelectorTerms is a list of cluster + selector terms. The terms are `ORed`. + items: + properties: + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching + the query are selected. + + If you specify both label and property selectors in the same term, the results are AND'd. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + propertySelector: + description: |- + PropertySelector is a property query over all joined member clusters. Clusters matching + the query are selected. 
+ + If you specify both label and property selectors in the same term, the results are AND'd. + + At this moment, PropertySelector can only be used with + `RequiredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + matchExpressions: + description: MatchExpressions is an array + of PropertySelectorRequirements. The requirements + are AND'd. + items: + description: |- + PropertySelectorRequirement is a specific property requirement when picking clusters for + resource placement. + properties: + name: + description: Name is the name of the + property; it should be a Kubernetes + label name. + type: string + operator: + description: |- + Operator specifies the relationship between a cluster's observed value of the specified + property and the values given in the requirement. + type: string + values: + description: |- + Values are a list of values of the specified property which Fleet will compare against + the observed values of individual member clusters in accordance with the given + operator. + + At this moment, each value should be a Kubernetes quantity. For more information, see + https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity. + + If the operator is Gt (greater than), Ge (greater than or equal to), Lt (less than), + or `Le` (less than or equal to), Eq (equal to), or Ne (ne), exactly one value must be + specified in the list. + items: + type: string + maxItems: 1 + type: array + required: + - name + - operator + - values + type: object + type: array + required: + - matchExpressions + type: object + propertySorter: + description: |- + PropertySorter sorts all matching clusters by a specific property and assigns different weights + to each cluster based on their observed property values. 
+ + At this moment, PropertySorter can only be used with + `PreferredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + name: + description: Name is the name of the property + which Fleet sorts clusters by. + type: string + sortOrder: + description: |- + SortOrder explains how Fleet should perform the sort; specifically, whether Fleet should + sort in ascending or descending order. + type: string + required: + - name + - sortOrder + type: object + type: object + maxItems: 10 + type: array + required: + - clusterSelectorTerms + type: object + type: object + type: object + clusterNames: + description: |- + ClusterNames contains a list of names of MemberCluster to place the selected resources. + Only valid if the placement type is "PickFixed" + items: + type: string + maxItems: 100 + type: array + numberOfClusters: + description: NumberOfClusters of placement. Only valid if the + placement type is "PickN". + format: int32 + minimum: 0 + type: integer + placementType: + default: PickAll + description: Type of placement. Can be "PickAll", "PickN" or "PickFixed". + Default is PickAll. + enum: + - PickAll + - PickN + - PickFixed + type: string + tolerations: + description: |- + If specified, the ClusterResourcePlacement's Tolerations. + Tolerations cannot be updated or deleted. + + This field is beta-level and is for the taints and tolerations feature. + items: + description: |- + Toleration allows ClusterResourcePlacement to tolerate any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, only allowed value is NoSchedule. + enum: + - NoSchedule + type: string + key: + description: |- + Key is the taint key that the toleration applies to. 
Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + default: Equal + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a + ClusterResourcePlacement can tolerate all taints of a particular category. + enum: + - Equal + - Exists + type: string + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + maxItems: 100 + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of resources ought to spread across multiple topology + domains. Scheduler will schedule resources in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + Only valid if the placement type is "PickN". + items: + description: TopologySpreadConstraint specifies how to spread + resources among the given cluster topology. + properties: + maxSkew: + default: 1 + description: |- + MaxSkew describes the degree to which resources may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of resource copies in the target topology and the global minimum. + The global minimum is the minimum number of resource copies in a domain. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's an optional field. Default value is 1 and 0 is not allowed. + format: int32 + minimum: 1 + type: integer + topologyKey: + description: |- + TopologyKey is the key of cluster labels. Clusters that have a label with this key + and identical values are considered to be in the same topology. 
+ We consider each as a "bucket", and try to put balanced number + of replicas of the resource into each bucket honor the `MaxSkew` value. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with the resource if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the resource in any cluster, + but giving higher precedence to topologies that would help reduce the skew. + It's an optional field. + type: string + required: + - topologyKey + type: object + type: array + type: object + resourceSelectors: + description: |- + ResourceSelectors is an array of selectors used to select cluster scoped resources. The selectors are `ORed`. + You can have 1-100 selectors. + items: + description: |- + ClusterResourceSelector is used to select cluster scoped resources as the target resources to be placed. + All the fields are `ANDed`. In other words, a resource must match all the fields to be selected. + properties: + group: + description: |- + Group name of the cluster-scoped resource. + Use an empty string to select resources under the core API group (e.g., namespaces). + type: string + kind: + description: |- + Kind of the cluster-scoped resource. + Note: When `Kind` is `namespace`, ALL the resources under the selected namespaces are selected. + type: string + labelSelector: + description: |- + A label query over all the cluster-scoped resources. Resources matching the query are selected. + Note that namespace-scoped resources can't be selected even if they match the query. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Name of the cluster-scoped resource. + type: string + version: + description: Version of the cluster-scoped resource. + type: string + required: + - group + - kind + - version + type: object + maxItems: 100 + minItems: 1 + type: array + revisionHistoryLimit: + default: 10 + description: |- + The number of old ClusterSchedulingPolicySnapshot or ClusterResourceSnapshot resources to retain to allow rollback. + This is a pointer to distinguish between explicit zero and not specified. + Defaults to 10. + format: int32 + maximum: 1000 + minimum: 1 + type: integer + strategy: + description: The rollout strategy to use to replace existing placement + with new ones. + properties: + applyStrategy: + description: ApplyStrategy describes when and how to apply the + selected resources to the target cluster. 
+ properties: + allowCoOwnership: + description: |- + AllowCoOwnership controls whether co-ownership between Fleet and other agents are allowed + on a Fleet-managed resource. If set to false, Fleet will refuse to apply manifests to + a resource that has been owned by one or more non-Fleet agents. + + Note that Fleet does not support the case where one resource is being placed multiple + times by different CRPs on the same member cluster. An apply error will be returned if + Fleet finds that a resource has been owned by another placement attempt by Fleet, even + with the AllowCoOwnership setting set to true. + type: boolean + comparisonOption: + default: PartialComparison + description: |- + ComparisonOption controls how Fleet compares the desired state of a resource, as kept in + a hub cluster manifest, with the current state of the resource (if applicable) in the + member cluster. + + Available options are: + + * PartialComparison: with this option, Fleet will compare only fields that are managed by + Fleet, i.e., the fields that are specified explicitly in the hub cluster manifest. + Unmanaged fields are ignored. This is the default option. + + * FullComparison: with this option, Fleet will compare all fields of the resource, + even if the fields are absent from the hub cluster manifest. + + Consider using the PartialComparison option if you would like to: + + * use the default values for certain fields; or + * let another agent, e.g., HPAs, VPAs, etc., on the member cluster side manage some fields; or + * allow ad-hoc or cluster-specific settings on the member cluster side. + + To use the FullComparison option, it is recommended that you: + + * specify all fields as appropriate in the hub cluster, even if you are OK with using default + values; + * make sure that no fields are managed by agents other than Fleet on the member cluster + side, such as HPAs, VPAs, or other controllers. + + See the Fleet documentation for further explanations and usage examples. 
+ enum: + - PartialComparison + - FullComparison + type: string + serverSideApplyConfig: + description: ServerSideApplyConfig defines the configuration + for server side apply. It is honored only when type is ServerSideApply. + properties: + force: + description: |- + Force represents to force apply to succeed when resolving the conflicts + For any conflicting fields, + - If true, use the values from the resource to be applied to overwrite the values of the existing resource in the + target cluster, as well as take over ownership of such fields. + - If false, apply will fail with the reason ApplyConflictWithOtherApplier. + + For non-conflicting fields, values stay unchanged and ownership are shared between appliers. + type: boolean + type: object + type: + default: ClientSideApply + description: |- + Type is the apply strategy to use; it determines how Fleet applies manifests from the + hub cluster to a member cluster. + + Available options are: + + * ClientSideApply: Fleet uses three-way merge to apply manifests, similar to how kubectl + performs a client-side apply. This is the default option. + + Note that this strategy requires that Fleet keep the last applied configuration in the + annotation of an applied resource. If the object gets so large that apply ops can no longer + be executed, Fleet will switch to server-side apply. + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ServerSideApply: Fleet uses server-side apply to apply manifests; Fleet itself will + become the field manager for specified fields in the manifests. Specify + ServerSideApplyConfig as appropriate if you would like Fleet to take over field + ownership upon conflicts. This is the recommended option for most scenarios; it might + help reduce object size and safely resolve conflicts between field values. 
+ For more + information, please refer to the Kubernetes documentation + (https://kubernetes.io/docs/reference/using-api/server-side-apply/#comparison-with-client-side-apply). + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster + with its current state (if applicable) on the member cluster side, and report any + differences. No actual apply ops would be executed, and resources will be left alone as they + are on the member clusters. + + If configuration differences are found on a resource, Fleet will consider this as an apply + error, which might block rollout depending on the specified rollout strategy. + + Use ComparisonOption setting to control how the difference is calculated. + + ClientSideApply and ServerSideApply apply strategies only work when Fleet can assume + ownership of a resource (e.g., the resource is created by Fleet, or Fleet has taken over + the resource). See the comments on the WhenToTakeOver field for more information. + ReportDiff apply strategy, however, will function regardless of Fleet's ownership + status. One may set up a CRP with the ReportDiff strategy and the Never takeover option, + and this will turn Fleet into a detection tool that reports only configuration differences + but does not touch any resources on the member cluster side. + + For a comparison between the different strategies and usage examples, refer to the + Fleet documentation. + enum: + - ClientSideApply + - ServerSideApply + - ReportDiff + type: string + whenToApply: + default: Always + description: |- + WhenToApply controls when Fleet would apply the manifests on the hub cluster to the member + clusters. 
+ + Available options are: + + * Always: with this option, Fleet will periodically apply hub cluster manifests + on the member cluster side; this will effectively overwrite any change in the fields + managed by Fleet (i.e., specified in the hub cluster manifest). This is the default + option. + + Note that this option would revert any ad-hoc changes made on the member cluster side in + the managed fields; if you would like to make temporary edits on the member cluster side + in the managed fields, switch to IfNotDrifted option. Note that changes in unmanaged + fields will be left alone; if you use the FullDiff compare option, such changes will + be reported as drifts. + + * IfNotDrifted: with this option, Fleet will stop applying hub cluster manifests on + clusters that have drifted from the desired state; apply ops would still continue on + the rest of the clusters. Drifts are calculated using the ComparisonOption, + as explained in the corresponding field. + + Use this option if you would like Fleet to detect drifts in your multi-cluster setup. + A drift occurs when an agent makes an ad-hoc change on the member cluster side that + makes affected resources deviate from its desired state as kept in the hub cluster; + and this option grants you an opportunity to view the drift details and take actions + accordingly. The drift details will be reported in the CRP status. + + To fix a drift, you may: + + * revert the changes manually on the member cluster side + * update the hub cluster manifest; this will trigger Fleet to apply the latest revision + of the manifests, which will overwrite the drifted fields + (if they are managed by Fleet) + * switch to the Always option; this will trigger Fleet to apply the current revision + of the manifests, which will overwrite the drifted fields (if they are managed by Fleet). 
+ * if applicable and necessary, delete the drifted resources on the member cluster side; Fleet + will attempt to re-create them using the hub cluster manifests + enum: + - Always + - IfNotDrifted + type: string + whenToTakeOver: + default: Always + description: |- + WhenToTakeOver determines the action to take when Fleet applies resources to a member + cluster for the first time and finds out that the resource already exists in the cluster. + + This setting is most relevant in cases where you would like Fleet to manage pre-existing + resources on a member cluster. + + Available options include: + + * Always: with this action, Fleet will apply the hub cluster manifests to the member + clusters even if the affected resources already exist. This is the default action. + + Note that this might lead to fields being overwritten on the member clusters, if they + are specified in the hub cluster manifests. + + * IfNoDiff: with this action, Fleet will apply the hub cluster manifests to the member + clusters if (and only if) pre-existing resources look the same as the hub cluster manifests. + + This is a safer option as pre-existing resources that are inconsistent with the hub cluster + manifests will not be overwritten; Fleet will ignore them until the inconsistencies + are resolved properly: any change you make to the hub cluster manifests would not be + applied, and if you delete the manifests or even the ClusterResourcePlacement itself + from the hub cluster, these pre-existing resources would not be taken away. + + Fleet will check for inconsistencies in accordance with the ComparisonOption setting. See also + the comments on the ComparisonOption field for more information. + + If a diff has been found in a field that is **managed** by Fleet (i.e., the field + **is specified ** in the hub cluster manifest), consider one of the following actions: + * set the field in the member cluster to be of the same value as that in the hub cluster + manifest. 
+ * update the hub cluster manifest so that its field value matches with that in the member + cluster. + * switch to the Always action, which will allow Fleet to overwrite the field with the + value in the hub cluster manifest. + + If a diff has been found in a field that is **not managed** by Fleet (i.e., the field + **is not specified** in the hub cluster manifest), consider one of the following actions: + * remove the field from the member cluster. + * update the hub cluster manifest so that the field is included in the hub cluster manifest. + + If appropriate, you may also delete the object from the member cluster; Fleet will recreate + it using the hub cluster manifest. + + * Never: with this action, Fleet will not apply a hub cluster manifest to the member + clusters if there is a corresponding pre-existing resource. However, if a manifest + has never been applied yet; or it has a corresponding resource which Fleet has assumed + ownership, apply op will still be executed. + + This is the safest option; one will have to remove the pre-existing resources (so that + Fleet can re-create them) or switch to a different + WhenToTakeOver option before Fleet starts processing the corresponding hub cluster + manifests. + + If you prefer Fleet stop processing all manifests, use this option along with the + ReportDiff apply strategy type. This setup would instruct Fleet to touch nothing + on the member cluster side but still report configuration differences between the + hub cluster and member clusters. Fleet will not give up ownership + that it has already assumed though. + enum: + - Always + - IfNoDiff + - Never + type: string + type: object + rollingUpdate: + description: Rolling update config params. Present only if RolloutStrategyType + = RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + default: 25% + description: |- + The maximum number of clusters that can be scheduled above the desired number of clusters. 
+ The desired number equals to the `NumberOfClusters` field when the placement type is `PickN`. + The desired number equals to the number of clusters scheduler selected when the placement type is `PickAll`. + Value can be an absolute number (ex: 5) or a percentage of the desired number of clusters (ex: 10%). + Absolute number is calculated from percentage by rounding up. + This does not apply to the case that we do in-place update of resources on the same cluster. + This can not be 0 if MaxUnavailable is 0. + Defaults to 25%. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 25% + description: |- + The maximum number of clusters that can be unavailable during the rolling update + comparing to the desired number of clusters. + The desired number equals to the `NumberOfClusters` field when the placement type is `PickN`. + The desired number equals to the number of clusters scheduler selected when the placement type is `PickAll`. + Value can be an absolute number (ex: 5) or a percentage of the desired number of clusters (ex: 10%). + Absolute number is calculated from percentage by rounding up. + We consider a resource unavailable when we either remove it from a cluster or in-place + upgrade the resources content on the same cluster. + The minimum of MaxUnavailable is 0 to allow no downtime moving a placement from one cluster to another. + Please set it to be greater than 0 to avoid the rollout getting stuck during in-place resource update. + Defaults to 25%. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + unavailablePeriodSeconds: + default: 60 + description: |- + UnavailablePeriodSeconds is used to configure the waiting time between rollout phases when we + cannot determine if the resources have rolled out successfully or not. 
+ We have a built-in resource state detector to determine the availability status of following well-known Kubernetes + native resources: Deployment, StatefulSet, DaemonSet, Service, Namespace, ConfigMap, Secret, + ClusterRole, ClusterRoleBinding, Role, RoleBinding. + Please see [SafeRollout](https://github.com/Azure/fleet/tree/main/docs/concepts/SafeRollout/README.md) for more details. + For other types of resources, we consider them as available after `UnavailablePeriodSeconds` seconds + have passed since they were successfully applied to the target cluster. + Default is 60. + type: integer + type: object + type: + default: RollingUpdate + description: |- + Type of rollout. The only supported types are "RollingUpdate" and "External". + Default is "RollingUpdate". + enum: + - RollingUpdate + - External + type: string + type: object + required: + - resourceSelectors + type: object + status: + description: The observed status of ResourcePlacement. + properties: + conditions: + description: |- + Conditions is an array of current observed conditions for ClusterResourcePlacement. + All conditions except `ClusterResourcePlacementScheduled` correspond to the resource snapshot at the index specified by `ObservedResourceIndex`. + For example, a condition of `ClusterResourcePlacementWorkSynchronized` type + is observing the synchronization status of the resource snapshot with index `ObservedResourceIndex`. + If the rollout strategy type is `External`, and `ObservedResourceIndex` is unset due to clusters reporting different resource indices, + conditions except `ClusterResourcePlacementScheduled` will be empty or set to Unknown. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedResourceIndex: + description: |- + Resource index logically represents the generation of the selected resources. + We take a new snapshot of the selected resources whenever the selection or their content change. + Each snapshot has a different resource index. 
+ One resource snapshot can contain multiple clusterResourceSnapshots CRs in order to store large amount of resources. + To get clusterResourceSnapshot of a given resource index, use the following command: + `kubectl get ClusterResourceSnapshot --selector=kubernetes-fleet.io/resource-index=$ObservedResourceIndex` + If the rollout strategy type is `RollingUpdate`, `ObservedResourceIndex` is the default-latest resource snapshot index. + If the rollout strategy type is `External`, rollout and version control are managed by an external controller, + and this field is not empty only if all targeted clusters observe the same resource index in `PlacementStatuses`. + type: string + placementStatuses: + description: |- + PlacementStatuses contains a list of placement status on the clusters that are selected by PlacementPolicy. + Each selected cluster according to the observed resource placement is guaranteed to have a corresponding placementStatuses. + In the pickN case, there are N placement statuses where N = NumberOfClusters; Or in the pickFixed case, there are + N placement statuses where N = ClusterNames. + In these cases, some of them may not have assigned clusters when we cannot fill the required number of clusters. + items: + description: ResourcePlacementStatus represents the placement status + of selected resources for one target cluster. + properties: + applicableClusterResourceOverrides: + description: |- + ApplicableClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshots associated with + the selected resources. + + This field is alpha-level and is for the override policy feature. + items: + type: string + type: array + applicableResourceOverrides: + description: |- + ApplicableResourceOverrides contains a list of applicable ResourceOverride snapshots associated with the selected + resources. + + This field is alpha-level and is for the override policy feature. 
+ items: + description: NamespacedName comprises a resource name, with + a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced scope + resource. + type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + clusterName: + description: |- + ClusterName is the name of the cluster this resource is assigned to. + If it is not empty, its value should be unique cross all placement decisions for the Placement. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions on the cluster. + Each condition corresponds to the resource snapshot at the index specified by `ObservedResourceIndex`. + For example, the condition of type `RolloutStarted` is observing the rollout status of the resource snapshot with index `ObservedResourceIndex`. + items: + description: Condition contains details for one aspect of + the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + diffedPlacements: + description: |- + DiffedPlacements is a list of resources that have configuration differences from their + corresponding hub cluster manifests. Fleet will report such differences when: + + * The CRP uses the ReportDiff apply strategy, which instructs Fleet to compare the hub + cluster manifests against the live resources without actually performing any apply op; or + * Fleet finds a pre-existing resource on the member cluster side that does not match its + hub cluster counterpart, and the CRP has been configured to only take over a resource if + no configuration differences are found. + + To control the object size, only the first 100 diffed resources will be included. + This field is only meaningful if the `ClusterName` is not empty. + items: + description: DiffedResourcePlacement contains the details + of a resource with configuration differences. + properties: + envelope: + description: Envelope identifies the envelope object that + contains this resource. 
+ properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster + scoped. + type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + firstDiffedObservedTime: + description: |- + FirstDiffedObservedTime is the first time the resource on the target cluster is + observed to have configuration differences. + format: date-time + type: string + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected + resources. + type: string + name: + description: Name of the target resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + Empty if the resource is cluster scoped. + type: string + observationTime: + description: ObservationTime is the time when we observe + the configuration differences for the resource. + format: date-time + type: string + observedDiffs: + description: |- + ObservedDiffs are the details about the found configuration differences. Note that + Fleet might truncate the details as appropriate to control the object size. + + Each detail entry specifies how the live state (the state on the member + cluster side) compares against the desired state (the state kept in the hub cluster manifest). + + An event about the details will be emitted as well. + items: + description: |- + PatchDetail describes a patch that explains an observed configuration drift or + difference. + + A patch detail can be transcribed as a JSON patch operation, as specified in RFC 6902. + properties: + path: + description: The JSON path that points to a field + that has drifted or has configuration differences. 
+ type: string + valueInHub: + description: |- + The value at the JSON path from the hub cluster side. + + This field can be empty if the JSON path does not exist on the hub cluster side; i.e., + applying the manifest from the hub cluster side would remove the field. + type: string + valueInMember: + description: |- + The value at the JSON path from the member cluster side. + + This field can be empty if the JSON path does not exist on the member cluster side; i.e., + applying the manifest from the hub cluster side would add a new field. + type: string + required: + - path + type: object + type: array + targetClusterObservedGeneration: + description: |- + TargetClusterObservedGeneration is the generation of the resource on the target cluster + that contains the configuration differences. + + This might be nil if the resource has not been created yet on the target cluster. + format: int64 + type: integer + version: + description: Version is the version of the selected resource. + type: string + required: + - firstDiffedObservedTime + - kind + - name + - observationTime + - version + type: object + maxItems: 100 + type: array + driftedPlacements: + description: |- + DriftedPlacements is a list of resources that have drifted from their desired states + kept in the hub cluster, as found by Fleet using the drift detection mechanism. + + To control the object size, only the first 100 drifted resources will be included. + This field is only meaningful if the `ClusterName` is not empty. + items: + description: DriftedResourcePlacement contains the details + of a resource with configuration drifts. + properties: + envelope: + description: Envelope identifies the envelope object that + contains this resource. + properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster + scoped. 
+ type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + firstDriftedObservedTime: + description: |- + FirstDriftedObservedTime is the first time the resource on the target cluster is + observed to have configuration drifts. + format: date-time + type: string + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected + resources. + type: string + name: + description: Name of the target resource. + type: string + namespace: + description: Namespace is the namespace of the resource. + Empty if the resource is cluster scoped. + type: string + observationTime: + description: ObservationTime is the time when we observe + the configuration drifts for the resource. + format: date-time + type: string + observedDrifts: + description: |- + ObservedDrifts are the details about the found configuration drifts. Note that + Fleet might truncate the details as appropriate to control the object size. + + Each detail entry specifies how the live state (the state on the member + cluster side) compares against the desired state (the state kept in the hub cluster manifest). + + An event about the details will be emitted as well. + items: + description: |- + PatchDetail describes a patch that explains an observed configuration drift or + difference. + + A patch detail can be transcribed as a JSON patch operation, as specified in RFC 6902. + properties: + path: + description: The JSON path that points to a field + that has drifted or has configuration differences. + type: string + valueInHub: + description: |- + The value at the JSON path from the hub cluster side. + + This field can be empty if the JSON path does not exist on the hub cluster side; i.e., + applying the manifest from the hub cluster side would remove the field. 
+ type: string + valueInMember: + description: |- + The value at the JSON path from the member cluster side. + + This field can be empty if the JSON path does not exist on the member cluster side; i.e., + applying the manifest from the hub cluster side would add a new field. + type: string + required: + - path + type: object + type: array + targetClusterObservedGeneration: + description: |- + TargetClusterObservedGeneration is the generation of the resource on the target cluster + that contains the configuration drifts. + format: int64 + type: integer + version: + description: Version is the version of the selected resource. + type: string + required: + - firstDriftedObservedTime + - kind + - name + - observationTime + - targetClusterObservedGeneration + - version + type: object + maxItems: 100 + type: array + failedPlacements: + description: |- + FailedPlacements is a list of all the resources failed to be placed to the given cluster or the resource is unavailable. + Note that we only include 100 failed resource placements even if there are more than 100. + This field is only meaningful if the `ClusterName` is not empty. + items: + description: FailedResourcePlacement contains the failure + details of a failed resource placement. + properties: + condition: + description: The failed condition status. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + envelope: + description: Envelope identifies the envelope object that + contains this resource. + properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster + scoped. + type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected + resources. + type: string + name: + description: Name of the target resource. 
+ type: string + namespace: + description: Namespace is the namespace of the resource. + Empty if the resource is cluster scoped. + type: string + version: + description: Version is the version of the selected resource. + type: string + required: + - condition + - kind + - name + - version + type: object + maxItems: 100 + type: array + observedResourceIndex: + description: |- + ObservedResourceIndex is the index of the resource snapshot that is currently being rolled out to the given cluster. + This field is only meaningful if the `ClusterName` is not empty. + type: string + type: object + type: array + selectedResources: + description: |- + SelectedResources contains a list of resources selected by ResourceSelectors. + This field is only meaningful if the `ObservedResourceIndex` is not empty. + items: + description: ResourceIdentifier identifies one Kubernetes resource. + properties: + envelope: + description: Envelope identifies the envelope object that contains + this resource. + properties: + name: + description: Name of the envelope object. + type: string + namespace: + description: Namespace is the namespace of the envelope + object. Empty if the envelope object is cluster scoped. + type: string + type: + default: ConfigMap + description: Type of the envelope object. + enum: + - ConfigMap + - ClusterResourceEnvelope + - ResourceEnvelope + type: string + required: + - name + type: object + group: + description: Group is the group name of the selected resource. + type: string + kind: + description: Kind represents the Kind of the selected resources. + type: string + name: + description: Name of the target resource. + type: string + namespace: + description: Namespace is the namespace of the resource. Empty + if the resource is cluster scoped. + type: string + version: + description: Version is the version of the selected resource. 
+ type: string + required: + - kind + - name + - version + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/placement.kubernetes-fleet.io_resourcesnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_resourcesnapshots.yaml new file mode 100644 index 000000000..3a05bd032 --- /dev/null +++ b/config/crd/bases/placement.kubernetes-fleet.io_resourcesnapshots.yaml @@ -0,0 +1,157 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: resourcesnapshots.placement.kubernetes-fleet.io +spec: + group: placement.kubernetes-fleet.io + names: + categories: + - fleet + - fleet-placement + kind: ResourceSnapshot + listKind: ResourceSnapshotList + plural: resourcesnapshots + shortNames: + - rs + singular: resourcesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.generation + name: Gen + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + ResourceSnapshot is used to store a snapshot of selected resources by a resource placement policy. + Its spec is immutable. + We may need to produce more than one resourceSnapshot for all the resources a ResourcePlacement selected to get around the 1MB size limit of k8s objects. + We assign an ever-increasing index for each such group of resourceSnapshots. + The naming convention of a resourceSnapshot is {RPName}-{resourceIndex}-{subindex} + where the name of the first snapshot of a group has no subindex part so its name is {RPName}-{resourceIndex}-snapshot. + resourceIndex will begin with 0. + Each snapshot MUST have the following labels: + - `CRPTrackingLabel` which points to its owner resource placement. + - `ResourceIndexLabel` which is the index of the snapshot group. 
+ - `IsLatestSnapshotLabel` which indicates whether the snapshot is the latest one. + + All the snapshots within the same index group must have the same ResourceIndexLabel. + + The first snapshot of the index group MUST have the following annotations: + - `NumberOfResourceSnapshotsAnnotation` to store the total number of resource snapshots in the index group. + - `ResourceGroupHashAnnotation` whose value is the sha-256 hash of all the snapshots belong to the same snapshot index. + + Each snapshot (excluding the first snapshot) MUST have the following annotations: + - `SubindexOfResourceSnapshotAnnotation` to store the subindex of resource snapshot in the group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ResourceSnapshot. + properties: + selectedResources: + description: SelectedResources contains a list of resources selected + by ResourceSelectors. + items: + description: ResourceContent contains the content of a resource + type: object + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true + type: array + required: + - selectedResources + type: object + status: + description: The observed status of ResourceSnapshot. 
+ properties: + conditions: + description: Conditions is an array of current observed conditions + for ResourceSnapshot. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml b/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml new file mode 100644 index 000000000..4406774f9 --- /dev/null +++ b/config/crd/bases/placement.kubernetes-fleet.io_schedulingpolicysnapshots.yaml @@ -0,0 +1,637 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: schedulingpolicysnapshots.placement.kubernetes-fleet.io +spec: + group: placement.kubernetes-fleet.io + names: + categories: + - fleet + - fleet-placement + kind: SchedulingPolicySnapshot + listKind: SchedulingPolicySnapshotList + plural: schedulingpolicysnapshots + shortNames: + - sps + singular: schedulingpolicysnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.generation + name: Gen + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + SchedulingPolicySnapshot is used to store a snapshot of cluster placement policy. + Its spec is immutable. + The naming convention of a SchedulingPolicySnapshot is {RPName}-{PolicySnapshotIndex}. + PolicySnapshotIndex will begin with 0. + Each snapshot must have the following labels: + - `CRPTrackingLabel` which points to its placement owner. + - `PolicyIndexLabel` which is the index of the policy snapshot. 
+ - `IsLatestSnapshotLabel` which indicates whether the snapshot is the latest one. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of SchedulingPolicySnapshot. + properties: + policy: + description: |- + Policy defines how to select member clusters to place the selected resources. + If unspecified, all the joined member clusters are selected. + properties: + affinity: + description: |- + Affinity contains cluster affinity scheduling rules. Defines which member clusters to place the selected resources. + Only valid if the placement type is "PickAll" or "PickN". + properties: + clusterAffinity: + description: ClusterAffinity contains cluster affinity scheduling + rules for the selected resources. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler computes a score for each cluster at schedule time by iterating + through the elements of this field and adding "weight" to the sum if the cluster + matches the corresponding matchExpression. The scheduler then chooses the first + `N` clusters with the highest sum to satisfy the placement. + This field is ignored if the placement type is "PickAll". + If the cluster score changes at some point after the placement (e.g. 
due to an update), + the system may or may not try to eventually move the resource from a cluster with a lower score + to a cluster with higher score. + items: + properties: + preference: + description: A cluster selector term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching + the query are selected. + + If you specify both label and property selectors in the same term, the results are AND'd. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + propertySelector: + description: |- + PropertySelector is a property query over all joined member clusters. Clusters matching + the query are selected. + + If you specify both label and property selectors in the same term, the results are AND'd. + + At this moment, PropertySelector can only be used with + `RequiredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + matchExpressions: + description: MatchExpressions is an array + of PropertySelectorRequirements. The requirements + are AND'd. + items: + description: |- + PropertySelectorRequirement is a specific property requirement when picking clusters for + resource placement. + properties: + name: + description: Name is the name of the + property; it should be a Kubernetes + label name. + type: string + operator: + description: |- + Operator specifies the relationship between a cluster's observed value of the specified + property and the values given in the requirement. + type: string + values: + description: |- + Values are a list of values of the specified property which Fleet will compare against + the observed values of individual member clusters in accordance with the given + operator. + + At this moment, each value should be a Kubernetes quantity. For more information, see + https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity. + + If the operator is Gt (greater than), Ge (greater than or equal to), Lt (less than), + or `Le` (less than or equal to), Eq (equal to), or Ne (ne), exactly one value must be + specified in the list. 
+ items: + type: string + maxItems: 1 + type: array + required: + - name + - operator + - values + type: object + type: array + required: + - matchExpressions + type: object + propertySorter: + description: |- + PropertySorter sorts all matching clusters by a specific property and assigns different weights + to each cluster based on their observed property values. + + At this moment, PropertySorter can only be used with + `PreferredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + name: + description: Name is the name of the property + which Fleet sorts clusters by. + type: string + sortOrder: + description: |- + SortOrder explains how Fleet should perform the sort; specifically, whether Fleet should + sort in ascending or descending order. + type: string + required: + - name + - sortOrder + type: object + type: object + weight: + description: Weight associated with matching the + corresponding clusterSelectorTerm, in the range + [-100, 100]. + format: int32 + maximum: 100 + minimum: -100 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the resource will not be scheduled onto the cluster. + If the affinity requirements specified by this field cease to be met + at some point after the placement (e.g. due to an update), the system + may or may not try to eventually remove the resource from the cluster. + properties: + clusterSelectorTerms: + description: ClusterSelectorTerms is a list of cluster + selector terms. The terms are `ORed`. + items: + properties: + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching + the query are selected. 
+ + If you specify both label and property selectors in the same term, the results are AND'd. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + propertySelector: + description: |- + PropertySelector is a property query over all joined member clusters. Clusters matching + the query are selected. + + If you specify both label and property selectors in the same term, the results are AND'd. + + At this moment, PropertySelector can only be used with + `RequiredDuringSchedulingIgnoredDuringExecution` affinity terms. 
+ + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. + properties: + matchExpressions: + description: MatchExpressions is an array + of PropertySelectorRequirements. The requirements + are AND'd. + items: + description: |- + PropertySelectorRequirement is a specific property requirement when picking clusters for + resource placement. + properties: + name: + description: Name is the name of the + property; it should be a Kubernetes + label name. + type: string + operator: + description: |- + Operator specifies the relationship between a cluster's observed value of the specified + property and the values given in the requirement. + type: string + values: + description: |- + Values are a list of values of the specified property which Fleet will compare against + the observed values of individual member clusters in accordance with the given + operator. + + At this moment, each value should be a Kubernetes quantity. For more information, see + https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity. + + If the operator is Gt (greater than), Ge (greater than or equal to), Lt (less than), + or `Le` (less than or equal to), Eq (equal to), or Ne (ne), exactly one value must be + specified in the list. + items: + type: string + maxItems: 1 + type: array + required: + - name + - operator + - values + type: object + type: array + required: + - matchExpressions + type: object + propertySorter: + description: |- + PropertySorter sorts all matching clusters by a specific property and assigns different weights + to each cluster based on their observed property values. + + At this moment, PropertySorter can only be used with + `PreferredDuringSchedulingIgnoredDuringExecution` affinity terms. + + This field is beta-level; it is for the property-based scheduling feature and is only + functional when a property provider is enabled in the deployment. 
+ properties: + name: + description: Name is the name of the property + which Fleet sorts clusters by. + type: string + sortOrder: + description: |- + SortOrder explains how Fleet should perform the sort; specifically, whether Fleet should + sort in ascending or descending order. + type: string + required: + - name + - sortOrder + type: object + type: object + maxItems: 10 + type: array + required: + - clusterSelectorTerms + type: object + type: object + type: object + clusterNames: + description: |- + ClusterNames contains a list of names of MemberCluster to place the selected resources. + Only valid if the placement type is "PickFixed" + items: + type: string + maxItems: 100 + type: array + numberOfClusters: + description: NumberOfClusters of placement. Only valid if the + placement type is "PickN". + format: int32 + minimum: 0 + type: integer + placementType: + default: PickAll + description: Type of placement. Can be "PickAll", "PickN" or "PickFixed". + Default is PickAll. + enum: + - PickAll + - PickN + - PickFixed + type: string + tolerations: + description: |- + If specified, the ClusterResourcePlacement's Tolerations. + Tolerations cannot be updated or deleted. + + This field is beta-level and is for the taints and tolerations feature. + items: + description: |- + Toleration allows ClusterResourcePlacement to tolerate any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, only allowed value is NoSchedule. + enum: + - NoSchedule + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + default: Equal + description: |- + Operator represents a key's relationship to the value. 
+ Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a + ClusterResourcePlacement can tolerate all taints of a particular category. + enum: + - Equal + - Exists + type: string + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + maxItems: 100 + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of resources ought to spread across multiple topology + domains. Scheduler will schedule resources in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + Only valid if the placement type is "PickN". + items: + description: TopologySpreadConstraint specifies how to spread + resources among the given cluster topology. + properties: + maxSkew: + default: 1 + description: |- + MaxSkew describes the degree to which resources may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of resource copies in the target topology and the global minimum. + The global minimum is the minimum number of resource copies in a domain. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's an optional field. Default value is 1 and 0 is not allowed. + format: int32 + minimum: 1 + type: integer + topologyKey: + description: |- + TopologyKey is the key of cluster labels. Clusters that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of replicas of the resource into each bucket honor the `MaxSkew` value. + It's a required field. 
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with the resource if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the resource in any cluster, + but giving higher precedence to topologies that would help reduce the skew. + It's an optional field. + type: string + required: + - topologyKey + type: object + type: array + type: object + policyHash: + description: PolicyHash is the sha-256 hash value of the Policy field. + format: byte + type: string + required: + - policyHash + type: object + status: + description: The observed status of SchedulingPolicySnapshot. + properties: + conditions: + description: Conditions is an array of current observed conditions + for SchedulingPolicySnapshot. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedCRPGeneration: + description: |- + ObservedCRPGeneration is the generation of the CRP which the scheduler uses to perform + the scheduling cycle and prepare the scheduling status. + format: int64 + type: integer + targetClusters: + description: |- + ClusterDecisions contains a list of names of member clusters considered by the scheduler. + Note that all the selected clusters must present in the list while not all the + member clusters are guaranteed to be listed due to the size limit. We will try to + add the clusters that can provide the most insight to the list first. + items: + description: |- + ClusterDecision represents a decision from a placement + An empty ClusterDecision indicates it is not scheduled yet. + properties: + clusterName: + description: |- + ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all + placement decisions for the Placement. + type: string + clusterScore: + description: ClusterScore represents the score of the cluster + calculated by the scheduler. 
+ properties: + affinityScore: + description: |- + AffinityScore represents the affinity score of the cluster calculated by the last + scheduling decision based on the preferred affinity selector. + An affinity score may not present if the cluster does not meet the required affinity. + format: int32 + type: integer + priorityScore: + description: |- + TopologySpreadScore represents the priority score of the cluster calculated by the last + scheduling decision based on the topology spread applied to the cluster. + A priority score may not present if the cluster does not meet the topology spread. + format: int32 + type: integer + type: object + reason: + description: Reason represents the reason why the cluster is + selected or not. + type: string + selected: + description: Selected indicates if this cluster is selected + by the scheduler. + type: boolean + required: + - clusterName + - reason + - selected + type: object + maxItems: 1000 + type: array + required: + - observedCRPGeneration + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go index b94d46e8d..3f7b5e03d 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/clusterresourceplacement/controller.go @@ -28,6 +28,7 @@ import ( "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -975,10 +976,124 @@ func (r *Reconciler) setPlacementStatus( return false, nil } + if crp.Spec.Strategy.Type == fleetv1beta1.ExternalRolloutStrategyType { + // For external rollout strategy, if clusters observe different resource snapshot versions, + // we set RolloutStarted to Unknown 
without any other conditions since we do not know exactly which version is rolling out. + // We also need to reset ObservedResourceIndex and selectedResources. + rolloutStartedUnknown, err := r.determineRolloutStateForCRPWithExternalRolloutStrategy(ctx, crp, selected, allRPS, selectedResourceIDs) + if err != nil || rolloutStartedUnknown { + return true, err + } + } + setCRPConditions(crp, allRPS, rpsSetCondTypeCounter, expectedCondTypes) return true, nil } +func (r *Reconciler) determineRolloutStateForCRPWithExternalRolloutStrategy( + ctx context.Context, + crp *fleetv1beta1.ClusterResourcePlacement, + selected []*fleetv1beta1.ClusterDecision, + allRPS []fleetv1beta1.ResourcePlacementStatus, + selectedResourceIDs []fleetv1beta1.ResourceIdentifier, +) (bool, error) { + if len(selected) == 0 { + // This should not happen as we already checked in setPlacementStatus. + err := controller.NewUnexpectedBehaviorError(fmt.Errorf("selected cluster list is empty for placement %s when checking per-cluster rollout state", crp.Name)) + klog.ErrorS(err, "Should not happen: selected cluster list is empty in determineRolloutStateForCRPWithExternalRolloutStrategy()") + return false, err + } + + differentResourceIndicesObserved := false + observedResourceIndex := allRPS[0].ObservedResourceIndex + for i := range len(selected) - 1 { + if allRPS[i].ObservedResourceIndex != allRPS[i+1].ObservedResourceIndex { + differentResourceIndicesObserved = true + break + } + } + + if differentResourceIndicesObserved { + // If clusters observe different resource snapshot versions, we set RolloutStarted condition to Unknown. + // ObservedResourceIndex and selectedResources are reset, too. 
+ klog.V(2).InfoS("Placement has External rollout strategy and different resource snapshot versions are observed across clusters, set RolloutStarted condition to Unknown", "clusterResourcePlacement", klog.KObj(crp)) + crp.Status.ObservedResourceIndex = "" + crp.Status.SelectedResources = []fleetv1beta1.ResourceIdentifier{} + crp.SetConditions(metav1.Condition{ + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: condition.RolloutControlledByExternalControllerReason, + Message: "Rollout is controlled by an external controller and different resource snapshot versions are observed across clusters", + ObservedGeneration: crp.Generation, + }) + // As CRP status will refresh even if the spec has not changed, we reset any unused conditions + // to avoid confusion. + for i := condition.RolloutStartedCondition + 1; i < condition.TotalCondition; i++ { + meta.RemoveStatusCondition(&crp.Status.Conditions, string(i.ClusterResourcePlacementConditionType())) + } + return true, nil + } + + if observedResourceIndex == "" { + // All bindings have empty resource snapshot name, we set the rollout condition to Unknown. + // ObservedResourceIndex and selectedResources are reset, too. 
+ klog.V(2).InfoS("Placement has External rollout strategy and no resource snapshot name is observed across clusters, set RolloutStarted condition to Unknown", "clusterResourcePlacement", klog.KObj(crp)) + crp.Status.ObservedResourceIndex = "" + crp.Status.SelectedResources = []fleetv1beta1.ResourceIdentifier{} + crp.SetConditions(metav1.Condition{ + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: condition.RolloutControlledByExternalControllerReason, + Message: "Rollout is controlled by an external controller and no resource snapshot name is observed across clusters, probably rollout has not started yet", + ObservedGeneration: crp.Generation, + }) + // As CRP status will refresh even if the spec has not changed, we reset any unused conditions + // to avoid confusion. + for i := condition.RolloutStartedCondition + 1; i < condition.TotalCondition; i++ { + meta.RemoveStatusCondition(&crp.Status.Conditions, string(i.ClusterResourcePlacementConditionType())) + } + return true, nil + } + + // All bindings have the same observed resource snapshot. + // We only set the ObservedResourceIndex and selectedResources, as the conditions will be set with setCRPConditions. + // If all clusters observe the latest resource snapshot, we do not need to go through all the resource snapshots again to collect selected resources. 
+ if observedResourceIndex == crp.Status.ObservedResourceIndex { + crp.Status.SelectedResources = selectedResourceIDs + } else { + crp.Status.ObservedResourceIndex = observedResourceIndex + selectedResources, err := controller.CollectResourceIdentifiersFromClusterResourceSnapshot(ctx, r.Client, crp.Name, observedResourceIndex) + if err != nil { + klog.ErrorS(err, "Failed to collect resource identifiers from clusterResourceSnapshot", "clusterResourcePlacement", klog.KObj(crp), "resourceSnapshotIndex", observedResourceIndex) + return false, err + } + crp.Status.SelectedResources = selectedResources + } + + for i := range len(selected) { + rolloutStartedCond := meta.FindStatusCondition(allRPS[i].Conditions, string(fleetv1beta1.ResourceRolloutStartedConditionType)) + if !condition.IsConditionStatusTrue(rolloutStartedCond, crp.Generation) && + !condition.IsConditionStatusFalse(rolloutStartedCond, crp.Generation) { + klog.V(2).InfoS("Placement has External rollout strategy and some cluster is in RolloutStarted Unknown state, set RolloutStarted condition to Unknown", + "clusterName", allRPS[i].ClusterName, "observedResourceIndex", observedResourceIndex, "clusterResourcePlacement", klog.KObj(crp)) + crp.SetConditions(metav1.Condition{ + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: condition.RolloutControlledByExternalControllerReason, + Message: fmt.Sprintf("Rollout is controlled by an external controller and cluster %s is in RolloutStarted Unknown state", allRPS[i].ClusterName), + ObservedGeneration: crp.Generation, + }) + // As CRP status will refresh even if the spec has not changed, we reset any unused conditions + // to avoid confusion. 
+ for i := condition.RolloutStartedCondition + 1; i < condition.TotalCondition; i++ { + meta.RemoveStatusCondition(&crp.Status.Conditions, string(i.ClusterResourcePlacementConditionType())) + } + return true, nil + } + } + return false, nil +} + func buildScheduledCondition(crp *fleetv1beta1.ClusterResourcePlacement, latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot) metav1.Condition { scheduledCondition := latestSchedulingPolicySnapshot.GetCondition(string(fleetv1beta1.PolicySnapshotScheduled)) diff --git a/pkg/controllers/clusterresourceplacement/controller_integration_test.go b/pkg/controllers/clusterresourceplacement/controller_integration_test.go index afdeb2644..c5750a931 100644 --- a/pkg/controllers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/controllers/clusterresourceplacement/controller_integration_test.go @@ -387,7 +387,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -416,7 +416,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -457,7 +457,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -512,7 +512,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", 
func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -569,7 +569,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -585,7 +585,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -610,7 +611,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "", // Empty as the binding is not created yet. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -686,7 +688,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { } wantCRP.Status.PlacementStatuses = []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -711,7 +714,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -772,7 +776,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -788,7 +792,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -813,7 +818,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "", // Empty as the binding is not created yet. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -887,7 +893,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { } wantCRP.Status.PlacementStatuses = []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -912,7 +919,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1054,7 +1062,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { } wantCRP.Status.PlacementStatuses = []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1083,7 +1092,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1155,7 +1165,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -1181,7 +1191,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1211,7 +1222,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: 
member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1372,7 +1384,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -1406,7 +1418,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -1449,7 +1461,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -1475,7 +1487,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1500,7 +1513,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1626,7 +1640,7 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, Spec: crp.Spec, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: 
placementv1beta1.PlacementStatus{ ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { @@ -1652,7 +1666,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1677,7 +1692,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1780,7 +1796,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { } wantCRP.Status.PlacementStatuses = []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: member1Name, + ClusterName: member1Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1810,7 +1827,8 @@ var _ = Describe("Test ClusterResourcePlacement Controller", func() { }, }, { - ClusterName: member2Name, + ClusterName: member2Name, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, diff --git a/pkg/controllers/clusterresourceplacement/controller_test.go b/pkg/controllers/clusterresourceplacement/controller_test.go index 8c2e0eb4c..c2700d734 100644 --- a/pkg/controllers/clusterresourceplacement/controller_test.go +++ b/pkg/controllers/clusterresourceplacement/controller_test.go @@ -38,6 +38,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/pkg/utils/controller" "go.goms.io/fleet/pkg/utils/defaulter" "go.goms.io/fleet/test/utils/resource" @@ -101,7 +102,7 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { Name: testCRPName, Generation: crpGeneration, }, - Spec: 
fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -3265,7 +3266,7 @@ func TestIsRolloutComplete(t *testing.T) { Name: testCRPName, Generation: crpGeneration, }, - Status: fleetv1beta1.ClusterResourcePlacementStatus{ + Status: fleetv1beta1.PlacementStatus{ Conditions: tc.conditions, }, } @@ -3276,3 +3277,877 @@ func TestIsRolloutComplete(t *testing.T) { }) } } + +func TestDetermineRolloutStateForCRPWithExternalRolloutStrategy(t *testing.T) { + namespaceResourceContent := *resource.NamespaceResourceContentForTest(t) + deploymentResourceContent := *resource.DeploymentResourceContentForTest(t) + + tests := []struct { + name string + selected []*fleetv1beta1.ClusterDecision + allRPS []fleetv1beta1.ResourcePlacementStatus + resourceSnapshots []*fleetv1beta1.ClusterResourceSnapshot + selectedResources []fleetv1beta1.ResourceIdentifier + existingObservedResourceIndex string + existingConditions []metav1.Condition + wantRolloutUnknown bool + wantObservedResourceIndex string + wantSelectedResources []fleetv1beta1.ResourceIdentifier + wantConditions []metav1.Condition + wantErr bool + }{ + { + name: "no selected clusters", // This should not happen in normal cases. 
+ selected: []*fleetv1beta1.ClusterDecision{}, + allRPS: []fleetv1beta1.ResourcePlacementStatus{}, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + existingConditions: []metav1.Condition{}, + wantErr: true, + }, + { + name: "selected clusters with different observed resource indices", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "0", + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "1", + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + existingConditions: []metav1.Condition{}, + wantRolloutUnknown: true, + wantObservedResourceIndex: "", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{}, + wantConditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and different resource snapshot versions are observed across clusters", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "selected clusters with different observed resource indices and an empty ObservedResourceIndex", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "1", + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + wantRolloutUnknown: true, + wantObservedResourceIndex: "", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{}, + wantConditions: []metav1.Condition{ + { + Type: 
string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and different resource snapshot versions are observed across clusters", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "selected clusters with different observed resource indices and crp has some conditions already", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "1", + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + existingConditions: []metav1.Condition{ + { + // Scheduled condition should be kept. + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: "Scheduled", + Message: "Scheduling is complete", + ObservedGeneration: 1, + }, + { + // RolloutStarted condition should be updated. + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + Message: "Rollout is started", + ObservedGeneration: 0, + }, + { + // Overridden condition should be removed. + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: "Overridden", + Message: "Overridden", + ObservedGeneration: 0, + }, + }, + wantRolloutUnknown: true, + wantObservedResourceIndex: "", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{}, + wantConditions: []metav1.Condition{ + { + // Scheduled condition should be kept. 
+ Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: "Scheduled", + Message: "Scheduling is complete", + ObservedGeneration: 1, + }, + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and different resource snapshot versions are observed across clusters", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "selected clusters all with empty ObservedResourceIndex", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. 
+ }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + existingConditions: []metav1.Condition{}, + wantRolloutUnknown: true, + wantObservedResourceIndex: "", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{}, + wantConditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and no resource snapshot name is observed across clusters, probably rollout has not started yet", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "selected clusters all with empty ObservedResourceIndex and crp has some conditions already", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + existingConditions: []metav1.Condition{ + { + // Scheduled condition should be kept. + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: "Scheduled", + Message: "Scheduling is complete", + ObservedGeneration: 1, + }, + { + // RolloutStarted condition should be updated. + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + Message: "Rollout is started", + ObservedGeneration: 0, + }, + { + // Overridden condition should be removed. 
+ Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: "Overridden", + Message: "Overridden", + ObservedGeneration: 0, + }, + }, + wantRolloutUnknown: true, + wantObservedResourceIndex: "", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{}, + wantConditions: []metav1.Condition{ + { + // Scheduled condition should be kept. + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: "Scheduled", + Message: "Scheduling is complete", + ObservedGeneration: 1, + }, + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and no resource snapshot name is observed across clusters, probably rollout has not started yet", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "single selected cluster with empty ObservedResourceIndex", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "", + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. 
+ }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + existingConditions: []metav1.Condition{}, + wantRolloutUnknown: true, + wantObservedResourceIndex: "", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{}, + wantConditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and no resource snapshot name is observed across clusters, probably rollout has not started yet", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "single selected cluster with valid ObservedResourceIndex but no clusterResourceSnapshots found", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. + }, + }, + existingConditions: []metav1.Condition{}, + wantRolloutUnknown: false, + wantObservedResourceIndex: "2", + wantSelectedResources: nil, + wantConditions: []metav1.Condition{}, + wantErr: false, + }, + { + name: "single selected cluster with valid ObservedResourceIndex but no master clusterResourceSnapshots with the specified index found", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. 
+ }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 2, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "selected clusters with valid ObservedResourceIndex but no rollout started condition", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "2", + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. 
+ }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + }, + }, + }, + }, + wantRolloutUnknown: true, + wantObservedResourceIndex: "2", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + }, + wantConditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: "RolloutControlledByExternalController", + Message: "Rollout is controlled by an external controller and cluster cluster2 is in RolloutStarted Unknown state", + ObservedGeneration: 1, + }, + }, + wantErr: false, + }, + { + name: "single selected cluster with valid ObservedResourceIndex", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. 
+ }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + }, + }, + }, + }, + existingConditions: []metav1.Condition{}, + wantRolloutUnknown: false, + wantObservedResourceIndex: "2", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + }, + wantConditions: []metav1.Condition{}, + wantErr: false, + }, + { + name: "multiple selected clusters with the same valid ObservedResourceIndex and crp has some conditions already", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. 
+ }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 2, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + deploymentResourceContent, + }, + }, + }, + }, + existingConditions: []metav1.Condition{ + // All conditions should be kept, which will be updated later in setCRPConditions. 
+ { + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: "Scheduled", + Message: "Scheduling is complete", + ObservedGeneration: 1, + }, + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + Message: "Rollout is started", + ObservedGeneration: 0, + }, + { + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: "Overridden", + Message: "Overridden", + ObservedGeneration: 0, + }, + }, + wantRolloutUnknown: false, + wantObservedResourceIndex: "2", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Namespace: "deployment-namespace", + Name: "deployment-name", + }, + }, + wantConditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: "Scheduled", + Message: "Scheduling is complete", + ObservedGeneration: 1, + }, + { + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + Message: "Rollout is started", + ObservedGeneration: 0, + }, + { + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: "Overridden", + Message: "Overridden", + ObservedGeneration: 0, + }, + }, + wantErr: false, + }, + { + name: "multiple selected clusters with the same valid ObservedResourceIndex and multiple clusterResourceSnapshots found", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + { + ClusterName: "cluster2", + Selected: true, + }, + }, + allRPS: 
[]fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster2", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 2, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + deploymentResourceContent, + }, + }, + }, + }, + existingConditions: []metav1.Condition{}, + wantRolloutUnknown: false, + wantObservedResourceIndex: "2", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + { + Group: "apps", 
+ Version: "v1", + Kind: "Deployment", + Namespace: "deployment-namespace", + Name: "deployment-name", + }, + }, + wantConditions: []metav1.Condition{}, + wantErr: false, + }, + { + name: "use selected resources passed in if clusters are on latest resource snapshot", + selected: []*fleetv1beta1.ClusterDecision{ + { + ClusterName: "cluster1", + Selected: true, + }, + }, + allRPS: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "cluster1", + ObservedResourceIndex: "2", + Conditions: []metav1.Condition{ + { + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + { + ClusterName: "cluster-unselected", + ObservedResourceIndex: "1", // This should not be considered. + }, + }, + existingObservedResourceIndex: "2", + existingConditions: []metav1.Condition{}, + selectedResources: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + }, + wantRolloutUnknown: false, + wantObservedResourceIndex: "2", + wantSelectedResources: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + }, + wantConditions: []metav1.Condition{}, + wantErr: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + crp := &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testCRPName, + Generation: 1, + }, + Status: fleetv1beta1.PlacementStatus{ + ObservedResourceIndex: tc.existingObservedResourceIndex, + Conditions: tc.existingConditions, + }, + } + objects := []client.Object{} + for _, snapshot := range tc.resourceSnapshots { + objects = append(objects, snapshot) + } + scheme := serviceScheme(t) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + r := Reconciler{ + Client: fakeClient, + } + var cmpOptions = []cmp.Option{ + // ignore the message as we may change the message in the future + cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"), + } + gotRolloutUnknown, gotErr := r.determineRolloutStateForCRPWithExternalRolloutStrategy(context.Background(), crp, tc.selected, tc.allRPS, tc.selectedResources) + if (gotErr != nil) != tc.wantErr { + t.Errorf("determineRolloutStateForCRPWithExternalRolloutStrategy() got error %v, want error %t", gotErr, tc.wantErr) + } + if !tc.wantErr { + if gotRolloutUnknown != tc.wantRolloutUnknown { + t.Errorf("determineRolloutStateForCRPWithExternalRolloutStrategy() got RolloutUnknown set to %v, want %v", gotRolloutUnknown, tc.wantRolloutUnknown) + } + if crp.Status.ObservedResourceIndex != tc.wantObservedResourceIndex { + t.Errorf("determineRolloutStateForCRPWithExternalRolloutStrategy() got crp.Status.ObservedResourceIndex set to %v, want %v", crp.Status.ObservedResourceIndex, tc.wantObservedResourceIndex) + } + if diff := cmp.Diff(tc.wantSelectedResources, crp.Status.SelectedResources); diff != "" { + t.Errorf("determineRolloutStateForCRPWithExternalRolloutStrategy() got crp.Status.SelectedResources mismatch (-want, +got):\n%s", diff) + } + if diff := cmp.Diff(tc.wantConditions, crp.Status.Conditions, cmpOptions...); diff != "" { + t.Errorf("determineRolloutStateForCRPWithExternalRolloutStrategy() got crp.Status.Conditions mismatch (-want, +got):\n%s", diff) + } + } + }) + } +} diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index 00d03b4da..abd90dfea 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -20,8 +20,10 @@ import ( "context" "fmt" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -155,6 +157,11 @@ func (r *Reconciler) appendScheduledResourcePlacementStatuses( return allRPS, rpsSetCondTypeCounter, err } + resourceSnapshotIndexMap, err := r.findClusterResourceSnapshotIndexForBindings(ctx, crp, bindingMap) + if err != nil { + return allRPS, rpsSetCondTypeCounter, err + } + for idx := range selected { clusterDecision := selected[idx] rps := &fleetv1beta1.ResourcePlacementStatus{} @@ -180,7 +187,8 @@ func (r *Reconciler) appendScheduledResourcePlacementStatuses( // Prepare the new conditions. binding := bindingMap[clusterDecision.ClusterName] - setStatusByCondType := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes) + resourceSnapshotIndexOnBinding := resourceSnapshotIndexMap[clusterDecision.ClusterName] + setStatusByCondType := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, resourceSnapshotIndexOnBinding, binding, rps, expectedCondTypes) // Update the counter. for condType, condStatus := range setStatusByCondType { @@ -306,11 +314,45 @@ func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, crp *flee return res, nil } +// findClusterResourceSnapshotIndexForBindings finds the resource snapshot index for each binding. +// It returns a map which maps the target cluster name to the resource snapshot index string. 
+func (r *Reconciler) findClusterResourceSnapshotIndexForBindings( + ctx context.Context, + crp *fleetv1beta1.ClusterResourcePlacement, + bindingMap map[string]*fleetv1beta1.ClusterResourceBinding, +) (map[string]string, error) { + crpKObj := klog.KObj(crp) + res := make(map[string]string, len(bindingMap)) + for targetCluster, binding := range bindingMap { + resourceSnapshotName := binding.Spec.ResourceSnapshotName + if resourceSnapshotName == "" { + klog.InfoS("Empty resource snapshot name found in binding, controller might observe in-between state", "binding", klog.KObj(binding), "clusterResourcePlacement", crpKObj) + res[targetCluster] = "" + continue + } + resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{} + if err := r.Client.Get(ctx, types.NamespacedName{Name: resourceSnapshotName, Namespace: ""}, resourceSnapshot); err != nil { + if apierrors.IsNotFound(err) { + klog.InfoS("The resource snapshot specified in binding is not found, probably deleted due to revision history limit", + "resourceSnapshotName", resourceSnapshotName, "binding", klog.KObj(binding), "clusterResourcePlacement", crpKObj) + res[targetCluster] = "" + continue + } + klog.ErrorS(err, "Failed to get the cluster resource snapshot specified in binding", "resourceSnapshotName", resourceSnapshotName, "binding", klog.KObj(binding), "clusterResourcePlacement", crpKObj) + return res, controller.NewAPIServerError(true, err) + } + res[targetCluster] = resourceSnapshot.GetLabels()[fleetv1beta1.ResourceIndexLabel] + } + + return res, nil +} + // setResourcePlacementStatusPerCluster sets the resource related fields for each cluster. // It returns a map which tracks the set status for each relevant condition type. 
func (r *Reconciler) setResourcePlacementStatusPerCluster( crp *fleetv1beta1.ClusterResourcePlacement, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, + resourceSnapshotIndexOnBinding string, binding *fleetv1beta1.ClusterResourceBinding, status *fleetv1beta1.ResourcePlacementStatus, expectedCondTypes []condition.ResourceCondition, @@ -324,6 +366,15 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster( return res } + // For External rollout strategy, the per-cluster status is set to whatever exists on the binding. + if crp.Spec.Strategy.Type == fleetv1beta1.ExternalRolloutStrategyType { + status.ObservedResourceIndex = resourceSnapshotIndexOnBinding + setResourcePlacementStatusBasedOnBinding(crp, binding, status, expectedCondTypes, res) + return res + } + + // TODO (wantjian): we only change the per-cluster status for External rollout strategy for now, so set the ObservedResourceIndex as the latest. + status.ObservedResourceIndex = latestResourceSnapshot.GetLabels()[fleetv1beta1.ResourceIndexLabel] rolloutStartedCond := binding.GetCondition(string(condition.RolloutStartedCondition.ResourceBindingConditionType())) switch { case binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name && condition.IsConditionStatusFalse(rolloutStartedCond, binding.Generation): @@ -348,50 +399,62 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster( return res default: // The binding uses the latest resource snapshot. 
- for _, i := range expectedCondTypes { - bindingCond := binding.GetCondition(string(i.ResourceBindingConditionType())) - if !condition.IsConditionStatusTrue(bindingCond, binding.Generation) && - !condition.IsConditionStatusFalse(bindingCond, binding.Generation) { - meta.SetStatusCondition(&status.Conditions, i.UnknownResourceConditionPerCluster(crp.Generation)) - klog.V(5).InfoS("Find an unknown condition", "bindingCond", bindingCond, "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) - res[i] = metav1.ConditionUnknown - break - } + setResourcePlacementStatusBasedOnBinding(crp, binding, status, expectedCondTypes, res) + return res + } +} - switch i { - case condition.RolloutStartedCondition: - if bindingCond.Status == metav1.ConditionTrue { - status.ApplicableResourceOverrides = binding.Spec.ResourceOverrideSnapshots - status.ApplicableClusterResourceOverrides = binding.Spec.ClusterResourceOverrideSnapshots - } - case condition.AppliedCondition, condition.AvailableCondition: - if bindingCond.Status == metav1.ConditionFalse { - status.FailedPlacements = binding.Status.FailedPlacements - status.DiffedPlacements = binding.Status.DiffedPlacements - } - // Note that configuration drifts can occur whether the manifests are applied - // successfully or not. - status.DriftedPlacements = binding.Status.DriftedPlacements - case condition.DiffReportedCondition: - if bindingCond.Status == metav1.ConditionTrue { - status.DiffedPlacements = binding.Status.DiffedPlacements - } - } +// setResourcePlacementStatusBasedOnBinding sets the cluster's resource placement status based on its corresponding binding status. +// It updates the status object in place and tracks the set status for each relevant condition type in setStatusByCondType map provided. 
+func setResourcePlacementStatusBasedOnBinding( + crp *fleetv1beta1.ClusterResourcePlacement, + binding *fleetv1beta1.ClusterResourceBinding, + status *fleetv1beta1.ResourcePlacementStatus, + expectedCondTypes []condition.ResourceCondition, + setStatusByCondType map[condition.ResourceCondition]metav1.ConditionStatus, +) { + for _, i := range expectedCondTypes { + bindingCond := binding.GetCondition(string(i.ResourceBindingConditionType())) + if !condition.IsConditionStatusTrue(bindingCond, binding.Generation) && + !condition.IsConditionStatusFalse(bindingCond, binding.Generation) { + meta.SetStatusCondition(&status.Conditions, i.UnknownResourceConditionPerCluster(crp.Generation)) + klog.V(5).InfoS("Find an unknown condition", "bindingCond", bindingCond, "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) + setStatusByCondType[i] = metav1.ConditionUnknown + break + } - cond := metav1.Condition{ - Type: string(i.ResourcePlacementConditionType()), - Status: bindingCond.Status, - ObservedGeneration: crp.Generation, - Reason: bindingCond.Reason, - Message: bindingCond.Message, + switch i { + case condition.RolloutStartedCondition: + if bindingCond.Status == metav1.ConditionTrue { + status.ApplicableResourceOverrides = binding.Spec.ResourceOverrideSnapshots + status.ApplicableClusterResourceOverrides = binding.Spec.ClusterResourceOverrideSnapshots } - meta.SetStatusCondition(&status.Conditions, cond) - res[i] = bindingCond.Status - + case condition.AppliedCondition, condition.AvailableCondition: if bindingCond.Status == metav1.ConditionFalse { - break // if the current condition is false, no need to populate the rest conditions + status.FailedPlacements = binding.Status.FailedPlacements + status.DiffedPlacements = binding.Status.DiffedPlacements + } + // Note that configuration drifts can occur whether the manifests are applied + // successfully or not. 
+ status.DriftedPlacements = binding.Status.DriftedPlacements + case condition.DiffReportedCondition: + if bindingCond.Status == metav1.ConditionTrue { + status.DiffedPlacements = binding.Status.DiffedPlacements } } - return res + + cond := metav1.Condition{ + Type: string(i.ResourcePlacementConditionType()), + Status: bindingCond.Status, + ObservedGeneration: crp.Generation, + Reason: bindingCond.Reason, + Message: bindingCond.Message, + } + meta.SetStatusCondition(&status.Conditions, cond) + setStatusByCondType[i] = bindingCond.Status + + if bindingCond.Status == metav1.ConditionFalse { + break // if the current condition is false, no need to populate the rest conditions + } } } diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index b769ed970..a1c7cd1b3 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -35,6 +35,7 @@ import ( fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/condition" + "go.goms.io/fleet/test/utils/resource" ) var statusCmpOptions = []cmp.Option{ @@ -95,16 +96,231 @@ func TestSetPlacementStatus(t *testing.T) { Namespace: "config-namespace", }, } + + oldClusterResourcePlacementAvailableConditions := []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + } + + clusterResourcePlacementAvailableConditions := []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: 
metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + } + + oldResourcePlacementAvailableConditions := []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: 
metav1.NewTime(currentTime), + }, + } + + resourcePlacementAvailableConditions := []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + } + + bindingAvailableConditions := []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.AvailableReason, + ObservedGeneration: 1, + }, + } + tests := []struct { name string - crpStatus fleetv1beta1.ClusterResourcePlacementStatus + crpStatus fleetv1beta1.PlacementStatus policy *fleetv1beta1.PlacementPolicy strategy fleetv1beta1.RolloutStrategy latestPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot + otherResourceSnapshots []*fleetv1beta1.ClusterResourceSnapshot clusterResourceBindings []fleetv1beta1.ClusterResourceBinding want bool - wantStatus *fleetv1beta1.ClusterResourcePlacementStatus + wantStatus *fleetv1beta1.PlacementStatus wantErr error }{ { @@ -137,7 +353,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: false, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -196,7 +412,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: false, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -255,7 +471,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: false, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -314,7 +530,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: false, - wantStatus: 
&fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -394,7 +610,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -415,7 +631,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as there's no binding created. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -434,7 +651,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-2", + ClusterName: "member-2", + ObservedResourceIndex: "", // Empty as there's no binding created. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -453,7 +671,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-3", + ClusterName: "member-3", + ObservedResourceIndex: "", // Empty as there's no binding created. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -528,7 +747,79 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: false, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{}, + }, + }, + { + // TODO special handling no cluster is selected + name: "the placement uses External rollout strategy; none of clusters are selected; no clusterResourceBindings and works", + strategy: fleetv1beta1.RolloutStrategy{ + Type: fleetv1beta1.ExternalRolloutStrategyType, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(3), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Reason: "failed", + }, + { + ClusterName: "member-2", + Reason: "failed", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + want: false, + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -604,7 +895,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -625,7 +916,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as there's no binding created. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -644,6 +936,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { + ObservedResourceIndex: "", // Empty as schedule failed. Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -655,6 +948,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { + ObservedResourceIndex: "", // Empty as schedule failed. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -746,7 +1040,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: false, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -879,7 +1173,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -929,6 +1223,7 @@ func TestSetPlacementStatus(t *testing.T) { PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { ClusterName: "member-1", + ObservedResourceIndex: "0", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -1064,38 +1359,7 @@ func TestSetPlacementStatus(t *testing.T) { TargetCluster: "member-1", }, Status: fleetv1beta1.ResourceBindingStatus{ - Conditions: []metav1.Condition{ - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), - Reason: condition.RolloutStartedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingOverridden), - Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: condition.ApplySucceededReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingAvailable), - Reason: condition.AvailableReason, - ObservedGeneration: 1, - }, - }, + Conditions: bindingAvailableConditions, }, }, { 
@@ -1112,43 +1376,12 @@ func TestSetPlacementStatus(t *testing.T) { TargetCluster: "member-2", }, Status: fleetv1beta1.ResourceBindingStatus{ - Conditions: []metav1.Condition{ - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), - Reason: condition.RolloutStartedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingOverridden), - Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: condition.ApplySucceededReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingAvailable), - Reason: condition.AvailableReason, - ObservedGeneration: 1, - }, - }, + Conditions: bindingAvailableConditions, }, }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -1197,7 +1430,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1244,7 +1478,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-2", + ClusterName: "member-2", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1558,7 +1793,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: 
selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -1579,7 +1814,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as the binding is deleting. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1598,7 +1834,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-2", + ClusterName: "member-2", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1624,7 +1861,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-3", + ClusterName: "member-3", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -1657,7 +1895,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-4", + ClusterName: "member-4", + ObservedResourceIndex: "", // Empty as there is no binding. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1676,7 +1915,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-5", + ClusterName: "member-5", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1716,7 +1956,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-6", + ClusterName: "member-6", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1735,7 +1976,8 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - ClusterName: "member-7", + ClusterName: "member-7", + ObservedResourceIndex: "", // Empty as the binding does not have latest policy snapshot. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1836,7 +2078,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -1857,7 +2099,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -2066,7 +2309,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -2108,7 +2351,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ @@ -2182,6 +2426,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { ClusterName: "member-2", + ObservedResourceIndex: "0", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -2377,7 +2622,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -2426,7 +2671,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", 
FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ @@ -2514,7 +2760,7 @@ func TestSetPlacementStatus(t *testing.T) { PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), }, - crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ + crpStatus: fleetv1beta1.PlacementStatus{ ObservedResourceIndex: "-1", Conditions: []metav1.Condition{ { @@ -2563,6 +2809,7 @@ func TestSetPlacementStatus(t *testing.T) { PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { ClusterName: "member-1", + ObservedResourceIndex: "-1", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -2672,7 +2919,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -2693,7 +2940,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as there's no binding. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -2715,7 +2963,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { - name: "the placement cannot be fulfilled for picFixed", + name: "the placement cannot be fulfilled for pickFixed", policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickFixedPlacementType, ClusterNames: []string{ @@ -2771,7 +3019,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -2792,7 +3040,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as there's no binding. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -2811,6 +3060,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { + ObservedResourceIndex: "", // Empty as the cluster is not selected. Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -2878,7 +3128,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -2899,7 +3149,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as there's no binding. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -2918,6 +3169,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { + ObservedResourceIndex: "", // Empty as the cluster is not selected. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -2937,7 +3189,7 @@ func TestSetPlacementStatus(t *testing.T) { PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(3)), }, - crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ + crpStatus: fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -2986,7 +3238,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3082,7 +3335,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -3103,7 +3356,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "", // Empty as there's no binding. Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -3122,6 +3376,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { + ObservedResourceIndex: "", // Empty as the cluster is not selected. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -3332,7 +3587,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -3374,7 +3629,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3415,6 +3671,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { ClusterName: "member-2", + ObservedResourceIndex: "0", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -3651,7 +3908,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -3693,7 +3950,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3734,6 +3992,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { ClusterName: "member-2", + ObservedResourceIndex: "0", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -3963,7 +4222,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -4005,7 +4264,8 @@ 
func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -4046,6 +4306,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { ClusterName: "member-2", + ObservedResourceIndex: "0", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -4263,7 +4524,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -4298,7 +4559,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -4339,6 +4601,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { ClusterName: "member-2", + ObservedResourceIndex: "0", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -4495,7 +4758,18 @@ func TestSetPlacementStatus(t *testing.T) { }, }, want: true, - crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ + crpStatus: fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: oldClusterResourcePlacementAvailableConditions, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: oldResourcePlacementAvailableConditions, + }, + }, + }, + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -4503,165 +4777,69 @@ func TestSetPlacementStatus(t 
*testing.T) { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), Reason: condition.RolloutStartedReason, - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), Reason: "Scheduled", - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), Reason: condition.WorkSynchronizedReason, - ObservedGeneration: crpGeneration - 1, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ClusterResourcePlacementAppliedConditionType), - Reason: condition.ApplySucceededReason, - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ClusterResourcePlacementAvailableConditionType), - Reason: condition.AvailableReason, - ObservedGeneration: crpGeneration - 1, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, Type: 
string(fleetv1beta1.ResourceOverriddenConditionType), Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), Reason: condition.RolloutStartedReason, - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceScheduledConditionType), Reason: condition.ScheduleSucceededReason, - ObservedGeneration: crpGeneration - 1, + ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), Reason: condition.WorkSynchronizedReason, - ObservedGeneration: crpGeneration - 1, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourcesAppliedConditionType), - Reason: condition.ApplySucceededReason, - ObservedGeneration: crpGeneration - 1, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourcesAvailableConditionType), - Reason: condition.AvailableReason, - ObservedGeneration: crpGeneration - 1, - LastTransitionTime: metav1.NewTime(currentTime), - }, - }, - }, - }, - }, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ - SelectedResources: selectedResources, - ObservedResourceIndex: "0", - Conditions: []metav1.Condition{ - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), - Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: 
string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), - Reason: "Scheduled", - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), - Reason: condition.DiffReportedStatusTrueReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - }, - PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ - { - ClusterName: "member-1", - Conditions: []metav1.Condition{ - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceOverriddenConditionType), - Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceScheduledConditionType), - Reason: condition.ScheduleSucceededReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: crpGeneration, + 
ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, { @@ -4752,43 +4930,12 @@ func TestSetPlacementStatus(t *testing.T) { }, }, Status: fleetv1beta1.ResourceBindingStatus{ - Conditions: []metav1.Condition{ - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), - Reason: condition.RolloutStartedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingOverridden), - Reason: condition.OverrideNotSpecifiedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: condition.ApplySucceededReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingAvailable), - Reason: condition.AvailableReason, - ObservedGeneration: 1, - }, - }, + Conditions: bindingAvailableConditions, }, }, }, want: true, - crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ + crpStatus: fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -4830,7 +4977,8 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -4871,7 +5019,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + wantStatus: &fleetv1beta1.PlacementStatus{ SelectedResources: selectedResources, ObservedResourceIndex: "0", Conditions: []metav1.Condition{ @@ -4920,19 +5068,132 @@ func TestSetPlacementStatus(t *testing.T) { }, PlacementStatuses: 
[]fleetv1beta1.ResourcePlacementStatus{ { - ClusterName: "member-1", + ClusterName: "member-1", + ObservedResourceIndex: "0", + Conditions: resourcePlacementAvailableConditions, + }, + }, + }, + }, + { + name: "update the placement has External rollout strategy and all bindings are scheduled and pending rollout", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + Type: fleetv1beta1.ExternalRolloutStrategyType, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: 
[]fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-with-empty-snapshot-name", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "", + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{}, + }, + }, + }, + want: true, + crpStatus: fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: oldClusterResourcePlacementAvailableConditions, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "0", + Conditions: oldResourcePlacementAvailableConditions, + }, + }, + }, + wantStatus: &fleetv1beta1.PlacementStatus{ + SelectedResources: []fleetv1beta1.ResourceIdentifier{}, + ObservedResourceIndex: "", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutControlledByExternalControllerReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "", Conditions: []metav1.Condition{ { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceOverriddenConditionType), - Reason: 
condition.OverrideNotSpecifiedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, + Status: metav1.ConditionUnknown, Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, + Reason: condition.RolloutStartedUnknownReason, ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, @@ -4943,32 +5204,644 @@ func TestSetPlacementStatus(t *testing.T) { ObservedGeneration: crpGeneration, LastTransitionTime: metav1.NewTime(currentTime), }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourcesAppliedConditionType), - Reason: condition.ApplySucceededReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourcesAvailableConditionType), - Reason: condition.AvailableReason, - ObservedGeneration: crpGeneration, - LastTransitionTime: metav1.NewTime(currentTime), - }, }, }, }, }, }, + { + // Simulate the scenario where rollout is first happened to a CRP with External rollout strategy. 
+ name: "placement has External rollout strategy and rollout only reaches to some of the clusters", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + Type: fleetv1beta1.ExternalRolloutStrategyType, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-rolled-out", + Labels: map[string]string{ + 
fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: bindingAvailableConditions, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-with-empty-snapshot-name", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "", + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{}, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.PlacementStatus{ + SelectedResources: []fleetv1beta1.ResourceIdentifier{}, + ObservedResourceIndex: "", // Empty as not all bindings have the resource snapshot name. 
+ Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutControlledByExternalControllerReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "0", + Conditions: resourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedUnknownReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, + { + // Simulate the scenario where a new version is being rolled out for CRP with External rollout strategy. 
+ name: "placement has External rollout strategy and is still rolling out and cluster observe different resource indices", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + Type: fleetv1beta1.ExternalRolloutStrategyType, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(2), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + otherResourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, 
testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "false", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + }, + crpStatus: fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: oldClusterResourcePlacementAvailableConditions, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "0", + Conditions: oldResourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "0", + Conditions: oldResourcePlacementAvailableConditions, + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-rolled-out-with-latest-snapshot-name", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: bindingAvailableConditions, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-with-old-snapshot-name", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: 
"member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: bindingAvailableConditions, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.PlacementStatus{ + SelectedResources: []fleetv1beta1.ResourceIdentifier{}, + ObservedResourceIndex: "", // Empty as not all bindings have the same resource snapshot name. + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutControlledByExternalControllerReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "1", + Conditions: resourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "0", + Conditions: resourcePlacementAvailableConditions, + }, + }, + }, + }, + { + // Simulate the scenario where rollout has completed on CRP with External rollout strategy. 
+ name: "placement has External rollout strategy and all clusters are rolled out to the latest resource snapshot", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + Type: fleetv1beta1.ExternalRolloutStrategyType, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(2), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + crpStatus: fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: oldClusterResourcePlacementAvailableConditions, + 
PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "0", + Conditions: oldResourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "0", + Conditions: oldResourcePlacementAvailableConditions, + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-rolled-out-with-latest-snapshot-name", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: bindingAvailableConditions, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-rolled-out-with-latest-snapshot-name-too", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: bindingAvailableConditions, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "1", + Conditions: clusterResourcePlacementAvailableConditions, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + 
ObservedResourceIndex: "1", + Conditions: resourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "1", + Conditions: resourcePlacementAvailableConditions, + }, + }, + }, + }, + { + // Simulate the scenario where rollback to an older version has completed on CRP with External rollout strategy. + name: "placement has External rollout strategy and all clusters are rolled out to a not-latest resource snapshot", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + Type: fleetv1beta1.ExternalRolloutStrategyType, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(2), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + 
}, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + otherResourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + *resource.NamespaceResourceContentForTest(t), + }, + }, + }, + }, + crpStatus: fleetv1beta1.PlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "1", + Conditions: oldClusterResourcePlacementAvailableConditions, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "1", + Conditions: oldResourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "1", + Conditions: oldResourcePlacementAvailableConditions, + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-rolled-out-with-old-snapshot-name", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: 
bindingAvailableConditions, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-rolled-out-with-old-snapshot-name-too", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: "member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: bindingAvailableConditions, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.PlacementStatus{ + SelectedResources: []fleetv1beta1.ResourceIdentifier{ + // Only show resources on the old snapshot. + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + }, + ObservedResourceIndex: "0", + Conditions: clusterResourcePlacementAvailableConditions, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + ObservedResourceIndex: "0", + Conditions: resourcePlacementAvailableConditions, + }, + { + ClusterName: "member-2", + ObservedResourceIndex: "0", + Conditions: resourcePlacementAvailableConditions, + }, + }, + }, + }, } for _, tc := range tests { @@ -4977,7 +5850,7 @@ func TestSetPlacementStatus(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -4998,6 +5871,10 @@ func TestSetPlacementStatus(t *testing.T) { for i := range tc.clusterResourceBindings { objects = append(objects, &tc.clusterResourceBindings[i]) } + for _, snapshot := range tc.otherResourceSnapshots { + objects = append(objects, snapshot) + } + objects = append(objects, 
tc.latestResourceSnapshot) fakeClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(objects...). @@ -5102,7 +5979,7 @@ func TestBuildResourcePlacementStatusMap(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { crp := fleetv1beta1.ClusterResourcePlacement{ - Status: fleetv1beta1.ClusterResourcePlacementStatus{ + Status: fleetv1beta1.PlacementStatus{ PlacementStatuses: tc.status, }, } @@ -5289,6 +6166,8 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { crpWithReportDiffApplyStrategy.Spec.Strategy.ApplyStrategy = &fleetv1beta1.ApplyStrategy{ Type: fleetv1beta1.ApplyStrategyTypeReportDiff, } + crpWithExternalRolloutStrategy := crp.DeepCopy() + crpWithExternalRolloutStrategy.Spec.Strategy.Type = fleetv1beta1.ExternalRolloutStrategyType tests := []struct { name string @@ -5306,7 +6185,8 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { condition.RolloutStartedCondition: metav1.ConditionUnknown, }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, + ClusterName: cluster, + ObservedResourceIndex: "", // Empty as binding not found. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -5343,7 +6223,8 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { condition.RolloutStartedCondition: metav1.ConditionFalse, }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, + ClusterName: cluster, + ObservedResourceIndex: "1", Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -5379,7 +6260,8 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { condition.RolloutStartedCondition: metav1.ConditionUnknown, }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, + ClusterName: cluster, + ObservedResourceIndex: "1", Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -5457,6 +6339,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -5527,7 +6410,8 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { condition.RolloutStartedCondition: metav1.ConditionUnknown, }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, + ClusterName: cluster, + ObservedResourceIndex: "1", Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -5583,6 +6467,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -5662,6 +6547,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: 
"1", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -5770,6 +6656,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -5906,6 +6793,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ { @@ -6081,6 +6969,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, ApplicableClusterResourceOverrides: []string{}, FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ @@ -6249,6 +7138,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, ApplicableClusterResourceOverrides: []string{}, DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ @@ -6389,6 +7279,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, + ObservedResourceIndex: "1", ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, ApplicableClusterResourceOverrides: []string{}, DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ @@ -6496,98 +7387,294 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, { 
Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: 2, - }, - { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: condition.ApplyFailedReason, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 2, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + }, + }, + }, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.DiffReportedCondition: metav1.ConditionUnknown, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ObservedResourceIndex: "1", + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, + ApplicableClusterResourceOverrides: []string{}, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusUnknownReason, + ObservedGeneration: crpGeneration, + }, + }, + }, + expectedCondTypes: 
condition.CondTypesForReportDiffApplyStrategy, + }, + { + name: "ReportDiff apply strategy (failed to report diff)", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{}, + ClusterResourceOverrideSnapshots: []string{}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), + TargetCluster: cluster, + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: ptr.To(int64(1)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: 1, + }, + }, + }, + 
}, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.DiffReportedCondition: metav1.ConditionFalse, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ObservedResourceIndex: "1", + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, + ApplicableClusterResourceOverrides: []string{}, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: crpGeneration, + }, + }, + }, + expectedCondTypes: condition.CondTypesForReportDiffApplyStrategy, + }, + { + name: "crp with External rollout strategy and binding not found", + crp: crpWithExternalRolloutStrategy.DeepCopy(), + binding: nil, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionUnknown, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ObservedResourceIndex: "", // Empty as binding not found. 
+ Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedUnknownReason, + ObservedGeneration: crpGeneration, + }, + }, + }, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, + }, + { + name: "crp with External rollout strategy and stale binding with false rollout started condition", + crp: crpWithExternalRolloutStrategy.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "not-latest", + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: 1, + }, + }, + }, + }, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionFalse, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ObservedResourceIndex: "0", // Depends on the resourceSnapshotIndexOnBinding passed in. 
+ Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: crpGeneration, + }, + }, + }, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, + }, + { + name: "crp with External rollout strategy and stale binding with true rollout started condition", + crp: crpWithExternalRolloutStrategy.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "not-latest", + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), ObservedGeneration: 1, + Reason: condition.RolloutStartedReason, }, }, }, }, wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ - condition.RolloutStartedCondition: metav1.ConditionTrue, - condition.OverriddenCondition: metav1.ConditionTrue, - condition.WorkSynchronizedCondition: metav1.ConditionTrue, - condition.DiffReportedCondition: metav1.ConditionUnknown, + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionUnknown, }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, - ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, - ApplicableClusterResourceOverrides: []string{}, + ClusterName: cluster, + ObservedResourceIndex: "0", // Depends on the resourceSnapshotIndexOnBinding passed in. 
Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), Reason: condition.RolloutStartedReason, ObservedGeneration: crpGeneration, }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingOverridden), - Reason: condition.OverriddenSucceededReason, - ObservedGeneration: crpGeneration, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: condition.WorkSynchronizedReason, - ObservedGeneration: crpGeneration, - }, { Status: metav1.ConditionUnknown, - Type: string(fleetv1beta1.ResourceBindingDiffReported), - Reason: condition.DiffReportedStatusUnknownReason, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenPendingReason, ObservedGeneration: crpGeneration, }, }, }, - expectedCondTypes: condition.CondTypesForReportDiffApplyStrategy, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { - name: "ReportDiff apply strategy (failed to report diff)", - crp: crpWithReportDiffApplyStrategy.DeepCopy(), + name: "crp with External rollout strategy and completed binding", + crp: crpWithExternalRolloutStrategy.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: bindingName, Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: resourceSnapshotName, - ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{}, - ClusterResourceOverrideSnapshots: []string{}, + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testCRPName, 0), TargetCluster: 
cluster, - ApplyStrategy: &fleetv1beta1.ApplyStrategy{ - Type: fleetv1beta1.ApplyStrategyTypeReportDiff, - }, }, Status: fleetv1beta1.ResourceBindingStatus{ - DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "cm-1", - Namespace: "ns-1", - }, - ObservationTime: metav1.Time{Time: time.Now()}, - TargetClusterObservedGeneration: ptr.To(int64(1)), - FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, - ObservedDiffs: []fleetv1beta1.PatchDetail{ - { - Path: "/data", - ValueInMember: "k=1", - ValueInHub: "k=2", - }, - }, - }, - }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -6608,9 +7695,15 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { ObservedGeneration: 1, }, { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingDiffReported), - Reason: condition.DiffReportedStatusFalseReason, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.AvailableReason, ObservedGeneration: 1, }, }, @@ -6620,40 +7713,56 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { condition.RolloutStartedCondition: metav1.ConditionTrue, condition.OverriddenCondition: metav1.ConditionTrue, condition.WorkSynchronizedCondition: metav1.ConditionTrue, - condition.DiffReportedCondition: metav1.ConditionFalse, + condition.AppliedCondition: metav1.ConditionTrue, + condition.AvailableCondition: metav1.ConditionTrue, }, wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, - ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, - ApplicableClusterResourceOverrides: []string{}, + ObservedResourceIndex: "0", // Depends on the resourceSnapshotIndexOnBinding 
passed in. + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), - Reason: condition.RolloutStartedReason, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplySucceededReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingOverridden), + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), Reason: condition.OverriddenSucceededReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: condition.WorkSynchronizedReason, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, ObservedGeneration: crpGeneration, }, { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingDiffReported), - Reason: condition.DiffReportedStatusFalseReason, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, ObservedGeneration: crpGeneration, }, }, }, - expectedCondTypes: condition.CondTypesForReportDiffApplyStrategy, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, } for _, tc := range tests { @@ -6661,13 +7770,16 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: resourceSnapshotName, + Labels: map[string]string{ + 
fleetv1beta1.ResourceIndexLabel: "1", + }, }, } r := Reconciler{ Recorder: record.NewFakeRecorder(10), } status := fleetv1beta1.ResourcePlacementStatus{ClusterName: cluster} - got := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, tc.binding, &status, tc.expectedCondTypes) + got := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, "0", tc.binding, &status, tc.expectedCondTypes) if diff := cmp.Diff(got, tc.wantConditionStatusMap); diff != "" { t.Errorf("setResourcePlacementStatusPerCluster() conditionStatus mismatch (-got, +want):\n%s", diff) } @@ -6677,3 +7789,178 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }) } } + +func TestFindClusterResourceSnapshotIndexForBindings(t *testing.T) { + crp := fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testCRPName, + }, + } + tests := []struct { + name string + bindingMap map[string]*fleetv1beta1.ClusterResourceBinding + resourceSnapshots []*fleetv1beta1.ClusterResourceSnapshot + wantResourceSnapshotIndexMap map[string]string + }{ + { + name: "empty binding map", + bindingMap: map[string]*fleetv1beta1.ClusterResourceBinding{}, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + wantResourceSnapshotIndexMap: map[string]string{}, + }, + { + name: "binding with empty resource snapshot name", + bindingMap: map[string]*fleetv1beta1.ClusterResourceBinding{ + "member-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "", + }, + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + wantResourceSnapshotIndexMap: map[string]string{"member-1": ""}, + }, + { + name: "binding with not found resource snapshot", + bindingMap: map[string]*fleetv1beta1.ClusterResourceBinding{ + "member-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: 
map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "not-found", + }, + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{}, + wantResourceSnapshotIndexMap: map[string]string{"member-1": ""}, + }, + { + name: "single binding with found resource snapshot", + bindingMap: map[string]*fleetv1beta1.ClusterResourceBinding{ + "member-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + }, + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + }, + }, + wantResourceSnapshotIndexMap: map[string]string{"member-1": "0"}, + }, + { + name: "multiple bindings with both found and not found resource snapshots", + bindingMap: map[string]*fleetv1beta1.ClusterResourceBinding{ + "member-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + }, + }, + "member-2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-2", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "not-found", + }, + }, + "member-3": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-3", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + 
ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + }, + }, + }, + resourceSnapshots: []*fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.CRPTrackingLabel: testCRPName, + }, + }, + }, + }, + wantResourceSnapshotIndexMap: map[string]string{ + "member-1": "0", + "member-2": "", + "member-3": "1", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + scheme := serviceScheme(t) + var objects []client.Object + for _, resourceSnapshot := range tc.resourceSnapshots { + objects = append(objects, resourceSnapshot) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + r := Reconciler{ + Client: fakeClient, + } + got, err := r.findClusterResourceSnapshotIndexForBindings(ctx, &crp, tc.bindingMap) + if err != nil { + t.Fatalf("findClusterResourceSnapshotIndexForBindings() got err %v, want nil", err) + } + cmpOptions := cmp.Options{ + cmpopts.SortMaps(func(a, b string) bool { return a < b }), + } + if diff := cmp.Diff(tc.wantResourceSnapshotIndexMap, got, cmpOptions...); diff != "" { + t.Errorf("findClusterResourceSnapshotIndexForBindings() returned resource snapshot index map mismatch (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go index 419a56b48..a1369d88d 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go +++ b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go @@ -491,7 +491,7 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{"test-cluster-1"}, @@ -537,7 +537,7 @@ func buildTestPickNCRP(crpName string, clusterCount int32) placementv1beta1.Clus ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(clusterCount), diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_test.go index e2e88d384..ec1a6621f 100644 --- a/pkg/controllers/clusterresourceplacementeviction/controller_test.go +++ 
b/pkg/controllers/clusterresourceplacementeviction/controller_test.go @@ -63,7 +63,7 @@ func TestValidateEviction(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -134,7 +134,7 @@ func TestValidateEviction(t *testing.T) { DeletionTimestamp: &metav1.Time{Time: time.Now()}, Finalizers: []string{"test-finalizer"}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -159,7 +159,7 @@ func TestValidateEviction(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, }, @@ -223,7 +223,7 @@ func TestValidateEviction(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{}, + Spec: placementv1beta1.PlacementSpec{}, }, bindings: []placementv1beta1.ClusterResourceBinding{testBinding2}, wantValidationResult: &evictionValidationResult{ @@ -1555,7 +1555,7 @@ func buildTestPickAllCRP(crpName string) placementv1beta1.ClusterResourcePlaceme ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, diff --git a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go b/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go index 5deaa5988..e1a5ffc3a 100644 --- 
a/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go +++ b/pkg/controllers/clusterresourceplacementwatcher/watcher_integration_test.go @@ -37,7 +37,7 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, diff --git a/pkg/controllers/resourcechange/resourcechange_controller_test.go b/pkg/controllers/resourcechange/resourcechange_controller_test.go index 80a9701b2..f83e0e5a6 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller_test.go +++ b/pkg/controllers/resourcechange/resourcechange_controller_test.go @@ -190,7 +190,7 @@ func TestFindPlacementsSelectedDeletedResV1Beta11(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ deletedResV1Beta1, }, @@ -206,7 +206,7 @@ func TestFindPlacementsSelectedDeletedResV1Beta11(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ deletedResV1Beta1, }, @@ -216,7 +216,7 @@ func TestFindPlacementsSelectedDeletedResV1Beta11(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected-2", }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ deletedResV1Beta1, { @@ -237,7 +237,7 @@ func TestFindPlacementsSelectedDeletedResV1Beta11(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Status: 
placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: "xyz", @@ -257,7 +257,7 @@ func TestFindPlacementsSelectedDeletedResV1Beta11(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{}, }, }, @@ -680,7 +680,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -703,7 +703,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, }, }, @@ -717,7 +717,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -738,7 +738,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -766,7 +766,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: 
placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -789,7 +789,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -816,7 +816,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -852,7 +852,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -889,7 +889,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ // the mis-matching resource selector ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { @@ -904,7 +904,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { }, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: corev1.GroupName, @@ -933,7 +933,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -947,7 +947,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { }, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: corev1.GroupName, @@ -969,7 +969,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -990,7 +990,7 @@ func TestCollectAllAffectedPlacementsV1Beta1(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "resource-selected", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, diff --git a/pkg/controllers/rollout/controller.go b/pkg/controllers/rollout/controller.go index 0c78f1a91..f96545b36 100644 --- a/pkg/controllers/rollout/controller.go +++ b/pkg/controllers/rollout/controller.go @@ -89,6 +89,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim return runtime.Result{}, nil } + // fill out all the default values for CRP just in case the mutation webhook is not enabled. 
+ defaulter.SetDefaultsClusterResourcePlacement(&crp) + // check that it's actually rollingUpdate strategy // TODO: support the rollout all at once type of RolloutStrategy if crp.Spec.Strategy.Type != fleetv1beta1.RollingUpdateRolloutStrategyType { @@ -158,8 +161,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } klog.V(2).InfoS("Found the latest resourceSnapshot for the clusterResourcePlacement", "clusterResourcePlacement", crpName, "latestResourceSnapshot", klog.KObj(latestResourceSnapshot)) - // fill out all the default values for CRP just in case the mutation webhook is not enabled. - defaulter.SetDefaultsClusterResourcePlacement(&crp) // Note: there is a corner case that an override is in-between snapshots (the old one is marked as not the latest while the new one is not created yet) // This will result in one of the override is removed by the rollout controller so the first instance of the updated cluster can experience // a complete removal of the override effect following by applying the new override effect. @@ -1097,7 +1098,7 @@ func handleCRP(newCRPObj, oldCRPObj client.Object, q workqueue.TypedRateLimiting // Check if the rollout strategy type has been updated. 
if newCRP.Spec.Strategy.Type != oldCRP.Spec.Strategy.Type { - klog.V(2).InfoS("Detected an update to the rollout strategy type on the CRP", "clusterResourcePlacement", klog.KObj(newCRP)) + klog.V(2).InfoS("Detected an update to the rollout strategy type on the CRP", "clusterResourcePlacement", klog.KObj(newCRP), "newType", newCRP.Spec.Strategy.Type, "oldType", oldCRP.Spec.Strategy.Type) q.Add(reconcile.Request{ NamespacedName: types.NamespacedName{Name: newCRP.GetName()}, }) diff --git a/pkg/controllers/rollout/controller_test.go b/pkg/controllers/rollout/controller_test.go index 88353cff9..c92c94b66 100644 --- a/pkg/controllers/rollout/controller_test.go +++ b/pkg/controllers/rollout/controller_test.go @@ -2263,7 +2263,7 @@ func clusterResourcePlacementForTest(crpName string, policy *fleetv1beta1.Placem ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -2924,7 +2924,7 @@ func TestProcessApplyStrategyUpdates(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Strategy: fleetv1beta1.RolloutStrategy{}, }, }, @@ -2959,7 +2959,7 @@ func TestProcessApplyStrategyUpdates(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Strategy: fleetv1beta1.RolloutStrategy{ ApplyStrategy: &fleetv1beta1.ApplyStrategy{ Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, @@ -3066,7 +3066,7 @@ func TestProcessApplyStrategyUpdates(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Strategy: fleetv1beta1.RolloutStrategy{ ApplyStrategy: &fleetv1beta1.ApplyStrategy{ Type: fleetv1beta1.ApplyStrategyTypeClientSideApply, diff 
--git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index d0d555202..4201c28c9 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -368,7 +368,7 @@ func generateTestClusterResourcePlacement() *placementv1beta1.ClusterResourcePla ObjectMeta: metav1.ObjectMeta{ Name: testCRPName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", diff --git a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go b/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go index d858062a7..8df465644 100644 --- a/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go +++ b/pkg/scheduler/watchers/clusterresourceplacement/controller_integration_test.go @@ -98,7 +98,7 @@ var _ = Describe("scheduler cluster resource placement source controller", Seria ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ ResourceSelectors: resourceSelectors, }, } @@ -221,7 +221,7 @@ var _ = Describe("scheduler cluster resource placement source controller", Seria ObjectMeta: metav1.ObjectMeta{ Name: noFinalizerCRP, }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ ResourceSelectors: resourceSelectors, }, } diff --git a/pkg/scheduler/watchers/membercluster/controller_integration_test.go b/pkg/scheduler/watchers/membercluster/controller_integration_test.go index 22fb64464..90641b0f1 100644 --- a/pkg/scheduler/watchers/membercluster/controller_integration_test.go +++ b/pkg/scheduler/watchers/membercluster/controller_integration_test.go @@ -24,6 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -512,19 +513,31 @@ var _ = Describe("scheduler member cluster source controller", Serial, Ordered, }) }) - Context("ready cluster has left", func() { + Context("leaving clusters should not trigger rescheduling until the member cluster object is fully deleted", func() { BeforeAll(func() { Consistently(noKeyEnqueuedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Workqueue is not empty") // Retrieve the cluster. memberCluster := &clusterv1beta1.MemberCluster{} Expect(hubClient.Get(ctx, types.NamespacedName{Name: clusterName1}, memberCluster)).To(Succeed(), "Failed to get member cluster") - - // Update the spec as leave. + // Delete the cluster to simulate leaving which will not delete the MC as it has finalizers. Expect(hubClient.Delete(ctx, memberCluster)).To(Succeed(), "Failed to delete member cluster") }) - It("should enqueue all CRPs for cluster left (case 1b)", func() { + It("should not enqueue all CRPs for cluster leaving", func() { + Consistently(noKeyEnqueuedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Workqueue is not empty") + }) + + It("remove the finalizer", func() { + // Retrieve the cluster. + memberCluster := &clusterv1beta1.MemberCluster{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: clusterName1}, memberCluster)).To(Succeed(), "Failed to get member cluster") + // Remove the finalizer from cluster. 
+ controllerutil.RemoveFinalizer(memberCluster, placementv1beta1.MemberClusterFinalizer) + Expect(hubClient.Update(ctx, memberCluster)).Should(Succeed(), "Failed to update member cluster taints") + }) + + It("should enqueue all CRPs for cluster left (case 2c)", func() { Eventually(allKeysEnqueuedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Keys are not enqueued as expected") Consistently(allKeysEnqueuedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Keys are not enqueued as expected") }) diff --git a/pkg/scheduler/watchers/membercluster/suite_test.go b/pkg/scheduler/watchers/membercluster/suite_test.go index 94801ad55..72bc45606 100644 --- a/pkg/scheduler/watchers/membercluster/suite_test.go +++ b/pkg/scheduler/watchers/membercluster/suite_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -62,11 +63,13 @@ var ( var ( newMemberCluster = func(name string) *clusterv1beta1.MemberCluster { - return &clusterv1beta1.MemberCluster{ + memberCluster := &clusterv1beta1.MemberCluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, } + controllerutil.AddFinalizer(memberCluster, placementv1beta1.MemberClusterFinalizer) + return memberCluster } newCRP = func(name string, policy *placementv1beta1.PlacementPolicy) *placementv1beta1.ClusterResourcePlacement { @@ -74,7 +77,7 @@ var ( ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: defaultResourceSelectors, Policy: policy, }, @@ -143,7 +146,9 @@ func setupResources() { } var _ = BeforeSuite(func() { - klog.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + 
logger := zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)) + klog.SetLogger(logger) + ctrl.SetLogger(logger) ctx, cancel = context.WithCancel(context.TODO()) diff --git a/pkg/scheduler/watchers/membercluster/utils_test.go b/pkg/scheduler/watchers/membercluster/utils_test.go index 58ee581f7..29d98ddda 100644 --- a/pkg/scheduler/watchers/membercluster/utils_test.go +++ b/pkg/scheduler/watchers/membercluster/utils_test.go @@ -54,13 +54,13 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{}, }, }, @@ -71,13 +71,13 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -94,13 +94,13 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { Name: crpName, Generation: 1, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: 
string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -118,13 +118,13 @@ func TestIsPickNCRPFullyScheduled(t *testing.T) { Name: crpName, Generation: 1, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -179,7 +179,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ ClusterNames: []string{clusterName1}, }, @@ -191,7 +191,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ ClusterNames: []string{clusterName1}, }, @@ -207,12 +207,12 @@ func TestClassifyCRPs(t *testing.T) { Name: crpName, Generation: 1, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ ClusterNames: []string{clusterName1}, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -232,7 +232,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -244,7 
+244,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -259,7 +259,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, @@ -272,7 +272,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, @@ -289,13 +289,13 @@ func TestClassifyCRPs(t *testing.T) { Name: crpName, Generation: 1, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -315,7 +315,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName2, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ ClusterNames: []string{clusterName1}, }, @@ -331,13 +331,13 @@ func TestClassifyCRPs(t *testing.T) { Name: crpName5, Generation: 1, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: 
&placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, }, }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -351,7 +351,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName4, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -361,7 +361,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName3, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, @@ -374,7 +374,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName2, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ ClusterNames: []string{clusterName1}, }, @@ -389,7 +389,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName4, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -399,7 +399,7 @@ func TestClassifyCRPs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: crpName3, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: &numOfClusters, diff --git a/pkg/scheduler/watchers/membercluster/watcher.go 
b/pkg/scheduler/watchers/membercluster/watcher.go index a97f5c758..aeeabcd18 100644 --- a/pkg/scheduler/watchers/membercluster/watcher.go +++ b/pkg/scheduler/watchers/membercluster/watcher.go @@ -80,7 +80,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // a) the cluster's setup (e.g., its labels) or status (e.g., resource/non-resource properties), // has changed; and/or // b) an unexpected development (e.g., agents failing, network partition, etc.) has occurred. - // c) the cluster, which may or may not have resources placed on it, has left the fleet (deleting). + // c) the cluster, which may or may not have resources placed on it, has left the fleet (deleted). // // Among the cases, // @@ -123,9 +123,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu memberClusterGetErr := r.Client.Get(ctx, memberClusterKey, memberCluster) switch { case errors.IsNotFound(memberClusterGetErr): - // On very unlikely occasions, it could happen that the member cluster is deleted - // before this controller gets a chance to process it, it may happen when a member cluster - // leaves the fleet. In such cases, this controller will request the scheduler to check + // This could happen when the member cluster is deleted. In this case, controller will request the scheduler to check // all CRPs just in case. isMemberClusterMissing = true case memberClusterGetErr != nil: @@ -185,10 +183,9 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return false }, DeleteFunc: func(e event.DeleteEvent) bool { - // Ignore deletion events; the removal of a cluster is first signaled by adding a deleteTimeStamp, - // which is an update event - klog.V(3).InfoS("Ignoring delete events for member cluster objects", "eventObject", klog.KObj(e.Object)) - return false + // We only notify the scheduler when a member cluster is deleted which means the member agent has finished the leaving process. 
+ klog.V(2).InfoS("Member cluster object is deleted", "eventObject", klog.KObj(e.Object)) + return true }, UpdateFunc: func(e event.UpdateEvent) bool { // Check if the update event is valid. @@ -208,9 +205,9 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { clusterKObj := klog.KObj(newCluster) // The cluster is being deleted. - if oldCluster.GetDeletionTimestamp().IsZero() && !newCluster.GetDeletionTimestamp().IsZero() { - klog.V(2).InfoS("A member cluster is leaving the fleet", "memberCluster", clusterKObj) - return true + if !newCluster.GetDeletionTimestamp().IsZero() { + klog.V(2).InfoS("A member cluster is leaving the fleet, ignore until it is left", "memberCluster", clusterKObj) + return false } // Capture label changes. diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index b2deeb322..8632ab9b2 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -33,6 +33,11 @@ const ( // unknown. RolloutStartedUnknownReason = "RolloutStartedUnknown" + // RolloutControlledByExternalControllerReason is the reason string of the placement condition if + // the placement rollout strategy type is set to External, and either rollout not started at all or + // clusters observes different resource snapshot versions. This is a special case for unknown rolloutStarted condition. + RolloutControlledByExternalControllerReason = "RolloutControlledByExternalController" + // RolloutNotStartedYetReason is the reason string of placement condition if the rollout has not started yet. 
RolloutNotStartedYetReason = "RolloutNotStartedYet" @@ -392,7 +397,7 @@ func (c ResourceCondition) UnknownResourceConditionPerCluster(generation int64) Status: metav1.ConditionUnknown, Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), Reason: RolloutStartedUnknownReason, - Message: "In the process of deciding whether to roll out the latest resources or not", + Message: "In the process of deciding whether to roll out some version of the resources or not", ObservedGeneration: generation, }, { diff --git a/pkg/utils/controller/controller.go b/pkg/utils/controller/controller.go index b27a91534..b63df285b 100644 --- a/pkg/utils/controller/controller.go +++ b/pkg/utils/controller/controller.go @@ -26,6 +26,7 @@ import ( "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -362,6 +363,91 @@ func FetchAllClusterResourceSnapshots(ctx context.Context, k8Client client.Clien return resourceSnapshots, nil } +// CollectResourceIdentifiersFromClusterResourceSnapshot collects the resource identifiers selected by a series of clusterResourceSnapshot. 
+// Given the index of the clusterResourceSnapshot, it collects resources from the master snapshot as well as all of its sub-indexed snapshots.
+func CollectResourceIdentifiersFromClusterResourceSnapshot(
+	ctx context.Context,
+	k8Client client.Client,
+	crpName string,
+	resourceSnapshotIndex string,
+) ([]fleetv1beta1.ResourceIdentifier, error) {
+	labelMatcher := client.MatchingLabels{
+		fleetv1beta1.CRPTrackingLabel:   crpName,
+		fleetv1beta1.ResourceIndexLabel: resourceSnapshotIndex,
+	}
+	resourceSnapshotList := &fleetv1beta1.ClusterResourceSnapshotList{}
+	if err := k8Client.List(ctx, resourceSnapshotList, labelMatcher); err != nil {
+		klog.ErrorS(err, "Failed to list the clusterResourceSnapshots associated with the clusterResourcePlacement",
+			"resourceSnapshotIndex", resourceSnapshotIndex, "clusterResourcePlacement", crpName)
+		return nil, NewAPIServerError(true, err)
+	}
+
+	if len(resourceSnapshotList.Items) == 0 {
+		klog.V(2).InfoS("No clusterResourceSnapshots found for the clusterResourcePlacement when collecting resource identifiers",
+			"resourceSnapshotIndex", resourceSnapshotIndex, "clusterResourcePlacement", crpName)
+		return nil, nil
+	}
+
+	// Look for the master clusterResourceSnapshot.
+	var masterResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot
+	for i, resourceSnapshot := range resourceSnapshotList.Items {
+		// only master has this annotation
+		if len(resourceSnapshot.Annotations[fleetv1beta1.ResourceGroupHashAnnotation]) != 0 {
+			masterResourceSnapshot = &resourceSnapshotList.Items[i]
+			break
+		}
+	}
+	if masterResourceSnapshot == nil {
+		err := NewUnexpectedBehaviorError(fmt.Errorf("no master clusterResourceSnapshot found for clusterResourcePlacement `%s`", crpName))
+		klog.ErrorS(err, "Found clusterResourceSnapshots without master snapshot", "clusterResourcePlacement", crpName, "resourceSnapshotIndex", resourceSnapshotIndex, "resourceSnapshotCount", len(resourceSnapshotList.Items))
+		return nil, err
+	}
+
+	allResourceSnapshots, err := FetchAllClusterResourceSnapshots(ctx, k8Client, crpName, masterResourceSnapshot)
+	if err != nil {
+		klog.ErrorS(err, "Failed to fetch all the clusterResourceSnapshots", "resourceSnapshotIndex", resourceSnapshotIndex, "clusterResourcePlacement", crpName)
+		return nil, err
+	}
+
+	selectedResources := make([]fleetv1beta1.ResourceIdentifier, 0)
+	retrieveResourceIdentifierFromSnapshot := func(snapshot *fleetv1beta1.ClusterResourceSnapshot) error {
+		for _, res := range snapshot.Spec.SelectedResources {
+			var uResource unstructured.Unstructured
+			if err := uResource.UnmarshalJSON(res.Raw); err != nil {
+				klog.ErrorS(err, "Resource has invalid content", "snapshot", klog.KObj(snapshot), "selectedResource", res.Raw)
+				return NewUnexpectedBehaviorError(err)
+			}
+			identifier := fleetv1beta1.ResourceIdentifier{
+				Group:     uResource.GetObjectKind().GroupVersionKind().Group,
+				Version:   uResource.GetObjectKind().GroupVersionKind().Version,
+				Kind:      uResource.GetObjectKind().GroupVersionKind().Kind,
+				Name:      uResource.GetName(),
+				Namespace: uResource.GetNamespace(),
+			}
+			selectedResources = append(selectedResources, identifier)
+		}
+		return nil
+	}
+
+	// Retrieve the resource identifiers from snapshots following the order to preserve the order of the resource identifiers.
+	if err := retrieveResourceIdentifierFromSnapshot(masterResourceSnapshot); err != nil {
+		return nil, err
+	}
+	for i := range len(allResourceSnapshots) - 1 {
+		snapshotName := fmt.Sprintf("%s-%s-%d", crpName, resourceSnapshotIndex, i)
+		if resourceSnapshot, ok := allResourceSnapshots[snapshotName]; ok {
+			if err := retrieveResourceIdentifierFromSnapshot(resourceSnapshot); err != nil {
+				return nil, err
+			}
+		} else {
+			err := NewUnexpectedBehaviorError(fmt.Errorf("failed to find clusterResourceSnapshot with name %s", snapshotName))
+			klog.ErrorS(err, "Failed to retrieve resource identifiers from clusterResourceSnapshots", "resourceSnapshotIndex", resourceSnapshotIndex, "clusterResourcePlacement", crpName)
+			return nil, err
+		}
+	}
+	return selectedResources, nil
+}
+
 // MemberController configures how to join or leave the fleet as a member.
 type MemberController interface {
 	// Join describes the process of joining the fleet as a member.
diff --git a/pkg/utils/controller/controller_test.go b/pkg/utils/controller/controller_test.go index 53a4ec645..1821c45ba 100644 --- a/pkg/utils/controller/controller_test.go +++ b/pkg/utils/controller/controller_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/test/utils/resource" ) func TestNewUnexpectedBehaviorError(t *testing.T) { @@ -464,3 +465,294 @@ func TestFetchAllClusterResourceSnapshots(t *testing.T) { }) } } + +func TestCollectResourceIdentifiersFromClusterResourceSnapshot(t *testing.T) { + crpName := "my-test-crp" + + namespaceResourceContent := *resource.NamespaceResourceContentForTest(t) + deploymentResourceContent := *resource.DeploymentResourceContentForTest(t) + clusterResourceEnvelopeContent := *resource.ClusterResourceEnvelopeResourceContentForTest(t) + resourceEnvelopeContent := *resource.ResourceEnvelopeResourceContentForTest(t) + + tests := []struct { + name string + resourceSnapshotIndex string + snapshots []fleetv1beta1.ClusterResourceSnapshot + want []fleetv1beta1.ResourceIdentifier + wantErr error + }{ + { + name: "no resource snapshots found", + resourceSnapshotIndex: "0", + snapshots: []fleetv1beta1.ClusterResourceSnapshot{}, + want: nil, + wantErr: nil, + }, + { + name: "no master resource snapshot found", + resourceSnapshotIndex: "0", + snapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, crpName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + }, + }, + want: []fleetv1beta1.ResourceIdentifier{}, + wantErr: ErrUnexpectedBehavior, + }, + { + name: "some of resource snapshots have not been created yet", + resourceSnapshotIndex: "0", + snapshots: 
[]fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, crpName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, crpName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + }, + }, + wantErr: ErrExpectedBehavior, + }, + { + name: "resource snapshot without any resources", + resourceSnapshotIndex: "0", + snapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, crpName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + }, + }, + want: []fleetv1beta1.ResourceIdentifier{}, + wantErr: nil, + }, + { + name: "only master resource snapshot found with cluster-scoped resource, namespace-scoped resource and resource wrapped with envelope", + resourceSnapshotIndex: "0", + snapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, crpName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + 
fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "2", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + deploymentResourceContent, + clusterResourceEnvelopeContent, + resourceEnvelopeContent, + }, + }, + }, + }, + want: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Namespace: "deployment-namespace", + Name: "deployment-name", + }, + // The envelope resources themselves are included, not the wrapped resources. + { + Group: "placement.kubernetes-fleet.io", + Version: "v1beta1", + Kind: "ClusterResourceEnvelope", + Namespace: "", + Name: "test-cluster-resource-envelope", + }, + { + Group: "placement.kubernetes-fleet.io", + Version: "v1beta1", + Kind: "ResourceEnvelope", + Namespace: "test-namespace", + Name: "test-resource-envelope", + }, + }, + wantErr: nil, + }, + { + name: "both master and subindex resource snapshots found with cluster-scoped resource, namespace-scoped resource and resource wrapped with envelope", + resourceSnapshotIndex: "0", + snapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, crpName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "4", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "2", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + namespaceResourceContent, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, crpName, 0, 
0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + deploymentResourceContent, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, crpName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + clusterResourceEnvelopeContent, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, crpName, 0, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: crpName, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "2", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: []fleetv1beta1.ResourceContent{ + resourceEnvelopeContent, + }, + }, + }, + }, + want: []fleetv1beta1.ResourceIdentifier{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Namespace: "", + Name: "namespace-name", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Namespace: "deployment-namespace", + Name: "deployment-name", + }, + // The envelope resources themselves are included, not the wrapped resources. 
+ { + Group: "placement.kubernetes-fleet.io", + Version: "v1beta1", + Kind: "ClusterResourceEnvelope", + Namespace: "", + Name: "test-cluster-resource-envelope", + }, + { + Group: "placement.kubernetes-fleet.io", + Version: "v1beta1", + Kind: "ResourceEnvelope", + Namespace: "test-namespace", + Name: "test-resource-envelope", + }, + }, + wantErr: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + scheme := serviceScheme(t) + objects := []client.Object{} + for i := range tc.snapshots { + objects = append(objects, &tc.snapshots[i]) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + Build() + got, err := CollectResourceIdentifiersFromClusterResourceSnapshot(context.Background(), fakeClient, crpName, tc.resourceSnapshotIndex) + if gotErr, wantErr := err != nil, tc.wantErr != nil; gotErr != wantErr || !errors.Is(err, tc.wantErr) { + t.Fatalf("CollectResourceIdentifiersFromClusterResourceSnapshot() got error %v, want error %v", err, tc.wantErr) + } + if tc.wantErr != nil { + return + } + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("FetchAllClusterResourceSnapshots() mismatch (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/utils/defaulter/clusterresourceplacement_test.go b/pkg/utils/defaulter/clusterresourceplacement_test.go index b2469b4c9..0c93dd9e8 100644 --- a/pkg/utils/defaulter/clusterresourceplacement_test.go +++ b/pkg/utils/defaulter/clusterresourceplacement_test.go @@ -34,10 +34,10 @@ func TestSetDefaultsClusterResourcePlacement(t *testing.T) { }{ "ClusterResourcePlacement with nil Spec": { obj: &fleetv1beta1.ClusterResourcePlacement{ - Spec: fleetv1beta1.ClusterResourcePlacementSpec{}, + Spec: fleetv1beta1.PlacementSpec{}, }, wantObj: &fleetv1beta1.ClusterResourcePlacement{ - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickAllPlacementType, }, @@ -61,7 
+61,7 @@ func TestSetDefaultsClusterResourcePlacement(t *testing.T) { }, "ClusterResourcePlacement with nil TopologySpreadConstraints & Tolerations fields": { obj: &fleetv1beta1.ClusterResourcePlacement{ - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Policy: &fleetv1beta1.PlacementPolicy{ TopologySpreadConstraints: []fleetv1beta1.TopologySpreadConstraint{ { @@ -93,7 +93,7 @@ func TestSetDefaultsClusterResourcePlacement(t *testing.T) { }, }, wantObj: &fleetv1beta1.ClusterResourcePlacement{ - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Policy: &fleetv1beta1.PlacementPolicy{ TopologySpreadConstraints: []fleetv1beta1.TopologySpreadConstraint{ { @@ -130,7 +130,7 @@ func TestSetDefaultsClusterResourcePlacement(t *testing.T) { }, "ClusterResourcePlacement with serverside apply config not set": { obj: &fleetv1beta1.ClusterResourcePlacement{ - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Strategy: fleetv1beta1.RolloutStrategy{ ApplyStrategy: &fleetv1beta1.ApplyStrategy{ Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, @@ -139,7 +139,7 @@ func TestSetDefaultsClusterResourcePlacement(t *testing.T) { }, }, wantObj: &fleetv1beta1.ClusterResourcePlacement{ - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + Spec: fleetv1beta1.PlacementSpec{ Policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickAllPlacementType, }, diff --git a/pkg/utils/overrider/overrider_test.go b/pkg/utils/overrider/overrider_test.go index 7e869304e..85b467612 100644 --- a/pkg/utils/overrider/overrider_test.go +++ b/pkg/utils/overrider/overrider_test.go @@ -65,7 +65,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { Kind: "Service", }: true, { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", }: true, @@ -468,7 +468,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { OverrideSpec: 
placementv1alpha1.ResourceOverrideSpec{ ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "not-exist", @@ -495,7 +495,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { OverrideSpec: placementv1alpha1.ResourceOverrideSpec{ ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "deployment-name", @@ -540,7 +540,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { OverrideSpec: placementv1alpha1.ResourceOverrideSpec{ ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "deployment-name", @@ -561,7 +561,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { OverrideSpec: placementv1alpha1.ResourceOverrideSpec{ ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "not-exist", @@ -693,7 +693,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { OverrideSpec: placementv1alpha1.ResourceOverrideSpec{ ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "not-exist", @@ -723,7 +723,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "deployment-name", @@ -771,7 +771,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "deployment-name", @@ -792,7 +792,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { OverrideSpec: placementv1alpha1.ResourceOverrideSpec{ ResourceSelectors: 
[]placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "not-exist", @@ -1121,7 +1121,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "not-exist", @@ -1151,7 +1151,7 @@ func TestFetchAllMatchingOverridesForResourceSnapshot(t *testing.T) { }, ResourceSelectors: []placementv1alpha1.ResourceSelector{ { - Group: "", + Group: "apps", Version: "v1", Kind: "Deployment", Name: "deployment-name", diff --git a/pkg/utils/validator/clusterresourceplacement_test.go b/pkg/utils/validator/clusterresourceplacement_test.go index 3f7ec64d4..bee65cd52 100644 --- a/pkg/utils/validator/clusterresourceplacement_test.go +++ b/pkg/utils/validator/clusterresourceplacement_test.go @@ -221,7 +221,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -238,7 +238,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp-with-very-long-name-field-exceeding-DNS1035LabelMaxLength", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -256,7 +256,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: 
[]placementv1beta1.ClusterResourceSelector{ { Group: "rbac.authorization.k8s.io", @@ -284,7 +284,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "rbac.authorization.k8s.io", @@ -304,7 +304,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "apps", @@ -326,7 +326,7 @@ func TestValidateClusterResourcePlacement(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, }, }, diff --git a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go index fdb407d68..2717be508 100644 --- a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook_test.go @@ -40,7 +40,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -60,7 +60,7 @@ func TestHandle(t *testing.T) { Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, DeletionTimestamp: 
ptr.To(metav1.NewTime(time.Now())), }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -81,7 +81,7 @@ func TestHandle(t *testing.T) { Finalizers: []string{}, DeletionTimestamp: ptr.To(metav1.NewTime(time.Now())), }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -100,7 +100,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", Finalizers: []string{}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -119,7 +119,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -134,7 +134,7 @@ func TestHandle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -156,7 +156,7 @@ func TestHandle(t *testing.T) { Name: "test-crp", Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -176,7 +176,7 @@ func TestHandle(t *testing.T) { Finalizers: 
[]string{}, DeletionTimestamp: ptr.To(metav1.NewTime(time.Now())), }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, @@ -196,7 +196,7 @@ func TestHandle(t *testing.T) { Labels: map[string]string{"key1": "value1"}, Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{resourceSelector}, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -211,7 +211,7 @@ func TestHandle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), diff --git a/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go b/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go index c7908d1db..16f904a71 100644 --- a/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacementdisruptionbudget/clusterresourceplacementdisruptionbudget_validating_webhook_test.go @@ -123,7 +123,7 @@ func TestHandle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "pick-all-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -134,7 +134,7 @@ func 
TestHandle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "crp-pickn", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -146,7 +146,7 @@ func TestHandle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "crp-pickfixed", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, diff --git a/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go b/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go index 352ac47a7..67daa668a 100644 --- a/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go +++ b/pkg/webhook/clusterresourceplacementeviction/clusterresourceplacementeviction_validating_webhook_test.go @@ -83,7 +83,7 @@ func TestHandle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -98,7 +98,7 @@ func TestHandle(t *testing.T) { }, Finalizers: []string{placementv1beta1.ClusterResourcePlacementCleanupFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -109,7 +109,7 @@ func TestHandle(t *testing.T) { 
ObjectMeta: metav1.ObjectMeta{ Name: "crp-pickfixed", }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{}, Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 3d47be1d0..2ab86521b 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -255,6 +255,23 @@ func crpRolloutStuckConditions(generation int64) []metav1.Condition { } } +func crpRolloutPendingDueToExternalStrategyConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: condition.RolloutControlledByExternalControllerReason, + ObservedGeneration: generation, + }, + } +} + func crpAppliedFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -436,6 +453,23 @@ func resourcePlacementSyncPendingConditions(generation int64) []metav1.Condition } } +func resourcePlacementRolloutUnknownConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourceScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourceRolloutStartedConditionType), + Status: metav1.ConditionUnknown, + Reason: condition.RolloutStartedUnknownReason, + ObservedGeneration: generation, + }, + } +} + func resourcePlacementApplyFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -718,9 +752,10 @@ func 
crpStatusWithOverrideUpdatedActual( Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, hasOverride), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, + ObservedResourceIndex: wantObservedResourceIndex, }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpRolloutCompletedConditions(crp.Generation, hasOverride), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -759,10 +794,11 @@ func crpStatusWithOverrideUpdatedFailedActual( Conditions: resourcePlacementOverrideFailedConditions(crp.Generation), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, + ObservedResourceIndex: wantObservedResourceIndex, }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpOverrideFailedConditions(crp.Generation), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -797,10 +833,11 @@ func crpStatusWithWorkSynchronizedUpdatedFailedActual( Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), ApplicableResourceOverrides: wantResourceOverrides, ApplicableClusterResourceOverrides: wantClusterResourceOverrides, + ObservedResourceIndex: wantObservedResourceIndex, }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpWorkSynchronizedFailedConditions(crp.Generation, hasOverrides), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -813,6 +850,69 @@ func crpStatusWithWorkSynchronizedUpdatedFailedActual( } } +func crpStatusWithExternalStrategyActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + 
wantObservedResourceIndex string, + wantAvailable bool, + wantSelectedClusters []string, + wantObservedResourceIndexPerCluster []string, + wantAvailablePerCluster []bool, + wantClusterResourceOverrides map[string][]string, + wantResourceOverrides map[string][]placementv1beta1.NamespacedName, +) func() error { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + return func() error { + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + return err + } + + var wantPlacementStatus []placementv1beta1.ResourcePlacementStatus + crpHasOverrides := false + for i, name := range wantSelectedClusters { + if !wantAvailablePerCluster[i] { + // No observed resource index for this cluster, assume rollout is still pending. + wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ + ClusterName: name, + Conditions: resourcePlacementRolloutUnknownConditions(crp.Generation), + ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], + }) + } else { + wantResourceOverrides, hasRO := wantResourceOverrides[name] + wantClusterResourceOverrides, hasCRO := wantClusterResourceOverrides[name] + hasOverrides := (hasRO && len(wantResourceOverrides) > 0) || (hasCRO && len(wantClusterResourceOverrides) > 0) + if hasOverrides { + crpHasOverrides = true + } + wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ + ClusterName: name, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, hasOverrides), + ApplicableResourceOverrides: wantResourceOverrides, + ApplicableClusterResourceOverrides: wantClusterResourceOverrides, + ObservedResourceIndex: wantObservedResourceIndexPerCluster[i], + }) + } + } + + wantStatus := placementv1beta1.PlacementStatus{ + PlacementStatuses: wantPlacementStatus, + SelectedResources: wantSelectedResourceIdentifiers, + ObservedResourceIndex: 
wantObservedResourceIndex, + } + if wantAvailable { + wantStatus.Conditions = crpRolloutCompletedConditions(crp.Generation, crpHasOverrides) + } else { + wantStatus.Conditions = crpRolloutPendingDueToExternalStrategyConditions(crp.Generation) + } + + if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil + } +} + func customizedCRPStatusUpdatedActual(crpName string, wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, wantSelectedClusters, wantUnselectedClusters []string, @@ -827,8 +927,9 @@ func customizedCRPStatusUpdatedActual(crpName string, wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{} for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ - ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), + ClusterName: name, + ObservedResourceIndex: wantObservedResourceIndex, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), }) } for i := 0; i < len(wantUnselectedClusters); i++ { @@ -865,7 +966,7 @@ func customizedCRPStatusUpdatedActual(crpName string, // // * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or // * The CRP is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled. - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -889,6 +990,7 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ var wantPlacementStatus []placementv1beta1.ResourcePlacementStatus // We only expect the deployment to not be available on one cluster. 
unavailableResourcePlacementStatus := placementv1beta1.ResourcePlacementStatus{ + ObservedResourceIndex: wantObservedResourceIndex, Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ResourceScheduledConditionType), @@ -943,6 +1045,7 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ // For all the other connected member clusters rollout will be blocked. rolloutBlockedPlacementStatus := placementv1beta1.ResourcePlacementStatus{ + ObservedResourceIndex: wantObservedResourceIndex, Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ResourceScheduledConditionType), @@ -978,7 +1081,7 @@ func safeRolloutWorkloadCRPStatusUpdatedActual(wantSelectedResourceIdentifiers [ }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 98a26bc93..c11430cdb 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -78,7 +78,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -247,7 +247,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { // Add a custom finalizer; this would allow us to better observe the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -278,7 +278,8 @@ var _ = Describe("placing wrapped resources using a CRP", func() { PlacementStatuses := make([]placementv1beta1.ResourcePlacementStatus, 0) for _, memberClusterName := range allMemberClusterNames { unavailableResourcePlacementStatus := placementv1beta1.ResourcePlacementStatus{ - ClusterName: memberClusterName, + ClusterName: memberClusterName, + ObservedResourceIndex: "0", Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ResourceScheduledConditionType), @@ -345,7 +346,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { Namespace: workNamespace.Name, }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpNotAvailableConditions(1, false), PlacementStatuses: PlacementStatuses, SelectedResources: wantSelectedResources, @@ -440,7 +441,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -466,12 +467,13 @@ var _ = Describe("placing wrapped resources using a CRP", func() { return err } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpWorkSynchronizedFailedConditions(crp.Generation, false), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, false), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementWorkSynchronizedFailedConditions(crp.Generation, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -559,7 +561,7 @@ var _ = Describe("Process objects with generate name", Ordered, func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -585,11 +587,12 @@ var _ = Describe("Process objects with generate name", Ordered, func() { return err } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crp.Generation), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index b0b9f3ac8..515e956d5 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -72,7 +72,7 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun }, { Group: placementv1beta1.GroupVersion.Group, - Kind: "ClusterResourceEnvelope", + Kind: placementv1beta1.ClusterResourceEnvelopeKind, Version: placementv1beta1.GroupVersion.Version, Name: testClusterResourceEnvelope.Name, }, @@ -90,7 +90,7 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -212,12 +212,20 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun checkIfAllMemberClustersHaveLeft() }) - It("should update CRP status to not placing any resources since all clusters are left", func() { + It("Should update CRP status to not placing any resources since all clusters are left", func() { // resourceQuota is enveloped so it's not trackable yet crpStatusUpdatedActual := customizedCRPStatusUpdatedActual(crpName, wantSelectedResources, nil, nil, "0", false) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) + It("Validating if the resources are still on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) + Consistently(workResourcesPlacedActual, 3*consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + It("Should be able to rejoin the cluster", func() { By("rejoin all the clusters without deleting the CRP") setAllMemberClustersToJoin() diff --git a/test/e2e/placement_apply_strategy_test.go b/test/e2e/placement_apply_strategy_test.go index 117a0226e..98f56a635 100644 --- a/test/e2e/placement_apply_strategy_test.go +++ b/test/e2e/placement_apply_strategy_test.go @@ -266,11 +266,12 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) - wantStatus := 
placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crp.Generation), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -289,12 +290,14 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Conditions: resourcePlacementApplyFailedConditions(crp.Generation), }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -448,7 +451,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -487,7 +490,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Name: conflictedCRPName, // No need for the custom deletion blocker finalizer. 
}, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -510,14 +513,15 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { }) It("should update conflicted CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -642,7 +646,7 @@ var _ = Describe("SSA", Ordered, func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -758,7 +762,7 @@ var _ = Describe("switching apply strategies", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -787,14 +791,15 @@ var _ = Describe("switching apply strategies", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -811,8 +816,9 @@ var _ = Describe("switching apply strategies", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -852,14 +858,15 @@ var _ = Describe("switching apply strategies", func() { // The rollout of the previous change will be blocked due to the rollout // strategy configuration (1 member cluster has failed; 0 clusters are // allowed to become unavailable). 
- buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpRolloutStuckConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -876,8 +883,9 @@ var _ = Describe("switching apply strategies", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementSyncPendingConditions(crpGeneration), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementSyncPendingConditions(crpGeneration), }, }, ObservedResourceIndex: "1", @@ -921,18 +929,20 @@ var _ = Describe("switching apply strategies", func() { // The rollout of the previous change will be blocked due to the rollout // strategy configuration (1 member cluster has failed; 0 clusters are // allowed to become unavailable). 
- buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpDiffReportedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1003,7 +1013,7 @@ var _ = Describe("switching apply strategies", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -1030,14 +1040,15 @@ var _ = Describe("switching apply strategies", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpDiffReportedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1070,8 +1081,9 @@ var _ = Describe("switching apply strategies", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1140,14 +1152,15 @@ var _ = Describe("switching apply strategies", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return 
&placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpDiffReportedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1166,8 +1179,9 @@ var _ = Describe("switching apply strategies", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1240,18 +1254,20 @@ var _ = Describe("switching apply strategies", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpRolloutCompletedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", 
+ Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "1", diff --git a/test/e2e/placement_drift_diff_test.go b/test/e2e/placement_drift_diff_test.go index 30603e80b..80bd936d8 100644 --- a/test/e2e/placement_drift_diff_test.go +++ b/test/e2e/placement_drift_diff_test.go @@ -178,14 +178,15 @@ var _ = Describe("take over existing resources", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -222,12 +223,14 @@ var _ = Describe("take over existing resources", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, { - 
ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -347,14 +350,15 @@ var _ = Describe("take over existing resources", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -418,12 +422,14 @@ var _ = Describe("take over existing resources", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", 
@@ -549,15 +555,16 @@ var _ = Describe("detect drifts on placed resources", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpRolloutCompletedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -576,12 +583,14 @@ var _ = Describe("detect drifts on placed resources", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -685,14 +694,15 @@ var _ = Describe("detect drifts on placed resources", func() { }) It("should update CRP status as expected", 
func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -729,12 +739,14 @@ var _ = Describe("detect drifts on placed resources", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -866,14 +878,15 @@ var _ = Describe("detect drifts on placed resources", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: 
crpAppliedFailedConditions(crpGeneration), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -937,12 +950,14 @@ var _ = Describe("detect drifts on placed resources", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), }, }, ObservedResourceIndex: "0", @@ -1068,15 +1083,16 @@ var _ = Describe("report diff mode", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpDiffReportedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), - FailedPlacements: 
[]placementv1beta1.FailedResourcePlacement{}, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1111,9 +1127,10 @@ var _ = Describe("report diff mode", func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1145,9 +1162,10 @@ var _ = Describe("report diff mode", func() { }, }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1274,21 +1292,23 @@ var _ = Describe("report diff mode", func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: 
crpDiffReportedConditions(crpGeneration, false), SelectedResources: workResourceIdentifiers(), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, - DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1320,9 +1340,10 @@ var _ = Describe("report diff mode", func() { }, }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementDiffReportedConditions(crpGeneration), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementDiffReportedConditions(crpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1423,7 +1444,7 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: 
placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -1475,8 +1496,8 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: []placementv1beta1.ResourceIdentifier{ { @@ -1494,8 +1515,9 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1534,8 +1556,9 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1574,8 +1597,9 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster3WestProdName, + 
ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1719,8 +1743,8 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { var refreshedLastDeployDriftObservedTimeOnCluster2 metav1.Time var refreshedFirstDeployDriftObservedTimeOnCluster2 metav1.Time - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: []placementv1beta1.ResourceIdentifier{ { @@ -1738,8 +1762,9 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1778,8 +1803,9 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1818,8 +1844,9 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, }, { - ClusterName: memberCluster3WestProdName, 
- Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -1953,8 +1980,8 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ + buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crpGeneration), SelectedResources: []placementv1beta1.ResourceIdentifier{ { @@ -1972,20 +1999,23 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, - DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, - DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: 
resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementApplyFailedConditions(crpGeneration), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementApplyFailedConditions(crpGeneration), FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -2103,9 +2133,10 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }) It("should update CRP status as expected", func() { - buildWantCRPStatus := func(crpGeneration int64) *placementv1beta1.ClusterResourcePlacementStatus { - return &placementv1beta1.ClusterResourcePlacementStatus{ - Conditions: crpRolloutCompletedConditions(crpGeneration, false), + buildWantCRPStatus := func(crpGeneration int64, observedResourceIndex string) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + ObservedResourceIndex: observedResourceIndex, + Conditions: crpRolloutCompletedConditions(crpGeneration, false), SelectedResources: []placementv1beta1.ResourceIdentifier{ { Kind: "Namespace", @@ -2122,23 +2153,26 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { }, PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, - DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: observedResourceIndex, + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + 
DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, - DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: observedResourceIndex, + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), - FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, - DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, - DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: observedResourceIndex, + Conditions: resourcePlacementRolloutCompletedConditions(crpGeneration, true, false), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{}, + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{}, + DriftedPlacements: []placementv1beta1.DriftedResourcePlacement{}, }, }, } @@ -2149,12 +2183,11 @@ var _ = Describe("mixed diff and drift reportings", Ordered, func() { if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { return err } - wantCRPStatus := buildWantCRPStatus(crp.Generation) // There is no guarantee on how many resource snapshots Fleet will create based // on the previous round of changes; consequently the test spec here drops the field // for comparison. 
- wantCRPStatus.ObservedResourceIndex = crp.Status.ObservedResourceIndex + wantCRPStatus := buildWantCRPStatus(crp.Generation, crp.Status.ObservedResourceIndex) if diff := cmp.Diff(crp.Status, *wantCRPStatus, crpStatusCmpOptions...); diff != "" { return fmt.Errorf("CRP status diff (-got, +want): %s", diff) diff --git a/test/e2e/placement_eviction_test.go b/test/e2e/placement_eviction_test.go index 74cc3010a..34d182251 100644 --- a/test/e2e/placement_eviction_test.go +++ b/test/e2e/placement_eviction_test.go @@ -544,7 +544,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -636,7 +636,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -742,7 +742,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -840,7 +840,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -932,7 +932,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -1024,7 +1024,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -1130,7 +1130,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), @@ -1228,7 +1228,7 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(len(allMemberClusterNames))), diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index 7e028ef27..bfd7e46bc 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -96,7 +96,7 @@ var _ = Describe("handling errors and failures gracefully", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -122,11 +122,12 @@ var _ = Describe("handling errors and failures gracefully", func() { return err } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crp.Generation), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ diff --git a/test/e2e/placement_pickall_test.go b/test/e2e/placement_pickall_test.go index b682f5f75..fffc403d7 100644 --- a/test/e2e/placement_pickall_test.go +++ b/test/e2e/placement_pickall_test.go @@ -46,7 +46,7 @@ var _ = Describe("placing resources using a CRP with no placement policy specifi // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -87,7 +87,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -131,7 +131,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -201,7 +201,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -316,7 +316,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -387,7 +387,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -461,7 +461,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -614,7 +614,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -689,7 +689,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -765,7 +765,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -944,7 +944,7 @@ var _ = Describe("placing resources using a CRP of PickAll placement type", func // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, diff --git a/test/e2e/placement_pickfixed_test.go b/test/e2e/placement_pickfixed_test.go index 32225b435..9adba284b 100644 --- a/test/e2e/placement_pickfixed_test.go +++ b/test/e2e/placement_pickfixed_test.go @@ -45,7 +45,7 @@ var _ = Describe("placing resources using a CRP of PickFixed placement type", fu // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -94,7 +94,7 @@ var _ = Describe("placing resources using a CRP of PickFixed placement type", fu // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -160,7 +160,7 @@ var _ = Describe("placing resources using a CRP of PickFixed placement type", fu // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, diff --git a/test/e2e/placement_pickn_test.go b/test/e2e/placement_pickn_test.go index cffc3e9ae..f35dd1e5e 100644 --- a/test/e2e/placement_pickn_test.go +++ b/test/e2e/placement_pickn_test.go @@ -50,7 +50,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -97,7 +97,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -168,7 +168,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -236,7 +236,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -314,7 +314,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -433,7 +433,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -507,7 +507,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -578,7 +578,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -653,7 +653,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -737,7 +737,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -831,7 +831,7 @@ var _ = Describe("placing resources using a CRP of PickN placement", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 1b21ff3b1..b2bced397 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -60,7 +60,7 @@ var _ = Describe("creating CRP and selecting resources by name", Ordered, func() // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), }, } @@ -113,7 +113,7 @@ var _ = Describe("creating CRP and selecting resources by label", Ordered, func( // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -177,7 +177,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -263,7 +263,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -348,7 +348,7 @@ var _ = Describe("validating CRP when cluster-scoped and namespace-scoped resour // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -441,7 +441,7 @@ var _ = Describe("validating CRP when adding resources in a matching namespace", // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -528,7 +528,7 @@ var _ = Describe("validating CRP when deleting resources in a matching namespace // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -619,7 +619,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -651,7 +651,7 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f return err } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), @@ -701,7 +701,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: corev1.GroupName, @@ -733,7 +733,7 @@ var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, gotCRP); err != nil { return err } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -799,7 +799,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), }, } @@ -825,11 +825,12 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpAppliedFailedConditions(crp.Generation), PlacementStatuses: []placementv1beta1.ResourcePlacementStatus{ { - ClusterName: memberCluster1EastProdName, + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", FailedPlacements: []placementv1beta1.FailedResourcePlacement{ { ResourceIdentifier: placementv1beta1.ResourceIdentifier{ @@ -848,12 +849,14 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( Conditions: resourcePlacementApplyFailedConditions(crp.Generation), }, { - ClusterName: memberCluster2EastCanaryName, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), }, { - ClusterName: memberCluster3WestProdName, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, true, false), }, }, SelectedResources: []placementv1beta1.ResourceIdentifier{ @@ -937,7 +940,7 @@ var _ = Describe("validating CRP when placing cluster scope resource (other than // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "rbac.authorization.k8s.io", @@ -1037,7 +1040,7 @@ var _ = Describe("validating CRP revision history allowing single revision when // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -1131,7 +1134,7 @@ var _ = Describe("validating CRP revision history allowing multiple revisions wh // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -1224,7 +1227,7 @@ var _ = Describe("validating CRP when selected resources cross the 1MB limit", O // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, ClusterNames: []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, @@ -1363,7 +1366,7 @@ var _ = Describe("creating CRP and checking selected resources order", Ordered, Name: crpName, Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", diff --git a/test/e2e/rollout_test.go b/test/e2e/rollout_test.go index 727045e6c..b90f7de34 100644 --- a/test/e2e/rollout_test.go +++ b/test/e2e/rollout_test.go @@ -91,7 +91,7 @@ var _ = Describe("placing wrapped resources using a CRP", Ordered, func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -1164,7 +1164,7 @@ func buildCRPForSafeRollout() *placementv1beta1.ClusterResourcePlacement { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, diff --git a/test/e2e/scheduler_watchers_test.go b/test/e2e/scheduler_watchers_test.go index d4334caa5..7033c0c92 100644 --- a/test/e2e/scheduler_watchers_test.go +++ b/test/e2e/scheduler_watchers_test.go @@ -76,7 +76,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -145,7 +145,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -228,7 +228,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -338,7 +338,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -407,7 +407,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -496,7 +496,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -561,7 +561,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -589,6 +589,17 @@ var _ = Describe("responding to specific member cluster changes", func() { Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) + It("can mark the cluster as leaving", func() { + markMemberClusterAsLeaving(fakeClusterName1ForWatcherTests) + }) + + It("should not remove the leaving cluster from the scheduling decision", func() { + targetClusterNames := allMemberClusterNames + targetClusterNames = append(targetClusterNames, fakeClusterName1ForWatcherTests) + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), targetClusterNames, nil, "0") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + It("can mark the cluster as left", func() { markMemberClusterAsLeft(fakeClusterName1ForWatcherTests) }) @@ -620,7 +631,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -725,7 +736,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -792,7 +803,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -856,7 +867,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -921,7 +932,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -980,7 +991,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1051,7 +1062,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1140,7 +1151,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1214,7 +1225,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1298,7 +1309,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1417,7 +1428,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1486,7 +1497,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1515,6 +1526,17 @@ var _ = Describe("responding to specific member cluster changes", func() { Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP status as expected") }) + It("can mark the cluster as leaving", func() { + markMemberClusterAsLeaving(fakeClusterName1ForWatcherTests) + }) + + It("should not remove the leaving cluster from the scheduling decision", func() { + targetClusterNames := allMemberClusterNames + targetClusterNames = append(targetClusterNames, fakeClusterName1ForWatcherTests) + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), targetClusterNames, nil, "0") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + It("can mark the cluster as 
left", func() { markMemberClusterAsLeft(fakeClusterName1ForWatcherTests) }) @@ -1551,7 +1573,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1638,7 +1660,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1719,7 +1741,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -1801,7 +1823,7 @@ var _ = Describe("responding to specific member cluster changes", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, diff --git a/test/e2e/taint_toleration_test.go b/test/e2e/taint_toleration_test.go index 7024b91f3..523fb4ad3 100644 --- a/test/e2e/taint_toleration_test.go +++ b/test/e2e/taint_toleration_test.go @@ -45,7 +45,7 @@ var _ = Describe("placing resource using a cluster resource placement with pickF // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -95,7 +95,7 @@ var _ = Describe("placing resources using a cluster resource placement with no p // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -168,7 +168,7 @@ var _ = Describe("placing resources using a cluster resource placement with no p // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -241,7 +241,7 @@ var _ = Describe("picking N clusters with affinities and topology spread constra // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -327,7 +327,7 @@ var _ = Describe("picking all clusters using pickAll placement policy, add taint // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, }, diff --git a/test/e2e/updaterun_test.go b/test/e2e/updaterun_test.go index 678eff88f..c9a215326 100644 --- a/test/e2e/updaterun_test.go +++ b/test/e2e/updaterun_test.go @@ -67,7 +67,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.ExternalRolloutStrategyType, @@ -111,6 +111,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateLatestPolicySnapshot(crpName, policySnapshotIndex1st) }) + It("Should update crp status as pending rollout", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", "", ""}, []bool{false, false, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should create a staged update run successfully", func() { createStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName) }) @@ -118,6 +123,11 @@ var _ = 
Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-2 only and completes stage canary", func() { checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[0], envCanary) }) @@ -131,6 +141,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should update the configmap successfully on hub but not change member clusters", func() { Eventually(func() error { return hubClient.Update(ctx, &newConfigMap) }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap on hub") @@ -140,6 +156,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { } }) + It("Should not update crp status, should still be completed", func() { + crpStatusUpdatedActual := 
crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep CRP %s status as expected", crpName) + }) + It("Should create a new latest resource snapshot", func() { crsList := &placementv1beta1.ClusterResourceSnapshotList{} Eventually(func() error { @@ -169,6 +191,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { configMapActual := configMapPlacedOnClusterActual(cluster, &oldConfigMap) Consistently(configMapActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep configmap %s data as expected", newConfigMap.Name) } + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex2nd, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[1], envCanary) }) @@ -182,6 +210,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { } }) + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex2nd, true, allMemberClusterNames, + []string{resourceSnapshotIndex2nd, resourceSnapshotIndex2nd, resourceSnapshotIndex2nd}, []bool{true, true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should create a new 
staged update run with old resourceSnapshotIndex successfully to rollback", func() { createStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName) }) @@ -195,6 +229,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { configMapActual := configMapPlacedOnClusterActual(cluster, &newConfigMap) Consistently(configMapActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to keep configmap %s data as expected", newConfigMap.Name) } + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, + []string{resourceSnapshotIndex2nd, resourceSnapshotIndex1st, resourceSnapshotIndex2nd}, []bool{true, true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[2], envCanary) }) @@ -206,6 +246,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to rollback the configmap %s data on cluster %s as expected", oldConfigMap.Name, allMemberClusterNames[idx]) } }) + + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) }) Context("Test cluster scale out and shrink with staged update run", Ordered, func() { @@ -224,7 +270,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { // the 
behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -268,6 +314,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateLatestPolicySnapshot(crpName, policySnapshotIndex1st) }) + It("Should update crp status as pending rollout", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames[:2], []string{"", ""}, []bool{false, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should create a staged update run successfully", func() { createStagedUpdateRunSucceed(updateRunNames[0], crpName, resourceSnapshotIndex1st, strategyName) }) @@ -275,6 +326,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-2 only and completes stage canary", func() { checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames[:2], []string{"", resourceSnapshotIndex1st}, []bool{false, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[0], envCanary) }) @@ -285,6 +341,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { 
checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) }) + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames[:2], + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Update the crp to pick member-cluster-3 too", func() { Eventually(func() error { crp := &placementv1beta1.ClusterResourcePlacement{} @@ -300,6 +362,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateLatestPolicySnapshot(crpName, policySnapshotIndex2nd) }) + It("Should update crp status as rollout pending", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, ""}, []bool{false, false, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should create a staged update run successfully", func() { createStagedUpdateRunSucceed(updateRunNames[1], crpName, resourceSnapshotIndex1st, strategyName) }) @@ -309,6 +376,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) // TODO: need a way to check the status of staged update run that are have member-cluster-1 and member-cluster-2 updated checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := 
crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to keep CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[1], envCanary) }) @@ -318,6 +390,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Update the crp to only keep member-cluster-3", func() { Eventually(func() error { crp := &placementv1beta1.ClusterResourcePlacement{} @@ -333,12 +411,22 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateLatestPolicySnapshot(crpName, policySnapshotIndex3rd) }) + It("Should update crp status as rollout pending with member-cluster-3 only", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, false, []string{allMemberClusterNames[2]}, []string{resourceSnapshotIndex1st}, []bool{false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should create a staged update run successfully", func() { createStagedUpdateRunSucceed(updateRunNames[2], crpName, resourceSnapshotIndex1st, strategyName) }) It("Should still have 
resources on all member clusters and complete stage canary", func() { checkIfPlacedWorkResourcesOnMemberClustersConsistently(allMemberClusters) + + By("Validating crp status keeping as rollout pending with member-cluster-3 only") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, false, []string{allMemberClusterNames[2]}, []string{resourceSnapshotIndex1st}, []bool{false}, nil, nil) + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunNames[2], envCanary) }) @@ -349,6 +437,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { checkIfRemovedWorkResourcesFromMemberClusters([]*framework.Cluster{allMemberClusters[0], allMemberClusters[1]}) checkIfPlacedWorkResourcesOnMemberClustersConsistently([]*framework.Cluster{allMemberClusters[2]}) }) + + It("Should update crp status as completed with member-cluster-3 only", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, []string{allMemberClusterNames[2]}, []string{resourceSnapshotIndex1st}, []bool{true}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to keep CRP %s status as expected", crpName) + }) }) Context("Test staged update run with overrides", Ordered, func() { @@ -358,6 +451,9 @@ var _ = Describe("test CRP rollout with staged update run", func() { roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess()) roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + var wantCROs map[string][]string + var wantROs map[string][]placementv1beta1.NamespacedName + BeforeAll(func() { // Create a test namespace and a configMap inside it on the hub cluster. 
createWorkResources() @@ -410,7 +506,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ { LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{regionLabelName: regionEast, envLabelName: envCanary}, + MatchLabels: map[string]string{regionLabelName: regionEast, envLabelName: envCanary}, // member-cluster-2 }, }, }, @@ -429,6 +525,12 @@ var _ = Describe("test CRP rollout with staged update run", func() { } Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) + // Set the wanted overrides. + wantCROs = map[string][]string{allMemberClusterNames[0]: {croName + "-0"}} // with override snapshot index 0 + wantROs = map[string][]placementv1beta1.NamespacedName{ + allMemberClusterNames[1]: {placementv1beta1.NamespacedName{Namespace: roNamespace, Name: roName + "-0"}}, // with override snapshot index 0 + } + // Create the CRP with external rollout strategy and pick fixed policy. crp := &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ @@ -437,7 +539,7 @@ var _ = Describe("test CRP rollout with staged update run", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -476,6 +578,11 @@ var _ = Describe("test CRP rollout with staged update run", func() { validateLatestResourceSnapshot(crpName, resourceSnapshotIndex1st) }) + It("Should update crp status as pending rollout", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, []string{"", "", ""}, []bool{false, false, false}, nil, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("Should successfully schedule the crp", func() { validateLatestPolicySnapshot(crpName, policySnapshotIndex1st) }) @@ -487,17 +594,27 @@ var _ = Describe("test CRP rollout with staged update run", func() { It("Should rollout resources to member-cluster-2 only and completes stage canary", func() { checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun([]*framework.Cluster{allMemberClusters[1]}) checkIfRemovedWorkResourcesFromMemberClustersConsistently([]*framework.Cluster{allMemberClusters[0], allMemberClusters[2]}) + + By("Validating crp status as member-cluster-2 updated") + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(nil, "", false, allMemberClusterNames, + []string{"", resourceSnapshotIndex1st, ""}, []bool{false, true, false}, nil, wantROs) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + validateAndApproveClusterApprovalRequests(updateRunName, envCanary) }) It("Should rollout resources to member-cluster-1 and member-cluster-3 too and complete the staged update run successfully", func() { - wantCROs := 
map[string][]string{allMemberClusterNames[0]: {croName + "-0"}} // with override snapshot index 0 - wantROs := map[string][]placementv1beta1.NamespacedName{allMemberClusterNames[1]: {placementv1beta1.NamespacedName{Namespace: roNamespace, Name: roName + "-0"}}} // with override snapshot index 0 updateRunSucceededActual := updateRunStatusSucceededActual(updateRunName, policySnapshotIndex1st, len(allMemberClusters), nil, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, wantCROs, wantROs) Eventually(updateRunSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName) checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters) }) + It("Should update crp status as completed", func() { + crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames, + []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, wantCROs, wantROs) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + It("should have override annotations on the member cluster 1 and member cluster 2", func() { wantCROAnnotations := map[string]string{croTestAnnotationKey: fmt.Sprintf("%s-%d", croTestAnnotationValue, 0)} wantROAnnotations := map[string]string{roTestAnnotationKey: fmt.Sprintf("%s-%d", roTestAnnotationValue, 1)} diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index b5ff7d364..a85da4220 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -166,8 +166,8 @@ func markMemberClusterAsUnhealthy(name string) { }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to mark member cluster as unhealthy") } -// markMemberClusterAsLeft marks the specified member cluster as 
left. -func markMemberClusterAsLeft(name string) { +// markMemberClusterAsLeaving marks the specified member cluster as leaving. +func markMemberClusterAsLeaving(name string) { mcObj := &clusterv1beta1.MemberCluster{} Eventually(func() error { // Add a custom deletion blocker finalizer to the member cluster. @@ -177,9 +177,27 @@ func markMemberClusterAsLeft(name string) { mcObj.Finalizers = append(mcObj.Finalizers, customDeletionBlockerFinalizer) return hubClient.Update(ctx, mcObj) - }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to mark member cluster as left") + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add finalizer") + + Expect(hubClient.Delete(ctx, mcObj)).To(Succeed(), "Failed to mark member cluster as leaving") +} + +// markMemberClusterAsLeft deletes the specified member cluster. +func markMemberClusterAsLeft(name string) { + mcObj := &clusterv1beta1.MemberCluster{} + Eventually(func() error { + // remove finalizer to the member cluster. + if err := hubClient.Get(ctx, types.NamespacedName{Name: name}, mcObj); err != nil { + return err + } + if len(mcObj.Finalizers) > 0 { + mcObj.Finalizers = []string{} + return hubClient.Update(ctx, mcObj) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove finalizer") - Expect(hubClient.Delete(ctx, mcObj)).To(Succeed(), "Failed to delete member cluster") + Expect(hubClient.Delete(ctx, mcObj)).To(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete member cluster") } // setAllMemberClustersToJoin creates a MemberCluster object for each member cluster. @@ -1278,7 +1296,7 @@ func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1. // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, diff --git a/test/e2e/webhook_test.go b/test/e2e/webhook_test.go index d86ab9214..0b7d43c29 100644 --- a/test/e2e/webhook_test.go +++ b/test/e2e/webhook_test.go @@ -45,7 +45,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: selector, }, } @@ -63,7 +63,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -86,7 +86,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickNPlacementType, @@ -137,7 +137,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "", @@ -167,7 +167,7 @@ var _ = Describe("webhook tests for CRP CREATE operations", func() { // the behavior of the controllers. 
Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ { Group: "apps", @@ -200,7 +200,7 @@ var _ = Describe("webhook tests for CRP UPDATE operations", Ordered, func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), }, } @@ -305,7 +305,7 @@ var _ = Describe("webhook tests for CRP tolerations", Ordered, func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ Tolerations: []placementv1beta1.Toleration{ @@ -1308,7 +1308,7 @@ var _ = Describe("webhook tests for ClusterResourcePlacementEviction CREATE oper Name: crpName, Finalizers: []string{"example.com/finalizer"}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -1340,7 +1340,7 @@ var _ = Describe("webhook tests for ClusterResourcePlacementEviction CREATE oper ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, diff --git a/test/scheduler/utils_test.go b/test/scheduler/utils_test.go index 50da929c4..9b7246e03 100644 --- a/test/scheduler/utils_test.go +++ b/test/scheduler/utils_test.go @@ -284,7 +284,7 @@ func createPickFixedCRPWithPolicySnapshot(crpName string, targetClusters []strin Name: crpName, Finalizers: 
[]string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: defaultResourceSelectors, Policy: policy, }, @@ -320,7 +320,7 @@ func createNilSchedulingPolicyCRPWithPolicySnapshot(crpName string, policySnapsh Name: crpName, Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: defaultResourceSelectors, Policy: policy, }, @@ -493,7 +493,7 @@ func createPickAllCRPWithPolicySnapshot(crpName string, policySnapshotName strin Name: crpName, Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: defaultResourceSelectors, Policy: policy, }, @@ -567,7 +567,7 @@ func createPickNCRPWithPolicySnapshot(crpName string, policySnapshotName string, Name: crpName, Finalizers: []string{customDeletionBlockerFinalizer}, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: defaultResourceSelectors, Policy: policy, }, diff --git a/test/upgrade/after/actuals_test.go b/test/upgrade/after/actuals_test.go index 10a83b5ea..53a9c7a9f 100644 --- a/test/upgrade/after/actuals_test.go +++ b/test/upgrade/after/actuals_test.go @@ -348,8 +348,9 @@ func customizedCRPStatusUpdatedActual(crpName string, wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{} for _, name := range wantSelectedClusters { wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ - ClusterName: name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), + ClusterName: name, + ObservedResourceIndex: wantObservedResourceIndex, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation, resourceIsTrackable, false), }) } for i := 0; i < 
len(wantUnselectedClusters); i++ { @@ -386,7 +387,7 @@ func customizedCRPStatusUpdatedActual(crpName string, // // * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or // * The CRP is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled. - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -441,7 +442,7 @@ func crpWithOneFailedAvailabilityCheckStatusUpdatedActual( }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpNotAvailableConditions(crp.Generation, false), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -498,7 +499,7 @@ func crpWithOneFailedApplyOpStatusUpdatedActual( }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpNotAppliedConditions(crp.Generation), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -619,7 +620,7 @@ func crpWithStuckRolloutDueToOneFailedAvailabilityCheckStatusUpdatedActual( }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -734,7 +735,7 @@ func crpWithStuckRolloutDueToOneFailedApplyOpStatusUpdatedActual( }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -798,7 +799,7 @@ func crpWithStuckRolloutDueToUntrackableResourcesStatusUpdatedActual( 
}, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, diff --git a/test/upgrade/after/setup_test.go b/test/upgrade/after/setup_test.go index 49667ebd7..d762e205c 100644 --- a/test/upgrade/after/setup_test.go +++ b/test/upgrade/after/setup_test.go @@ -116,6 +116,8 @@ var ( ignoreServiceSpecIPAndPolicyFields = cmpopts.IgnoreFields(corev1.ServiceSpec{}, "ClusterIP", "ClusterIPs", "ExternalIPs", "SessionAffinity", "IPFamilies", "IPFamilyPolicy", "InternalTrafficPolicy") ignoreServicePortNodePortProtocolField = cmpopts.IgnoreFields(corev1.ServicePort{}, "NodePort", "Protocol") ignoreRPSClusterNameField = cmpopts.IgnoreFields(placementv1beta1.ResourcePlacementStatus{}, "ClusterName") + // TODO (wantjian): Remove this ignore option with next release. + ignoreRPSObservedResourceIndexField = cmpopts.IgnoreFields(placementv1beta1.ResourcePlacementStatus{}, "ObservedResourceIndex") crpStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncConditionByType), @@ -124,6 +126,7 @@ var ( cmpopts.SortSlices(utils.LessFuncFailedResourcePlacements), utils.IgnoreConditionLTTAndMessageFields, cmpopts.EquateEmpty(), + ignoreRPSObservedResourceIndexField, } crpWithStuckRolloutStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncConditionByType), @@ -133,6 +136,7 @@ var ( utils.IgnoreConditionLTTAndMessageFields, ignoreRPSClusterNameField, cmpopts.EquateEmpty(), + ignoreRPSObservedResourceIndexField, } ) diff --git a/test/upgrade/before/actuals_test.go b/test/upgrade/before/actuals_test.go index 5ed3059bf..0c54f117a 100644 --- a/test/upgrade/before/actuals_test.go +++ b/test/upgrade/before/actuals_test.go @@ -387,7 +387,7 @@ func customizedCRPStatusUpdatedActual(crpName string, // // * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or // * The CRP is of the 
PickFixed placement type and the list of target clusters specified cannot be fulfilled. - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -444,7 +444,7 @@ func crpWithOneFailedAvailabilityCheckStatusUpdatedActual( }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpNotAvailableConditions(crp.Generation, false), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -502,7 +502,7 @@ func crpWithOneFailedApplyOpStatusUpdatedActual( }) } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: crpNotAppliedConditions(crp.Generation), PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -624,7 +624,7 @@ func crpWithStuckRolloutDueToOneFailedAvailabilityCheckStatusUpdatedActual( }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -740,7 +740,7 @@ func crpWithStuckRolloutDueToOneFailedApplyOpStatusUpdatedActual( }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: wantSelectedResourceIdentifiers, @@ -804,7 +804,7 @@ func crpWithStuckRolloutDueToUntrackableResourcesStatusUpdatedActual( }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + wantStatus := placementv1beta1.PlacementStatus{ Conditions: wantCRPConditions, PlacementStatuses: wantPlacementStatus, SelectedResources: 
wantSelectedResourceIdentifiers, diff --git a/test/upgrade/before/scenarios_test.go b/test/upgrade/before/scenarios_test.go index 6046091f0..cfc8f4630 100644 --- a/test/upgrade/before/scenarios_test.go +++ b/test/upgrade/before/scenarios_test.go @@ -50,7 +50,7 @@ var _ = Describe("CRP with trackable resources, all available (before upgrade)", ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -113,7 +113,7 @@ var _ = Describe("CRP with non-trackable resources, all available (before upgrad ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -204,7 +204,7 @@ var _ = Describe("CRP with availability failure (before upgrade)", Ordered, func ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -292,7 +292,7 @@ var _ = Describe("CRP with apply op failure (before upgrade)", Ordered, func() { ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickFixedPlacementType, @@ -413,7 +413,7 @@ var _ = Describe("CRP stuck in the rollout process (blocked by availability fail ObjectMeta: metav1.ObjectMeta{ 
Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Policy: &placementv1beta1.PlacementPolicy{ PlacementType: placementv1beta1.PickAllPlacementType, @@ -585,7 +585,7 @@ var _ = Describe("CRP stuck in the rollout process (blocked by apply op failure) ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, @@ -739,7 +739,7 @@ var _ = Describe("CRP stuck in the rollout process (long wait time)", Ordered, f ObjectMeta: metav1.ObjectMeta{ Name: crpName, }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ + Spec: placementv1beta1.PlacementSpec{ ResourceSelectors: workResourceSelector(workNamespaceName), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, diff --git a/test/upgrade/before/setup_test.go b/test/upgrade/before/setup_test.go index 89f7991cb..7d3723ad9 100644 --- a/test/upgrade/before/setup_test.go +++ b/test/upgrade/before/setup_test.go @@ -125,6 +125,8 @@ var ( ignoreServiceSpecIPAndPolicyFields = cmpopts.IgnoreFields(corev1.ServiceSpec{}, "ClusterIP", "ClusterIPs", "ExternalIPs", "SessionAffinity", "IPFamilies", "IPFamilyPolicy", "InternalTrafficPolicy") ignoreServicePortNodePortProtocolField = cmpopts.IgnoreFields(corev1.ServicePort{}, "NodePort", "Protocol") ignoreRPSClusterNameField = cmpopts.IgnoreFields(placementv1beta1.ResourcePlacementStatus{}, "ClusterName") + // TODO (wantjian): Remove this ignore option with next release. 
+ ignoreRPSObservedResourceIndexField = cmpopts.IgnoreFields(placementv1beta1.ResourcePlacementStatus{}, "ObservedResourceIndex") // Since Fleet agents v0.14.0 a minor reason string change was applied on the hub side that // affects CRP availability status reportings in the resource placement section when untrackable @@ -148,6 +150,7 @@ var ( utils.IgnoreConditionLTTAndMessageFields, availableDueToUntrackableResCondAcyclicTransformer, cmpopts.EquateEmpty(), + ignoreRPSObservedResourceIndexField, } crpWithStuckRolloutStatusCmpOptions = cmp.Options{ cmpopts.SortSlices(lessFuncConditionByType), @@ -158,6 +161,7 @@ var ( ignoreRPSClusterNameField, availableDueToUntrackableResCondAcyclicTransformer, cmpopts.EquateEmpty(), + ignoreRPSObservedResourceIndexField, } ) diff --git a/test/utils/resource/resource.go b/test/utils/resource/resource.go index 3411d4a1b..a181d2661 100644 --- a/test/utils/resource/resource.go +++ b/test/utils/resource/resource.go @@ -18,6 +18,7 @@ limitations under the License. package resource import ( + "encoding/json" "testing" appsv1 "k8s.io/api/apps/v1" @@ -67,7 +68,7 @@ func ServiceResourceContentForTest(t *testing.T) *fleetv1beta1.ResourceContent { func DeploymentResourceContentForTest(t *testing.T) *fleetv1beta1.ResourceContent { d := appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", + APIVersion: "apps/v1", Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ @@ -154,6 +155,73 @@ func ClusterRoleResourceContentForTest(t *testing.T) *fleetv1beta1.ResourceConte return CreateResourceContentForTest(t, role) } +// ClusterResourceEnvelopeResourceContentForTest creates a ClusterResourceEnvelope for testing. +// It contains a ClusterRole as the resource. 
+func ClusterResourceEnvelopeResourceContentForTest(t *testing.T) *fleetv1beta1.ResourceContent { + clusterRole := rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "clusterrole-name", + }, + } + roleBytes, err := json.Marshal(clusterRole) + if err != nil { + t.Fatalf("ClusterRole marshalJSON failed: %v", err) + } + + clusterResourceEnvelope := fleetv1beta1.ClusterResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "placement.kubernetes-fleet.io/v1beta1", + Kind: "ClusterResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-resource-envelope", + }, + Data: map[string]runtime.RawExtension{"clusterRole.yaml": {Raw: roleBytes}}, + } + + return CreateResourceContentForTest(t, clusterResourceEnvelope) +} + +// ResourceEnvelopeResourceContentForTest creates a ResourceEnvelope for testing. +// It contains a ConfigMap as the resource. +func ResourceEnvelopeResourceContentForTest(t *testing.T) *fleetv1beta1.ResourceContent { + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cm-name", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "key1": "value1", + }, + } + cmBytes, err := json.Marshal(cm) + if err != nil { + t.Fatalf("ConfigMap marshalJSON failed: %v", err) + } + + resourceEnvelope := fleetv1beta1.ResourceEnvelope{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "placement.kubernetes-fleet.io/v1beta1", + Kind: "ResourceEnvelope", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-resource-envelope", + Namespace: "test-namespace", + }, + Data: map[string]runtime.RawExtension{"cm.yaml": {Raw: cmBytes}}, + } + + return CreateResourceContentForTest(t, resourceEnvelope) +} + // CreateResourceContentForTest creates a ResourceContent for testing. 
func CreateResourceContentForTest(t *testing.T, obj interface{}) *fleetv1beta1.ResourceContent { t.Helper() diff --git a/tools/draincluster/drain_test.go b/tools/draincluster/drain_test.go index 810c697b1..9cac1a119 100644 --- a/tools/draincluster/drain_test.go +++ b/tools/draincluster/drain_test.go @@ -190,7 +190,7 @@ func TestCollectClusterScopedResourcesSelectedByCRP(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: "rbac.authorization.k8s.io", @@ -237,7 +237,7 @@ func TestCollectClusterScopedResourcesSelectedByCRP(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-crp", }, - Status: placementv1beta1.ClusterResourcePlacementStatus{ + Status: placementv1beta1.PlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ { Group: "",