diff --git a/Makefile b/Makefile index 615916b32eb..85bfe11c2b9 100644 --- a/Makefile +++ b/Makefile @@ -67,6 +67,7 @@ PREFETCHER_DOCKERFILE ?= docker/Dockerfile.fileprefetch CSI_BINARY ?= bin/fluid-csi DATASET_BINARY ?= bin/dataset-controller APPLICATION_BINARY ?= bin/fluidapp-controller +WORKLOAD_BINARY ?= bin/workload-controller ALLUXIORUNTIME_BINARY ?= bin/alluxioruntime-controller JINDORUNTIME_BINARY ?= bin/jindoruntime-controller GOOSEFSRUNTIME_BINARY ?= bin/goosefsruntime-controller @@ -254,6 +255,15 @@ webhook-build: application-controller-build: CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${ARCH} GO111MODULE=${GO_MODULE} go build ${GC_FLAGS} -a -o ${APPLICATION_BINARY} -ldflags '${LDFLAGS}' cmd/fluidapp/main.go +.PHONY: workload-controller-build +workload-controller-build: + CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${ARCH} GO111MODULE=${GO_MODULE} go build ${GC_FLAGS} -a -o ${WORKLOAD_BINARY} -ldflags '${LDFLAGS}' cmd/workload/main.go + +# Run workload-controller locally (requires KUBECONFIG and CRD installed) +.PHONY: run-workload +run-workload: manifests + go run cmd/workload/main.go start + # Build the docker image .PHONY: docker-build-dataset-controller docker-build-dataset-controller: diff --git a/api/workload/v1alpha1/advanced_statefulset_types.go b/api/workload/v1alpha1/advanced_statefulset_types.go new file mode 100644 index 00000000000..c099bb53286 --- /dev/null +++ b/api/workload/v1alpha1/advanced_statefulset_types.go @@ -0,0 +1,574 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2024 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "strings" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// ============================================================ +// Well-known labels and annotations +// ============================================================ + +const ( + ControllerRevisionHashLabelKey = "workload.fluid.io/controller-revision-hash" + ReservedPodLabelKey = "workload.fluid.io/united-deployment-reserved-pod" + SubSetNameLabelKey = "workload.fluid.io/subset-name" + SpecifiedDeleteKey = "workload.fluid.io/specified-delete" + AnnotationSubsetPatchKey = "workload.fluid.io/subset-patch" + + // ContainerLaunchBarrierEnvName is the env name used to indicate container launch priority barrier. + ContainerLaunchBarrierEnvName = "KRUISE_CONTAINER_LAUNCH_BARRIER" +) + +// ============================================================ +// InPlace Update types +// ============================================================ + +const ( + InPlaceUpdateReady v1.PodConditionType = "InPlaceUpdateReady" + + InPlaceUpdateStateKey string = "workload.fluid.io/inplace-update-state" + InPlaceUpdateStateKeyOld string = "inplace-update-state" + + InPlaceUpdateGraceKey string = "workload.fluid.io/inplace-update-grace" + InPlaceUpdateGraceKeyOld string = "inplace-update-grace" + + RuntimeContainerMetaKey = "workload.fluid.io/runtime-containers-meta" +) + +// InPlaceUpdateState is the state of an in-place update, recorded in pod annotations. 
+type InPlaceUpdateState struct { + Revision string `json:"revision"` + UpdateTimestamp metav1.Time `json:"updateTimestamp"` + LastContainerStatuses map[string]InPlaceUpdateContainerStatus `json:"lastContainerStatuses"` + UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"` + UpdateResources bool `json:"updateResources,omitempty"` + UpdateImages bool `json:"updateImages,omitempty"` + NextContainerImages map[string]string `json:"nextContainerImages,omitempty"` + NextContainerRefMetadata map[string]metav1.ObjectMeta `json:"nextContainerRefMetadata,omitempty"` + NextContainerResources map[string]v1.ResourceRequirements `json:"nextContainerResources,omitempty"` + PreCheckBeforeNext *InPlaceUpdatePreCheckBeforeNext `json:"preCheckBeforeNext,omitempty"` + ContainerBatchesRecord []InPlaceUpdateContainerBatch `json:"containerBatchesRecord,omitempty"` +} + +// InPlaceUpdatePreCheckBeforeNext specifies containers that must be ready before the next batch update. +type InPlaceUpdatePreCheckBeforeNext struct { + ContainersRequiredReady []string `json:"containersRequiredReady,omitempty"` +} + +// InPlaceUpdateContainerBatch records a batch of containers updated at a given time. +type InPlaceUpdateContainerBatch struct { + Timestamp metav1.Time `json:"timestamp"` + Containers []string `json:"containers"` +} + +// InPlaceUpdateContainerStatus records the image ID of a container before in-place update. +type InPlaceUpdateContainerStatus struct { + ImageID string `json:"imageID,omitempty"` +} + +// InPlaceUpdateStrategy defines the strategy for in-place updates. +type InPlaceUpdateStrategy struct { + GracePeriodSeconds int32 `json:"gracePeriodSeconds,omitempty"` +} + +// GetInPlaceUpdateState returns the in-place update state annotation value. 
+func GetInPlaceUpdateState(obj metav1.Object) (string, bool) { + if v, ok := obj.GetAnnotations()[InPlaceUpdateStateKey]; ok { + return v, ok + } + v, ok := obj.GetAnnotations()[InPlaceUpdateStateKeyOld] + return v, ok +} + +// GetInPlaceUpdateGrace returns the in-place update grace annotation value. +func GetInPlaceUpdateGrace(obj metav1.Object) (string, bool) { + if v, ok := obj.GetAnnotations()[InPlaceUpdateGraceKey]; ok { + return v, ok + } + v, ok := obj.GetAnnotations()[InPlaceUpdateGraceKeyOld] + return v, ok +} + +// RemoveInPlaceUpdateGrace removes the in-place update grace annotations from the object. +func RemoveInPlaceUpdateGrace(obj metav1.Object) { + delete(obj.GetAnnotations(), InPlaceUpdateGraceKey) + delete(obj.GetAnnotations(), InPlaceUpdateGraceKeyOld) +} + +// RuntimeContainerMetaSet holds metadata for all containers at runtime. +type RuntimeContainerMetaSet struct { + Containers []RuntimeContainerMeta `json:"containers"` +} + +// RuntimeContainerMeta holds runtime metadata for a single container. +type RuntimeContainerMeta struct { + Name string `json:"name"` + ContainerID string `json:"containerID"` + RestartCount int32 `json:"restartCount"` + Hashes RuntimeContainerHashes `json:"hashes"` +} + +// RuntimeContainerHashes holds hash values used to detect container spec changes. +type RuntimeContainerHashes struct { + PlainHash uint64 `json:"plainHash"` + ExtractedEnvFromMetadataHash uint64 `json:"extractedEnvFromMetadataHash,omitempty"` +} + +// GetRuntimeContainerMetaSet parses the runtime container meta annotation. 
+func GetRuntimeContainerMetaSet(obj metav1.Object) (*RuntimeContainerMetaSet, error) { + str, ok := obj.GetAnnotations()[RuntimeContainerMetaKey] + if !ok { + return nil, nil + } + s := RuntimeContainerMetaSet{} + if err := json.Unmarshal([]byte(str), &s); err != nil { + return nil, err + } + return &s, nil +} + +// ============================================================ +// Lifecycle types +// ============================================================ + +const ( + LifecycleStateKey = "lifecycle.workload.fluid.io/state" + LifecycleTimestampKey = "lifecycle.workload.fluid.io/timestamp" + LifecycleStatePreparingNormal LifecycleStateType = "PreparingNormal" + LifecycleStateNormal LifecycleStateType = "Normal" + LifecycleStatePreparingUpdate LifecycleStateType = "PreparingUpdate" + LifecycleStateUpdating LifecycleStateType = "Updating" + LifecycleStateUpdated LifecycleStateType = "Updated" + LifecycleStatePreparingDelete LifecycleStateType = "PreparingDelete" +) + +// LifecycleStateType represents the lifecycle state of a pod. +type LifecycleStateType string + +// Lifecycle defines hooks for pod lifecycle events. +type Lifecycle struct { + // PreDelete is the hook before a pod is deleted. + // +optional + PreDelete *LifecycleHook `json:"preDelete,omitempty"` + // InPlaceUpdate is the hook during in-place updates. + // +optional + InPlaceUpdate *LifecycleHook `json:"inPlaceUpdate,omitempty"` + // PreNormal is the hook before a pod transitions to normal state. + // +optional + PreNormal *LifecycleHook `json:"preNormal,omitempty"` +} + +// LifecycleHook defines label/finalizer handlers for a lifecycle event. +type LifecycleHook struct { + // LabelsHandler holds label key-value pairs to set on the pod during this lifecycle hook. + // +optional + LabelsHandler map[string]string `json:"labelsHandler,omitempty"` + // FinalizersHandler holds finalizers to add on the pod during this lifecycle hook. 
+ // +optional + FinalizersHandler []string `json:"finalizersHandler,omitempty"` + // MarkPodNotReady marks the pod as not ready during this lifecycle hook. + // +optional + MarkPodNotReady bool `json:"markPodNotReady,omitempty"` +} + +// ============================================================ +// Pod Readiness Gate +// ============================================================ + +const ( + // KruisePodReadyConditionType is the pod condition type for kruise readiness gate. + KruisePodReadyConditionType v1.PodConditionType = "KruisePodReady" + // InPlaceUpdateStrategyKruisePodReadyConditionType is an alias for KruisePodReadyConditionType. + InPlaceUpdateStrategyKruisePodReadyConditionType = KruisePodReadyConditionType +) + +// ============================================================ +// Pod Unavailable Label +// ============================================================ + +const ( + PubUnavailablePodLabelPrefix = "unavailable-pod.fluid.io/" +) + +// HasUnavailableLabel returns true if the given labels contain an unavailable pod label. +func HasUnavailableLabel(labels map[string]string) bool { + if len(labels) == 0 { + return false + } + for key := range labels { + if strings.HasPrefix(key, PubUnavailablePodLabelPrefix) { + return true + } + } + return false +} + +// ============================================================ +// Update Priority types +// ============================================================ + +// UpdatePriorityStrategy defines how to select pods to update in priority order. +type UpdatePriorityStrategy struct { + // OrderPriority specifies the ordered key for pod update priority. + // +optional + OrderPriority []UpdatePriorityOrderTerm `json:"orderPriority,omitempty"` + // WeightPriority specifies the weight-based priority for pod update. + // +optional + WeightPriority []UpdatePriorityWeightTerm `json:"weightPriority,omitempty"` +} + +// UpdatePriorityOrderTerm defines a single ordered key for pod update priority. 
+type UpdatePriorityOrderTerm struct { + OrderedKey string `json:"orderedKey"` +} + +// UpdatePriorityWeightTerm defines a weight-based priority for pods matching a selector. +type UpdatePriorityWeightTerm struct { + Weight int32 `json:"weight"` + MatchSelector metav1.LabelSelector `json:"matchSelector"` +} + +// FieldsValidation validates the UpdatePriorityStrategy fields. +func (strategy *UpdatePriorityStrategy) FieldsValidation() error { + if strategy == nil { + return nil + } + if len(strategy.WeightPriority) > 0 && len(strategy.OrderPriority) > 0 { + return fmt.Errorf("only one of weightPriority and orderPriority can be used") + } + for _, w := range strategy.WeightPriority { + if w.Weight < 0 || w.Weight > 100 { + return fmt.Errorf("weight must be valid number in the range 1-100") + } + if w.MatchSelector.Size() == 0 { + return fmt.Errorf("selector can not be empty") + } + if _, err := metav1.LabelSelectorAsSelector(&w.MatchSelector); err != nil { + return fmt.Errorf("invalid selector %v", err) + } + } + for _, o := range strategy.OrderPriority { + if len(o.OrderedKey) == 0 { + return fmt.Errorf("order key can not be empty") + } + } + return nil +} + +// ============================================================ +// Scatter Strategy +// ============================================================ + +// UpdateScatterStrategy defines how to scatter pod updates across different label values. +type UpdateScatterStrategy []UpdateScatterTerm + +// UpdateScatterTerm defines a single label key-value pair for scatter strategy. +type UpdateScatterTerm struct { + Key string `json:"key"` + Value string `json:"value"` +} + +// FieldsValidation validates the UpdateScatterStrategy fields. 
+func (strategy UpdateScatterStrategy) FieldsValidation() error { + if len(strategy) == 0 { + return nil + } + m := make(map[string]struct{}, len(strategy)) + for _, term := range strategy { + if term.Key == "" { + return fmt.Errorf("key should not be empty") + } + id := term.Key + ":" + term.Value + if _, ok := m[id]; !ok { + m[id] = struct{}{} + } else { + return fmt.Errorf("duplicated key=%v value=%v", term.Key, term.Value) + } + } + return nil +} + +// ============================================================ +// AdvancedStatefulSet types +// ============================================================ + +const ( + // MaxMinReadySeconds is the maximum value for minReadySeconds in rolling update. + MaxMinReadySeconds = 300 + + // FailedCreatePod is the condition type for failed pod creation. + FailedCreatePod apps.StatefulSetConditionType = "FailedCreatePod" + // FailedUpdatePod is the condition type for failed pod update. + FailedUpdatePod apps.StatefulSetConditionType = "FailedUpdatePod" +) + +// VolumeClaimUpdateStrategyType defines the strategy for updating volume claims. +// +enum +type VolumeClaimUpdateStrategyType string + +const ( + // OnPodRollingUpdateVolumeClaimUpdateStrategyType updates volume claims during pod rolling update. + OnPodRollingUpdateVolumeClaimUpdateStrategyType VolumeClaimUpdateStrategyType = "OnPodRollingUpdate" + // OnPVCDeleteVolumeClaimUpdateStrategyType updates volume claims when PVC is deleted. + OnPVCDeleteVolumeClaimUpdateStrategyType VolumeClaimUpdateStrategyType = "OnDelete" +) + +// VolumeClaimStatus records the compatibility status of a volume claim template. +type VolumeClaimStatus struct { + VolumeClaimName string `json:"volumeClaimName"` + CompatibleReplicas int32 `json:"compatibleReplicas"` + CompatibleReadyReplicas int32 `json:"compatibleReadyReplicas"` +} + +// VolumeClaimUpdateStrategy defines the strategy for updating volume claim templates. 
+type VolumeClaimUpdateStrategy struct { + Type VolumeClaimUpdateStrategyType `json:"type,omitempty"` +} + +// PodUpdateStrategyType defines the strategy for updating pods in-place. +// +enum +type PodUpdateStrategyType string + +const ( + // RecreatePodUpdateStrategyType deletes and recreates the pod on update. + RecreatePodUpdateStrategyType PodUpdateStrategyType = "ReCreate" + // InPlaceIfPossiblePodUpdateStrategyType updates pods in-place if possible, falls back to recreate. + InPlaceIfPossiblePodUpdateStrategyType PodUpdateStrategyType = "InPlaceIfPossible" + // InPlaceOnlyPodUpdateStrategyType requires in-place update; fails if not possible. + InPlaceOnlyPodUpdateStrategyType PodUpdateStrategyType = "InPlaceOnly" +) + +// PersistentVolumeClaimRetentionPolicyType defines the retention policy for PVCs. +// +enum +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType retains the PVC after pod deletion/scale. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // DeletePersistentVolumeClaimRetentionPolicyType deletes the PVC after pod deletion/scale. + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy for PVC retention. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs when the AdvancedStatefulSet is deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + // WhenScaled specifies what happens to PVCs when the AdvancedStatefulSet is scaled down. + WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetOrdinals defines the start ordinal for pod naming. +type StatefulSetOrdinals struct { + // Start is the starting ordinal for pod naming. 
+ // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + +// StatefulSetScaleStrategy defines the strategy for scaling AdvancedStatefulSet pods. +type StatefulSetScaleStrategy struct { + // MaxUnavailable is the maximum number of pods that can be unavailable during scaling. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` +} + +// UnorderedUpdateStrategy allows updating pods without strict ordering. +type UnorderedUpdateStrategy struct { + // PriorityStrategy defines the priority order for pod updates. + // +optional + PriorityStrategy *UpdatePriorityStrategy `json:"priorityStrategy,omitempty"` +} + +// RollingUpdateStatefulSetStrategy defines the rolling update strategy for AdvancedStatefulSet. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the AdvancedStatefulSet should be partitioned for updates. + // +optional + Partition *int32 `json:"partition,omitempty"` + // MaxUnavailable is the maximum number of pods that can be unavailable during rolling update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + // PodUpdatePolicy indicates the policy for pod updates. + // +optional + PodUpdatePolicy PodUpdateStrategyType `json:"podUpdatePolicy,omitempty"` + // Paused indicates that the AdvancedStatefulSet is paused. + // +optional + Paused bool `json:"paused,omitempty"` + // UnorderedUpdate allows updating pods out of order. + // +optional + UnorderedUpdate *UnorderedUpdateStrategy `json:"unorderedUpdate,omitempty"` + // InPlaceUpdateStrategy defines the in-place update strategy. + // +optional + InPlaceUpdateStrategy *InPlaceUpdateStrategy `json:"inPlaceUpdateStrategy,omitempty"` + // MinReadySeconds is the minimum number of seconds a pod must be ready before being considered available. 
+ // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` +} + +// StatefulSetUpdateStrategy defines how a AdvancedStatefulSet is updated. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // +optional + Type apps.StatefulSetUpdateStrategyType `json:"type,omitempty"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty"` +} + +// AdvancedStatefulSetSpec defines the desired state of AdvancedStatefulSet. +type AdvancedStatefulSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + // Selector is a label query over pods that should match the replica count. + Selector *metav1.LabelSelector `json:"selector"` + // Template is the object that describes the pod that will be created. + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Template v1.PodTemplateSpec `json:"template"` + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` + // VolumeClaimUpdateStrategy defines the strategy for updating volume claim templates. + // +optional + VolumeClaimUpdateStrategy VolumeClaimUpdateStrategy `json:"volumeClaimUpdateStrategy,omitempty"` + // ServiceName is the name of the service that governs this AdvancedStatefulSet. + // +optional + ServiceName string `json:"serviceName,omitempty"` + // PodManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. 
+ // +optional + PodManagementPolicy apps.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + // UpdateStrategy indicates the StatefulSetUpdateStrategy that will be employed. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` + // RevisionHistoryLimit is the maximum number of revisions that will be maintained + // in the AdvancedStatefulSet's revision history. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + // ReserveOrdinals is the list of ordinals to skip when assigning pod ordinals. + ReserveOrdinals []intstr.IntOrString `json:"reserveOrdinals,omitempty"` + // Lifecycle defines the lifecycle hooks for pods. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + // ScaleStrategy defines the strategy for scaling AdvancedStatefulSet pods. + // +optional + ScaleStrategy *StatefulSetScaleStrategy `json:"scaleStrategy,omitempty"` + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the AdvancedStatefulSet VolumeClaimTemplates. + // +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + // Ordinals controls the numbering of replica indices in a AdvancedStatefulSet. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty"` +} + +// AdvancedStatefulSetStatus defines the observed state of AdvancedStatefulSet. +type AdvancedStatefulSetStatus struct { + // ObservedGeneration is the most recent generation observed for this AdvancedStatefulSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // Replicas is the number of Pods created by the AdvancedStatefulSet controller. + Replicas int32 `json:"replicas"` + // ReadyReplicas is the number of Pods created by the AdvancedStatefulSet controller that have a Ready Condition. 
+ ReadyReplicas int32 `json:"readyReplicas"` + // AvailableReplicas is the number of Pods created by the AdvancedStatefulSet controller that have been Ready + // for minReadySeconds. + AvailableReplicas int32 `json:"availableReplicas"` + // CurrentReplicas is the number of Pods created by the AdvancedStatefulSet controller from the AdvancedStatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas"` + // UpdatedReplicas is the number of Pods created by the AdvancedStatefulSet controller from the AdvancedStatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas"` + // UpdatedReadyReplicas is the number of updated Pods that have a Ready Condition. + // +optional + UpdatedReadyReplicas int32 `json:"updatedReadyReplicas,omitempty"` + // UpdatedAvailableReplicas is the number of updated Pods that have been Ready for minReadySeconds. + // +optional + UpdatedAvailableReplicas int32 `json:"updatedAvailableReplicas,omitempty"` + // CurrentRevision, if not empty, indicates the version of the AdvancedStatefulSet used to generate Pods in the sequence + // [0,currentReplicas). + // +optional + CurrentRevision string `json:"currentRevision,omitempty"` + // UpdateRevision, if not empty, indicates the version of the AdvancedStatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + // +optional + UpdateRevision string `json:"updateRevision,omitempty"` + // CollisionCount is the count of hash collisions for the AdvancedStatefulSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty"` + // Conditions represent the latest available observations of a AdvancedStatefulSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []apps.StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // LabelSelector is the label selector for pods scaling purposes. 
+ // +optional + LabelSelector string `json:"labelSelector,omitempty"` + // VolumeClaims records the compatibility status of each volume claim template. + // +optional + VolumeClaims []VolumeClaimStatus `json:"volumeClaims,omitempty"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector +// +kubebuilder:resource:shortName=asts,categories=fluid +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="DESIRED",type="integer",JSONPath=".spec.replicas" +// +kubebuilder:printcolumn:name="CURRENT",type="integer",JSONPath=".status.replicas" +// +kubebuilder:printcolumn:name="UPDATED",type="integer",JSONPath=".status.updatedReplicas" +// +kubebuilder:printcolumn:name="READY",type="integer",JSONPath=".status.readyReplicas" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="CONTAINERS",type="string",priority=1,JSONPath=".spec.template.spec.containers[*].name" +// +kubebuilder:printcolumn:name="IMAGES",type="string",priority=1,JSONPath=".spec.template.spec.containers[*].image" + +// AdvancedStatefulSet is the Schema for the statefulsets API +type AdvancedStatefulSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AdvancedStatefulSetSpec `json:"spec,omitempty"` + Status AdvancedStatefulSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AdvancedStatefulSetList contains a list of AdvancedStatefulSet +type AdvancedStatefulSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []AdvancedStatefulSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AdvancedStatefulSet{}, &AdvancedStatefulSetList{}) +} diff --git a/api/workload/v1alpha1/groupversion_info.go b/api/workload/v1alpha1/groupversion_info.go new file mode 100644 index 00000000000..d591d58f6e4 --- /dev/null +++ b/api/workload/v1alpha1/groupversion_info.go @@ -0,0 +1,57 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2024 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the workload v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=workload.fluid.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "workload.fluid.io", Version: "v1alpha1"} + + SchemeGroupVersion = GroupVersion + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme + + // Scheme is the runtime.Scheme to which all workload v1alpha1 types are registered. 
+ Scheme = runtime.NewScheme() + + // Codecs provides access to encoding and decoding for the scheme. + Codecs serializer.CodecFactory +) + +func init() { + _ = AddToScheme(Scheme) + Codecs = serializer.NewCodecFactory(Scheme) +} + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/api/workload/v1alpha1/zz_generated.deepcopy.go b/api/workload/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..23449661ec1 --- /dev/null +++ b/api/workload/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,671 @@ +//go:build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InPlaceUpdateContainerBatch) DeepCopyInto(out *InPlaceUpdateContainerBatch) { + *out = *in + in.Timestamp.DeepCopyInto(&out.Timestamp) + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateContainerBatch. +func (in *InPlaceUpdateContainerBatch) DeepCopy() *InPlaceUpdateContainerBatch { + if in == nil { + return nil + } + out := new(InPlaceUpdateContainerBatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InPlaceUpdateContainerStatus) DeepCopyInto(out *InPlaceUpdateContainerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateContainerStatus. +func (in *InPlaceUpdateContainerStatus) DeepCopy() *InPlaceUpdateContainerStatus { + if in == nil { + return nil + } + out := new(InPlaceUpdateContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InPlaceUpdatePreCheckBeforeNext) DeepCopyInto(out *InPlaceUpdatePreCheckBeforeNext) { + *out = *in + if in.ContainersRequiredReady != nil { + in, out := &in.ContainersRequiredReady, &out.ContainersRequiredReady + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdatePreCheckBeforeNext. +func (in *InPlaceUpdatePreCheckBeforeNext) DeepCopy() *InPlaceUpdatePreCheckBeforeNext { + if in == nil { + return nil + } + out := new(InPlaceUpdatePreCheckBeforeNext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InPlaceUpdateState) DeepCopyInto(out *InPlaceUpdateState) { + *out = *in + in.UpdateTimestamp.DeepCopyInto(&out.UpdateTimestamp) + if in.LastContainerStatuses != nil { + in, out := &in.LastContainerStatuses, &out.LastContainerStatuses + *out = make(map[string]InPlaceUpdateContainerStatus, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NextContainerImages != nil { + in, out := &in.NextContainerImages, &out.NextContainerImages + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NextContainerRefMetadata != nil { + in, out := &in.NextContainerRefMetadata, &out.NextContainerRefMetadata + *out = make(map[string]v1.ObjectMeta, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.NextContainerResources != nil { + in, out := &in.NextContainerResources, &out.NextContainerResources + *out = make(map[string]corev1.ResourceRequirements, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.PreCheckBeforeNext != nil { + in, out := &in.PreCheckBeforeNext, &out.PreCheckBeforeNext + *out = new(InPlaceUpdatePreCheckBeforeNext) + (*in).DeepCopyInto(*out) + } + if in.ContainerBatchesRecord != nil { + in, out := &in.ContainerBatchesRecord, &out.ContainerBatchesRecord + *out = make([]InPlaceUpdateContainerBatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateState. +func (in *InPlaceUpdateState) DeepCopy() *InPlaceUpdateState { + if in == nil { + return nil + } + out := new(InPlaceUpdateState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InPlaceUpdateStrategy) DeepCopyInto(out *InPlaceUpdateStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateStrategy. +func (in *InPlaceUpdateStrategy) DeepCopy() *InPlaceUpdateStrategy { + if in == nil { + return nil + } + out := new(InPlaceUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PreDelete != nil { + in, out := &in.PreDelete, &out.PreDelete + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.InPlaceUpdate != nil { + in, out := &in.InPlaceUpdate, &out.InPlaceUpdate + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.PreNormal != nil { + in, out := &in.PreNormal, &out.PreNormal + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. +func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { + *out = *in + if in.LabelsHandler != nil { + in, out := &in.LabelsHandler, &out.LabelsHandler + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FinalizersHandler != nil { + in, out := &in.FinalizersHandler, &out.FinalizersHandler + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. 
+func (in *LifecycleHook) DeepCopy() *LifecycleHook { + if in == nil { + return nil + } + out := new(LifecycleHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(int32) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.UnorderedUpdate != nil { + in, out := &in.UnorderedUpdate, &out.UnorderedUpdate + *out = new(UnorderedUpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.InPlaceUpdateStrategy != nil { + in, out := &in.InPlaceUpdateStrategy, &out.InPlaceUpdateStrategy + *out = new(InPlaceUpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeContainerHashes) DeepCopyInto(out *RuntimeContainerHashes) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeContainerHashes. 
+func (in *RuntimeContainerHashes) DeepCopy() *RuntimeContainerHashes { + if in == nil { + return nil + } + out := new(RuntimeContainerHashes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeContainerMeta) DeepCopyInto(out *RuntimeContainerMeta) { + *out = *in + out.Hashes = in.Hashes +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeContainerMeta. +func (in *RuntimeContainerMeta) DeepCopy() *RuntimeContainerMeta { + if in == nil { + return nil + } + out := new(RuntimeContainerMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeContainerMetaSet) DeepCopyInto(out *RuntimeContainerMetaSet) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]RuntimeContainerMeta, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeContainerMetaSet. +func (in *RuntimeContainerMetaSet) DeepCopy() *RuntimeContainerMetaSet { + if in == nil { + return nil + } + out := new(RuntimeContainerMetaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedStatefulSet) DeepCopyInto(out *AdvancedStatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedStatefulSet. 
+func (in *AdvancedStatefulSet) DeepCopy() *AdvancedStatefulSet { + if in == nil { + return nil + } + out := new(AdvancedStatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdvancedStatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedStatefulSetList) DeepCopyInto(out *AdvancedStatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AdvancedStatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedStatefulSetList. +func (in *AdvancedStatefulSetList) DeepCopy() *AdvancedStatefulSetList { + if in == nil { + return nil + } + out := new(AdvancedStatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdvancedStatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. 
+func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetScaleStrategy) DeepCopyInto(out *StatefulSetScaleStrategy) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetScaleStrategy. +func (in *StatefulSetScaleStrategy) DeepCopy() *StatefulSetScaleStrategy { + if in == nil { + return nil + } + out := new(StatefulSetScaleStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedStatefulSetSpec) DeepCopyInto(out *AdvancedStatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]corev1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.VolumeClaimUpdateStrategy = in.VolumeClaimUpdateStrategy + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ReserveOrdinals != nil { + in, out := &in.ReserveOrdinals, &out.ReserveOrdinals + *out = make([]intstr.IntOrString, len(*in)) + copy(*out, *in) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.ScaleStrategy != nil { + in, out := &in.ScaleStrategy, &out.ScaleStrategy + *out = new(StatefulSetScaleStrategy) + (*in).DeepCopyInto(*out) + } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedStatefulSetSpec. 
+func (in *AdvancedStatefulSetSpec) DeepCopy() *AdvancedStatefulSetSpec { + if in == nil { + return nil + } + out := new(AdvancedStatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedStatefulSetStatus) DeepCopyInto(out *AdvancedStatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]appsv1.StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeClaims != nil { + in, out := &in.VolumeClaims, &out.VolumeClaims + *out = make([]VolumeClaimStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedStatefulSetStatus. +func (in *AdvancedStatefulSetStatus) DeepCopy() *AdvancedStatefulSetStatus { + if in == nil { + return nil + } + out := new(AdvancedStatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UnorderedUpdateStrategy) DeepCopyInto(out *UnorderedUpdateStrategy) { + *out = *in + if in.PriorityStrategy != nil { + in, out := &in.PriorityStrategy, &out.PriorityStrategy + *out = new(UpdatePriorityStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnorderedUpdateStrategy. +func (in *UnorderedUpdateStrategy) DeepCopy() *UnorderedUpdateStrategy { + if in == nil { + return nil + } + out := new(UnorderedUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePriorityOrderTerm) DeepCopyInto(out *UpdatePriorityOrderTerm) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePriorityOrderTerm. +func (in *UpdatePriorityOrderTerm) DeepCopy() *UpdatePriorityOrderTerm { + if in == nil { + return nil + } + out := new(UpdatePriorityOrderTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePriorityStrategy) DeepCopyInto(out *UpdatePriorityStrategy) { + *out = *in + if in.OrderPriority != nil { + in, out := &in.OrderPriority, &out.OrderPriority + *out = make([]UpdatePriorityOrderTerm, len(*in)) + copy(*out, *in) + } + if in.WeightPriority != nil { + in, out := &in.WeightPriority, &out.WeightPriority + *out = make([]UpdatePriorityWeightTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePriorityStrategy. 
+func (in *UpdatePriorityStrategy) DeepCopy() *UpdatePriorityStrategy { + if in == nil { + return nil + } + out := new(UpdatePriorityStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePriorityWeightTerm) DeepCopyInto(out *UpdatePriorityWeightTerm) { + *out = *in + in.MatchSelector.DeepCopyInto(&out.MatchSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePriorityWeightTerm. +func (in *UpdatePriorityWeightTerm) DeepCopy() *UpdatePriorityWeightTerm { + if in == nil { + return nil + } + out := new(UpdatePriorityWeightTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in UpdateScatterStrategy) DeepCopyInto(out *UpdateScatterStrategy) { + { + in := &in + *out = make(UpdateScatterStrategy, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateScatterStrategy. +func (in UpdateScatterStrategy) DeepCopy() UpdateScatterStrategy { + if in == nil { + return nil + } + out := new(UpdateScatterStrategy) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateScatterTerm) DeepCopyInto(out *UpdateScatterTerm) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateScatterTerm. +func (in *UpdateScatterTerm) DeepCopy() *UpdateScatterTerm { + if in == nil { + return nil + } + out := new(UpdateScatterTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeClaimStatus) DeepCopyInto(out *VolumeClaimStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimStatus. +func (in *VolumeClaimStatus) DeepCopy() *VolumeClaimStatus { + if in == nil { + return nil + } + out := new(VolumeClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeClaimUpdateStrategy) DeepCopyInto(out *VolumeClaimUpdateStrategy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimUpdateStrategy. +func (in *VolumeClaimUpdateStrategy) DeepCopy() *VolumeClaimUpdateStrategy { + if in == nil { + return nil + } + out := new(VolumeClaimUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/workload/app/workload.go b/cmd/workload/app/workload.go new file mode 100644 index 00000000000..c48045feed8 --- /dev/null +++ b/cmd/workload/app/workload.go @@ -0,0 +1,113 @@ +/* +Copyright 2026 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package app + +import ( + "os" + + "github.com/fluid-cloudnative/fluid" + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" + advancedstatefulset "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/advancedstatefulset" + "github.com/fluid-cloudnative/fluid/pkg/utils" + "github.com/spf13/cobra" + zapOpt "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + + metricsAddr string + enableLeaderElection bool + leaderElectionNamespace string + development bool + pprofAddr string +) + +var workloadCmd = &cobra.Command{ + Use: "start", + Short: "start workload-controller in Kubernetes", + Run: func(cmd *cobra.Command, args []string) { + handle() + }, +} + +func init() { + _ = clientgoscheme.AddToScheme(scheme) + _ = workloadv1alpha1.AddToScheme(scheme) + + workloadCmd.Flags().StringVarP(&metricsAddr, "metrics-addr", "", ":8084", "The address the metric endpoint binds to.") + workloadCmd.Flags().BoolVarP(&enableLeaderElection, "enable-leader-election", "", false, "Enable leader election for controller manager.") + workloadCmd.Flags().StringVarP(&leaderElectionNamespace, "leader-election-namespace", "", "fluid-system", "The namespace in which the leader election resource will be created.") + workloadCmd.Flags().BoolVarP(&development, "development", "", true, "Enable development mode for workload controller.") + workloadCmd.Flags().StringVarP(&pprofAddr, "pprof-addr", "", "", "The address for pprof to use while exporting profiling results") +} + +// NewWorkloadCommand creates the cobra command for the workload controller. 
+func NewWorkloadCommand() *cobra.Command { + return workloadCmd +} + +func handle() { + fluid.LogVersion() + + ctrl.SetLogger(zap.New(func(o *zap.Options) { + o.Development = development + }, func(o *zap.Options) { + o.ZapOpts = append(o.ZapOpts, zapOpt.AddCaller()) + }, func(o *zap.Options) { + if !development { + encCfg := zapOpt.NewProductionEncoderConfig() + encCfg.EncodeLevel = zapcore.CapitalLevelEncoder + encCfg.EncodeTime = zapcore.ISO8601TimeEncoder + o.Encoder = zapcore.NewConsoleEncoder(encCfg) + } + })) + + utils.NewPprofServer(setupLog, pprofAddr, development) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + }, + LeaderElection: enableLeaderElection, + LeaderElectionNamespace: leaderElectionNamespace, + LeaderElectionID: "workload.fluid.io", + }) + if err != nil { + setupLog.Error(err, "unable to start workload controller manager") + os.Exit(1) + } + + if err := advancedstatefulset.Add(mgr); err != nil { + setupLog.Error(err, "unable to add AdvancedStatefulSet controller") + os.Exit(1) + } + + setupLog.Info("starting workload-controller") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running workload-controller") + os.Exit(1) + } +} diff --git a/cmd/workload/main.go b/cmd/workload/main.go new file mode 100644 index 00000000000..c5266359baa --- /dev/null +++ b/cmd/workload/main.go @@ -0,0 +1,32 @@ +/* +Copyright 2026 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "os" + + "github.com/fluid-cloudnative/fluid/cmd/workload/app" +) + +func main() { + command := app.NewWorkloadCommand() + if err := command.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "%s", err.Error()) + os.Exit(1) + } +} diff --git a/config/crd/bases/workload.fluid.io_advancedstatefulsets.yaml b/config/crd/bases/workload.fluid.io_advancedstatefulsets.yaml new file mode 100644 index 00000000000..b307f25f605 --- /dev/null +++ b/config/crd/bases/workload.fluid.io_advancedstatefulsets.yaml @@ -0,0 +1,339 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: advancedstatefulsets.workload.fluid.io +spec: + group: workload.fluid.io + names: + categories: + - fluid + kind: AdvancedStatefulSet + listKind: AdvancedStatefulSetList + plural: advancedstatefulsets + shortNames: + - asts + singular: advancedstatefulset + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.replicas + name: DESIRED + type: integer + - jsonPath: .status.replicas + name: CURRENT + type: integer + - jsonPath: .status.updatedReplicas + name: UPDATED + type: integer + - jsonPath: .status.readyReplicas + name: READY + type: integer + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .spec.template.spec.containers[*].name + name: CONTAINERS + priority: 1 + type: string + - jsonPath: .spec.template.spec.containers[*].image + name: IMAGES + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + lifecycle: + properties: + inPlaceUpdate: + properties: + finalizersHandler: + items: + type: string + type: array + labelsHandler: + additionalProperties: + type: string + 
type: object + markPodNotReady: + type: boolean + type: object + preDelete: + properties: + finalizersHandler: + items: + type: string + type: array + labelsHandler: + additionalProperties: + type: string + type: object + markPodNotReady: + type: boolean + type: object + preNormal: + properties: + finalizersHandler: + items: + type: string + type: array + labelsHandler: + additionalProperties: + type: string + type: object + markPodNotReady: + type: boolean + type: object + type: object + ordinals: + properties: + start: + format: int32 + type: integer + type: object + persistentVolumeClaimRetentionPolicy: + properties: + whenDeleted: + type: string + whenScaled: + type: string + type: object + podManagementPolicy: + type: string + replicas: + format: int32 + type: integer + reserveOrdinals: + items: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: array + revisionHistoryLimit: + format: int32 + type: integer + scaleStrategy: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + serviceName: + type: string + template: + x-kubernetes-preserve-unknown-fields: true + updateStrategy: + properties: + rollingUpdate: + properties: + inPlaceUpdateStrategy: + properties: + gracePeriodSeconds: + format: int32 + type: integer + type: object + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + minReadySeconds: + format: int32 + type: integer + partition: + format: int32 + type: integer + paused: + type: boolean + podUpdatePolicy: + type: string + unorderedUpdate: + properties: + 
priorityStrategy: + properties: + orderPriority: + items: + properties: + orderedKey: + type: string + required: + - orderedKey + type: object + type: array + weightPriority: + items: + properties: + matchSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - matchSelector + - weight + type: object + type: array + type: object + type: object + type: object + type: + type: string + type: object + volumeClaimTemplates: + x-kubernetes-preserve-unknown-fields: true + volumeClaimUpdateStrategy: + properties: + type: + type: string + type: object + required: + - selector + - template + type: object + status: + properties: + availableReplicas: + format: int32 + type: integer + collisionCount: + format: int32 + type: integer + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + currentReplicas: + format: int32 + type: integer + currentRevision: + type: string + labelSelector: + type: string + observedGeneration: + format: int64 + type: integer + readyReplicas: + format: int32 + type: integer + replicas: + format: int32 + type: integer + updateRevision: + type: string + updatedAvailableReplicas: + format: int32 + type: integer + updatedReadyReplicas: + format: int32 + type: integer + updatedReplicas: + format: int32 + type: integer + volumeClaims: + items: + properties: + compatibleReadyReplicas: + format: int32 + type: integer + compatibleReplicas: + format: int32 + type: integer + volumeClaimName: + type: string + required: + - 
compatibleReadyReplicas + - compatibleReplicas + - volumeClaimName + type: object + type: array + required: + - availableReplicas + - currentReplicas + - readyReplicas + - replicas + - updatedReplicas + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/samples/workload_v1alpha1_advancedstatefulset.yaml b/config/samples/workload_v1alpha1_advancedstatefulset.yaml new file mode 100644 index 00000000000..99f2b3eb067 --- /dev/null +++ b/config/samples/workload_v1alpha1_advancedstatefulset.yaml @@ -0,0 +1,25 @@ +apiVersion: workload.fluid.io/v1alpha1 +kind: AdvancedStatefulSet +metadata: + name: test-asts + namespace: default +spec: + replicas: 2 + selector: + matchLabels: + app: test-asts + template: + metadata: + labels: + app: test-asts + spec: + containers: + - name: nginx + image: nginx:1.25 + ports: + - containerPort: 80 + updateStrategy: + type: RollingUpdate + rollingUpdate: + podUpdatePolicy: InPlaceIfPossible + partition: 0 diff --git a/go.mod b/go.mod index 8c6a048c868..d48b0fc4a40 100644 --- a/go.mod +++ b/go.mod @@ -74,9 +74,9 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/net v0.43.0 golang.org/x/time v0.11.0 + gomodules.xyz/jsonpatch/v2 v2.4.0 google.golang.org/grpc v1.75.1 gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.15 k8s.io/apimachinery v0.29.15 k8s.io/client-go v0.29.15 @@ -143,10 +143,10 @@ require ( golang.org/x/term v0.34.0 // indirect golang.org/x/text v0.28.0 // indirect golang.org/x/tools v0.36.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/protobuf v1.36.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.29.2 // indirect k8s.io/apiserver v0.29.15 // 
indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/pvc_event_handler.go b/pkg/controllers/workload/v1alpha1/advancedstatefulset/pvc_event_handler.go new file mode 100644 index 00000000000..ff3e88b0592 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/pvc_event_handler.go @@ -0,0 +1,64 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2024 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package advancedstatefulset
+
+import (
+	"context"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type pvcEventHandler struct { // enqueues the owning AdvancedStatefulSet when one of its PVCs changes
+}
+
+var _ handler.EventHandler = &pvcEventHandler{}
+
+func (h *pvcEventHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { // no-op: only PVC updates trigger reconciles
+}
+
+func (h *pvcEventHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
+	pvc, ok := evt.ObjectNew.(*v1.PersistentVolumeClaim)
+	if !ok {
+		return
+	}
+	if len(pvc.Annotations) == 0 { // without annotations there is no owner to map to
+		return
+	}
+	stsName, found := pvc.Annotations[PVCOwnedByStsAnnotationKey]
+	if !found {
+		return
+	}
+
+	klog.InfoS("pvc update trigger asts reconcile", "pvc", klog.KObj(pvc), "sts", stsName)
+	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
+		Namespace: pvc.Namespace,
+		Name:      stsName,
+	}})
+}
+
+func (h *pvcEventHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { // no-op: only PVC updates trigger reconciles
+}
+
+func (h *pvcEventHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { // no-op: only PVC updates trigger reconciles
+}
diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_pod_control.go b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_pod_control.go
new file mode 100644
index 00000000000..8bce69e905b
--- /dev/null
+++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_pod_control.go
@@ -0,0 +1,365 @@
+/*
+Copyright 2026 The Fluid Authors.
+Copyright 2019 The Kruise Authors.
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package advancedstatefulset

import (
	"context"
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	errorutils "k8s.io/apimachinery/pkg/util/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientset "k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/retry"
	"k8s.io/klog/v2"

	workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1"
)

// StatefulPodControlObjectManager abstracts the manipulation of Pods and PVCs. The real controller implements this
// with a clientset for writes and listers for reads; for tests we provide stubs.
type StatefulPodControlObjectManager interface {
	// CreatePod creates pod in its namespace.
	CreatePod(ctx context.Context, pod *v1.Pod) error
	// GetPod reads the named pod (from cache in the real implementation).
	GetPod(namespace, podName string) (*v1.Pod, error)
	// UpdatePod persists changes to pod.
	UpdatePod(pod *v1.Pod) error
	// DeletePod removes pod.
	DeletePod(pod *v1.Pod) error
	// CreateClaim creates the given PVC in its namespace.
	CreateClaim(claim *v1.PersistentVolumeClaim) error
	// GetClaim reads the named PVC (from cache in the real implementation).
	GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error)
	// UpdateClaim persists changes to claim.
	UpdateClaim(claim *v1.PersistentVolumeClaim) error
}

// StatefulPodControl defines the interface that StatefulSetController uses to create, update, and delete Pods,
// Manipulation of objects is provided through objectMgr, which allows the k8s API to be mocked out for testing.
type StatefulPodControl struct {
	// objectMgr performs the actual Pod/PVC reads and writes.
	objectMgr StatefulPodControlObjectManager
	// recorder emits events on the owning AdvancedStatefulSet.
	recorder record.EventRecorder
}

// NewStatefulPodControl constructs a StatefulPodControl using a realStatefulPodControlObjectManager with the given
// clientset, listers and EventRecorder.
func NewStatefulPodControl(
	client clientset.Interface,
	podLister corelisters.PodLister,
	claimLister corelisters.PersistentVolumeClaimLister,
	recorder record.EventRecorder,
) *StatefulPodControl {
	return &StatefulPodControl{&realStatefulPodControlObjectManager{client, podLister, claimLister}, recorder}
}

// NewStatefulPodControlFromManager creates a StatefulPodControl using the given StatefulPodControlObjectManager and recorder.
func NewStatefulPodControlFromManager(om StatefulPodControlObjectManager, recorder record.EventRecorder) *StatefulPodControl {
	return &StatefulPodControl{om, recorder}
}

// realStatefulPodControlObjectManager uses a clientset.Interface and listers:
// writes go to the API server, reads are served from the lister caches.
type realStatefulPodControlObjectManager struct {
	client      clientset.Interface
	podLister   corelisters.PodLister
	claimLister corelisters.PersistentVolumeClaimLister
}

func (om *realStatefulPodControlObjectManager) CreatePod(ctx context.Context, pod *v1.Pod) error {
	_, err := om.client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
	return err
}

// GetPod reads from the lister cache, so the result may lag the API server.
func (om *realStatefulPodControlObjectManager) GetPod(namespace, podName string) (*v1.Pod, error) {
	return om.podLister.Pods(namespace).Get(podName)
}

func (om *realStatefulPodControlObjectManager) UpdatePod(pod *v1.Pod) error {
	_, err := om.client.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
	return err
}

func (om *realStatefulPodControlObjectManager) DeletePod(pod *v1.Pod) error {
	return om.client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
}

func (om *realStatefulPodControlObjectManager) CreateClaim(claim
 *v1.PersistentVolumeClaim) error {
	_, err := om.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{})
	return err
}

// GetClaim reads from the lister cache, so the result may lag the API server.
func (om *realStatefulPodControlObjectManager) GetClaim(namespace, claimName string) (*v1.PersistentVolumeClaim, error) {
	return om.claimLister.PersistentVolumeClaims(namespace).Get(claimName)
}

func (om *realStatefulPodControlObjectManager) UpdateClaim(claim *v1.PersistentVolumeClaim) error {
	_, err := om.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
	return err
}

// CreateStatefulPod creates pod (and its PVCs) as a member of set, recording
// events for the outcome. PVCs are created before the Pod so the Pod never
// starts without its storage.
func (spc *StatefulPodControl) CreateStatefulPod(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) error {
	// Create the Pod's PVCs prior to creating the Pod
	if err := spc.createPersistentVolumeClaims(set, pod); err != nil {
		spc.recordPodEvent("create", set, pod, err)
		return err
	}
	// If we created the PVCs attempt to create the Pod
	err := spc.objectMgr.CreatePod(ctx, pod)
	// sink already exists errors
	if apierrors.IsAlreadyExists(err) {
		return err
	}
	// Set PVC policy as much as is possible at this point.
	if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil {
		spc.recordPodEvent("update", set, pod, err)
		return err
	}
	spc.recordPodEvent("create", set, pod, err)
	return err
}

// UpdateStatefulPod brings pod into conformance with set's identity, storage,
// and PVC retention policy, committing an update only when something changed.
// Conflicts are retried with a fresh copy of the Pod from the cache.
func (spc *StatefulPodControl) UpdateStatefulPod(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) error {
	attemptedUpdate := false
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// assume the Pod is consistent
		consistent := true
		// if the Pod does not conform to its identity, update the identity and dirty the Pod
		if !identityMatches(set, pod) {
			updateIdentity(set, pod)
			consistent = false
		}
		// if the Pod does not conform to the AdvancedStatefulSet's storage requirements, update the Pod's PVC's,
		// dirty the Pod, and create any missing PVCs
		if !storageMatches(set, pod) {
			updateStorage(set, pod)
			consistent = false
			if err := spc.createPersistentVolumeClaims(set, pod); err != nil {
				spc.recordPodEvent("update", set, pod, err)
				return err
			}
		}
		// if the Pod's PVCs are not consistent with the AdvancedStatefulSet's PVC deletion policy, update the PVC
		// and dirty the pod.
		if match, err := spc.ClaimsMatchRetentionPolicy(set, pod); err != nil {
			spc.recordPodEvent("update", set, pod, err)
			return err
		} else if !match {
			if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil {
				spc.recordPodEvent("update", set, pod, err)
				return err
			}
			consistent = false
		}

		// if the Pod is not dirty, do nothing
		if consistent {
			return nil
		}

		attemptedUpdate = true
		// commit the update, retrying on conflicts

		updateErr := spc.objectMgr.UpdatePod(pod)
		if updateErr == nil {
			return nil
		}

		// Refetch so the next retry attempt starts from the latest version.
		if updated, err := spc.objectMgr.GetPod(set.Namespace, pod.Name); err == nil {
			// make a copy so we don't mutate the shared cache
			pod = updated.DeepCopy()
		} else {
			utilruntime.HandleError(fmt.Errorf("error getting updated Pod %s/%s: %w", set.Namespace, pod.Name, err))
		}

		return updateErr
	})
	if attemptedUpdate {
		spc.recordPodEvent("update", set, pod, err)
	}
	return err
}

// DeleteStatefulPod deletes pod and records an event with the outcome.
func (spc *StatefulPodControl) DeleteStatefulPod(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) error {
	err := spc.objectMgr.DeletePod(pod)
	spc.recordPodEvent("delete", set, pod, err)
	return err
}

// ClaimsMatchRetentionPolicy returns false if the PVCs for pod are not consistent with set's PVC deletion policy.
// An error is returned if something is not consistent. This is expected if the pod is being otherwise updated,
// but a problem otherwise (see usage of this method in UpdateStatefulPod).
+func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) (bool, error) { + ordinal := getOrdinal(pod) + templates := set.Spec.VolumeClaimTemplates + for i := range templates { + claimName := getPersistentVolumeClaimName(set, &templates[i], ordinal) + claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName) + switch { + case apierrors.IsNotFound(err): + klog.V(4).InfoS("Expected claim missing, continuing to pick up in next iteration", "claimName", claimName) + case err != nil: + return false, fmt.Errorf("Could not retrieve claim %s for %s when checking PVC deletion policy", claimName, pod.Name) + default: + if !claimOwnerMatchesSetAndPod(claim, set, pod) { + return false, nil + } + } + } + return true, nil +} + +// UpdatePodClaimForRetentionPolicy updates the PVCs used by pod to match the PVC deletion policy of set. +func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) error { + ordinal := getOrdinal(pod) + templates := set.Spec.VolumeClaimTemplates + for i := range templates { + claimName := getPersistentVolumeClaimName(set, &templates[i], ordinal) + claim, err := spc.objectMgr.GetClaim(set.Namespace, claimName) + switch { + case apierrors.IsNotFound(err): + klog.V(4).InfoS("Expected claim missing, continuing to pick up in next iteration", "claimName", claimName) + case err != nil: + return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err) + default: + if !claimOwnerMatchesSetAndPod(claim, set, pod) { + claimClone := claim.DeepCopy() + needsUpdate := updateClaimOwnerRefForSetAndPod(claimClone, set, pod) + if needsUpdate { + err := spc.objectMgr.UpdateClaim(claimClone) + if err != nil { + return fmt.Errorf("Could not update claim %s for delete policy ownerRefs: %w", claimName, err) + } + } + } + } + } + return nil +} + +// PodClaimIsStale returns true for a stale 
// PVC that should block pod creation. If the scaling
// policy is deletion, and a PVC has an ownerRef that does not match the pod, the PVC is stale. This
// includes pods whose UID has not been created.
func (spc *StatefulPodControl) PodClaimIsStale(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) (bool, error) {
	policy := getPersistentVolumeClaimRetentionPolicy(set)
	if policy.WhenScaled == workloadv1alpha1.RetainPersistentVolumeClaimRetentionPolicyType {
		// PVCs are meant to be reused and so can't be stale.
		return false, nil
	}
	for _, claim := range getPersistentVolumeClaims(set, pod) {
		pvc, err := spc.objectMgr.GetClaim(claim.Namespace, claim.Name)
		// The three cases below are exhaustive: not-found, other error, success.
		switch {
		case apierrors.IsNotFound(err):
			// If the claim doesn't exist yet, it can't be stale.
			continue
		case err != nil:
			return false, err
		case err == nil:
			// A claim is stale if it doesn't match the pod's UID, including if the pod has no UID.
			if hasStaleOwnerRef(pvc, pod) {
				return true, nil
			}
		}
	}
	return false, nil
}

// recordPodEvent records an event for verb applied to a Pod in a AdvancedStatefulSet. If err is nil the generated event will
// have a reason of v1.EventTypeNormal. If err is not nil the generated event will have a reason of v1.EventTypeWarning.
+func (spc *StatefulPodControl) recordPodEvent(verb string, set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod, err error) { + if err == nil { + reason := fmt.Sprintf("Successful%s", titleCase(verb)) + message := fmt.Sprintf("%s Pod %s in AdvancedStatefulSet %s successful", + strings.ToLower(verb), pod.Name, set.Name) + spc.recorder.Event(set, v1.EventTypeNormal, reason, message) + } else { + reason := fmt.Sprintf("Failed%s", titleCase(verb)) + message := fmt.Sprintf("%s Pod %s in AdvancedStatefulSet %s failed error: %s", + strings.ToLower(verb), pod.Name, set.Name, err) + spc.recorder.Event(set, v1.EventTypeWarning, reason, message) + } +} + +// recordClaimEvent records an event for verb applied to the PersistentVolumeClaim of a Pod in a AdvancedStatefulSet. If err is +// nil the generated event will have a reason of v1.EventTypeNormal. If err is not nil the generated event will have a +// reason of v1.EventTypeWarning. +func (spc *StatefulPodControl) recordClaimEvent(verb string, set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, err error) { + if err == nil { + reason := fmt.Sprintf("Successful%s", titleCase(verb)) + message := fmt.Sprintf("%s Claim %s Pod %s in AdvancedStatefulSet %s success", + strings.ToLower(verb), claim.Name, pod.Name, set.Name) + spc.recorder.Event(set, v1.EventTypeNormal, reason, message) + } else { + reason := fmt.Sprintf("Failed%s", titleCase(verb)) + message := fmt.Sprintf("%s Claim %s for Pod %s in AdvancedStatefulSet %s failed error: %s", + strings.ToLower(verb), claim.Name, pod.Name, set.Name, err) + spc.recorder.Event(set, v1.EventTypeWarning, reason, message) + } +} + +// titleCase returns the string with the first rune converted to title case. 
func titleCase(s string) string {
	// Empty input round-trips unchanged.
	if s == "" {
		return s
	}
	// Decode the first rune (multi-byte safe) and title-case only it.
	r, size := utf8.DecodeRuneInString(s)
	return string(unicode.ToTitle(r)) + s[size:]
}

// createMissingPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, and updates its retention policy
func (spc *StatefulPodControl) createMissingPersistentVolumeClaims(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) error {
	if err := spc.createPersistentVolumeClaims(set, pod); err != nil {
		return err
	}

	// Set PVC policy as much as is possible at this point.
	if err := spc.UpdatePodClaimForRetentionPolicy(set, pod); err != nil {
		spc.recordPodEvent("update", set, pod, err)
		return err
	}
	return nil
}

// createPersistentVolumeClaims creates all of the required PersistentVolumeClaims for pod, which must be a member of
// set. If all of the claims for Pod are successfully created, the returned error is nil. If creation fails, this method
// may be called again until no error is returned, indicating the PersistentVolumeClaims for pod are consistent with
// set's Spec.
func (spc *StatefulPodControl) createPersistentVolumeClaims(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) error {
	// Collect all failures so one bad claim does not hide the others.
	var errs []error
	for _, claim := range getPersistentVolumeClaims(set, pod) {
		pvc, err := spc.objectMgr.GetClaim(claim.Namespace, claim.Name)
		switch {
		case apierrors.IsNotFound(err):
			err := spc.objectMgr.CreateClaim(&claim)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
			}
			// Suppress the event only for AlreadyExists races (another actor won).
			if err == nil || !apierrors.IsAlreadyExists(err) {
				spc.recordClaimEvent("create", set, pod, &claim, err)
			}
		case err != nil:
			errs = append(errs, fmt.Errorf("failed to retrieve PVC %s: %s", claim.Name, err))
			spc.recordClaimEvent("create", set, pod, &claim, err)
		case pvc.DeletionTimestamp != nil:
			// An existing claim mid-deletion blocks creation until it is gone.
			errs = append(errs, fmt.Errorf("pvc %s is to be deleted", claim.Name))
		}
		// TODO: Check resource requirements and accessmodes, update if necessary
		// Don't forget to deep copy the PVC if you need to update it
	}
	return errorutils.NewAggregate(errs)
}
diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_control.go b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_control.go new file mode 100644 index 00000000000..8d3a7cf6ce0 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_control.go @@ -0,0 +1,1241 @@
/*
Copyright 2026 The Fluid Authors.
Copyright 2019 The Kruise Authors.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package advancedstatefulset

import (
	"context"
	"fmt"
	"math"
	"sort"
	"sync"
	"time"

	history "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/controllerhistory"
	inplaceupdate2 "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/inplaceupdate"
	"github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/lifecycle"
	"github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/specifieddelete"
	"github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
	"k8s.io/utils/integer"
	"k8s.io/utils/ptr"

	workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1"
)

// Realistic value for maximum in-flight requests when processing in parallel mode.
const MaxBatchSize = 500

// PVCOwnedByStsAnnotationKey is the annotation on a PVC naming the
// AdvancedStatefulSet that owns it; used by the PVC event handler to map
// claim events back to their set.
const PVCOwnedByStsAnnotationKey = "workload.fluid.io/owned-by-asts"

// StatefulSetControlInterface implements the control logic for updating StatefulSets and their children Pods. It is implemented
// as an interface to allow for extensions that provide different semantics. Currently, there is only one implementation.
type StatefulSetControlInterface interface {
	// UpdateStatefulSet implements the control logic for Pod creation, update, and deletion, and
	// persistent volume creation, update, and deletion.
	// If an implementation returns a non-nil error, the invocation will be retried using a rate-limited strategy.
	// Implementors should sink any errors that they do not wish to trigger a retry, and they may feel free to
	// exit exceptionally at any point provided they wish the update to be re-run at a later point in time.
	UpdateStatefulSet(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, pods []*v1.Pod) error
	// ListRevisions returns an array of the ControllerRevisions that represent the revisions of set. If the returned
	// error is nil, the returns slice of ControllerRevisions is valid.
	ListRevisions(set *workloadv1alpha1.AdvancedStatefulSet) ([]*apps.ControllerRevision, error)
	// AdoptOrphanRevisions adopts any orphaned ControllerRevisions that match set's Selector. If all adoptions are
	// successful the returned error is nil.
	AdoptOrphanRevisions(set *workloadv1alpha1.AdvancedStatefulSet, revisions []*apps.ControllerRevision) error
}

// NewDefaultStatefulSetControl returns a new instance of the default implementation ControlInterface that
// implements the documented semantics for StatefulSets. podControl is the PodControlInterface used to create, update,
// and delete Pods and to create PersistentVolumeClaims. statusUpdater is the StatusUpdaterInterface used
// to update the status of StatefulSets. You should use an instance returned from NewRealStatefulPodControl() for any
// scenario other than testing.
+func NewDefaultStatefulSetControl( + podControl *StatefulPodControl, + inplaceControl inplaceupdate2.Interface, + lifecycleControl lifecycle.Interface, + statusUpdater StatusUpdaterInterface, + controllerHistory history.Interface, + recorder record.EventRecorder) StatefulSetControlInterface { + return &defaultStatefulSetControl{ + podControl, + statusUpdater, + controllerHistory, + recorder, + inplaceControl, + lifecycleControl, + } +} + +// defaultStatefulSetControl implements ControlInterface +var _ StatefulSetControlInterface = &defaultStatefulSetControl{} + +type defaultStatefulSetControl struct { + podControl *StatefulPodControl + statusUpdater StatusUpdaterInterface + controllerHistory history.Interface + recorder record.EventRecorder + inplaceControl inplaceupdate2.Interface + lifecycleControl lifecycle.Interface +} + +// UpdateStatefulSet executes the core logic loop for a stateful set, applying the predictable and +// consistent monotonic update strategy by default - scale up proceeds in ordinal order, no new pod +// is created while any pod is unhealthy, and pods are terminated in descending order. The burst +// strategy allows these constraints to be relaxed - pods will be created and deleted eagerly and +// in no particular order. Clients using the burst strategy should be careful to ensure they +// understand the consistency implications of having unpredictable numbers of pods available. 
func (ssc *defaultStatefulSetControl) UpdateStatefulSet(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, pods []*v1.Pod) error {
	// Work on a deep copy so the shared cache object is never mutated.
	set = set.DeepCopy()

	// list all revisions and sort them
	revisions, err := ssc.ListRevisions(set)
	if err != nil {
		return err
	}
	history.SortControllerRevisions(revisions)

	currentRevision, updateRevision, err := ssc.performUpdate(ctx, set, pods, revisions)
	if err != nil {
		// Still attempt to truncate history even when the update failed,
		// aggregating both errors for the caller.
		return utilerrors.NewAggregate([]error{err, ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)})
	}

	// maintain the set's revision history limit
	return ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)
}

// performUpdate resolves the current/update revisions, runs the main update
// loop, and persists the resulting status. Both the update error and the
// status-update error are surfaced, preferring the update error.
func (ssc *defaultStatefulSetControl) performUpdate(
	ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, pods []*v1.Pod, revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, error) {
	var currentStatus *workloadv1alpha1.AdvancedStatefulSetStatus
	// get the current, and update revisions
	currentRevision, updateRevision, collisionCount, err := ssc.getStatefulSetRevisions(set, revisions)
	if err != nil {
		return currentRevision, updateRevision, err
	}

	// Refresh update expectations
	for _, pod := range pods {
		updateExpectations.ObserveUpdated(getStatefulSetKey(set), updateRevision.Name, pod)
	}

	// perform the main update function and get the status
	currentStatus, getStatusErr := ssc.updateStatefulSet(ctx, set, currentRevision, updateRevision, collisionCount, pods, revisions)
	if getStatusErr != nil && currentStatus == nil {
		return currentRevision, updateRevision, getStatusErr
	}

	// make sure to update the latest status even if there is an error with non-nil currentStatus
	updateStatusErr := ssc.updateStatefulSetStatus(ctx, set, currentStatus)
	if updateStatusErr == nil {
		klog.V(4).InfoS("Updated status", "statefulSet", klog.KObj(set),
			"replicas", currentStatus.Replicas,
			"readyReplicas",
			currentStatus.ReadyReplicas,
			"currentReplicas", currentStatus.CurrentReplicas,
			"updatedReplicas", currentStatus.UpdatedReplicas)
	}

	// Surface errors with a fixed priority: the update error wins over the
	// status-update error when both occurred.
	switch {
	case getStatusErr != nil && updateStatusErr != nil:
		klog.ErrorS(updateStatusErr, "Could not update status", "statefulSet", klog.KObj(set))
		return currentRevision, updateRevision, getStatusErr
	case getStatusErr != nil:
		return currentRevision, updateRevision, getStatusErr
	case updateStatusErr != nil:
		return currentRevision, updateRevision, updateStatusErr
	}

	klog.V(4).InfoS("AdvancedStatefulSet revisions", "statefulSet", klog.KObj(set),
		"currentRevision", currentStatus.CurrentRevision,
		"updateRevision", currentStatus.UpdateRevision)

	return currentRevision, updateRevision, nil
}

// ListRevisions returns the ControllerRevisions matching set's validated
// label selector.
func (ssc *defaultStatefulSetControl) ListRevisions(set *workloadv1alpha1.AdvancedStatefulSet) ([]*apps.ControllerRevision, error) {
	selector, err := util.ValidatedLabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		return nil, err
	}
	return ssc.controllerHistory.ListControllerRevisions(set, selector)
}

// AdoptOrphanRevisions adopts each orphaned revision in place; the slice is
// updated with the adopted copies. Fails fast on the first adoption error.
func (ssc *defaultStatefulSetControl) AdoptOrphanRevisions(
	set *workloadv1alpha1.AdvancedStatefulSet,
	revisions []*apps.ControllerRevision) error {
	for i := range revisions {
		adopted, err := ssc.controllerHistory.AdoptControllerRevision(set, controllerKind, revisions[i])
		if err != nil {
			return err
		}
		revisions[i] = adopted
	}
	return nil
}

// truncateHistory truncates any non-live ControllerRevisions in revisions from set's history. The UpdateRevision and
// CurrentRevision in set's Status are considered to be live. Any revisions associated with the Pods in pods are also
// considered to be live. Non-live revisions are deleted, starting with the revision with the lowest Revision, until
// only RevisionHistoryLimit revisions remain. If the returned error is nil the operation was successful. This method
// expects that revisions is sorted when supplied.
+func (ssc *defaultStatefulSetControl) truncateHistory( + set *workloadv1alpha1.AdvancedStatefulSet, + pods []*v1.Pod, + revisions []*apps.ControllerRevision, + current *apps.ControllerRevision, + update *apps.ControllerRevision) error { + history := make([]*apps.ControllerRevision, 0, len(revisions)) + // mark all live revisions + live := map[string]bool{} + if current != nil { + live[current.Name] = true + } + if update != nil { + live[update.Name] = true + } + for i := range pods { + live[getPodRevision(pods[i])] = true + } + // collect live revisions and historic revisions + for i := range revisions { + if !live[revisions[i].Name] { + history = append(history, revisions[i]) + } + } + historyLen := len(history) + var historyLimit int + if set.Spec.RevisionHistoryLimit != nil { + historyLimit = int(*set.Spec.RevisionHistoryLimit) + } else { + historyLimit = 10 + } + if historyLen <= historyLimit { + return nil + } + // delete any non-live history to maintain the revision limit. + history = history[:(historyLen - historyLimit)] + for i := 0; i < len(history); i++ { + if err := ssc.controllerHistory.DeleteControllerRevision(history[i]); err != nil { + return err + } + } + return nil +} + +// getStatefulSetRevisions returns the current and update ControllerRevisions for set. It also +// returns a collision count that records the number of name collisions set saw when creating +// new ControllerRevisions. This count is incremented on every name collision and is used in +// building the ControllerRevision names for name collision avoidance. This method may create +// a new revision, or modify the Revision of an existing revision if an update to set is detected. +// This method expects that revisions is sorted when supplied. 
func (ssc *defaultStatefulSetControl) getStatefulSetRevisions(
	set *workloadv1alpha1.AdvancedStatefulSet,
	revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, int32, error) {
	var currentRevision, updateRevision *apps.ControllerRevision

	revisionCount := len(revisions)
	history.SortControllerRevisions(revisions)

	// Use a local copy of set.Status.CollisionCount to avoid modifying set.Status directly.
	// This copy is returned so the value gets carried over to set.Status in updateStatefulSet.
	var collisionCount int32
	if set.Status.CollisionCount != nil {
		collisionCount = *set.Status.CollisionCount
	}

	// create a new revision from the current set
	updateRevision, err := newRevision(set, nextRevision(revisions), &collisionCount)
	if err != nil {
		return nil, nil, collisionCount, err
	}

	// find any equivalent revisions
	equalRevisions := history.FindEqualRevisions(revisions, updateRevision)
	equalCount := len(equalRevisions)

	if equalCount > 0 {
		if history.EqualRevision(revisions[revisionCount-1], equalRevisions[equalCount-1]) {
			// if the equivalent revision is immediately prior the update revision has not changed
			updateRevision = revisions[revisionCount-1]
		} else {
			// if the equivalent revision is not immediately prior we will roll back by incrementing the
			// Revision of the equivalent revision
			updateRevision, err = ssc.controllerHistory.UpdateControllerRevision(
				equalRevisions[equalCount-1],
				updateRevision.Revision)
			if err != nil {
				return nil, nil, collisionCount, err
			}
		}
	} else {
		// if there is no equivalent revision we create a new one
		updateRevision, err = ssc.controllerHistory.CreateControllerRevision(set, updateRevision, &collisionCount)
		if err != nil {
			return nil, nil, collisionCount, err
		}
	}

	// attempt to find the revision that corresponds to the current revision
	for i := range revisions {
		if revisions[i].Name ==
 set.Status.CurrentRevision {
			currentRevision = revisions[i]
			break
		}
	}

	// if the current revision is nil we initialize the history by setting it to the update revision
	if currentRevision == nil {
		currentRevision = updateRevision
	}

	return currentRevision, updateRevision, collisionCount, nil
}

// updateStatefulSet performs the update function for a AdvancedStatefulSet. This method creates, updates, and deletes Pods in
// the set in order to conform the system to the target state for the set. The target state always contains
// set.Spec.Replicas Pods with a Ready Condition. If the UpdateStrategy.Type for the set is
// RollingUpdateStatefulSetStrategyType then all Pods in the set must be at set.Status.CurrentRevision.
// If the UpdateStrategy.Type for the set is OnDeleteStatefulSetStrategyType, the target state implies nothing about
// the revisions of Pods in the set. If the UpdateStrategy.Type for the set is PartitionStatefulSetStrategyType, then
// all Pods with ordinal less than UpdateStrategy.Partition.Ordinal must be at Status.CurrentRevision and all other
// Pods must be at Status.UpdateRevision. If the returned error is nil, the returned AdvancedStatefulSetStatus is valid and the
// update must be recorded. If the error is not nil, the method should be retried until successful.
func (ssc *defaultStatefulSetControl) updateStatefulSet(
	ctx context.Context,
	set *workloadv1alpha1.AdvancedStatefulSet,
	currentRevision *apps.ControllerRevision,
	updateRevision *apps.ControllerRevision,
	collisionCount int32,
	pods []*v1.Pod,
	revisions []*apps.ControllerRevision) (*workloadv1alpha1.AdvancedStatefulSetStatus, error) {
	selector, err := util.ValidatedLabelSelectorAsSelector(set.Spec.Selector)
	if err != nil {
		return set.Status.DeepCopy(), err
	}

	// get the current and update revisions of the set.
	currentSet, err := ApplyRevision(set, currentRevision)
	if err != nil {
		return set.Status.DeepCopy(), err
	}
	updateSet, err := ApplyRevision(set, updateRevision)
	if err != nil {
		return set.Status.DeepCopy(), err
	}

	// set the generation, and revisions in the returned status
	status := workloadv1alpha1.AdvancedStatefulSetStatus{}
	status.ObservedGeneration = set.Generation
	status.CurrentRevision = currentRevision.Name
	status.UpdateRevision = updateRevision.Name
	status.CollisionCount = ptr.To[int32](collisionCount)
	status.LabelSelector = selector.String()
	minReadySeconds := getMinReadySeconds(set)

	updateStatus(&status, minReadySeconds, currentRevision, updateRevision, pods)

	startOrdinal, endOrdinal, reserveOrdinals := getStatefulSetReplicasRange(set)
	// slice that will contain all Pods such that startOrdinal <= getOrdinal(pod) < endOrdinal and not in reserveOrdinals
	replicas := make([]*v1.Pod, endOrdinal-startOrdinal)
	// slice that will contain all Pods such that getOrdinal(pod) < startOrdinal or getOrdinal(pod) >= endOrdinal or in reserveOrdinals
	condemned := make([]*v1.Pod, 0, len(pods))
	unhealthy := 0
	firstUnhealthyOrdinal := math.MaxInt32
	var firstUnhealthyPod *v1.Pod
	monotonic := !allowsBurst(set)

	// First we partition pods into two lists valid replicas and condemned Pods
	for i := range pods {
		if ord := getOrdinal(pods[i]); podInOrdinalRangeWithParams(pods[i], startOrdinal, endOrdinal, reserveOrdinals) {
			// if the ordinal of the pod is within the range of the current number of replicas and not in reserveOrdinals,
			// insert it at the indirection of its ordinal
			replicas[ord-startOrdinal] = pods[i]

		} else if ord >= 0 {
			// if the ordinal is valid, but not within the range or in reserveOrdinals,
			// add it to the condemned list
			condemned = append(condemned, pods[i])
		}
		// If the ordinal could not be parsed (ord < 0), ignore the Pod.
	}

	// for any empty indices in the sequence [0,set.Spec.Replicas) create a new Pod at the correct revision
	for ord := startOrdinal; ord < endOrdinal; ord++ {
		if reserveOrdinals.Has(ord) {
			continue
		}
		replicaIdx := ord - startOrdinal
		if replicas[replicaIdx] == nil {
			replicas[replicaIdx] = newVersionedStatefulSetPod(
				currentSet,
				updateSet,
				currentRevision.Name,
				updateRevision.Name, ord, replicas)
		}
	}

	// sort the condemned Pods by their ordinals
	sort.Sort(descendingOrdinal(condemned))

	// find the first unhealthy Pod
	for i := range replicas {
		if replicas[i] == nil {
			continue
		}
		if !isHealthy(replicas[i]) {
			unhealthy++
			if ord := getOrdinal(replicas[i]); ord < firstUnhealthyOrdinal {
				firstUnhealthyOrdinal = ord
				firstUnhealthyPod = replicas[i]
			}
		}
	}

	// or the first unhealthy condemned Pod (condemned are sorted in descending order for ease of use)
	for i := len(condemned) - 1; i >= 0; i-- {
		if !isHealthy(condemned[i]) {
			unhealthy++
			if ord := getOrdinal(condemned[i]); ord < firstUnhealthyOrdinal {
				firstUnhealthyOrdinal = ord
				firstUnhealthyPod = condemned[i]
			}
		}
	}

	if unhealthy > 0 {
		klog.V(4).InfoS("AdvancedStatefulSet has unhealthy Pods", "statefulSet", klog.KObj(set), "unhealthyReplicas", unhealthy, "pod", klog.KObj(firstUnhealthyPod))
	}

	// If the AdvancedStatefulSet is being deleted, don't do anything other than updating
	// status.
	if set.DeletionTimestamp != nil {
		return &status, nil
	}

	// First, process each living replica. Exit if we run into an error or something blocking in monotonic mode.
	scaleMaxUnavailable, err := getScaleMaxUnavailable(set)
	if err != nil {
		return &status, err
	}
	processReplicaFn := func(i int) (bool, bool, error) {
		return ssc.processReplica(ctx, set, updateSet, monotonic, replicas, i, &status, scaleMaxUnavailable)
	}
	if shouldExit, err := runForAllWithBreak(replicas, processReplicaFn, monotonic); shouldExit || err != nil {
		updateStatus(&status, minReadySeconds, currentRevision, updateRevision, replicas, condemned)
		return &status, err
	}

	// Ensure ownerRefs are set correctly for the condemned pods.
	fixPodClaim := func(i int) (bool, error) {
		if matchPolicy, err := ssc.podControl.ClaimsMatchRetentionPolicy(updateSet, condemned[i]); err != nil {
			return true, err
		} else if !matchPolicy {
			if err := ssc.podControl.UpdatePodClaimForRetentionPolicy(updateSet, condemned[i]); err != nil {
				return true, err
			}
		}
		return false, nil
	}
	if shouldExit, err := runForAll(condemned, fixPodClaim, monotonic); shouldExit || err != nil {
		updateStatus(&status, minReadySeconds, currentRevision, updateRevision, replicas, condemned)
		return &status, err
	}

	// At this point, in monotonic mode all of the current Replicas are Running, Ready and Available,
	// and we can consider termination.
	// We will wait for all predecessors to be Running and Ready prior to attempting a deletion.
	// We will terminate Pods in a monotonically decreasing order.
	// Note that we do not resurrect Pods in this interval. Also note that scaling will take precedence over
	// updates.
	processCondemnedFn := func(i int) (bool, error) {
		return ssc.processCondemned(ctx, set, firstUnhealthyPod, monotonic, condemned, i)
	}
	if shouldExit, err := runForAll(condemned, processCondemnedFn, monotonic); shouldExit || err != nil {
		updateStatus(&status, minReadySeconds, currentRevision, updateRevision, replicas, condemned)
		return &status, err
	}
	updateStatus(&status, minReadySeconds, currentRevision, updateRevision, replicas, condemned)

	// for the OnDelete strategy we short circuit. Pods will be updated when they are manually deleted.
	if set.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType {
		return &status, nil
	}

	return ssc.rollingUpdateStatefulsetPods(
		set, &status, currentRevision, updateRevision, revisions, pods, replicas, minReadySeconds,
	)
}

func (ssc *defaultStatefulSetControl) rollingUpdateStatefulsetPods(
	set *workloadv1alpha1.AdvancedStatefulSet,
	status *workloadv1alpha1.AdvancedStatefulSetStatus,
	currentRevision *apps.ControllerRevision,
	updateRevision *apps.ControllerRevision,
	revisions []*apps.ControllerRevision,
	pods []*v1.Pod,
	replicas []*v1.Pod,
	minReadySeconds int32,
) (*workloadv1alpha1.AdvancedStatefulSetStatus, error) {

	// If update expectations have not satisfied yet, skip updating pods
	if updateSatisfied, _, updateDirtyPods := updateExpectations.SatisfiedExpectations(getStatefulSetKey(set), updateRevision.Name); !updateSatisfied {
		klog.V(4).InfoS("Not satisfied update for statefulSet", "statefulSet", klog.KObj(set), "updateDirtyPods", updateDirtyPods)
		return status, nil
	}

	// refresh states for all pods
	var modified bool
	for _, pod := range pods {
		if pod == nil {
			continue
		}
		refreshed, duration, err := ssc.refreshPodState(set, pod, updateRevision.Name)
		if err != nil {
			return status, err
		} else if duration > 0 {
			durationStore.Push(getStatefulSetKey(set), duration)
		}
		if refreshed {
			modified = true
		}
	}
	if modified {
		return
status, nil + } + + var err error + // we compute the minimum ordinal of the target sequence for a destructive update based on the strategy. + maxUnavailable := 1 + if set.Spec.UpdateStrategy.RollingUpdate != nil { + if set.Spec.UpdateStrategy.RollingUpdate.Paused { + return status, nil + } + + maxUnavailable, err = intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(set.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, intstrutil.FromInt(1)), int(*set.Spec.Replicas), false) + if err != nil { + return status, err + } + // maxUnavailable should not be less than 1 + if maxUnavailable < 1 { + maxUnavailable = 1 + } + } + + minWaitTime := workloadv1alpha1.MaxMinReadySeconds * time.Second + unavailablePods := sets.NewString() + opts := &inplaceupdate2.UpdateOptions{} + opts = inplaceupdate2.SetOptionsDefaults(opts) + // counts any targets in the replicas that are unhealthy (terminated / in-place update not-ready / not running and + // ready for minReadySeconds) for checking if the count satisfied the MaxUnavailable limit. + for target := range replicas { + if replicas[target] == nil { + continue + } + if !isHealthy(replicas[target]) { + // 1. count pod as unavailable if it's unhealthy or terminating + unavailablePods.Insert(replicas[target].Name) + } else if completedErr := opts.CheckPodUpdateCompleted(replicas[target]); completedErr != nil { + // 2. count pod as unavailable if it's in-place updating and not ready + klog.V(4).ErrorS(completedErr, "AdvancedStatefulSet found Pod in-place update not-ready", + "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[target])) + unavailablePods.Insert(replicas[target].Name) + } else if isAvailable, waitTime := isRunningAndAvailable(replicas[target], minReadySeconds); !isAvailable { + // 3. 
count pod as unavailable if it's not available yet given the minReadySeconds requirement
+			unavailablePods.Insert(replicas[target].Name)
+			// make sure that we will wait for the first pod to get available
+			if waitTime != 0 && waitTime <= minWaitTime {
+				minWaitTime = waitTime
+				durationStore.Push(getStatefulSetKey(set), waitTime)
+			}
+		}
+	}
+
+	// handle specified deleted pod under maxUnavailable constraint
+	// NOTE: specified deletion is not constrained by partition setting
+	specifiedDeletedPods, err := ssc.handleSpecifiedDeletedPods(set, status, currentRevision, updateRevision, replicas, maxUnavailable, unavailablePods)
+	if err != nil {
+		return status, err
+	}
+
+	updateIndexes := sortPodsToUpdate(set.Spec.UpdateStrategy.RollingUpdate, updateRevision.Name, *set.Spec.Replicas, replicas)
+	klog.V(3).InfoS("Prepare to update pods indexes for AdvancedStatefulSet", "statefulSet", klog.KObj(set), "podIndexes", updateIndexes)
+	// update pods in sequence
+	for _, target := range updateIndexes {
+		// the target is already up-to-date, go to next
+		if getPodRevision(replicas[target]) == updateRevision.Name {
+			continue
+		}
+
+		// the unavailable pods count exceed the maxUnavailable and the target is available, so we can't process it,
+		// wait for unhealthy Pods on update
+		if len(unavailablePods) >= maxUnavailable && !unavailablePods.Has(replicas[target].Name) {
+			klog.V(4).InfoS("AdvancedStatefulSet was waiting for unavailable Pods to update, blocked pod",
+				"statefulSet", klog.KObj(set), "unavailablePods", unavailablePods.List(), "blockedPod", klog.KObj(replicas[target]))
+			return status, nil
+		}
+
+		// delete the Pod if it is not already terminating and does not match the update revision.
+ if !specifiedDeletedPods.Has(replicas[target].Name) && !isTerminating(replicas[target]) { + // todo validate in-place for pub + inplacing, inplaceUpdateErr := ssc.inPlaceUpdatePod(set, replicas[target], updateRevision, revisions) + if inplaceUpdateErr != nil { + return status, inplaceUpdateErr + } + // if pod is inplacing or actual deleting, decrease revision + revisionNeedDecrease := inplacing + if !inplacing { + klog.V(2).InfoS("AdvancedStatefulSet terminating Pod for update", "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[target])) + if _, actualDeleting, err := ssc.deletePod(set, replicas[target]); err != nil { + return status, err + } else { + revisionNeedDecrease = actualDeleting + } + } + // mark target as unavailable because it's updated + unavailablePods.Insert(replicas[target].Name) + + if revisionNeedDecrease && getPodRevision(replicas[target]) == currentRevision.Name { + status.CurrentReplicas-- + } + } + } + + return status, nil +} + +func (ssc *defaultStatefulSetControl) handleSpecifiedDeletedPods( + set *workloadv1alpha1.AdvancedStatefulSet, + status *workloadv1alpha1.AdvancedStatefulSetStatus, + currentRevision *apps.ControllerRevision, + updateRevision *apps.ControllerRevision, + replicas []*v1.Pod, + maxUnavailable int, + unavailablePods sets.String) (sets.String, error) { + specifiedDeletedPods := sets.NewString() + for target := len(replicas) - 1; target >= 0; target-- { + if replicas[target] == nil || !specifieddelete.IsSpecifiedDelete(replicas[target]) { + continue + } + // the unavailable pods count exceed the maxUnavailable and the target is available, so we can't process it, + // why skip here rather than return? 
+ // case: pod 0 ready, pod1 unready, pod 2 unready, pod3 ready, pod4 ready + // when maxUnavailable = 3, pod4 with specified deleted will be deleted but pod3 can't + // pod 2 and pod 1 can be deleted because they were unavailable + if len(unavailablePods) >= maxUnavailable && !unavailablePods.Has(replicas[target].Name) { + klog.V(4).InfoS("AdvancedStatefulSet was waiting for unavailable Pods to update, blocked pod", + "statefulSet", klog.KObj(set), "unavailablePods", unavailablePods.List(), "blockedPod", klog.KObj(replicas[target])) + continue + } + + specifiedDeletedPods.Insert(replicas[target].Name) + if _, actualDeleting, err := ssc.deletePod(set, replicas[target]); err != nil { + return specifiedDeletedPods, err + } else if actualDeleting { + // if actual deleted, update revision count in status + if getPodRevision(replicas[target]) == currentRevision.Name { + status.CurrentReplicas-- + } else if getPodRevision(replicas[target]) == updateRevision.Name { + status.UpdatedReplicas-- + } + } + // mark target as unavailable because it's deleting or pre-deleting + unavailablePods.Insert(replicas[target].Name) + } + return specifiedDeletedPods, nil +} + +func (ssc *defaultStatefulSetControl) deletePod(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) (modified, actualDeleting bool, err error) { + if set.Spec.Lifecycle != nil && lifecycle.IsPodHooked(set.Spec.Lifecycle.PreDelete, pod) { + markPodNotReady := set.Spec.Lifecycle.PreDelete.MarkPodNotReady + if updated, _, err := ssc.lifecycleControl.UpdatePodLifecycle(pod, workloadv1alpha1.LifecycleStatePreparingDelete, markPodNotReady); err != nil { + return false, false, err + } else if updated { + klog.V(3).InfoS("AdvancedStatefulSet scaling update pod lifecycle to PreparingDelete", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod)) + return true, false, nil + } + return false, false, nil + } + if err := ssc.podControl.DeleteStatefulPod(set, pod); err != nil { + ssc.recorder.Eventf(set, v1.EventTypeWarning, 
"FailedDelete", "failed to delete pod %s: %v", pod.Name, err) + return false, false, err + } + return true, true, nil +} + +func (ssc *defaultStatefulSetControl) refreshPodState(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod, updateRevision string) (bool, time.Duration, error) { + opts := &inplaceupdate2.UpdateOptions{} + if set.Spec.UpdateStrategy.RollingUpdate != nil && set.Spec.UpdateStrategy.RollingUpdate.InPlaceUpdateStrategy != nil { + opts.GracePeriodSeconds = set.Spec.UpdateStrategy.RollingUpdate.InPlaceUpdateStrategy.GracePeriodSeconds + } + opts = inplaceupdate2.SetOptionsDefaults(opts) + + res := ssc.inplaceControl.Refresh(pod, opts) + if res.RefreshErr != nil { + klog.ErrorS(res.RefreshErr, "AdvancedStatefulSet failed to update pod condition for inplace", + "statefulSet", klog.KObj(set), "pod", klog.KObj(pod)) + return false, 0, res.RefreshErr + } + + var state workloadv1alpha1.LifecycleStateType + switch lifecycle.GetPodLifecycleState(pod) { + case workloadv1alpha1.LifecycleStatePreparingNormal: + if set.Spec.Lifecycle == nil || + set.Spec.Lifecycle.PreNormal == nil || + lifecycle.IsPodAllHooked(set.Spec.Lifecycle.PreNormal, pod) { + state = workloadv1alpha1.LifecycleStateNormal + } + case workloadv1alpha1.LifecycleStatePreparingUpdate: + // when pod updated to PreparingUpdate state to wait lifecycle blocker to remove, + // then rollback, do not need update pod inplace since it is the update revision, + // so just update pod lifecycle state. 
ref: https://github.com/openkruise/kruise/issues/1156 + if getPodRevision(pod) == updateRevision { + if set.Spec.Lifecycle != nil && !lifecycle.IsPodAllHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) { + state = workloadv1alpha1.LifecycleStateUpdated + } else { + state = workloadv1alpha1.LifecycleStateNormal + } + } + case workloadv1alpha1.LifecycleStateUpdating: + if opts.CheckPodUpdateCompleted(pod) == nil { + if set.Spec.Lifecycle != nil && !lifecycle.IsPodAllHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) { + state = workloadv1alpha1.LifecycleStateUpdated + } else { + state = workloadv1alpha1.LifecycleStateNormal + } + } + case workloadv1alpha1.LifecycleStateUpdated: + if set.Spec.Lifecycle == nil || + set.Spec.Lifecycle.InPlaceUpdate == nil || + lifecycle.IsPodAllHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) { + state = workloadv1alpha1.LifecycleStateNormal + } + } + + if state != "" { + var markPodNotReady bool + if set.Spec.Lifecycle != nil && set.Spec.Lifecycle.InPlaceUpdate != nil { + markPodNotReady = set.Spec.Lifecycle.InPlaceUpdate.MarkPodNotReady + } + if updated, _, err := ssc.lifecycleControl.UpdatePodLifecycle(pod, state, markPodNotReady); err != nil { + return false, 0, err + } else if updated { + klog.V(3).InfoS("AdvancedStatefulSet updated pod lifecycle", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod), "lifecycleState", state) + return true, res.DelayDuration, nil + } + } + + return false, res.DelayDuration, nil +} + +func (ssc *defaultStatefulSetControl) inPlaceUpdatePod( + set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod, + updateRevision *apps.ControllerRevision, revisions []*apps.ControllerRevision, +) (bool, error) { + if set.Spec.UpdateStrategy.RollingUpdate == nil { + return false, nil + } + if set.Spec.UpdateStrategy.RollingUpdate.PodUpdatePolicy != workloadv1alpha1.InPlaceIfPossiblePodUpdateStrategyType && + set.Spec.UpdateStrategy.RollingUpdate.PodUpdatePolicy != workloadv1alpha1.InPlaceOnlyPodUpdateStrategyType { + return 
false, nil + } + + var oldRevision *apps.ControllerRevision + for _, r := range revisions { + if r.Name == getPodRevision(pod) { + oldRevision = r + break + } + } + + opts := &inplaceupdate2.UpdateOptions{} + if set.Spec.UpdateStrategy.RollingUpdate.InPlaceUpdateStrategy != nil { + opts.GracePeriodSeconds = set.Spec.UpdateStrategy.RollingUpdate.InPlaceUpdateStrategy.GracePeriodSeconds + } + + if ssc.inplaceControl.CanUpdateInPlace(oldRevision, updateRevision, opts) { + state := lifecycle.GetPodLifecycleState(pod) + switch state { + case "", workloadv1alpha1.LifecycleStatePreparingNormal, workloadv1alpha1.LifecycleStateNormal: + var err error + var updated bool + if set.Spec.Lifecycle != nil && lifecycle.IsPodHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) { + markPodNotReady := set.Spec.Lifecycle.InPlaceUpdate.MarkPodNotReady + if updated, _, err = ssc.lifecycleControl.UpdatePodLifecycle(pod, workloadv1alpha1.LifecycleStatePreparingUpdate, markPodNotReady); err == nil && updated { + klog.V(3).InfoS("AdvancedStatefulSet updated pod lifecycle to PreparingUpdate", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod)) + } + return true, err + } + case workloadv1alpha1.LifecycleStateUpdated: + var err error + var updated bool + var inPlaceUpdateHandler *workloadv1alpha1.LifecycleHook + if set.Spec.Lifecycle != nil { + inPlaceUpdateHandler = set.Spec.Lifecycle.InPlaceUpdate + } + if updated, _, err = ssc.lifecycleControl.UpdatePodLifecycleWithHandler(pod, workloadv1alpha1.LifecycleStatePreparingUpdate, inPlaceUpdateHandler); err == nil && updated { + klog.V(3).InfoS("AdvancedStatefulSet updated pod lifecycle to PreparingUpdate", "statefulSet", klog.KObj(set), "pod", klog.KObj(pod)) + } + return true, err + case workloadv1alpha1.LifecycleStatePreparingUpdate: + if set.Spec.Lifecycle != nil && lifecycle.IsPodHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) { + return true, nil + } + case workloadv1alpha1.LifecycleStateUpdating: + default: + return true, fmt.Errorf("not allowed 
to in-place update pod %s in state %s", pod.Name, state) + } + + if state != "" { + opts.AdditionalFuncs = append(opts.AdditionalFuncs, lifecycle.SetPodLifecycle(workloadv1alpha1.LifecycleStateUpdating)) + } + res := ssc.inplaceControl.Update(pod, oldRevision, updateRevision, opts) + if res.InPlaceUpdate { + if res.DelayDuration > 0 { + durationStore.Push(getStatefulSetKey(set), res.DelayDuration) + } + + if res.UpdateErr == nil { + updateExpectations.ExpectUpdated(getStatefulSetKey(set), updateRevision.Name, pod) + ssc.recorder.Eventf(set, v1.EventTypeNormal, "SuccessfulUpdatePodInPlace", "successfully update pod %s in-place(revision %v)", pod.Name, updateRevision.Name) + return res.InPlaceUpdate, nil + } + ssc.recorder.Eventf(set, v1.EventTypeWarning, "FailedUpdatePodInPlace", "failed to update pod %s in-place(revision %v): %v", pod.Name, updateRevision.Name, res.UpdateErr) + return res.InPlaceUpdate, res.UpdateErr + } + } + + if set.Spec.UpdateStrategy.RollingUpdate.PodUpdatePolicy == workloadv1alpha1.InPlaceOnlyPodUpdateStrategyType { + return false, fmt.Errorf("find strategy is InPlaceOnly but Pod %s can not update in-place", pod.Name) + } + + return false, nil +} + +// updateStatefulSetStatus updates set's Status to be equal to status. If status indicates a complete update, it is +// mutated to indicate completion. If status is semantically equivalent to set's Status no update is performed. If the +// returned error is nil, the update is successful. 
+func (ssc *defaultStatefulSetControl) updateStatefulSetStatus( + ctx context.Context, + set *workloadv1alpha1.AdvancedStatefulSet, + status *workloadv1alpha1.AdvancedStatefulSetStatus) error { + + // complete any in progress rolling update if necessary + completeRollingUpdate(set, status) + + // if the status is not inconsistent do not perform an update + if !inconsistentStatus(set, status) { + return nil + } + + // copy set and update its status + set = set.DeepCopy() + if err := ssc.statusUpdater.UpdateStatefulSetStatus(ctx, set, status); err != nil { + return err + } + + return nil +} + +type replicaStatus struct { + replicas int32 + readyReplicas int32 + availableReplicas int32 + currentReplicas int32 + + updatedReplicas int32 + updatedReadyReplicas int32 + updatedAvailableReplicas int32 +} + +func computeReplicaStatus(pods []*v1.Pod, minReadySeconds int32, currentRevision, updateRevision *apps.ControllerRevision) replicaStatus { + status := replicaStatus{} + for _, pod := range pods { + if pod == nil { + continue + } + if isCreated(pod) { + status.replicas++ + } + + // count the number of running and ready replicas + if isRunningAndReady(pod) { + status.readyReplicas++ + if getPodRevision(pod) == updateRevision.Name { + status.updatedReadyReplicas++ + if avail, _ := isRunningAndAvailable(pod, minReadySeconds); avail { + status.updatedAvailableReplicas++ + } + } + // count the number of running and available replicas + ok, _ := isRunningAndAvailable(pod, minReadySeconds) + if ok { + status.availableReplicas++ + } + } + + // count the number of current and update replicas + if isCreated(pod) && !isTerminating(pod) { + revision := getPodRevision(pod) + if revision == currentRevision.Name { + status.currentReplicas++ + } + if revision == updateRevision.Name { + status.updatedReplicas++ + } + } + } + return status +} + +func updateStatus(status *workloadv1alpha1.AdvancedStatefulSetStatus, minReadySeconds int32, currentRevision, updateRevision 
*apps.ControllerRevision, podLists ...[]*v1.Pod) { + status.Replicas = 0 + status.ReadyReplicas = 0 + status.AvailableReplicas = 0 + status.CurrentReplicas = 0 + status.UpdatedReplicas = 0 + status.UpdatedReadyReplicas = 0 + status.UpdatedAvailableReplicas = 0 + for _, list := range podLists { + replicaStatus := computeReplicaStatus(list, minReadySeconds, currentRevision, updateRevision) + status.Replicas += replicaStatus.replicas + status.ReadyReplicas += replicaStatus.readyReplicas + status.AvailableReplicas += replicaStatus.availableReplicas + status.UpdatedReadyReplicas += replicaStatus.updatedReadyReplicas + status.UpdatedAvailableReplicas += replicaStatus.updatedAvailableReplicas + status.CurrentReplicas += replicaStatus.currentReplicas + status.UpdatedReplicas += replicaStatus.updatedReplicas + } +} + +// getStartOrdinal gets the first possible ordinal (inclusive). +// Returns spec.ordinals.start if spec.ordinals is set, otherwise returns 0. +func getStartOrdinal(set *workloadv1alpha1.AdvancedStatefulSet) int { + if set.Spec.Ordinals != nil { + return int(set.Spec.Ordinals.Start) + } + return 0 +} + +func (ssc *defaultStatefulSetControl) processCondemned(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, firstUnhealthyPod *v1.Pod, monotonic bool, condemned []*v1.Pod, i int) (bool, error) { + logger := klog.FromContext(ctx) + if isTerminating(condemned[i]) { + // if we are in monotonic mode, block and wait for terminating pods to expire + if monotonic { + logger.V(4).Info("AdvancedStatefulSet is waiting for Pod to Terminate prior to scale down", + "statefulSet", klog.KObj(set), "pod", klog.KObj(condemned[i])) + return true, nil + } + return false, nil + } + // if we are in monotonic mode and the condemned target is not the first unhealthy Pod block + if !isRunningAndReady(condemned[i]) && monotonic && condemned[i] != firstUnhealthyPod { + logger.V(4).Info("AdvancedStatefulSet is waiting for Pod to be Running and Ready prior to scale down", + 
"statefulSet", klog.KObj(set), "pod", klog.KObj(firstUnhealthyPod)) + return true, nil + } + // if we are in monotonic mode and the condemned target is not the first unhealthy Pod, block. + if avail, waitTime := isRunningAndAvailable(condemned[i], getMinReadySeconds(set)); !avail && monotonic && condemned[i] != firstUnhealthyPod { + logger.V(4).Info("AdvancedStatefulSet is waiting for Pod to be Available prior to scale down", + "statefulSet", klog.KObj(set), "pod", klog.KObj(firstUnhealthyPod)) + if waitTime > 0 { + durationStore.Push(getStatefulSetKey(condemned[i]), waitTime) + } + return true, nil + } + + logger.V(2).Info("Pod of AdvancedStatefulSet is terminating for scale down", + "statefulSet", klog.KObj(set), "pod", klog.KObj(condemned[i])) + + modified, _, err := ssc.deletePod(set, condemned[i]) + if err != nil || (monotonic && modified) { + return true, err + } + return false, nil +} + +// processReplica handles an individual replica within a AdvancedStatefulSet based on its current state. +// It decides whether to delete, create, update the replica, or await its readiness. +// +// different from stateful set: +// +// If decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) returns true, +// break the pod for-loop and proceed with the update logic, +// which will apply in-place conditions to make the pod ready. +// For example: update unhealthy pods or add some conditions +// +// Returns: +// - bool shouldExit: whether to exit. +// - bool shouldBreak: whether to break out of the loop. +// - An error if encountered during processing; nil otherwise. 
+func (ssc *defaultStatefulSetControl) processReplica( + ctx context.Context, + set *workloadv1alpha1.AdvancedStatefulSet, + updateSet *workloadv1alpha1.AdvancedStatefulSet, + monotonic bool, + replicas []*v1.Pod, + i int, + status *workloadv1alpha1.AdvancedStatefulSetStatus, + scaleMaxUnavailable *int) (bool, bool, error) { + minReadySeconds := getMinReadySeconds(set) + logger := klog.FromContext(ctx) + + if replicas[i] == nil { + return false, false, nil + } + // Note that pods with phase Succeeded will also trigger this event. This is + // because final pod phase of evicted or otherwise forcibly stopped pods + // (e.g. terminated on node reboot) is determined by the exit code of the + // container, not by the reason for pod termination. We should restart the pod + // regardless of the exit code. + if isFailed(replicas[i]) || isSucceeded(replicas[i]) { + if decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) { + // We must wait for pods to finish terminating or we violate max scale + return false, true, nil + } + if replicas[i].DeletionTimestamp == nil { + if _, _, err := ssc.deletePod(set, replicas[i]); err != nil { + return true, false, err + } + } + // Continue with next pods (unless monotonic) + return true, false, nil + } + // If we find a Pod that has not been created we create the Pod + if !isCreated(replicas[i]) { + if isStale, err := ssc.podControl.PodClaimIsStale(set, replicas[i]); err != nil { + return true, false, err + } else if isStale { + // If a pod has a stale PVC, no more work can be done this round. 
+ return true, false, err + } + // asts ut invoke updateStatefulset once by once, + // so we can update pod into normal state to avoid changing so many ut cases + state := workloadv1alpha1.LifecycleStatePreparingNormal + if set.Spec.Lifecycle == nil || + set.Spec.Lifecycle.PreNormal == nil || + lifecycle.IsPodAllHooked(set.Spec.Lifecycle.PreNormal, replicas[i]) { + state = workloadv1alpha1.LifecycleStateNormal + } + lifecycle.SetPodLifecycle(state)(replicas[i]) + if err := ssc.podControl.CreateStatefulPod(ctx, set, replicas[i]); err != nil { + msg := fmt.Sprintf("StatefulPodControl failed to create Pod error: %s", err) + condition := NewStatefulsetCondition(workloadv1alpha1.FailedCreatePod, v1.ConditionTrue, "", msg) + SetStatefulsetCondition(status, condition) + return true, false, err + } + if monotonic { + // if the set does not allow bursting, return immediately + return true, false, nil + } else if decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) { + logger.V(4).Info( + "AdvancedStatefulSet pod is Creating, and break pods scale", + "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i])) + return false, true, nil + } + // pod created, no more work possible for this round + return false, false, nil + } + + // If the Pod is in pending state then trigger PVC creation to create missing PVCs + if isPending(replicas[i]) { + logger.V(4).Info( + "AdvancedStatefulSet is triggering PVC creation for pending Pod", + "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i])) + if err := ssc.podControl.createMissingPersistentVolumeClaims(ctx, set, replicas[i]); err != nil { + return true, false, err + } + } + + // If we find a Pod that is currently terminating, we must wait until graceful deletion + // completes before we continue to make progress. 
+	if isTerminating(replicas[i]) && monotonic {
+		logger.V(4).Info("AdvancedStatefulSet is waiting for Pod to Terminate",
+			"statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i]))
+		return true, false, nil
+	} else if isTerminating(replicas[i]) && decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) {
+		logger.V(4).Info(
+			"AdvancedStatefulSet pod is Terminating, and break pods scale",
+			"statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i]))
+		return false, true, nil
+	}
+
+	// Update InPlaceUpdateReady condition for pod
+	_, duration, err := ssc.refreshPodState(set, replicas[i], status.UpdateRevision)
+	if err != nil {
+		logger.Error(err, "AdvancedStatefulSet failed to update pod condition for inplace",
+			"statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i]))
+		return true, false, err
+	} else if duration > 0 {
+		durationStore.Push(getStatefulSetKey(set), duration)
+	}
+
+	// If we have a Pod that has been created but is not running and available we can not make progress.
+	// We must ensure that, for each Pod we create, all of its predecessors, with respect to its
+	// ordinal, are Running and Available.
+ if monotonic || scaleMaxUnavailable != nil { + isAvailable, waitTime := isRunningAndAvailable(replicas[i], minReadySeconds) + if !isAvailable && monotonic { + if waitTime > 0 { + // make sure we check later + durationStore.Push(getStatefulSetKey(set), waitTime) + logger.V(4).Info( + "AdvancedStatefulSet needs to wait for the pod to be Running and Available after being"+ + " Ready for minReadySeconds", "statefulSet", klog.KObj(set), "waitTime", waitTime, + "pod", klog.KObj(replicas[i]), "minReadySeconds", minReadySeconds) + } else { + logger.V(4).Info("AdvancedStatefulSet is waiting for Pod to be Available", + "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i])) + } + return true, false, nil + } else if !isAvailable && decreaseAndCheckMaxUnavailable(scaleMaxUnavailable) { + logger.V(4).Info( + "AdvancedStatefulSet pod is unavailable, and break pods scale", + "statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i])) + if waitTime > 0 { + // make sure we check later + durationStore.Push(getStatefulSetKey(set), waitTime) + } + return false, true, nil + } + } + + // Enforce the AdvancedStatefulSet invariants + retentionMatch := true + retentionMatch, err = ssc.podControl.ClaimsMatchRetentionPolicy(updateSet, replicas[i]) + // An error is expected if the pod is not yet fully updated, and so return is treated as matching. 
+ if err != nil { + retentionMatch = true + } + + if identityMatches(set, replicas[i]) && storageMatches(set, replicas[i]) && retentionMatch { + return false, false, nil + } + + // Make a deep copy so we don't mutate the shared cache + replica := replicas[i].DeepCopy() + if err := ssc.podControl.UpdateStatefulPod(updateSet, replica); err != nil { + msg := fmt.Sprintf("StatefulPodControl failed to update Pod error: %s", err) + condition := NewStatefulsetCondition(workloadv1alpha1.FailedUpdatePod, v1.ConditionTrue, "", msg) + SetStatefulsetCondition(status, condition) + return true, false, err + } + + return false, false, nil +} + +func slowStartBatch(initialBatchSize int, remaining int, fn func(int) (bool, error)) (int, error) { + successes := 0 + j := 0 + for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(integer.IntMin(2*batchSize, remaining), MaxBatchSize) { + errCh := make(chan error, batchSize) + var wg sync.WaitGroup + wg.Add(batchSize) + for i := 0; i < batchSize; i++ { + go func(k int) { + defer wg.Done() + // Ignore the first parameter - relevant for monotonic only. + if _, err := fn(k); err != nil { + errCh <- err + } + }(j) + j++ + } + wg.Wait() + successes += batchSize - len(errCh) + close(errCh) + if len(errCh) > 0 { + errs := make([]error, 0) + for err := range errCh { + errs = append(errs, err) + } + return successes, utilerrors.NewAggregate(errs) + } + remaining -= batchSize + } + return successes, nil +} + +// runForAllWithBreak iterates through all pod objects, applying the given function until a specified condition is met. +// The function can decide whether to continue, break out of the loop, or return an error. +// Parameters: +// - pods: An array of pointers to Pod objects, representing the collection of pods to be processed. +// - fn: A function that takes an index as a parameter and returns three values: +// 1. A boolean indicating whether to exit the current iteration. +// 2. 
A boolean indicating whether to break out of the loop. +// 3. An error object, in case an error occurs during function execution. +// +// Returns: +// - A boolean indicating whether an exit condition was met or an error occurred during iteration, only applicable to monotonic updates +// - An error object, if an error was encountered during the execution of the provided function. +func runForAllWithBreak(pods []*v1.Pod, fn func(i int) (bool, bool, error), monotonic bool) (bool, error) { + if monotonic { + for i := range pods { + if shouldExit, shouldBreak, err := fn(i); shouldExit || err != nil { + return true, err + } else if shouldBreak { + // Introduce this branch to exit the for-loop while proceeding with subsequent update logic. + break + } + } + } else { + for i := range pods { + if _, shouldBreak, err := fn(i); err != nil { + return true, err + } else if shouldBreak { + break + } + } + } + return false, nil +} + +func runForAll(pods []*v1.Pod, fn func(i int) (bool, error), monotonic bool) (bool, error) { + if monotonic { + for i := range pods { + if shouldExit, err := fn(i); shouldExit || err != nil { + return true, err + } + } + } else { + if _, err := slowStartBatch(1, len(pods), fn); err != nil { + return true, err + } + } + return false, nil +} + +func getScaleMaxUnavailable(set *workloadv1alpha1.AdvancedStatefulSet) (*int, error) { + var scaleMaxUnavailable *int + if set.Spec.ScaleStrategy != nil && set.Spec.ScaleStrategy.MaxUnavailable != nil { + maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(set.Spec.ScaleStrategy.MaxUnavailable, int(*set.Spec.Replicas), false) + if err != nil { + return scaleMaxUnavailable, err + } + // maxUnavailable should not be less than 1 + if maxUnavailable < 1 { + maxUnavailable = 1 + } + scaleMaxUnavailable = &maxUnavailable + } + return scaleMaxUnavailable, nil +} diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_status_updater.go 
b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_status_updater.go new file mode 100644 index 00000000000..7672bbdf3fe --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_status_updater.go @@ -0,0 +1,65 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2019 The Kruise Authors. +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package advancedstatefulset + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +// StatusUpdaterInterface is an interface used to update the AdvancedStatefulSetStatus associated with a AdvancedStatefulSet. +// For any use other than testing, clients should create an instance using NewRealStatefulSetStatusUpdater. +type StatusUpdaterInterface interface { + // UpdateStatefulSetStatus sets the set's Status to status. Implementations are required to retry on conflicts, + // but fail on other errors. If the returned error is nil set's Status has been successfully set to status. + UpdateStatefulSetStatus(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, status *workloadv1alpha1.AdvancedStatefulSetStatus) error +} + +// NewRealStatefulSetStatusUpdater returns a StatusUpdaterInterface that updates the Status of a AdvancedStatefulSet, +// using the supplied controller-runtime client. 
+func NewRealStatefulSetStatusUpdater(c client.Client) StatusUpdaterInterface { + return &realStatefulSetStatusUpdater{client: c} +} + +type realStatefulSetStatusUpdater struct { + client client.Client +} + +func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus( + ctx context.Context, + set *workloadv1alpha1.AdvancedStatefulSet, + status *workloadv1alpha1.AdvancedStatefulSetStatus) error { + // don't wait due to limited number of clients, but backoff after the default number of steps + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Fetch latest version to apply status update + fresh := &workloadv1alpha1.AdvancedStatefulSet{} + if err := ssu.client.Get(ctx, types.NamespacedName{Namespace: set.Namespace, Name: set.Name}, fresh); err != nil { + return err + } + fresh.Status = *status + return ssu.client.Status().Update(ctx, fresh) + }) +} + +var _ StatusUpdaterInterface = &realStatefulSetStatusUpdater{} diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_utils.go b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_utils.go new file mode 100644 index 00000000000..1b6f3f87e59 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_set_utils.go @@ -0,0 +1,806 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2019 The Kruise Authors. +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package advancedstatefulset + +import ( + "bytes" + "encoding/json" + "fmt" + "regexp" + "sort" + "strconv" + "time" + + apiutil "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/api" + history "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/controllerhistory" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/kubecontroller" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/lifecycle" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/revision" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/klog/v2" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +var patchCodec = workloadv1alpha1.Codecs.LegacyCodec(workloadv1alpha1.SchemeGroupVersion) + +// statefulPodRegex is a regular expression that extracts the parent AdvancedStatefulSet and ordinal from the Name of a Pod +var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$") + +// getParentNameAndOrdinal gets the name of pod's parent AdvancedStatefulSet and pod's ordinal as extracted from its Name. If +// the Pod was not created by a AdvancedStatefulSet, its parent is considered to be empty string, and its ordinal is considered +// to be -1. +func getParentNameAndOrdinal(pod *v1.Pod) (string, int) { + parent := "" + ordinal := -1 + subMatches := statefulPodRegex.FindStringSubmatch(pod.Name) + if len(subMatches) < 3 { + return parent, ordinal + } + parent = subMatches[1] + if i, err := strconv.ParseInt(subMatches[2], 10, 32); err == nil { + ordinal = int(i) + } + return parent, ordinal +} + +// getParentName gets the name of pod's parent AdvancedStatefulSet. If pod has not parent, the empty string is returned. 
func getParentName(pod *v1.Pod) string {
	parent, _ := getParentNameAndOrdinal(pod)
	return parent
}

// getOrdinal gets pod's ordinal. If pod has no ordinal, -1 is returned.
func getOrdinal(pod *v1.Pod) int {
	_, ordinal := getParentNameAndOrdinal(pod)
	return ordinal
}

// podInOrdinalRange returns true if the given pod's ordinal number is within the
// permissible [start, end) range managed by this AdvancedStatefulSet and is not
// listed in the set's reserveOrdinals.
func podInOrdinalRange(pod *v1.Pod, set *workloadv1alpha1.AdvancedStatefulSet) bool {
	startOrdinal, endOrdinal, reserveOrdinals := getStatefulSetReplicasRange(set)
	return podInOrdinalRangeWithParams(pod, startOrdinal, endOrdinal, reserveOrdinals)
}

// podInOrdinalRangeWithParams reports whether pod's ordinal lies in
// [startOrdinal, endOrdinal) and is not one of the reserved ordinals.
func podInOrdinalRangeWithParams(pod *v1.Pod, startOrdinal, endOrdinal int, reserveOrdinals sets.Set[int]) bool {
	ordinal := getOrdinal(pod)
	return ordinal >= startOrdinal && ordinal < endOrdinal &&
		!reserveOrdinals.Has(ordinal)
}

// getPodName gets the name of set's child Pod with an ordinal index of ordinal.
func getPodName(set *workloadv1alpha1.AdvancedStatefulSet, ordinal int) string {
	return fmt.Sprintf("%s-%d", set.Name, ordinal)
}

// getPersistentVolumeClaimName gets the name of PersistentVolumeClaim for a Pod with an ordinal index of ordinal. claim
// must be a PersistentVolumeClaim from set's VolumeClaimTemplates template.
func getPersistentVolumeClaimName(set *workloadv1alpha1.AdvancedStatefulSet, claim *v1.PersistentVolumeClaim, ordinal int) string {
	// NOTE: This name format is used by the heuristics for zone spreading in ChooseZoneForVolume
	return fmt.Sprintf("%s-%s-%d", claim.Name, set.Name, ordinal)
}

// isMemberOf tests if pod is a member of set.
func isMemberOf(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) bool {
	return getParentName(pod) == set.Name
}

// identityMatches returns true if pod has a valid identity and network identity for a member of set:
// its name parses to set's name plus a non-negative ordinal, it lives in set's namespace, and it
// carries the StatefulSetPodNameLabel matching its own name.
func identityMatches(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) bool {
	parent, ordinal := getParentNameAndOrdinal(pod)
	return ordinal >= 0 &&
		set.Name == parent &&
		pod.Name == getPodName(set, ordinal) &&
		pod.Namespace == set.Namespace &&
		pod.Labels[apps.StatefulSetPodNameLabel] == pod.Name
}

// storageMatches returns true if pod's Volumes cover the set of PersistentVolumeClaims
// declared by set's VolumeClaimTemplates for pod's ordinal.
func storageMatches(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) bool {
	ordinal := getOrdinal(pod)
	if ordinal < 0 {
		return false
	}
	// Index pod volumes by name for O(1) lookup per claim template.
	volumes := make(map[string]v1.Volume, len(pod.Spec.Volumes))
	for _, volume := range pod.Spec.Volumes {
		volumes[volume.Name] = volume
	}
	for _, claim := range set.Spec.VolumeClaimTemplates {
		volume, found := volumes[claim.Name]
		if !found ||
			volume.VolumeSource.PersistentVolumeClaim == nil ||
			volume.VolumeSource.PersistentVolumeClaim.ClaimName !=
				getPersistentVolumeClaimName(set, &claim, ordinal) {
			return false
		}
	}
	return true
}

// getPersistentVolumeClaimRetentionPolicy returns the PVC retention policy for a AdvancedStatefulSet,
// returning a Retain/Retain policy if the set's policy is nil.
func getPersistentVolumeClaimRetentionPolicy(set *workloadv1alpha1.AdvancedStatefulSet) workloadv1alpha1.StatefulSetPersistentVolumeClaimRetentionPolicy {
	policy := workloadv1alpha1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: workloadv1alpha1.RetainPersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  workloadv1alpha1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	if set.Spec.PersistentVolumeClaimRetentionPolicy != nil {
		policy = *set.Spec.PersistentVolumeClaimRetentionPolicy
	}
	return policy
}

// claimOwnerMatchesSetAndPod returns false if the ownerRefs of the claim are not set consistently with the
// PVC deletion policy for the AdvancedStatefulSet.
func claimOwnerMatchesSetAndPod(claim *v1.PersistentVolumeClaim, set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) bool {
	policy := getPersistentVolumeClaimRetentionPolicy(set)
	const retain = workloadv1alpha1.RetainPersistentVolumeClaimRetentionPolicyType
	const delete = workloadv1alpha1.DeletePersistentVolumeClaimRetentionPolicyType
	switch {
	default:
		klog.InfoS("Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
		fallthrough
	case policy.WhenScaled == retain && policy.WhenDeleted == retain:
		// Retain/Retain: the claim must not be owned by either the set or the pod.
		if hasOwnerRef(claim, set) ||
			hasOwnerRef(claim, pod) {
			return false
		}
	case policy.WhenScaled == retain && policy.WhenDeleted == delete:
		// Delete on set deletion only: claim must be owned by the set, never by the pod.
		if !hasOwnerRef(claim, set) ||
			hasOwnerRef(claim, pod) {
			return false
		}
	case policy.WhenScaled == delete && policy.WhenDeleted == retain:
		// Delete on scale-down only: the pod ref must track whether this pod has been
		// scaled out of the ordinal range.
		if hasOwnerRef(claim, set) {
			return false
		}
		podScaledDown := !podInOrdinalRange(pod, set)
		if podScaledDown != hasOwnerRef(claim, pod) {
			return false
		}
	case policy.WhenScaled == delete && policy.WhenDeleted == delete:
		podScaledDown := !podInOrdinalRange(pod, set)
		// If a pod is scaled down, there should be no set ref and a pod ref;
		// if the pod is not scaled down it's the other way around.
		if podScaledDown == hasOwnerRef(claim, set) {
			return false
		}
		if podScaledDown != hasOwnerRef(claim, pod) {
			return false
		}
	}
	return true
}

// updateClaimOwnerRefForSetAndPod updates the ownerRefs for the claim according to the deletion policy of
// the AdvancedStatefulSet. Returns true if the claim was changed and should be updated and false otherwise.
func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) bool {
	needsUpdate := false
	// Sometimes the version and kind are not set in {pod,set}.TypeMeta. These are necessary for the ownerRef.
	// This is the case both in real clusters and the unittests.
	// TODO: there must be a better way to do this other than hardcoding the pod version?
	updateMeta := func(tm *metav1.TypeMeta, kind string) {
		if tm.APIVersion == "" {
			if kind == "AdvancedStatefulSet" {
				tm.APIVersion = "workload.fluid.io/v1alpha1"
			} else {
				tm.APIVersion = "v1"
			}
		}
		if tm.Kind == "" {
			tm.Kind = kind
		}
	}
	podMeta := pod.TypeMeta
	updateMeta(&podMeta, "Pod")
	setMeta := set.TypeMeta
	updateMeta(&setMeta, "AdvancedStatefulSet")
	policy := getPersistentVolumeClaimRetentionPolicy(set)
	const retain = workloadv1alpha1.RetainPersistentVolumeClaimRetentionPolicyType
	const delete = workloadv1alpha1.DeletePersistentVolumeClaimRetentionPolicyType
	switch {
	default:
		klog.InfoS("Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
		fallthrough
	case policy.WhenScaled == retain && policy.WhenDeleted == retain:
		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
		needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
	case policy.WhenScaled == retain && policy.WhenDeleted == delete:
		needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
		needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
	case policy.WhenScaled == delete && policy.WhenDeleted == retain:
		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
		podScaledDown := !podInOrdinalRange(pod, set)
		if podScaledDown {
			needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
		}
		if !podScaledDown {
			needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
		}
	case policy.WhenScaled == delete && policy.WhenDeleted == delete:
		podScaledDown := !podInOrdinalRange(pod, set)
		if podScaledDown {
			needsUpdate = removeOwnerRef(claim, set) || needsUpdate
			needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
		}
		if !podScaledDown {
			needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
			needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
		}
	}
	return needsUpdate
}

// hasOwnerRef returns true if target has an ownerRef to owner (matched by UID).
func hasOwnerRef(target, owner metav1.Object) bool {
	ownerUID := owner.GetUID()
	for _, ownerRef := range target.GetOwnerReferences() {
		if ownerRef.UID == ownerUID {
			return true
		}
	}
	return false
}

// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale,
// i.e. the ref names owner but carries a different UID (the owner was deleted and recreated).
func hasStaleOwnerRef(target, owner metav1.Object) bool {
	for _, ownerRef := range target.GetOwnerReferences() {
		if ownerRef.Name == owner.GetName() && ownerRef.UID != owner.GetUID() {
			return true
		}
	}
	return false
}

// setOwnerRef adds owner to the ownerRefs of target, if necessary. Returns true if target needs to be
// updated and false otherwise.
func setOwnerRef(target, owner metav1.Object, ownerType *metav1.TypeMeta) bool {
	if hasOwnerRef(target, owner) {
		return false
	}
	ownerRefs := append(
		target.GetOwnerReferences(),
		metav1.OwnerReference{
			APIVersion: ownerType.APIVersion,
			Kind:       ownerType.Kind,
			Name:       owner.GetName(),
			UID:        owner.GetUID(),
		})
	target.SetOwnerReferences(ownerRefs)
	return true
}

// removeOwnerRef removes owner from the ownerRefs of target, if necessary. Returns true if target needs
// to be updated and false otherwise.
func removeOwnerRef(target, owner metav1.Object) bool {
	if !hasOwnerRef(target, owner) {
		return false
	}
	ownerUID := owner.GetUID()
	oldRefs := target.GetOwnerReferences()
	// hasOwnerRef above guarantees at least one matching entry, so len(oldRefs)-1 is safe.
	newRefs := make([]metav1.OwnerReference, len(oldRefs)-1)
	skip := 0
	for i := range oldRefs {
		if oldRefs[i].UID == ownerUID {
			skip = -1
		} else {
			newRefs[i+skip] = oldRefs[i]
		}
	}
	target.SetOwnerReferences(newRefs)
	return true
}

// getPersistentVolumeClaims gets a map of PersistentVolumeClaims to their template names, as defined in set. The
// returned PersistentVolumeClaims are each constructed with a name specific to the Pod. This name is determined
// by getPersistentVolumeClaimName.
func getPersistentVolumeClaims(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) map[string]v1.PersistentVolumeClaim {
	ordinal := getOrdinal(pod)
	templates := set.Spec.VolumeClaimTemplates
	claims := make(map[string]v1.PersistentVolumeClaim, len(templates))
	for i := range templates {
		claim := templates[i].DeepCopy()
		claim.Name = getPersistentVolumeClaimName(set, claim, ordinal)
		claim.Namespace = set.Namespace
		if claim.Labels != nil {
			for key, value := range set.Spec.Selector.MatchLabels {
				claim.Labels[key] = value
			}
		} else {
			// NOTE(review): this aliases the selector's MatchLabels map rather than copying it;
			// a later mutation of claim.Labels would write through to set.Spec.Selector — confirm intended.
			claim.Labels = set.Spec.Selector.MatchLabels
		}
		claims[templates[i].Name] = *claim
	}
	return claims
}

// updateStorage updates pod's Volumes to conform with the PersistentVolumeClaim of set's templates. If pod has
// conflicting local Volumes these are replaced with Volumes that conform to the set's templates.
func updateStorage(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) {
	currentVolumes := pod.Spec.Volumes
	claims := getPersistentVolumeClaims(set, pod)
	newVolumes := make([]v1.Volume, 0, len(claims))
	for name, claim := range claims {
		newVolumes = append(newVolumes, v1.Volume{
			Name: name,
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: claim.Name,
					// TODO: Use source definition to set this value when we have one.
					ReadOnly: false,
				},
			},
		})
	}
	// Preserve existing pod volumes whose names do not collide with a claim template.
	for i := range currentVolumes {
		if _, ok := claims[currentVolumes[i].Name]; !ok {
			newVolumes = append(newVolumes, currentVolumes[i])
		}
	}
	pod.Spec.Volumes = newVolumes
}

// initIdentity applies the pod identity (see updateIdentity) and additionally sets the
// immutable network fields Hostname and Subdomain; it is intended for initial Pod
// creation only, since those fields must not change on updates.
func initIdentity(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) {
	updateIdentity(set, pod)
	// Set these immutable fields only on initial Pod creation, not updates.
	pod.Spec.Hostname = pod.Name
	pod.Spec.Subdomain = set.Spec.ServiceName
}

// updateIdentity updates pod's name and namespace, and sets the StatefulSetPodNameLabel
// and PodIndexLabel labels to conform to set's name and pod's ordinal.
func updateIdentity(set *workloadv1alpha1.AdvancedStatefulSet, pod *v1.Pod) {
	ordinal := getOrdinal(pod)
	pod.Name = getPodName(set, ordinal)
	pod.Namespace = set.Namespace
	if pod.Labels == nil {
		pod.Labels = make(map[string]string)
	}
	pod.Labels[apps.StatefulSetPodNameLabel] = pod.Name
	pod.Labels[apps.PodIndexLabel] = strconv.Itoa(ordinal)
}

// isRunningAndAvailable returns true if pod is in the PodRunning Phase,
// and it has a condition of PodReady for a minimum of minReadySeconds.
+// return true if it's available +// return false with zero means it's not ready +// return false with a positive value means it's not available and should recheck with that time +func isRunningAndAvailable(pod *v1.Pod, minReadySeconds int32) (bool, time.Duration) { + state := lifecycle.GetPodLifecycleState(pod) + if state != "" && state != workloadv1alpha1.LifecycleStateNormal { + // when state exists and is not normal, it is unavailable + return false, 0 + } + if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(pod) { + return false, 0 + } + c := podutil.GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 { + return true, 0 + } + if c.LastTransitionTime.IsZero() { + return false, minReadySecondsDuration + } + waitTime := c.LastTransitionTime.Time.Add(minReadySecondsDuration).Sub(time.Now()) + if waitTime > 0 { + return false, waitTime + } + return true, 0 +} + +// isRunningAndReady returns true if pod is in the PodRunning Phase, if it has a condition of PodReady. 
func isRunningAndReady(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod)
}

// isCreated returns true if pod has been created and is maintained by the API server
// (a pod persisted by the API server always has a non-empty Phase).
func isCreated(pod *v1.Pod) bool {
	return pod.Status.Phase != ""
}

// isPending returns true if pod has a Phase of PodPending
func isPending(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodPending
}

// isFailed returns true if pod has a Phase of PodFailed
func isFailed(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed
}

// isSucceeded returns true if pod has a Phase of PodSucceeded
func isSucceeded(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodSucceeded
}

// isTerminating returns true if pod's DeletionTimestamp has been set
func isTerminating(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil
}

// isHealthy returns true if pod is running and ready, has not been terminated, and
// its lifecycle state (when present) is Normal.
func isHealthy(pod *v1.Pod) bool {
	state := lifecycle.GetPodLifecycleState(pod)
	if state != "" && state != workloadv1alpha1.LifecycleStateNormal {
		return false
	}
	return isRunningAndReady(pod) && !isTerminating(pod)
}

// allowsBurst is true if the set's PodManagementPolicy is Parallel, i.e. pods may be
// created or deleted without waiting for predecessors to become Running and Ready.
func allowsBurst(set *workloadv1alpha1.AdvancedStatefulSet) bool {
	return set.Spec.PodManagementPolicy == apps.ParallelPodManagement
}

// getMinReadySeconds returns the minReadySeconds set in the rollingUpdate, default is 0
func getMinReadySeconds(set *workloadv1alpha1.AdvancedStatefulSet) int32 {
	if set.Spec.UpdateStrategy.RollingUpdate == nil ||
		set.Spec.UpdateStrategy.RollingUpdate.MinReadySeconds == nil {
		return 0
	}
	return *set.Spec.UpdateStrategy.RollingUpdate.MinReadySeconds
}

// setPodRevision sets the revision of Pod to revision by adding the StatefulSetRevisionLabel
func setPodRevision(pod *v1.Pod, revision string) {
	if pod.Labels == nil {
		pod.Labels = make(map[string]string)
	}
	pod.Labels[apps.StatefulSetRevisionLabel] = revision
}

// getPodRevision gets the revision of Pod by inspecting the StatefulSetRevisionLabel. If pod has no revision the empty
// string is returned.
func getPodRevision(pod *v1.Pod) string {
	if pod.Labels == nil {
		return ""
	}
	return pod.Labels[apps.StatefulSetRevisionLabel]
}

// newStatefulSetPod returns a new Pod conforming to the set's Spec with an identity generated from ordinal.
func newStatefulSetPod(set *workloadv1alpha1.AdvancedStatefulSet, ordinal int) *v1.Pod {
	// NOTE(review): GetPodFromTemplate's error is deliberately dropped here — confirm it
	// cannot fail for a controller ref built from an already-validated set.
	pod, _ := kubecontroller.GetPodFromTemplate(&set.Spec.Template, set, metav1.NewControllerRef(set, controllerKind))
	pod.Name = getPodName(set, ordinal)
	initIdentity(set, pod)
	updateStorage(set, pod)
	return pod
}

// newVersionedStatefulSetPod creates a new Pod for a AdvancedStatefulSet. currentSet is the representation of the set at the
// current revision. updateSet is the representation of the set at the updateRevision. currentRevision is the name of
// the current revision. updateRevision is the name of the update revision. ordinal is the ordinal of the Pod.
func newVersionedStatefulSetPod(currentSet, updateSet *workloadv1alpha1.AdvancedStatefulSet, currentRevision, updateRevision string,
	ordinal int, replicas []*v1.Pod,
) *v1.Pod {
	if isCurrentRevisionNeeded(currentSet, updateRevision, ordinal, replicas) {
		pod := newStatefulSetPod(currentSet, ordinal)
		setPodRevision(pod, currentRevision)
		return pod
	}
	pod := newStatefulSetPod(updateSet, ordinal)
	setPodRevision(pod, updateRevision)
	return pod
}

// isCurrentRevisionNeeded calculates if the 'ordinal' Pod should be created at the current
// revision (as opposed to the update revision), honoring the rolling-update partition.
func isCurrentRevisionNeeded(set *workloadv1alpha1.AdvancedStatefulSet, updateRevision string, ordinal int, replicas []*v1.Pod) bool {
	if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
		return false
	}
	if set.Spec.UpdateStrategy.RollingUpdate == nil {
		// No rollingUpdate config: the first CurrentReplicas ordinals stay on the current revision.
		return ordinal < getStartOrdinal(set)+int(set.Status.CurrentReplicas)
	}
	if set.Spec.UpdateStrategy.RollingUpdate.UnorderedUpdate == nil {
		unreservedPodsNum := 0
		// assume all pods [0, idx) are created and only reserved pods are nil
		idx := ordinal - getStartOrdinal(set)
		for i := 0; i < idx; i++ {
			if replicas[i] != nil {
				unreservedPodsNum++
			}
		}
		// if all pods [0, idx] are current revision
		return unreservedPodsNum+1 <= int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
	}

	// Unordered update: count replicas (other than this ordinal) that are still on the old revision.
	var noUpdatedReplicas int
	for _, pod := range replicas {
		if pod == nil || getOrdinal(pod) == ordinal {
			continue
		}
		if !revision.IsPodUpdate(pod, updateRevision) {
			noUpdatedReplicas++
		}
	}
	return noUpdatedReplicas < int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
}

// Match checks if the given AdvancedStatefulSet's template matches the template stored in the given history.
func Match(ss *workloadv1alpha1.AdvancedStatefulSet, history *apps.ControllerRevision) (bool, error) {
	// Encoding the set for the patch may update its GVK metadata, which causes data races if this
	// set is in an informer cache.
	clone := ss.DeepCopy()
	patch, err := getPatch(clone)
	if err != nil {
		return false, err
	}
	return bytes.Equal(patch, history.Data.Raw), nil
}

// getPatch returns a strategic merge patch that can be applied to restore a AdvancedStatefulSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getPatch(set *workloadv1alpha1.AdvancedStatefulSet) ([]byte, error) {
	str, err := runtime.Encode(patchCodec, set)
	if err != nil {
		return nil, err
	}
	var raw map[string]interface{}
	err = json.Unmarshal(str, &raw)
	if err != nil {
		return nil, err
	}
	objCopy := make(map[string]interface{})
	specCopy := make(map[string]interface{})
	// Keep only spec.template and mark it as a full replacement in the strategic merge patch.
	spec := raw["spec"].(map[string]interface{})
	template := spec["template"].(map[string]interface{})
	specCopy["template"] = template
	template["$patch"] = "replace"
	objCopy["spec"] = specCopy
	patch, err := json.Marshal(objCopy)
	return patch, err
}

// newRevision creates a new ControllerRevision containing a patch that reapplies the target state of set.
// The Revision of the returned ControllerRevision is set to revision. If the returned error is nil, the returned
// ControllerRevision is valid. AdvancedStatefulSet revisions are stored as patches that re-apply the current state of set
// to a new AdvancedStatefulSet using a strategic merge patch to replace the saved state of the new AdvancedStatefulSet.
func newRevision(set *workloadv1alpha1.AdvancedStatefulSet, revision int64, collisionCount *int32) (*apps.ControllerRevision, error) {
	patch, err := getPatch(set)
	if err != nil {
		return nil, err
	}
	cr, err := history.NewControllerRevision(set,
		controllerKind,
		set.Spec.Template.Labels,
		runtime.RawExtension{Raw: patch},
		revision,
		collisionCount)
	if err != nil {
		return nil, err
	}
	if cr.ObjectMeta.Annotations == nil {
		cr.ObjectMeta.Annotations = make(map[string]string)
	}
	// Propagate the set's annotations onto the revision.
	for key, value := range set.Annotations {
		cr.ObjectMeta.Annotations[key] = value
	}
	return cr, nil
}

// ApplyRevision returns a new AdvancedStatefulSet constructed by restoring the state in revision to set. If the returned error
// is nil, the returned AdvancedStatefulSet is valid.
func ApplyRevision(set *workloadv1alpha1.AdvancedStatefulSet, revision *apps.ControllerRevision) (*workloadv1alpha1.AdvancedStatefulSet, error) {
	clone := set.DeepCopy()
	patched, err := strategicpatch.StrategicMergePatch([]byte(runtime.EncodeOrDie(patchCodec, clone)), revision.Data.Raw, clone)
	if err != nil {
		return nil, err
	}
	restoredSet := &workloadv1alpha1.AdvancedStatefulSet{}
	err = json.Unmarshal(patched, restoredSet)
	if err != nil {
		return nil, err
	}
	return restoredSet, nil
}

// nextRevision finds the next valid revision number based on revisions. If the length of revisions
// is 0 this is 1. Otherwise, it is 1 greater than the largest revision's Revision. This method
// assumes that revisions has been sorted by Revision.
func nextRevision(revisions []*apps.ControllerRevision) int64 {
	count := len(revisions)
	if count <= 0 {
		return 1
	}
	return revisions[count-1].Revision + 1
}

// inconsistentStatus returns true if the ObservedGeneration of status is greater than set's
// Status.ObservedGeneration or if any of the status's fields do not match those of set's status.
func inconsistentStatus(set *workloadv1alpha1.AdvancedStatefulSet, status *workloadv1alpha1.AdvancedStatefulSetStatus) bool {
	if status.ObservedGeneration > set.Status.ObservedGeneration ||
		status.Replicas != set.Status.Replicas ||
		status.CurrentReplicas != set.Status.CurrentReplicas ||
		status.ReadyReplicas != set.Status.ReadyReplicas ||
		status.AvailableReplicas != set.Status.AvailableReplicas ||
		status.UpdatedReplicas != set.Status.UpdatedReplicas ||
		status.CurrentRevision != set.Status.CurrentRevision ||
		status.UpdateRevision != set.Status.UpdateRevision ||
		status.LabelSelector != set.Status.LabelSelector {
		return true
	}

	// Compare the per-volume-claim status entries by claim name.
	volumeClaimName2StatusIdx := map[string]int{}
	for i, v := range status.VolumeClaims {
		volumeClaimName2StatusIdx[v.VolumeClaimName] = i
	}
	for _, v := range set.Status.VolumeClaims {
		if idx, ok := volumeClaimName2StatusIdx[v.VolumeClaimName]; !ok {
			// raw template not exist in current status => inconsistent
			return true
		} else if status.VolumeClaims[idx].CompatibleReplicas != v.CompatibleReplicas ||
			status.VolumeClaims[idx].CompatibleReadyReplicas != v.CompatibleReadyReplicas {
			return true
		}
	}
	return false
}

// completeRollingUpdate completes a rolling update when all of set's replica Pods have been updated
// and are ready: status's currentRevision is set to updateRevision and status's currentReplicas is
// set to updatedReplicas.
func completeRollingUpdate(set *workloadv1alpha1.AdvancedStatefulSet, status *workloadv1alpha1.AdvancedStatefulSetStatus) {
	if set.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType &&
		status.UpdatedReplicas == status.Replicas &&
		status.ReadyReplicas == status.Replicas {
		status.CurrentReplicas = status.UpdatedReplicas
		status.CurrentRevision = status.UpdateRevision
	}
}

// SortPodsAscendingOrdinal sorts the given Pods according to their ordinals.
func SortPodsAscendingOrdinal(pods []*v1.Pod) {
	sort.Sort(ascendingOrdinal(pods))
}

// ascendingOrdinal is a sort.Interface that Sorts a list of Pods based on the ordinals extracted
// from the Pod. Pod's that have not been constructed by AdvancedStatefulSet's have an ordinal of -1, and are therefore pushed
// to the front of the list.
type ascendingOrdinal []*v1.Pod

func (ao ascendingOrdinal) Len() int {
	return len(ao)
}

func (ao ascendingOrdinal) Swap(i, j int) {
	ao[i], ao[j] = ao[j], ao[i]
}

func (ao ascendingOrdinal) Less(i, j int) bool {
	return getOrdinal(ao[i]) < getOrdinal(ao[j])
}

// descendingOrdinal is the mirror of ascendingOrdinal: it sorts Pods with the highest ordinal first.
type descendingOrdinal []*v1.Pod

func (do descendingOrdinal) Len() int {
	return len(do)
}

func (do descendingOrdinal) Swap(i, j int) {
	do[i], do[j] = do[j], do[i]
}

func (do descendingOrdinal) Less(i, j int) bool {
	return getOrdinal(do[i]) > getOrdinal(do[j])
}

// NewStatefulsetCondition creates a new advancedstatefulset condition with
// LastTransitionTime set to now.
func NewStatefulsetCondition(conditionType apps.StatefulSetConditionType, conditionStatus v1.ConditionStatus, reason, message string) apps.StatefulSetCondition {
	return apps.StatefulSetCondition{
		Type:               conditionType,
		Status:             conditionStatus,
		LastTransitionTime: metav1.Now(),
		Reason:             reason,
		Message:            message,
	}
}

// GetStatefulsetConditition returns the condition with the provided type.
+func GetStatefulsetConditition(status workloadv1alpha1.AdvancedStatefulSetStatus, condType apps.StatefulSetConditionType) *apps.StatefulSetCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + +// SetStatefulsetCondition updates the advancedstatefulset to include the provided condition. If the condition that +func SetStatefulsetCondition(status *workloadv1alpha1.AdvancedStatefulSetStatus, condition apps.StatefulSetCondition) { + currentCond := GetStatefulsetConditition(*status, condition.Type) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { + return + } + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + + newConditions := filterOutCondition(status.Conditions, condition.Type) + status.Conditions = append(newConditions, condition) +} + +func filterOutCondition(conditions []apps.StatefulSetCondition, condType apps.StatefulSetConditionType) []apps.StatefulSetCondition { + var newCondititions []apps.StatefulSetCondition + for _, c := range conditions { + if c.Type == condType { + continue + } + newCondititions = append(newCondititions, c) + } + return newCondititions +} + +func getStatefulSetKey(o metav1.Object) string { + return o.GetNamespace() + "/" + o.GetName() +} + +func decreaseAndCheckMaxUnavailable(maxUnavailable *int) bool { + if maxUnavailable == nil { + return false + } + val := *maxUnavailable - 1 + *maxUnavailable = val + return val <= 0 +} + +// return parameters is startOrdinal(inclusive), endOrdinal(exclusive) and reserveOrdinals, +// and they are used to support reserveOrdinals scenarios. +// When configured as follows: +/* + apiVersion: workload.kruise.io/v1beta1 + kind: AdvancedStatefulSet + spec: + # ... 
+ replicas: 4 + reserveOrdinals: + - 1 + - 3 + Spec.Ordinals.Start: 2 +*/ +// result is startOrdinal 2(inclusive), endOrdinal 7(exclusive), reserveOrdinals = {1, 3} +// replicas[endOrdinal - startOrdinal] stores [replica-2, nil(reserveOrdinal 3), replica-4, replica-5, replica-6] +// todo: maybe we should remove ineffective reserveOrdinals in webhook, reserveOrdinals = {3} +func getStatefulSetReplicasRange(set *workloadv1alpha1.AdvancedStatefulSet) (int, int, sets.Set[int]) { + reserveOrdinals := apiutil.GetReserveOrdinalIntSet(set.Spec.ReserveOrdinals) + replicaMaxOrdinal := getStartOrdinal(set) + for realReplicaCount := 0; realReplicaCount < int(*set.Spec.Replicas); replicaMaxOrdinal++ { + if reserveOrdinals.Has(replicaMaxOrdinal) { + continue + } + realReplicaCount++ + } + return getStartOrdinal(set), replicaMaxOrdinal, reserveOrdinals +} diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_update_utils.go b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_update_utils.go new file mode 100644 index 00000000000..ad7ec9cbb8b --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/stateful_update_utils.go @@ -0,0 +1,77 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package advancedstatefulset + +import ( + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/revision" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/updatesort" + v1 "k8s.io/api/core/v1" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +func sortPodsToUpdate(rollingUpdateStrategy *workloadv1alpha1.RollingUpdateStatefulSetStrategy, updateRevision string, totalReplicas int32, replicas []*v1.Pod) []int { + var updateMin int + if rollingUpdateStrategy != nil && rollingUpdateStrategy.Partition != nil { + updateMin = int(*rollingUpdateStrategy.Partition) + } + + maxUpdate := int(totalReplicas) - updateMin + if maxUpdate <= 0 { + return []int{} + } + + if rollingUpdateStrategy == nil || rollingUpdateStrategy.UnorderedUpdate == nil { + var indexes []int + for target := len(replicas) - 1; target >= updateMin && len(indexes) < maxUpdate; target-- { + if replicas[target] == nil { + continue + } + indexes = append(indexes, target) + } + return indexes + } + + priorityStrategy := rollingUpdateStrategy.UnorderedUpdate.PriorityStrategy + + var updatedIdxs []int + var waitUpdateIdxs []int + for target := len(replicas) - 1; target >= 0; target-- { + if replicas[target] == nil { + continue + } + if isTerminating(replicas[target]) { + updatedIdxs = append(updatedIdxs, target) + } else if revision.IsPodUpdate(replicas[target], updateRevision) { + updatedIdxs = append(updatedIdxs, target) + } else { + waitUpdateIdxs = append(waitUpdateIdxs, target) + } + } + + if priorityStrategy != nil { + waitUpdateIdxs = updatesort.NewPrioritySorter(priorityStrategy).Sort(replicas, waitUpdateIdxs) + } + + allIdxs := append(updatedIdxs, waitUpdateIdxs...) 
+ if len(allIdxs) > maxUpdate { + allIdxs = allIdxs[:maxUpdate] + } + + return allIdxs +} diff --git a/pkg/controllers/workload/v1alpha1/advancedstatefulset/statefulset_controller.go b/pkg/controllers/workload/v1alpha1/advancedstatefulset/statefulset_controller.go new file mode 100644 index 00000000000..2e084896481 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/advancedstatefulset/statefulset_controller.go @@ -0,0 +1,341 @@ +/* +Copyright 2026 The Fluid Authors. +Copyright 2019 The Kruise Authors. +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package advancedstatefulset + +import ( + "context" + "flag" + "fmt" + "time" + + history "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/controllerhistory" + utildiscovery "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/discovery" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/expectations" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/inplaceupdate" + kubecontroller2 "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/kubecontroller" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/lifecycle" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/ratelimiter" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/requeueduration" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/revisionadapter" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + discoverylib "k8s.io/client-go/discovery" + kubeclientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + 
"sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +func init() { + flag.IntVar(&concurrentReconciles, "advancedstatefulset-workers", concurrentReconciles, "Max concurrent workers for AdvancedStatefulSet controller.") +} + +var ( + // controllerKind contains the schema.GroupVersionKind for this controller type. + controllerKind = workloadv1alpha1.SchemeGroupVersion.WithKind("AdvancedStatefulSet") + concurrentReconciles = 3 + + updateExpectations = expectations.NewUpdateExpectations(revisionadapter.NewDefaultImpl()) + // this is a short cut for any sub-functions to notify the reconcile how long to wait to requeue + durationStore = requeueduration.DurationStore{} +) + +// Add creates a new AdvancedStatefulSet Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(mgr manager.Manager) error { + // Initialize discovery client for GVK detection + dc, err := discoverylib.NewDiscoveryClientForConfig(mgr.GetConfig()) + if err != nil { + return err + } + utildiscovery.Init(dc) + + if !utildiscovery.DiscoverGVK(controllerKind) { + return nil + } + r, err := newReconciler(mgr) + if err != nil { + return err + } + return add(mgr, r) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) { + cacher := mgr.GetCache() + podInformer, err := cacher.GetInformerForKind(context.TODO(), v1.SchemeGroupVersion.WithKind("Pod")) + if err != nil { + return nil, err + } + pvcInformer, err := cacher.GetInformerForKind(context.TODO(), v1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + if err != nil { + return nil, err + } + revInformer, err := cacher.GetInformerForKind(context.TODO(), appsv1.SchemeGroupVersion.WithKind("ControllerRevision")) + if err != nil { + return nil, err + } + + 
podLister := corelisters.NewPodLister(podInformer.(toolscache.SharedIndexInformer).GetIndexer()) + pvcLister := corelisters.NewPersistentVolumeClaimLister(pvcInformer.(toolscache.SharedIndexInformer).GetIndexer()) + + genericCfg := rest.CopyConfig(mgr.GetConfig()) + genericCfg.UserAgent = "advancedstatefulset-controller" + kubeClient, err := kubeclientset.NewForConfig(genericCfg) + if err != nil { + return nil, err + } + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(workloadv1alpha1.Scheme, v1.EventSource{Component: "advancedstatefulset-controller"}) + + runtimeClient := mgr.GetClient() + return &ReconcileStatefulSet{ + Client: runtimeClient, + control: NewDefaultStatefulSetControl( + NewStatefulPodControl( + kubeClient, + podLister, + pvcLister, + recorder), + inplaceupdate.New(runtimeClient, revisionadapter.NewDefaultImpl()), + lifecycle.New(runtimeClient), + NewRealStatefulSetStatusUpdater(runtimeClient), + history.NewHistory(kubeClient, appslisters.NewControllerRevisionLister(revInformer.(toolscache.SharedIndexInformer).GetIndexer())), + recorder, + ), + podControl: kubecontroller2.RealPodControl{KubeClient: kubeClient, Recorder: recorder}, + podLister: podLister, + }, nil +} + +var _ reconcile.Reconciler = &ReconcileStatefulSet{} + +// ReconcileStatefulSet reconciles a AdvancedStatefulSet object +type ReconcileStatefulSet struct { + // client provides controller-runtime client for AdvancedStatefulSet operations + runtimeclient.Client + // control returns an interface capable of syncing a stateful set. + // Abstracted out for testing. + control StatefulSetControlInterface + // podControl is used for patching pods. 
+ podControl kubecontroller2.PodControlInterface + // podLister is able to list/get pods from a shared informer's store + podLister corelisters.PodLister +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("advancedstatefulset-controller", mgr, controller.Options{ + Reconciler: r, MaxConcurrentReconciles: concurrentReconciles, CacheSyncTimeout: util.GetControllerCacheSyncTimeout(), + RateLimiter: ratelimiter.DefaultControllerRateLimiter[reconcile.Request]()}) + if err != nil { + return err + } + + // Watch for changes to AdvancedStatefulSet + err = c.Watch(source.Kind(mgr.GetCache(), &workloadv1alpha1.AdvancedStatefulSet{}), + &handler.EnqueueRequestForObject{}, + predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldSS, ok1 := e.ObjectOld.(*workloadv1alpha1.AdvancedStatefulSet) + newSS, ok2 := e.ObjectNew.(*workloadv1alpha1.AdvancedStatefulSet) + if ok1 && ok2 && oldSS.Status.Replicas != newSS.Status.Replicas { + klog.V(4).InfoS("Observed updated replica count for AdvancedStatefulSet", + "statefulSet", klog.KObj(newSS), "oldReplicas", oldSS.Status.Replicas, "newReplicas", newSS.Status.Replicas) + } + return true + }, + }) + if err != nil { + return err + } + + // Watch for changes to PVC patched by AdvancedStatefulSet + err = c.Watch(source.Kind(mgr.GetCache(), &v1.PersistentVolumeClaim{}), &pvcEventHandler{}) + if err != nil { + return err + } + + // Watch for changes to Pod created by AdvancedStatefulSet + err = c.Watch(source.Kind(mgr.GetCache(), &v1.Pod{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &workloadv1alpha1.AdvancedStatefulSet{}, handler.OnlyControllerOwner())) + if err != nil { + return err + } + + klog.V(4).InfoS("Finished to add advancedstatefulset-controller") + + return nil +} + +// 
+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=controllerrevisions,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=workload.fluid.io,resources=advancedstatefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=workload.fluid.io,resources=advancedstatefulsets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=workload.fluid.io,resources=advancedstatefulsets/finalizers,verbs=update + +// Reconcile reads that state of the cluster for a AdvancedStatefulSet object and makes changes based on the state read +// and what is in the AdvancedStatefulSet.Spec +// Automatically generate RBAC rules to allow the Controller to read and write Pods +func (ssc *ReconcileStatefulSet) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, retErr error) { + key := request.NamespacedName.String() + namespace := request.Namespace + name := request.Name + + startTime := time.Now() + defer func() { + if retErr == nil { + if res.Requeue || res.RequeueAfter > 0 { + klog.InfoS("Finished syncing AdvancedStatefulSet", "statefulSet", request, "elapsedTime", time.Since(startTime), "result", res) + } else { + klog.InfoS("Finished syncing AdvancedStatefulSet", "statefulSet", request, "elapsedTime", time.Since(startTime)) + } + } else { + klog.ErrorS(retErr, "Finished syncing AdvancedStatefulSet error", "statefulSet", request, "elapsedTime", time.Since(startTime)) + } + }() + + set := &workloadv1alpha1.AdvancedStatefulSet{} + if err := ssc.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, set); err != nil { + if 
errors.IsNotFound(err) { + klog.InfoS("AdvancedStatefulSet deleted", "statefulSet", key) + updateExpectations.DeleteExpectations(key) + return reconcile.Result{}, nil + } + utilruntime.HandleError(fmt.Errorf("unable to retrieve AdvancedStatefulSet %v from store: %v", key, err)) + return reconcile.Result{}, err + } + + selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error converting AdvancedStatefulSet %v selector: %v", key, err)) + // This is a non-transient error, so don't retry. + return reconcile.Result{}, nil + } + + if err := ssc.adoptOrphanRevisions(set); err != nil { + return reconcile.Result{}, err + } + + pods, err := ssc.getPodsForStatefulSet(ctx, set, selector) + if err != nil { + return reconcile.Result{}, err + } + + err = ssc.syncStatefulSet(ctx, set, pods) + return reconcile.Result{RequeueAfter: durationStore.Pop(getStatefulSetKey(set))}, err +} + +// adoptOrphanRevisions adopts any orphaned ControllerRevisions matched by set's Selector. 
+func (ssc *ReconcileStatefulSet) adoptOrphanRevisions(set *workloadv1alpha1.AdvancedStatefulSet) error { + revisions, err := ssc.control.ListRevisions(set) + if err != nil { + return err + } + orphanRevisions := make([]*appsv1.ControllerRevision, 0) + for i := range revisions { + if metav1.GetControllerOf(revisions[i]) == nil { + orphanRevisions = append(orphanRevisions, revisions[i]) + } + } + if len(orphanRevisions) > 0 { + fresh := &workloadv1alpha1.AdvancedStatefulSet{} + if err := ssc.Get(context.TODO(), types.NamespacedName{Namespace: set.Namespace, Name: set.Name}, fresh); err != nil { + return err + } + if fresh.UID != set.UID { + return fmt.Errorf("original AdvancedStatefulSet %v/%v is gone: got uid %v, wanted %v", set.Namespace, set.Name, fresh.UID, set.UID) + } + return ssc.control.AdoptOrphanRevisions(set, orphanRevisions) + } + return nil +} + +// getPodsForStatefulSet returns the Pods that a given AdvancedStatefulSet should manage. +// It also reconciles ControllerRef by adopting/orphaning. +// +// NOTE: Returned Pods are pointers to objects from the cache. +// +// If you need to modify one, you need to copy it first. +func (ssc *ReconcileStatefulSet) getPodsForStatefulSet(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, selector labels.Selector) ([]*v1.Pod, error) { + // List all pods to include the pods that don't match the selector anymore but + // has a ControllerRef pointing to this AdvancedStatefulSet. + pods, err := ssc.podLister.Pods(set.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + filter := func(pod *v1.Pod) bool { + // Only claim if it matches our AdvancedStatefulSet name. Otherwise release/ignore. + return isMemberOf(set, pod) + } + + // If any adoptions are attempted, we should first recheck for deletion with + // an uncached quorum read sometime after listing Pods (see #42639). 
+ canAdoptFunc := kubecontroller2.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) { + fresh := &workloadv1alpha1.AdvancedStatefulSet{} + if err := ssc.Get(ctx, types.NamespacedName{Namespace: set.Namespace, Name: set.Name}, fresh); err != nil { + return nil, err + } + if fresh.UID != set.UID { + return nil, fmt.Errorf("original AdvancedStatefulSet %v/%v is gone: got uid %v, wanted %v", set.Namespace, set.Name, fresh.UID, set.UID) + } + return fresh, nil + }) + + cm := kubecontroller2.NewPodControllerRefManager(ssc.podControl, set, selector, controllerKind, canAdoptFunc) + return cm.ClaimPods(ctx, pods, filter) +} + +// syncStatefulSet syncs a tuple of (advancedstatefulset, []*v1.Pod). +func (ssc *ReconcileStatefulSet) syncStatefulSet(ctx context.Context, set *workloadv1alpha1.AdvancedStatefulSet, pods []*v1.Pod) error { + klog.V(4).InfoS("Syncing AdvancedStatefulSet with pods", "statefulSet", klog.KObj(set), "podCount", len(pods)) + // TODO: investigate where we mutate the set during the update as it is not obvious. + if err := ssc.control.UpdateStatefulSet(ctx, set.DeepCopy(), pods); err != nil { + return err + } + klog.V(4).InfoS("Successfully synced AdvancedStatefulSet", "statefulSet", klog.KObj(set)) + return nil +} diff --git a/pkg/controllers/workload/v1alpha1/utils/api/asts.go b/pkg/controllers/workload/v1alpha1/utils/api/asts.go new file mode 100644 index 00000000000..3f68beab28d --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/api/asts.go @@ -0,0 +1,67 @@ +/* +Copyright 2025 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" +) + +// ParseRange parses the start and end value from a string like "1-3" +func ParseRange(s string) (start int, end int, err error) { + split := strings.Split(s, "-") + if len(split) != 2 { + return 0, 0, fmt.Errorf("invalid range %s", s) + } + start, err = strconv.Atoi(split[0]) + if err != nil { + return + } + end, err = strconv.Atoi(split[1]) + if err != nil { + return + } + if start > end { + return 0, 0, fmt.Errorf("invalid range %s", s) + } + return +} + +// GetReserveOrdinalIntSet returns a set of ints from parsed reserveOrdinal +func GetReserveOrdinalIntSet(r []intstr.IntOrString) sets.Set[int] { + values := sets.New[int]() + for _, elem := range r { + if elem.Type == intstr.Int { + values.Insert(int(elem.IntVal)) + } else { + start, end, err := ParseRange(elem.StrVal) + if err != nil { + klog.ErrorS(err, "invalid range reserveOrdinal found, an empty slice will be returned", "reserveOrdinal", elem.StrVal) + return nil + } + for i := start; i <= end; i++ { + values.Insert(i) + } + } + } + return values +} diff --git a/pkg/controllers/workload/v1alpha1/utils/client/client.go b/pkg/controllers/workload/v1alpha1/utils/client/client.go new file mode 100644 index 00000000000..b5363dbcbd3 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/client/client.go @@ -0,0 +1,39 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +func NewClientFromManager(mgr manager.Manager, name string) client.Client { + cfg := rest.CopyConfig(mgr.GetConfig()) + cfg.UserAgent = fmt.Sprintf("kruise-manager/%s", name) + + delegatingClient, _ := client.New(cfg, client.Options{ + Scheme: mgr.GetScheme(), + Cache: &client.CacheOptions{ + Reader: mgr.GetCache(), + Unstructured: true, + }, + }) + return delegatingClient +} diff --git a/pkg/controllers/workload/v1alpha1/utils/containermeta/env_hash.go b/pkg/controllers/workload/v1alpha1/utils/containermeta/env_hash.go new file mode 100644 index 00000000000..1b880f2ad8d --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/containermeta/env_hash.go @@ -0,0 +1,146 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package containermeta + +import ( + "encoding/json" + "hash/fnv" + "sort" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/fieldpath" + hashutil "k8s.io/kubernetes/pkg/util/hash" +) + +var ( + excludeEnvs = sets.NewString( + "SIDECARSET_VERSION", + "SIDECARSET_VERSION_ALT", + ) +) + +type EnvGetter func(key string) (string, error) + +type EnvFromMetadataHasher interface { + GetExpectHash(c *v1.Container, objMeta metav1.Object) uint64 + GetCurrentHash(c *v1.Container, getter EnvGetter) (uint64, error) +} + +type envFromMetadataHasher struct{} + +func NewEnvFromMetadataHasher() EnvFromMetadataHasher { + return &envFromMetadataHasher{} +} + +func (h *envFromMetadataHasher) GetExpectHash(c *v1.Container, objMeta metav1.Object) uint64 { + var envs []v1.EnvVar + for i := range c.Env { + if c.Env[i].Value != "" || c.Env[i].ValueFrom == nil || c.Env[i].ValueFrom.FieldRef == nil { + continue + } else if excludeEnvs.Has(c.Env[i].Name) { + continue + } + + // Currently only supports `metadata.labels['']`, `metadata.annotations['']` + path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(c.Env[i].ValueFrom.FieldRef.FieldPath) + if !ok { + continue + } + + env := v1.EnvVar{Name: c.Env[i].Name} + switch path { + case "metadata.annotations": + env.Value = objMeta.GetAnnotations()[subscript] + case "metadata.labels": + env.Value = objMeta.GetLabels()[subscript] + default: + continue + } + + envs = append(envs, env) + } + + sort.SliceStable(envs, func(i, j int) bool { + return envs[i].Name < envs[j].Name + }) + return hashEnvs(envs) +} + +func (h *envFromMetadataHasher) GetCurrentHash(c *v1.Container, getter EnvGetter) (uint64, error) { + var envs []v1.EnvVar + for i := range c.Env { + if c.Env[i].Value != "" || c.Env[i].ValueFrom == nil || c.Env[i].ValueFrom.FieldRef == nil { + continue + } else if excludeEnvs.Has(c.Env[i].Name) { + continue + } + + // Currently only supports 
`metadata.labels['']`, `metadata.annotations['']` + path, _, ok := fieldpath.SplitMaybeSubscriptedPath(c.Env[i].ValueFrom.FieldRef.FieldPath) + if !ok { + continue + } + + var err error + env := v1.EnvVar{Name: c.Env[i].Name} + switch path { + case "metadata.annotations", "metadata.labels": + env.Value, err = getter(c.Env[i].Name) + if err != nil { + return 0, err + } + default: + continue + } + + envs = append(envs, env) + } + + sort.SliceStable(envs, func(i, j int) bool { + return envs[i].Name < envs[j].Name + }) + return hashEnvs(envs), nil +} + +func hashEnvs(envs []v1.EnvVar) uint64 { + hash := fnv.New32a() + envsJSON, _ := json.Marshal(envs) + hashutil.DeepHashObject(hash, envsJSON) + return uint64(hash.Sum32()) +} + +func IsContainerReferenceToMeta(c *v1.Container, path, key string) bool { + for i := range c.Env { + if c.Env[i].Value != "" || c.Env[i].ValueFrom == nil || c.Env[i].ValueFrom.FieldRef == nil { + continue + } else if excludeEnvs.Has(c.Env[i].Name) { + continue + } + + reqPath, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(c.Env[i].ValueFrom.FieldRef.FieldPath) + if !ok { + continue + } + + if reqPath == path && subscript == key { + return true + } + } + return false +} diff --git a/pkg/controllers/workload/v1alpha1/utils/controllerhistory/controller_history.go b/pkg/controllers/workload/v1alpha1/utils/controllerhistory/controller_history.go new file mode 100644 index 00000000000..26f2ff15731 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/controllerhistory/controller_history.go @@ -0,0 +1,420 @@ +/* +Copyright 2017 The Kubernetes Authors. +Copyright 2024 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controllerhistory provides utilities for managing ControllerRevisions. +// Adapted from k8s.io/kubernetes/pkg/controller/history. +package controllerhistory + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "hash/fnv" + "sort" + "strconv" + + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/kubecontroller" + apps "k8s.io/api/apps/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + clientset "k8s.io/client-go/kubernetes" + appslisters "k8s.io/client-go/listers/apps/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/retry" + hashutil "k8s.io/kubernetes/pkg/util/hash" +) + +// ControllerRevisionHashLabel is the label used to indicate the hash value of a ControllerRevision's Data. +const ControllerRevisionHashLabel = "controller.kubernetes.io/hash" + +// ControllerRevisionName returns the Name for a ControllerRevision in the form prefix-hash. +func ControllerRevisionName(prefix string, hash string) string { + if len(prefix) > 223 { + prefix = prefix[:223] + } + return fmt.Sprintf("%s-%s", prefix, hash) +} + +// NewControllerRevision returns a ControllerRevision with a ControllerRef pointing to parent. 
+func NewControllerRevision(parent metav1.Object, + parentKind schema.GroupVersionKind, + templateLabels map[string]string, + data runtime.RawExtension, + revision int64, + collisionCount *int32) (*apps.ControllerRevision, error) { + labelMap := make(map[string]string) + for k, v := range templateLabels { + labelMap[k] = v + } + cr := &apps.ControllerRevision{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labelMap, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(parent, parentKind)}, + }, + Data: data, + Revision: revision, + } + hash := HashControllerRevision(cr, collisionCount) + cr.Name = ControllerRevisionName(parent.GetName(), hash) + cr.Labels[ControllerRevisionHashLabel] = hash + return cr, nil +} + +// HashControllerRevision hashes the contents of revision's Data using FNV hashing. +func HashControllerRevision(revision *apps.ControllerRevision, probe *int32) string { + hf := fnv.New32() + if len(revision.Data.Raw) > 0 { + hf.Write(revision.Data.Raw) + } + if revision.Data.Object != nil { + hashutil.DeepHashObject(hf, revision.Data.Object) + } + if probe != nil { + hf.Write([]byte(strconv.FormatInt(int64(*probe), 10))) + } + return rand.SafeEncodeString(fmt.Sprint(hf.Sum32())) +} + +// SortControllerRevisions sorts revisions by their Revision. +func SortControllerRevisions(revisions []*apps.ControllerRevision) { + sort.Stable(byRevision(revisions)) +} + +// EqualRevision returns true if lhs and rhs are equal. 
+func EqualRevision(lhs *apps.ControllerRevision, rhs *apps.ControllerRevision) bool { + var lhsHash, rhsHash *uint32 + if lhs == nil || rhs == nil { + return lhs == rhs + } + if hs, found := lhs.Labels[ControllerRevisionHashLabel]; found { + hash, err := strconv.ParseInt(hs, 10, 32) + if err == nil { + lhsHash = new(uint32) + *lhsHash = uint32(hash) + } + } + if hs, found := rhs.Labels[ControllerRevisionHashLabel]; found { + hash, err := strconv.ParseInt(hs, 10, 32) + if err == nil { + rhsHash = new(uint32) + *rhsHash = uint32(hash) + } + } + if lhsHash != nil && rhsHash != nil && *lhsHash != *rhsHash { + return false + } + return bytes.Equal(lhs.Data.Raw, rhs.Data.Raw) && apiequality.Semantic.DeepEqual(lhs.Data.Object, rhs.Data.Object) +} + +// FindEqualRevisions returns all ControllerRevisions in revisions that are equal to needle. +func FindEqualRevisions(revisions []*apps.ControllerRevision, needle *apps.ControllerRevision) []*apps.ControllerRevision { + var eq []*apps.ControllerRevision + for i := range revisions { + if EqualRevision(revisions[i], needle) { + eq = append(eq, revisions[i]) + } + } + return eq +} + +// byRevision implements sort.Interface to allow ControllerRevisions to be sorted by Revision. +type byRevision []*apps.ControllerRevision + +func (br byRevision) Len() int { return len(br) } + +func (br byRevision) Less(i, j int) bool { + if br[i].Revision == br[j].Revision { + if br[j].CreationTimestamp.Equal(&br[i].CreationTimestamp) { + return br[i].Name < br[j].Name + } + return br[j].CreationTimestamp.After(br[i].CreationTimestamp.Time) + } + return br[i].Revision < br[j].Revision +} + +func (br byRevision) Swap(i, j int) { br[i], br[j] = br[j], br[i] } + +// Interface provides an interface for management of a Controller's history. 
+type Interface interface { + ListControllerRevisions(parent metav1.Object, selector labels.Selector) ([]*apps.ControllerRevision, error) + CreateControllerRevision(parent metav1.Object, revision *apps.ControllerRevision, collisionCount *int32) (*apps.ControllerRevision, error) + DeleteControllerRevision(revision *apps.ControllerRevision) error + UpdateControllerRevision(revision *apps.ControllerRevision, newRevision int64) (*apps.ControllerRevision, error) + AdoptControllerRevision(parent metav1.Object, parentKind schema.GroupVersionKind, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) + ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) +} + +// NewHistory returns an instance of Interface that uses client to communicate with the API Server. +func NewHistory(client clientset.Interface, lister appslisters.ControllerRevisionLister) Interface { + return &realHistory{client, lister} +} + +// NewFakeHistory returns an instance of Interface for testing. 
+func NewFakeHistory(indexer cache.Indexer, lister appslisters.ControllerRevisionLister) Interface { + return &fakeHistory{indexer, lister} +} + +type realHistory struct { + client clientset.Interface + lister appslisters.ControllerRevisionLister +} + +func (rh *realHistory) ListControllerRevisions(parent metav1.Object, selector labels.Selector) ([]*apps.ControllerRevision, error) { + history, err := rh.lister.ControllerRevisions(parent.GetNamespace()).List(selector) + if err != nil { + return nil, err + } + var owned []*apps.ControllerRevision + for i := range history { + ref := metav1.GetControllerOfNoCopy(history[i]) + if ref == nil || ref.UID == parent.GetUID() { + owned = append(owned, history[i]) + } + } + return owned, err +} + +func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *apps.ControllerRevision, collisionCount *int32) (*apps.ControllerRevision, error) { + if collisionCount == nil { + return nil, fmt.Errorf("collisionCount should not be nil") + } + clone := revision.DeepCopy() + for { + hash := HashControllerRevision(revision, collisionCount) + clone.Name = ControllerRevisionName(parent.GetName(), hash) + ns := parent.GetNamespace() + created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone, metav1.CreateOptions{}) + if errors.IsAlreadyExists(err) { + exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(context.TODO(), clone.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + if bytes.Equal(exists.Data.Raw, clone.Data.Raw) { + return exists, nil + } + *collisionCount++ + continue + } + return created, err + } +} + +func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevision, newRevision int64) (*apps.ControllerRevision, error) { + clone := revision.DeepCopy() + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if clone.Revision == newRevision { + return nil + } + clone.Revision = newRevision + updated, updateErr := 
rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone, metav1.UpdateOptions{}) + if updateErr == nil { + return nil + } + if updated != nil { + clone = updated + } + if updated, err := rh.lister.ControllerRevisions(clone.Namespace).Get(clone.Name); err == nil { + clone = updated.DeepCopy() + } + return updateErr + }) + return clone, err +} + +func (rh *realHistory) DeleteControllerRevision(revision *apps.ControllerRevision) error { + return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(context.TODO(), revision.Name, metav1.DeleteOptions{}) +} + +type objectForPatch struct { + Metadata objectMetaForPatch `json:"metadata"` +} + +type objectMetaForPatch struct { + OwnerReferences []metav1.OwnerReference `json:"ownerReferences"` + UID types.UID `json:"uid"` +} + +func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind schema.GroupVersionKind, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { + blockOwnerDeletion := true + isController := true + if owner := metav1.GetControllerOfNoCopy(revision); owner != nil { + return nil, fmt.Errorf("attempt to adopt revision owned by %v", owner) + } + addControllerPatch := objectForPatch{ + Metadata: objectMetaForPatch{ + UID: revision.UID, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: parentKind.GroupVersion().String(), + Kind: parentKind.Kind, + Name: parent.GetName(), + UID: parent.GetUID(), + Controller: &isController, + BlockOwnerDeletion: &blockOwnerDeletion, + }}, + }, + } + patchBytes, err := json.Marshal(&addControllerPatch) + if err != nil { + return nil, err + } + return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(context.TODO(), revision.GetName(), + types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) +} + +func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { + dataBytes, err := 
kubecontroller.GenerateDeleteOwnerRefStrategicMergeBytes(revision.UID, []types.UID{parent.GetUID()}) + if err != nil { + return nil, err + } + released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(context.TODO(), revision.GetName(), + types.StrategicMergePatchType, dataBytes, metav1.PatchOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil, nil + } + if errors.IsInvalid(err) { + return nil, nil + } + } + return released, err +} + +// fakeHistory is used for testing +type fakeHistory struct { + indexer cache.Indexer + lister appslisters.ControllerRevisionLister +} + +func (fh *fakeHistory) ListControllerRevisions(parent metav1.Object, selector labels.Selector) ([]*apps.ControllerRevision, error) { + history, err := fh.lister.ControllerRevisions(parent.GetNamespace()).List(selector) + if err != nil { + return nil, err + } + var owned []*apps.ControllerRevision + for i := range history { + ref := metav1.GetControllerOf(history[i]) + if ref == nil || ref.UID == parent.GetUID() { + owned = append(owned, history[i]) + } + } + return owned, err +} + +func (fh *fakeHistory) addRevision(revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(revision) + if err != nil { + return nil, err + } + obj, found, err := fh.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if found { + foundRevision := obj.(*apps.ControllerRevision) + return foundRevision, errors.NewAlreadyExists(apps.Resource("controllerrevision"), revision.Name) + } + return revision, fh.indexer.Update(revision) +} + +func (fh *fakeHistory) CreateControllerRevision(parent metav1.Object, revision *apps.ControllerRevision, collisionCount *int32) (*apps.ControllerRevision, error) { + if collisionCount == nil { + return nil, fmt.Errorf("collisionCount should not be nil") + } + clone := revision.DeepCopy() + clone.Namespace = parent.GetNamespace() + for { + hash := 
HashControllerRevision(revision, collisionCount) + clone.Name = ControllerRevisionName(parent.GetName(), hash) + created, err := fh.addRevision(clone) + if errors.IsAlreadyExists(err) { + *collisionCount++ + continue + } + return created, err + } +} + +func (fh *fakeHistory) DeleteControllerRevision(revision *apps.ControllerRevision) error { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(revision) + if err != nil { + return err + } + obj, found, err := fh.indexer.GetByKey(key) + if err != nil { + return err + } + if !found { + return errors.NewNotFound(apps.Resource("controllerrevisions"), revision.Name) + } + return fh.indexer.Delete(obj) +} + +func (fh *fakeHistory) UpdateControllerRevision(revision *apps.ControllerRevision, newRevision int64) (*apps.ControllerRevision, error) { + clone := revision.DeepCopy() + clone.Revision = newRevision + return clone, fh.indexer.Update(clone) +} + +func (fh *fakeHistory) AdoptControllerRevision(parent metav1.Object, parentKind schema.GroupVersionKind, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { + if owner := metav1.GetControllerOf(revision); owner != nil { + return nil, fmt.Errorf("attempt to adopt revision owned by %v", owner) + } + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(revision) + if err != nil { + return nil, err + } + _, found, err := fh.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !found { + return nil, errors.NewNotFound(apps.Resource("controllerrevisions"), revision.Name) + } + clone := revision.DeepCopy() + clone.OwnerReferences = append(clone.OwnerReferences, *metav1.NewControllerRef(parent, parentKind)) + return clone, fh.indexer.Update(clone) +} + +func (fh *fakeHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(revision) + if err != nil { + return nil, err + } + _, found, err := fh.indexer.GetByKey(key) + if 
err != nil { + return nil, err + } + if !found { + return nil, nil + } + clone := revision.DeepCopy() + refs := clone.OwnerReferences + clone.OwnerReferences = nil + for i := range refs { + if refs[i].UID != parent.GetUID() { + clone.OwnerReferences = append(clone.OwnerReferences, refs[i]) + } + } + return clone, fh.indexer.Update(clone) +} diff --git a/pkg/controllers/workload/v1alpha1/utils/discovery/discovery.go b/pkg/controllers/workload/v1alpha1/utils/discovery/discovery.go new file mode 100644 index 00000000000..6c41a3332a3 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/discovery/discovery.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +var ( + internalScheme = runtime.NewScheme() + + errKindNotFound = fmt.Errorf("kind not found in group version resources") + backOff = wait.Backoff{ + Steps: 4, + Duration: 500 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, + } + + // defaultDiscoveryClient is the global discovery client set during initialization. 
+	defaultDiscoveryClient discovery.DiscoveryInterface
+)
+
+func init() {
+	utilruntime.Must(workloadv1alpha1.AddToScheme(internalScheme))
+}
+
+// Init sets the global discovery client used by DiscoverGVK.
+// Should be called once during controller setup with the cluster discovery client.
+func Init(dc discovery.DiscoveryInterface) {
+	defaultDiscoveryClient = dc
+}
+
+// DiscoverGVK reports whether the given GroupVersionKind is served by the
+// cluster's API server, retrying transient discovery errors with backoff.
+// It returns true when the kind is found, when no discovery client has been
+// configured via Init, or when discovery fails for reasons other than the
+// kind being absent (fail-open to avoid blocking on an unhealthy apiserver).
+func DiscoverGVK(gvk schema.GroupVersionKind) bool {
+	if defaultDiscoveryClient == nil {
+		return true
+	}
+
+	startTime := time.Now()
+	// Retry on every error: discovery may transiently fail on apiserver load.
+	err := retry.OnError(backOff, func(err error) bool { return true }, func() error {
+		resourceList, err := defaultDiscoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
+		if err != nil {
+			return err
+		}
+		for _, r := range resourceList.APIResources {
+			if r.Kind == gvk.Kind {
+				return nil
+			}
+		}
+		return errKindNotFound
+	})
+
+	if err != nil {
+		if err == errKindNotFound {
+			klog.InfoS("Not found kind in group version", "kind", gvk.Kind, "groupVersion", gvk.GroupVersion().String(), "cost", time.Since(startTime))
+			return false
+		}
+
+		// This might be caused by abnormal apiserver or etcd, ignore it
+		klog.ErrorS(err, "Failed to find resources in group version", "groupVersion", gvk.GroupVersion().String(), "cost", time.Since(startTime))
+	}
+
+	return true
+}
+
+// DiscoverObject reports whether the API kind of obj (resolved against the
+// package's internal scheme) is served by the cluster; objects not registered
+// in the scheme are treated as undiscoverable.
+func DiscoverObject(obj runtime.Object) bool {
+	gvk, err := apiutil.GVKForObject(obj, internalScheme)
+	if err != nil {
+		klog.ErrorS(err, "Not recognized object in scheme", "object", obj)
+		return false
+	}
+	return DiscoverGVK(gvk)
+}
diff --git a/pkg/controllers/workload/v1alpha1/utils/expectations/init.go b/pkg/controllers/workload/v1alpha1/utils/expectations/init.go
new file mode 100644
index 00000000000..52cd7bd52f6
--- /dev/null
+++ b/pkg/controllers/workload/v1alpha1/utils/expectations/init.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2020 The Kruise Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package expectations + +import ( + "flag" + "time" +) + +func init() { + flag.DurationVar(&ExpectationTimeout, "expectation-timeout", time.Minute*5, "The expectation timeout. Defaults 5min") +} + +var ExpectationTimeout time.Duration diff --git a/pkg/controllers/workload/v1alpha1/utils/expectations/resource_version_expectation.go b/pkg/controllers/workload/v1alpha1/utils/expectations/resource_version_expectation.go new file mode 100644 index 00000000000..fc11c21fc29 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/expectations/resource_version_expectation.go @@ -0,0 +1,121 @@ +/* +Copyright 2020 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package expectations + +import ( + "strconv" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type ResourceVersionExpectation interface { + Expect(obj metav1.Object) + Observe(obj metav1.Object) + IsSatisfied(obj metav1.Object) (bool, time.Duration) + Delete(obj metav1.Object) +} + +func NewResourceVersionExpectation() ResourceVersionExpectation { + return &realResourceVersionExpectation{objectVersions: make(map[types.UID]*objectCacheVersions, 100)} +} + +type realResourceVersionExpectation struct { + sync.Mutex + objectVersions map[types.UID]*objectCacheVersions +} + +type objectCacheVersions struct { + version string + firstUnsatisfiedTimestamp time.Time +} + +func (r *realResourceVersionExpectation) Expect(obj metav1.Object) { + r.Lock() + defer r.Unlock() + + expectations := r.objectVersions[obj.GetUID()] + if expectations == nil { + r.objectVersions[obj.GetUID()] = &objectCacheVersions{} + } + if isResourceVersionNewer(r.objectVersions[obj.GetUID()].version, obj.GetResourceVersion()) { + r.objectVersions[obj.GetUID()].version = obj.GetResourceVersion() + } +} + +func (r *realResourceVersionExpectation) Observe(obj metav1.Object) { + r.Lock() + defer r.Unlock() + + expectations := r.objectVersions[obj.GetUID()] + if expectations == nil { + return + } + if isResourceVersionNewer(r.objectVersions[obj.GetUID()].version, obj.GetResourceVersion()) { + delete(r.objectVersions, obj.GetUID()) + } +} + +func (r *realResourceVersionExpectation) IsSatisfied(obj metav1.Object) (bool, time.Duration) { + r.Lock() + defer r.Unlock() + + expectations := r.objectVersions[obj.GetUID()] + if expectations == nil { + return true, 0 + } + + if isResourceVersionNewer(r.objectVersions[obj.GetUID()].version, obj.GetResourceVersion()) { + delete(r.objectVersions, obj.GetUID()) + } + _, existing := r.objectVersions[obj.GetUID()] + if existing { + if r.objectVersions[obj.GetUID()].firstUnsatisfiedTimestamp.IsZero() { + 
r.objectVersions[obj.GetUID()].firstUnsatisfiedTimestamp = time.Now()
+		}
+
+		return false, time.Since(r.objectVersions[obj.GetUID()].firstUnsatisfiedTimestamp)
+	}
+
+	// The stale entry was deleted above, so the expectation is now satisfied.
+	return true, 0
+}
+
+// Delete drops any recorded expectation for the given object.
+func (r *realResourceVersionExpectation) Delete(obj metav1.Object) {
+	r.Lock()
+	defer r.Unlock()
+	delete(r.objectVersions, obj.GetUID())
+}
+
+// isResourceVersionNewer reports whether observedRV is at least as new as
+// cachedRV. An empty or non-numeric cached version always counts as outdated,
+// while a non-numeric observed version never counts as newer.
+func isResourceVersionNewer(cachedRV, observedRV string) bool {
+	if cachedRV == "" {
+		return true
+	}
+
+	cached, err := strconv.ParseUint(cachedRV, 10, 64)
+	if err != nil {
+		return true
+	}
+
+	observed, err := strconv.ParseUint(observedRV, 10, 64)
+	if err != nil {
+		return false
+	}
+
+	return observed >= cached
+}
diff --git a/pkg/controllers/workload/v1alpha1/utils/expectations/scale_expectations.go b/pkg/controllers/workload/v1alpha1/utils/expectations/scale_expectations.go
new file mode 100644
index 00000000000..a8d7d09df2f
--- /dev/null
+++ b/pkg/controllers/workload/v1alpha1/utils/expectations/scale_expectations.go
@@ -0,0 +1,133 @@
+package expectations
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// ScaleAction is the action of scale, like create and delete.
+type ScaleAction string
+
+const (
+	// Create action
+	Create ScaleAction = "create"
+	// Delete action
+	Delete ScaleAction = "delete"
+)
+
+// ScaleExpectations is an interface that allows users to set and wait on expectations of pods scale.
+type ScaleExpectations interface {
+	ExpectScale(controllerKey string, action ScaleAction, name string)
+	ObserveScale(controllerKey string, action ScaleAction, name string)
+	SatisfiedExpectations(controllerKey string) (bool, time.Duration, map[ScaleAction][]string)
+	DeleteExpectations(controllerKey string)
+	GetExpectations(controllerKey string) map[ScaleAction]sets.String
+}
+
+// NewScaleExpectations returns a common ScaleExpectations. 
+func NewScaleExpectations() ScaleExpectations { + return &realScaleExpectations{ + controllerCache: make(map[string]*realControllerScaleExpectations), + } +} + +type realScaleExpectations struct { + sync.Mutex + // key: parent key, workload namespace/name + controllerCache map[string]*realControllerScaleExpectations +} + +type realControllerScaleExpectations struct { + // item: name for this object + objsCache map[ScaleAction]sets.String + firstUnsatisfiedTimestamp time.Time +} + +func (r *realScaleExpectations) GetExpectations(controllerKey string) map[ScaleAction]sets.String { + r.Lock() + defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil { + return nil + } + + res := make(map[ScaleAction]sets.String, len(expectations.objsCache)) + for k, v := range expectations.objsCache { + res[k] = sets.NewString(v.List()...) + } + + return res +} + +func (r *realScaleExpectations) ExpectScale(controllerKey string, action ScaleAction, name string) { + r.Lock() + defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil { + expectations = &realControllerScaleExpectations{ + objsCache: make(map[ScaleAction]sets.String), + } + r.controllerCache[controllerKey] = expectations + } + + if s := expectations.objsCache[action]; s != nil { + s.Insert(name) + } else { + expectations.objsCache[action] = sets.NewString(name) + } +} + +func (r *realScaleExpectations) ObserveScale(controllerKey string, action ScaleAction, name string) { + r.Lock() + defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil { + return + } + + s := expectations.objsCache[action] + if s == nil { + return + } + s.Delete(name) + + for _, s := range expectations.objsCache { + if s.Len() > 0 { + return + } + } + delete(r.controllerCache, controllerKey) +} + +func (r *realScaleExpectations) SatisfiedExpectations(controllerKey string) (bool, time.Duration, map[ScaleAction][]string) { + r.Lock() + 
defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil { + return true, 0, nil + } + + for a, s := range expectations.objsCache { + if s.Len() > 0 { + if expectations.firstUnsatisfiedTimestamp.IsZero() { + expectations.firstUnsatisfiedTimestamp = time.Now() + } + return false, time.Since(expectations.firstUnsatisfiedTimestamp), map[ScaleAction][]string{a: s.List()} + } + } + + delete(r.controllerCache, controllerKey) + return true, 0, nil +} + +func (r *realScaleExpectations) DeleteExpectations(controllerKey string) { + r.Lock() + defer r.Unlock() + delete(r.controllerCache, controllerKey) +} diff --git a/pkg/controllers/workload/v1alpha1/utils/expectations/update_expectations.go b/pkg/controllers/workload/v1alpha1/utils/expectations/update_expectations.go new file mode 100644 index 00000000000..7c49d5949f9 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/expectations/update_expectations.go @@ -0,0 +1,138 @@ +/* +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package expectations + +import ( + "sync" + "time" + + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/revisionadapter" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// UpdateExpectations is an interface that allows users to set and wait on expectations of pods update. 
+type UpdateExpectations interface { + ExpectUpdated(controllerKey, revision string, obj metav1.Object) + ObserveUpdated(controllerKey, revision string, obj metav1.Object) + DeleteObject(controllerKey string, obj metav1.Object) + SatisfiedExpectations(controllerKey, revision string) (bool, time.Duration, []string) + DeleteExpectations(controllerKey string) +} + +// NewUpdateExpectations returns a common UpdateExpectations. +func NewUpdateExpectations(revisionAdapter revisionadapter.Interface) UpdateExpectations { + return &realUpdateExpectations{ + controllerCache: make(map[string]*realControllerUpdateExpectations), + revisionAdapter: revisionAdapter, + } +} + +type realUpdateExpectations struct { + sync.Mutex + // key: parent key, workload namespace/name + controllerCache map[string]*realControllerUpdateExpectations + // the impl of interface + revisionAdapter revisionadapter.Interface +} + +type realControllerUpdateExpectations struct { + // latest revision + revision string + // item: pod name for this revision + objsUpdated sets.String + firstUnsatisfiedTimestamp time.Time +} + +func (r *realUpdateExpectations) ExpectUpdated(controllerKey, revision string, obj metav1.Object) { + r.Lock() + defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil || expectations.revision != revision { + expectations = &realControllerUpdateExpectations{ + revision: revision, + objsUpdated: sets.NewString(), + } + r.controllerCache[controllerKey] = expectations + } + + expectations.objsUpdated.Insert(getKey(obj)) +} + +func (r *realUpdateExpectations) ObserveUpdated(controllerKey, revision string, obj metav1.Object) { + r.Lock() + defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil { + return + } + + if expectations.revision == revision && expectations.objsUpdated.Has(getKey(obj)) && r.revisionAdapter.EqualToRevisionHash(controllerKey, obj, revision) { + expectations.objsUpdated.Delete(getKey(obj)) + 
} + + if expectations.revision != revision || expectations.objsUpdated.Len() == 0 { + delete(r.controllerCache, controllerKey) + } +} + +func (r *realUpdateExpectations) DeleteObject(controllerKey string, obj metav1.Object) { + r.Lock() + defer r.Unlock() + + expectations := r.controllerCache[controllerKey] + if expectations == nil { + return + } + + expectations.objsUpdated.Delete(getKey(obj)) +} + +func (r *realUpdateExpectations) SatisfiedExpectations(controllerKey, revision string) (bool, time.Duration, []string) { + r.Lock() + defer r.Unlock() + + oldExpectations := r.controllerCache[controllerKey] + if oldExpectations == nil { + return true, 0, nil + } else if oldExpectations.revision != revision { + oldExpectations.firstUnsatisfiedTimestamp = time.Time{} + return true, 0, nil + } + + if oldExpectations.objsUpdated.Len() > 0 { + if oldExpectations.firstUnsatisfiedTimestamp.IsZero() { + oldExpectations.firstUnsatisfiedTimestamp = time.Now() + } + return false, time.Since(oldExpectations.firstUnsatisfiedTimestamp), oldExpectations.objsUpdated.List() + } + + oldExpectations.firstUnsatisfiedTimestamp = time.Time{} + return true, 0, oldExpectations.objsUpdated.List() +} + +func (r *realUpdateExpectations) DeleteExpectations(controllerKey string) { + r.Lock() + defer r.Unlock() + delete(r.controllerCache, controllerKey) +} + +func getKey(obj metav1.Object) string { + return obj.GetNamespace() + "/" + obj.GetName() +} diff --git a/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update.go b/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update.go new file mode 100644 index 00000000000..7510f133378 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update.go @@ -0,0 +1,448 @@ +/* +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inplaceupdate + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/podadapter" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/revisionadapter" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +var ( + containerImagePatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/image$") + containerResourcesPatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/resources/.*$") + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") + + Clock clock.Clock = clock.RealClock{} +) + +type RefreshResult struct { + RefreshErr error + DelayDuration time.Duration +} + +type UpdateResult struct { + InPlaceUpdate bool + UpdateErr error + DelayDuration time.Duration + NewResourceVersion string +} + +type UpdateOptions struct { + IgnoreVolumeClaimTemplatesHashDiff bool + + GracePeriodSeconds int32 + AdditionalFuncs []func(*v1.Pod) + + CalculateSpec func(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) *UpdateSpec + PatchSpecToPod func(pod *v1.Pod, spec *UpdateSpec, state 
*workloadv1alpha1.InPlaceUpdateState) (*v1.Pod, map[string]*v1.ResourceRequirements, error) + CheckPodUpdateCompleted func(pod *v1.Pod) error + CheckContainersUpdateCompleted func(pod *v1.Pod, state *workloadv1alpha1.InPlaceUpdateState) error + CheckPodNeedsBeUnready func(pod *v1.Pod, spec *UpdateSpec) bool + GetRevision func(rev *apps.ControllerRevision) string +} + +// Interface for managing pods in-place update. +type Interface interface { + CanUpdateInPlace(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) bool + Update(pod *v1.Pod, oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) UpdateResult + Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult +} + +// UpdateSpec records the images of containers which need to in-place update. +type UpdateSpec struct { + Revision string `json:"revision"` + + ContainerImages map[string]string `json:"containerImages,omitempty"` + ContainerRefMetadata map[string]metav1.ObjectMeta `json:"containerRefMetadata,omitempty"` + ContainerResources map[string]v1.ResourceRequirements `json:"containerResources,omitempty"` + MetaDataPatch []byte `json:"metaDataPatch,omitempty"` + UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"` + GraceSeconds int32 `json:"graceSeconds,omitempty"` + + OldTemplate *v1.PodTemplateSpec `json:"oldTemplate,omitempty"` + NewTemplate *v1.PodTemplateSpec `json:"newTemplate,omitempty"` +} + +func (u *UpdateSpec) VerticalUpdateOnly() bool { + return len(u.ContainerResources) > 0 && len(u.ContainerImages) == 0 && !u.UpdateEnvFromMetadata +} + +type realControl struct { + podAdapter podadapter.Adapter + revisionAdapter revisionadapter.Interface +} + +func New(c client.Client, revisionAdapter revisionadapter.Interface) Interface { + return &realControl{podAdapter: &podadapter.AdapterRuntimeClient{Client: c}, revisionAdapter: revisionAdapter} +} + +func NewForTypedClient(c clientset.Interface, revisionAdapter revisionadapter.Interface) Interface { + 
return &realControl{podAdapter: &podadapter.AdapterTypedClient{Client: c}, revisionAdapter: revisionAdapter} +} + +func NewForInformer(informer coreinformers.PodInformer, revisionAdapter revisionadapter.Interface) Interface { + return &realControl{podAdapter: &podadapter.AdapterInformer{PodInformer: informer}, revisionAdapter: revisionAdapter} +} + +func (c *realControl) Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult { + opts = SetOptionsDefaults(opts) + + // check if it is in grace period + if gracePeriod, _ := workloadv1alpha1.GetInPlaceUpdateGrace(pod); gracePeriod != "" { + delayDuration, err := c.finishGracePeriod(pod, opts) + if err != nil { + return RefreshResult{RefreshErr: err} + } + return RefreshResult{DelayDuration: delayDuration} + } + + if stateStr, ok := workloadv1alpha1.GetInPlaceUpdateState(pod); ok { + state := workloadv1alpha1.InPlaceUpdateState{} + if err := json.Unmarshal([]byte(stateStr), &state); err != nil { + return RefreshResult{RefreshErr: err} + } + + // check in-place updating has not completed yet + if checkErr := opts.CheckContainersUpdateCompleted(pod, &state); checkErr != nil { + klog.V(6).ErrorS(checkErr, "Check Pod in-place update not completed yet", "namespace", pod.Namespace, "name", pod.Name) + return RefreshResult{} + } + + // check if there are containers with lower-priority that have to in-place update in next batch + if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 || len(state.NextContainerResources) > 0 { + + // pre-check the previous updated containers + if checkErr := doPreCheckBeforeNext(pod, state.PreCheckBeforeNext); checkErr != nil { + klog.V(5).ErrorS(checkErr, "Pod in-place update pre-check not passed", "namespace", pod.Namespace, "name", pod.Name) + return RefreshResult{} + } + + // do update the next containers + if updated, err := c.updateNextBatch(pod, opts); err != nil { + return RefreshResult{RefreshErr: err} + } else if updated { + return RefreshResult{} + } + } + } 
+ + if !containsReadinessGate(pod) { + return RefreshResult{} + } + + newCondition := v1.PodCondition{ + Type: workloadv1alpha1.InPlaceUpdateReady, + Status: v1.ConditionTrue, + LastTransitionTime: metav1.NewTime(Clock.Now()), + } + if hasEqualCondition(pod, &newCondition) { + return RefreshResult{} + } + + // Do not retry on conflict; only update the condition for the checked Pod version. + // see https://github.com/openkruise/kruise/pull/2274 + err := c.updateCondition(pod, newCondition) + return RefreshResult{RefreshErr: err} +} + +// updateCondition update the given Pod's condition by updating status on a copy with the same ResourceVersion; +// no conflict retry or refetch is performed, so it only applies to this specific Pod version. +func (c *realControl) updateCondition(pod *v1.Pod, condition v1.PodCondition) error { + clone := pod.DeepCopy() + util.SetPodCondition(clone, condition) + // We only update the ready condition to False, and let Kubelet update it to True + if condition.Status == v1.ConditionFalse { + util.SetPodReadyCondition(clone) + } + return c.podAdapter.UpdatePodStatus(clone) +} + +func (c *realControl) finishGracePeriod(pod *v1.Pod, opts *UpdateOptions) (time.Duration, error) { + var delayDuration time.Duration + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + clone, err := c.podAdapter.GetPod(pod.Namespace, pod.Name) + if err != nil { + return err + } + + spec := UpdateSpec{} + updateSpecJSON, ok := workloadv1alpha1.GetInPlaceUpdateGrace(clone) + if !ok { + return nil + } + if err := json.Unmarshal([]byte(updateSpecJSON), &spec); err != nil { + return err + } + graceDuration := time.Second * time.Duration(spec.GraceSeconds) + + updateState := workloadv1alpha1.InPlaceUpdateState{} + updateStateJSON, ok := workloadv1alpha1.GetInPlaceUpdateState(clone) + if !ok { + return fmt.Errorf("pod has %s but %s not found", workloadv1alpha1.InPlaceUpdateGraceKey, workloadv1alpha1.InPlaceUpdateStateKey) + } + if err := 
json.Unmarshal([]byte(updateStateJSON), &updateState); err != nil { + return nil + } + + if !c.revisionAdapter.EqualToRevisionHash("", clone, spec.Revision) { + // If revision-hash has changed, just drop this GracePeriodSpec and go through the normal update process again. + workloadv1alpha1.RemoveInPlaceUpdateGrace(clone) + } else { + if span := time.Since(updateState.UpdateTimestamp.Time); span < graceDuration { + delayDuration = roundupSeconds(graceDuration - span) + return nil + } + + clone, _, err = opts.PatchSpecToPod(clone, &spec, &updateState) + if err != nil { + return err + } + workloadv1alpha1.RemoveInPlaceUpdateGrace(clone) + } + + _, err = c.podAdapter.UpdatePod(clone) + return err + }) + + return delayDuration, err +} + +func (c *realControl) updateNextBatch(pod *v1.Pod, opts *UpdateOptions) (bool, error) { + var updated bool + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + updated = false + clone, err := c.podAdapter.GetPod(pod.Namespace, pod.Name) + if err != nil { + return err + } + + state := workloadv1alpha1.InPlaceUpdateState{} + if stateStr, ok := workloadv1alpha1.GetInPlaceUpdateState(pod); !ok { + return nil + } else if err := json.Unmarshal([]byte(stateStr), &state); err != nil { + return err + } + + if len(state.NextContainerImages) == 0 && len(state.NextContainerRefMetadata) == 0 { + return nil + } + + spec := UpdateSpec{ + ContainerImages: state.NextContainerImages, + ContainerRefMetadata: state.NextContainerRefMetadata, + UpdateEnvFromMetadata: state.UpdateEnvFromMetadata, + ContainerResources: state.NextContainerResources, + } + clone, _, err = opts.PatchSpecToPod(clone, &spec, &state) + if err != nil { + return err + } + + updated = true + _, err = c.podAdapter.UpdatePod(clone) + return err + }) + return updated, err +} + +func (c *realControl) CanUpdateInPlace(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) bool { + opts = SetOptionsDefaults(opts) + return opts.CalculateSpec(oldRevision, 
newRevision, opts) != nil +} + +func (c *realControl) Update(pod *v1.Pod, oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) UpdateResult { + opts = SetOptionsDefaults(opts) + + // 1. calculate inplace update spec + spec := opts.CalculateSpec(oldRevision, newRevision, opts) + if spec == nil { + return UpdateResult{} + } + + // TODO(FillZpp): maybe we should check if the previous in-place update has completed + + // 2. update condition for pod with readiness-gate + // When only workload resources are updated, they are marked as not needing to remove traffic + if opts.CheckPodNeedsBeUnready(pod, spec) { + newCondition := v1.PodCondition{ + Type: workloadv1alpha1.InPlaceUpdateReady, + LastTransitionTime: metav1.NewTime(Clock.Now()), + Status: v1.ConditionFalse, + Reason: "StartInPlaceUpdate", + } + // ensure the condition is updated with conflict retry + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + clone, err := c.podAdapter.GetPod(pod.Namespace, pod.Name) + if err != nil { + return err + } + // The Pod from informer cache may be not synced as the latest, so we + // can't use hasEqualCondition() to compare with the newCondition here. + // More details: https://github.com/openkruise/kruise/issues/2276 + return c.updateCondition(clone, newCondition) + }) + if err != nil { + return UpdateResult{InPlaceUpdate: true, UpdateErr: err} + } + } + + // 3. 
update container images + newResourceVersion, err := c.updatePodInPlace(pod, spec, opts) + if err != nil { + return UpdateResult{InPlaceUpdate: true, UpdateErr: err} + } + + var delayDuration time.Duration + if opts.GracePeriodSeconds > 0 { + delayDuration = time.Second * time.Duration(opts.GracePeriodSeconds) + } + return UpdateResult{InPlaceUpdate: true, DelayDuration: delayDuration, NewResourceVersion: newResourceVersion} +} + +func (c *realControl) updatePodInPlace(pod *v1.Pod, spec *UpdateSpec, opts *UpdateOptions) (string, error) { + var newResourceVersion string + retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + clone, err := c.podAdapter.GetPod(pod.Namespace, pod.Name) + if err != nil { + return err + } + + // update new revision + c.revisionAdapter.WriteRevisionHash(clone, spec.Revision) + if clone.Annotations == nil { + clone.Annotations = map[string]string{} + } + for _, f := range opts.AdditionalFuncs { + f(clone) + } + + inPlaceUpdateState := workloadv1alpha1.InPlaceUpdateState{ + Revision: spec.Revision, + UpdateTimestamp: metav1.NewTime(Clock.Now()), + UpdateEnvFromMetadata: spec.UpdateEnvFromMetadata, + UpdateImages: len(spec.ContainerImages) > 0, + UpdateResources: len(spec.ContainerResources) > 0, + } + inPlaceUpdateStateJSON, _ := json.Marshal(inPlaceUpdateState) + clone.Annotations[workloadv1alpha1.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON) + delete(clone.Annotations, workloadv1alpha1.InPlaceUpdateStateKeyOld) + + if spec.GraceSeconds <= 0 { + clone, _, err = opts.PatchSpecToPod(clone, spec, &inPlaceUpdateState) + if err != nil { + return err + } + workloadv1alpha1.RemoveInPlaceUpdateGrace(clone) + } else { + inPlaceUpdateSpecJSON, _ := json.Marshal(spec) + clone.Annotations[workloadv1alpha1.InPlaceUpdateGraceKey] = string(inPlaceUpdateSpecJSON) + } + + // TODO: if only resources need to be updated, we maybe should not update the pod spec. because patch the resize-subresource is enough. 
+ newPod, updateErr := c.podAdapter.UpdatePod(clone) + if updateErr == nil { + newResourceVersion = newPod.ResourceVersion + } + return updateErr + }) + return newResourceVersion, retryErr +} + +// GetTemplateFromRevision returns the pod template parsed from ControllerRevision. +func GetTemplateFromRevision(revision *apps.ControllerRevision) (*v1.PodTemplateSpec, error) { + var patchObj *struct { + Spec struct { + Template v1.PodTemplateSpec `json:"template"` + } `json:"spec"` + } + if err := json.Unmarshal(revision.Data.Raw, &patchObj); err != nil { + return nil, err + } + return &patchObj.Spec.Template, nil +} + +// InjectReadinessGate injects InPlaceUpdateReady into pod.spec.readinessGates +func InjectReadinessGate(pod *v1.Pod) { + for _, r := range pod.Spec.ReadinessGates { + if r.ConditionType == workloadv1alpha1.InPlaceUpdateReady { + return + } + } + pod.Spec.ReadinessGates = append(pod.Spec.ReadinessGates, v1.PodReadinessGate{ConditionType: workloadv1alpha1.InPlaceUpdateReady}) +} + +func containsReadinessGate(pod *v1.Pod) bool { + for _, r := range pod.Spec.ReadinessGates { + if r.ConditionType == workloadv1alpha1.InPlaceUpdateReady { + return true + } + } + return false +} + +// GetCondition returns the InPlaceUpdateReady condition in Pod. 
+func GetCondition(pod *v1.Pod) *v1.PodCondition { + return util.GetCondition(pod, workloadv1alpha1.InPlaceUpdateReady) +} + +func roundupSeconds(d time.Duration) time.Duration { + if d%time.Second == 0 { + return d + } + return (d/time.Second + 1) * time.Second +} + +func doPreCheckBeforeNext(pod *v1.Pod, preCheck *workloadv1alpha1.InPlaceUpdatePreCheckBeforeNext) error { + if preCheck == nil { + return nil + } + for _, cName := range preCheck.ContainersRequiredReady { + cStatus := util.GetContainerStatus(cName, pod) + if cStatus == nil { + return fmt.Errorf("not found container %s in pod status", cName) + } + if !cStatus.Ready { + return fmt.Errorf("waiting container %s to be ready", cName) + } + } + return nil +} + +func hasEqualCondition(pod *v1.Pod, newCondition *v1.PodCondition) bool { + oldCondition := util.GetCondition(pod, newCondition.Type) + isEqual := oldCondition != nil && oldCondition.Status == newCondition.Status && + oldCondition.Reason == newCondition.Reason && oldCondition.Message == newCondition.Message + return isEqual +} diff --git a/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update_defaults.go b/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update_defaults.go new file mode 100644 index 00000000000..97b3de6796d --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update_defaults.go @@ -0,0 +1,485 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package inplaceupdate + +import ( + "encoding/json" + "fmt" + "hash/fnv" + "strconv" + "strings" + + utilcontainermeta "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/containermeta" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util" + "gomodules.xyz/jsonpatch/v2" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/klog/v2" + hashutil "k8s.io/kubernetes/pkg/util/hash" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +func SetOptionsDefaults(opts *UpdateOptions) *UpdateOptions { + if opts == nil { + opts = &UpdateOptions{} + } + + if opts.CalculateSpec == nil { + opts.CalculateSpec = defaultCalculateInPlaceUpdateSpec + } + + if opts.PatchSpecToPod == nil { + opts.PatchSpecToPod = defaultPatchUpdateSpecToPod + } + + if opts.CheckPodUpdateCompleted == nil { + opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompleted + } + + if opts.CheckContainersUpdateCompleted == nil { + opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompleted + } + + if opts.CheckPodNeedsBeUnready == nil { + opts.CheckPodNeedsBeUnready = defaultCheckPodNeedsBeUnready + } + + return opts +} + +// defaultPatchUpdateSpecToPod returns new pod that merges spec into old pod +func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *workloadv1alpha1.InPlaceUpdateState) (*v1.Pod, map[string]*v1.ResourceRequirements, error) { + klog.V(5).InfoS("Begin to in-place update pod", "namespace", pod.Namespace, "name", pod.Name, "spec", util.DumpJSON(spec), "state", util.DumpJSON(state)) + + state.NextContainerImages = make(map[string]string) + state.NextContainerRefMetadata = make(map[string]metav1.ObjectMeta) + state.NextContainerResources = make(map[string]v1.ResourceRequirements) + + if spec.MetaDataPatch != nil { + 
cloneBytes, _ := json.Marshal(pod) + modified, err := strategicpatch.StrategicMergePatch(cloneBytes, spec.MetaDataPatch, &v1.Pod{}) + if err != nil { + return nil, nil, err + } + pod = &v1.Pod{} + if err = json.Unmarshal(modified, pod); err != nil { + return nil, nil, err + } + } + + if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + + // prepare containers that should update this time and next time, according to their priorities + containersToUpdate := sets.NewString() + var highestPriority *int + var containersWithHighestPriority []string + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + _, existImage := spec.ContainerImages[c.Name] + _, existMetadata := spec.ContainerRefMetadata[c.Name] + _, existResource := spec.ContainerResources[c.Name] + if !existImage && !existMetadata && !existResource { + continue + } + priority := getContainerPriority(c) + if priority == nil { + containersToUpdate.Insert(c.Name) + } else if highestPriority == nil || *highestPriority < *priority { + highestPriority = priority + containersWithHighestPriority = []string{c.Name} + } else if *highestPriority == *priority { + containersWithHighestPriority = append(containersWithHighestPriority, c.Name) + } + } + for _, cName := range containersWithHighestPriority { + containersToUpdate.Insert(cName) + } + addMetadataSharedContainersToUpdate(pod, containersToUpdate, spec.ContainerRefMetadata) + + // DO NOT modify the fields in spec for it may have to retry on conflict in updatePodInPlace + + // update images and record current imageIDs for the containers to update + containersImageChanged := sets.NewString() + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + newImage, exists := spec.ContainerImages[c.Name] + if !exists { + continue + } + if containersToUpdate.Has(c.Name) { + pod.Spec.Containers[i].Image = newImage + containersImageChanged.Insert(c.Name) + } 
else {
			state.NextContainerImages[c.Name] = newImage
		}
	}
	// Record the current imageIDs of containers whose image changed, so that
	// update completion can later be detected by observing imageID changes
	// in pod.status.containerStatuses.
	for _, c := range pod.Status.ContainerStatuses {
		if containersImageChanged.Has(c.Name) {
			if state.LastContainerStatuses == nil {
				state.LastContainerStatuses = map[string]workloadv1alpha1.InPlaceUpdateContainerStatus{}
			}
			if cs, ok := state.LastContainerStatuses[c.Name]; !ok {
				state.LastContainerStatuses[c.Name] = workloadv1alpha1.InPlaceUpdateContainerStatus{ImageID: c.ImageID}
			} else {
				// BUGFIX: cs is a copy of the map value; mutating it alone lost
				// the update. Write the modified entry back into the map.
				// now just update imageID
				cs.ImageID = c.ImageID
				state.LastContainerStatuses[c.Name] = cs
			}
		}
	}

	// update annotations and labels for the containers to update
	for cName, objMeta := range spec.ContainerRefMetadata {
		if containersToUpdate.Has(cName) {
			for k, v := range objMeta.Labels {
				pod.Labels[k] = v
			}
			for k, v := range objMeta.Annotations {
				pod.Annotations[k] = v
			}
		} else {
			state.NextContainerRefMetadata[cName] = objMeta
		}
	}

	// add the containers that update this time into PreCheckBeforeNext, so that next containers can only
	// start to update when these containers have updated ready
	// TODO: currently we only support ContainersRequiredReady, not sure if we have to add ContainersPreferredReady in future
	if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 || len(state.NextContainerResources) > 0 {
		state.PreCheckBeforeNext = &workloadv1alpha1.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: containersToUpdate.List()}
	} else {
		state.PreCheckBeforeNext = nil
	}

	state.ContainerBatchesRecord = append(state.ContainerBatchesRecord, workloadv1alpha1.InPlaceUpdateContainerBatch{
		Timestamp:  metav1.NewTime(Clock.Now()),
		Containers: containersToUpdate.List(),
	})

	klog.V(5).InfoS("Decide to in-place update pod", "namespace", pod.Namespace, "name", pod.Name, "state", util.DumpJSON(state))

	inPlaceUpdateStateJSON, _ := json.Marshal(state)
	pod.Annotations[workloadv1alpha1.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON)
	return pod, nil, nil
}

// addMetadataSharedContainersToUpdate expands containersToUpdate (in place)
// with containers that share changed labels/annotations via
// containerRefMetadata, so they get updated in the same batch. It iterates to
// a fixed point because a newly added container may itself reference further
// shared keys (indirect sharing).
func addMetadataSharedContainersToUpdate(pod *v1.Pod, containersToUpdate sets.String, containerRefMetadata map[string]metav1.ObjectMeta) {
	labelsToUpdate := sets.NewString()
	annotationsToUpdate := sets.NewString()
	// NOTE: newToUpdate initially aliases containersToUpdate so the first
	// iteration collects keys referenced by all already-selected containers;
	// later iterations are a fresh set holding only newly added containers.
	newToUpdate := containersToUpdate
	// We need a for-loop to merge the indirect shared containers
	for newToUpdate.Len() > 0 {
		// Collect every label/annotation key referenced by the containers
		// added in the previous round.
		for _, cName := range newToUpdate.UnsortedList() {
			if objMeta, exists := containerRefMetadata[cName]; exists {
				for key := range objMeta.Labels {
					labelsToUpdate.Insert(key)
				}
				for key := range objMeta.Annotations {
					annotationsToUpdate.Insert(key)
				}
			}
		}
		newToUpdate = sets.NewString()

		// Any not-yet-selected container that references one of the collected
		// keys must be pulled into this update batch as well.
		for cName, objMeta := range containerRefMetadata {
			if containersToUpdate.Has(cName) {
				continue
			}
			for _, key := range labelsToUpdate.UnsortedList() {
				if _, exists := objMeta.Labels[key]; exists {
					klog.InfoS("Has to in-place update container with lower priority in Pod, for the label it shared has changed",
						"containerName", cName, "namespace", pod.Namespace, "name", pod.Name, "label", key)
					containersToUpdate.Insert(cName)
					newToUpdate.Insert(cName)
					break
				}
			}
			for _, key := range annotationsToUpdate.UnsortedList() {
				if _, exists := objMeta.Annotations[key]; exists {
					klog.InfoS("Has to in-place update container with lower priority in Pod, for the annotation it shared has changed",
						"containerName", cName, "namespace", pod.Namespace, "podName", pod.Name, "annotation", key)
					containersToUpdate.Insert(cName)
					newToUpdate.Insert(cName)
					break
				}
			}
		}
	}
}

// defaultCalculateInPlaceUpdateSpec calculates diff between old and update revisions.
// If the diff just contains replace operation of spec.containers[x].image, it will returns an UpdateSpec.
// Otherwise, it returns nil which means can not use in-place update.
+func defaultCalculateInPlaceUpdateSpec(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) *UpdateSpec { + if oldRevision == nil || newRevision == nil { + return nil + } + opts = SetOptionsDefaults(opts) + + patches, err := jsonpatch.CreatePatch(oldRevision.Data.Raw, newRevision.Data.Raw) + if err != nil { + return nil + } + + oldTemp, err := GetTemplateFromRevision(oldRevision) + if err != nil { + return nil + } + newTemp, err := GetTemplateFromRevision(newRevision) + if err != nil { + return nil + } + + updateSpec := &UpdateSpec{ + Revision: newRevision.Name, + ContainerImages: make(map[string]string), + ContainerResources: make(map[string]v1.ResourceRequirements), + ContainerRefMetadata: make(map[string]metav1.ObjectMeta), + GraceSeconds: opts.GracePeriodSeconds, + } + if opts.GetRevision != nil { + updateSpec.Revision = opts.GetRevision(newRevision) + } + + // all patches for podSpec can just update images in pod spec + var metadataPatches []jsonpatch.Operation + for _, op := range patches { + op.Path = strings.Replace(op.Path, "/spec/template", "", 1) + + if !strings.HasPrefix(op.Path, "/spec/") { + if strings.HasPrefix(op.Path, "/metadata/") { + metadataPatches = append(metadataPatches, op) + continue + } + return nil + } + + if op.Operation != "replace" { + return nil + } + if containerImagePatchRexp.MatchString(op.Path) { + // for example: /spec/containers/0/image + words := strings.Split(op.Path, "/") + idx, _ := strconv.Atoi(words[3]) + if len(oldTemp.Spec.Containers) <= idx { + return nil + } + updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = op.Value.(string) + continue + } + + return nil + } + + if len(metadataPatches) > 0 { + oldBytes, _ := json.Marshal(v1.Pod{ObjectMeta: oldTemp.ObjectMeta}) + newBytes, _ := json.Marshal(v1.Pod{ObjectMeta: newTemp.ObjectMeta}) + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldBytes, newBytes, &v1.Pod{}) + if err != nil { + return nil + } + updateSpec.MetaDataPatch = 
patchBytes + } + + return updateSpec +} + +// DefaultCheckInPlaceUpdateCompleted checks whether imageID in pod status has been changed since in-place update. +// If the imageID in containerStatuses has not been changed, we assume that kubelet has not updated +// containers in Pod. +func DefaultCheckInPlaceUpdateCompleted(pod *v1.Pod) error { + if _, isInGraceState := workloadv1alpha1.GetInPlaceUpdateGrace(pod); isInGraceState { + return fmt.Errorf("still in grace period of in-place update") + } + + inPlaceUpdateState := workloadv1alpha1.InPlaceUpdateState{} + if stateStr, ok := workloadv1alpha1.GetInPlaceUpdateState(pod); !ok { + return nil + } else if err := json.Unmarshal([]byte(stateStr), &inPlaceUpdateState); err != nil { + return err + } + if len(inPlaceUpdateState.NextContainerImages) > 0 || len(inPlaceUpdateState.NextContainerRefMetadata) > 0 || len(inPlaceUpdateState.NextContainerResources) > 0 { + return fmt.Errorf("existing containers to in-place update in next batches") + } + return defaultCheckContainersInPlaceUpdateCompleted(pod, &inPlaceUpdateState) +} + +func defaultCheckContainersInPlaceUpdateCompleted(pod *v1.Pod, inPlaceUpdateState *workloadv1alpha1.InPlaceUpdateState) error { + runtimeContainerMetaSet, err := workloadv1alpha1.GetRuntimeContainerMetaSet(pod) + if err != nil { + return err + } + + if inPlaceUpdateState.UpdateEnvFromMetadata { + if runtimeContainerMetaSet == nil { + return fmt.Errorf("waiting for all containers hash consistent, but runtime-container-meta not found") + } + if !checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, extractedEnvFromMetadataHash) { + return fmt.Errorf("waiting for all containers hash consistent") + } + } + + // only UpdateResources, we check resources in status updated + + if runtimeContainerMetaSet != nil { + metaHashType := plainHash + if checkAllContainersHashConsistent(pod, runtimeContainerMetaSet, metaHashType) { + klog.V(5).InfoS("Check Pod in-place update completed for all container hash 
consistent", "namespace", pod.Namespace, "name", pod.Name) + return nil + } + // If it needs not to update envs from metadata, we don't have to return error here, + // in case kruise-daemon has broken for some reason and runtime-container-meta is still in an old version. + } + + containerImages := make(map[string]string, len(pod.Spec.Containers)) + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + containerImages[c.Name] = c.Image + if len(strings.Split(c.Image, ":")) <= 1 { + containerImages[c.Name] = fmt.Sprintf("%s:latest", c.Image) + } + } + + for _, cs := range pod.Status.ContainerStatuses { + if oldStatus, ok := inPlaceUpdateState.LastContainerStatuses[cs.Name]; ok { + // TODO: we assume that users should not update workload template with new image which actually has the same imageID as the old image + if oldStatus.ImageID == cs.ImageID { + if containerImages[cs.Name] != cs.Image { + return fmt.Errorf("container %s imageID not changed", cs.Name) + } + } + delete(inPlaceUpdateState.LastContainerStatuses, cs.Name) + } + } + + if len(inPlaceUpdateState.LastContainerStatuses) > 0 { + return fmt.Errorf("not found statuses of containers %v", inPlaceUpdateState.LastContainerStatuses) + } + + return nil +} + +type hashType string + +const ( + plainHash hashType = "PlainHash" + extractedEnvFromMetadataHash hashType = "ExtractedEnvFromMetadataHash" +) + +// The requirements for hash consistent: +// 1. all containers in spec.containers should also be in status.containerStatuses and runtime-container-meta +// 2. all containers in status.containerStatuses and runtime-container-meta should have the same containerID +// 3. 
all containers in spec.containers and runtime-container-meta should have the same hashes +func checkAllContainersHashConsistent(pod *v1.Pod, runtimeContainerMetaSet *workloadv1alpha1.RuntimeContainerMetaSet, hashType hashType) bool { + for i := range pod.Spec.Containers { + containerSpec := &pod.Spec.Containers[i] + + var containerStatus *v1.ContainerStatus + for j := range pod.Status.ContainerStatuses { + if pod.Status.ContainerStatuses[j].Name == containerSpec.Name { + containerStatus = &pod.Status.ContainerStatuses[j] + break + } + } + if containerStatus == nil { + klog.InfoS("Find no container in status for Pod", "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name) + return false + } + + var containerMeta *workloadv1alpha1.RuntimeContainerMeta + for i := range runtimeContainerMetaSet.Containers { + if runtimeContainerMetaSet.Containers[i].Name == containerSpec.Name { + containerMeta = &runtimeContainerMetaSet.Containers[i] + continue + } + } + if containerMeta == nil { + klog.InfoS("Find no container in runtime-container-meta for Pod", "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name) + return false + } + + if containerMeta.ContainerID != containerStatus.ContainerID { + klog.InfoS("Find container in runtime-container-meta for Pod has different containerID with status", + "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name, + "metaID", containerMeta.ContainerID, "statusID", containerStatus.ContainerID) + return false + } + + switch hashType { + case plainHash: + isConsistentInNewVersion := hashContainer(containerSpec) == containerMeta.Hashes.PlainHash + isConsistentInOldVersion := hashContainer(containerSpec) == containerMeta.Hashes.PlainHash + if !isConsistentInNewVersion && !isConsistentInOldVersion { + klog.InfoS("Find container in runtime-container-meta for Pod has different plain hash with spec", + "containerName", containerSpec.Name, "namespace", 
pod.Namespace, "podName", pod.Name, + "metaHash", containerMeta.Hashes.PlainHash, "expectedHashInNewVersion", hashContainer(containerSpec), "expectedHashInOldVersion", hashContainer(containerSpec)) + return false + } + case extractedEnvFromMetadataHash: + hasher := utilcontainermeta.NewEnvFromMetadataHasher() + if expectedHash := hasher.GetExpectHash(containerSpec, pod); containerMeta.Hashes.ExtractedEnvFromMetadataHash != expectedHash { + klog.InfoS("Find container in runtime-container-meta for Pod has different extractedEnvFromMetadataHash with spec", + "containerName", containerSpec.Name, "namespace", pod.Namespace, "podName", pod.Name, + "metaHash", containerMeta.Hashes.ExtractedEnvFromMetadataHash, "expectedHash", expectedHash) + return false + } + } + } + + return true +} + +// hashContainer copy from kubelet v1.31- +// in 1.31+, kubeletcontainer.HashContainer will only pick some fields to hash +// in order to be compatible with 1.31 and earlier, here is the implementation of kubeletcontainer.HashContainer(1.31-) copied. +func hashContainer(container *v1.Container) uint64 { + hash := fnv.New32a() + // Omit nil or empty field when calculating hash value + // Please see https://github.com/kubernetes/kubernetes/issues/53644 + containerJSON, _ := json.Marshal(container) + hashutil.DeepHashObject(hash, containerJSON) + return uint64(hash.Sum32()) +} + +const ( + cpuMask = 1 + memMask = 2 +) + +func defaultCheckPodNeedsBeUnready(pod *v1.Pod, spec *UpdateSpec) bool { + return containsReadinessGate(pod) +} + +// getContainerPriority returns the container launch priority if set via ContainerLaunchBarrierEnvName env. 
+func getContainerPriority(c *v1.Container) *int { + const priorityStartIndex = 2 + for _, e := range c.Env { + if e.Name == workloadv1alpha1.ContainerLaunchBarrierEnvName { + p, _ := strconv.Atoi(e.ValueFrom.ConfigMapKeyRef.Key[priorityStartIndex:]) + return &p + } + } + return nil +} diff --git a/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update_vertical.go b/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update_vertical.go new file mode 100644 index 00000000000..abc73dca4f3 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/inplaceupdate/inplace_update_vertical.go @@ -0,0 +1,329 @@ +/* +Copyright 2023 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inplaceupdate + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/google/go-cmp/cmp" + "gomodules.xyz/jsonpatch/v2" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// interface for In-place workload vertical scaling +type VerticalUpdateInterface interface { + // UpdateInplaceUpdateMetadata validates and applies the resource patch to the UpdateSpec. + UpdateInplaceUpdateMetadata(op *jsonpatch.Operation, oldTemp *v1.PodTemplateSpec, updateSpec *UpdateSpec) error + + // IsPodQoSChanged check whether the pod qos has changed + IsPodQoSChanged(oldTemp, newTemp *v1.PodTemplateSpec) bool + + // UpdateResource some or all containers of a pod can be updated at once within this interface. 
+ // container or pod level resources can be updated by this interface + UpdateResource(pod *v1.Pod, expectedResources map[string]*v1.ResourceRequirements) + // IsUpdateCompleted To determine whether the pod has been successfully vertical updated + IsUpdateCompleted(pod *v1.Pod) (bool, error) + // GenerateResourcePatch generate the resource patch for the pod + GenerateResourcePatch(pod *v1.Pod, expectedResources map[string]*v1.ResourceRequirements) []byte +} + +var verticalUpdateImpl VerticalUpdateInterface = nil + +// To register vertical update operations, +// you can register different vertical update implementations here +func init() { + // Now, we assume that there is a single standard per cluster, so register in init() + // TODO(Abner-1): Perhaps we should dynamically select the verticalUpdateImpl based on the pod metadata being processed. + // give us more suggestions if you need + if verticalUpdateImpl == nil { + verticalUpdateImpl = &NativeVerticalUpdate{} + } +} + +func GetNativeVerticalUpdateImpl() *NativeVerticalUpdate { + return verticalUpdateImpl.(*NativeVerticalUpdate) +} + +// NativeVerticalUpdate represents the vertical scaling of k8s standard +type NativeVerticalUpdate struct{} + +var _ VerticalUpdateInterface = &NativeVerticalUpdate{} + +func (v *NativeVerticalUpdate) UpdateInplaceUpdateMetadata(op *jsonpatch.Operation, oldTemp *v1.PodTemplateSpec, updateSpec *UpdateSpec) error { + // for example: /spec/containers/0/resources/limits/cpu + words := strings.Split(op.Path, "/") + if len(words) != 7 { + return fmt.Errorf("invalid resource path: %s", op.Path) + } + idx, err := strconv.Atoi(words[3]) + if err != nil || len(oldTemp.Spec.Containers) <= idx { + return fmt.Errorf("invalid container index: %s", op.Path) + } + if op.Operation == "remove" || op.Operation == "add" { + // Before k8s 1.32, we can not resize resources for a container with no limit or request + // TODO(Abner-1) change it if 1.32 released and allowing this operation + return 
errors.New("can not add or remove resources")
	}

	if op.Value == nil {
		return errors.New("json patch value is nil")
	}
	// BUGFIX: op.Value is an any decoded from JSON and may be a number rather
	// than a string (resource.Quantity serializes either way); a bare
	// .(string) assertion panicked in that case. Fall back to formatting the
	// value before parsing.
	strValue, isString := op.Value.(string)
	if !isString {
		strValue = fmt.Sprintf("%v", op.Value)
	}
	quantity, err := resource.ParseQuantity(strValue)
	if err != nil {
		return fmt.Errorf("parse quantity error: %v", err)
	}

	if !v.CanResourcesResizeInPlace(words[6]) {
		return fmt.Errorf("disallowed inplace update resource: %s", words[6])
	}

	cName := oldTemp.Spec.Containers[idx].Name
	if _, ok := updateSpec.ContainerResources[cName]; !ok {
		updateSpec.ContainerResources[cName] = v1.ResourceRequirements{
			Limits:   make(v1.ResourceList),
			Requests: make(v1.ResourceList),
		}
	}
	// words[5] selects the resource class, words[6] the resource name
	// (e.g. /spec/containers/0/resources/limits/cpu).
	switch words[5] {
	case "limits":
		updateSpec.ContainerResources[cName].Limits[v1.ResourceName(words[6])] = quantity
	case "requests":
		updateSpec.ContainerResources[cName].Requests[v1.ResourceName(words[6])] = quantity
	}
	return nil
}

// IsPodQoSChanged reports whether the pod-level QoS class differs between the
// old and new templates.
func (v *NativeVerticalUpdate) IsPodQoSChanged(oldTemp, newTemp *v1.PodTemplateSpec) bool {
	oldPod := &v1.Pod{
		Spec: oldTemp.Spec,
	}
	newPod := &v1.Pod{
		Spec: newTemp.Spec,
	}
	return getPodQOS(oldPod) != getPodQOS(newPod)
}

// getPodQOS returns the QoS class of a pod, simplified version
func getPodQOS(pod *v1.Pod) v1.PodQOSClass {
	if pod.Status.QOSClass != "" {
		return pod.Status.QOSClass
	}
	return computePodQOS(pod)
}

// computePodQOS computes the QoS class of a pod based on its resource requests/limits
func computePodQOS(pod *v1.Pod) v1.PodQOSClass {
	requests := v1.ResourceList{}
	limits := v1.ResourceList{}
	zeroQuantity := resource.MustParse("0")
	isGuaranteed := true
	allContainers := []v1.Container{}
	allContainers = append(allContainers, pod.Spec.Containers...)
	allContainers = append(allContainers, pod.Spec.InitContainers...)
+ for _, container := range allContainers { + // process requests + for name, quantity := range container.Resources.Requests { + if name == v1.ResourceCPU || name == v1.ResourceMemory { + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.DeepCopy() + if _, exists := requests[name]; !exists { + requests[name] = delta + } else { + existing := requests[name] + existing.Add(delta) + requests[name] = existing + } + } + } + } + // process limits + for name, quantity := range container.Resources.Limits { + if name == v1.ResourceCPU || name == v1.ResourceMemory { + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.DeepCopy() + if _, exists := limits[name]; !exists { + limits[name] = delta + } else { + existing := limits[name] + existing.Add(delta) + limits[name] = existing + } + } + } + } + if len(container.Resources.Limits) == 0 { + isGuaranteed = false + continue + } + for name, quantity := range container.Resources.Requests { + if name == v1.ResourceCPU || name == v1.ResourceMemory { + limitQuantity, exists := container.Resources.Limits[name] + if !exists || limitQuantity.Cmp(quantity) != 0 { + isGuaranteed = false + } + } + } + for name := range container.Resources.Limits { + if name == v1.ResourceCPU || name == v1.ResourceMemory { + if _, exists := container.Resources.Requests[name]; !exists { + isGuaranteed = false + } + } + } + } + if len(requests) == 0 && len(limits) == 0 { + return v1.PodQOSBestEffort + } + if isGuaranteed { + return v1.PodQOSGuaranteed + } + return v1.PodQOSBurstable +} + +// updateContainerResource implements vertical updates by directly modifying the container's resources, +// conforming to the k8s community standard +func (v *NativeVerticalUpdate) updateContainerResource(container *v1.Container, newResource *v1.ResourceRequirements) { + if container == nil || newResource == nil { + return + } + for key, quantity := range newResource.Limits { + if !v.CanResourcesResizeInPlace(string(key)) { + continue + } + 
container.Resources.Limits[key] = quantity + } + for key, quantity := range newResource.Requests { + if !v.CanResourcesResizeInPlace(string(key)) { + continue + } + container.Resources.Requests[key] = quantity + } +} + +// isContainerUpdateCompleted directly determines whether the current container is vertically updated by the spec and status of the container, +// which conforms to the k8s community standard +func (v *NativeVerticalUpdate) isContainerUpdateCompleted(container *v1.Container, containerStatus *v1.ContainerStatus) bool { + if containerStatus == nil || containerStatus.Resources == nil || container == nil { + return false + } + if !cmp.Equal(container.Resources.Limits, containerStatus.Resources.Limits) || + !cmp.Equal(container.Resources.Requests, containerStatus.Resources.Requests) { + return false + } + return true +} + +func (v *NativeVerticalUpdate) UpdateResource(pod *v1.Pod, expectedResources map[string]*v1.ResourceRequirements) { + if len(expectedResources) == 0 { + // pod level hook, ignore in native implementation + return + } + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + newResource, resourceExists := expectedResources[c.Name] + if !resourceExists { + continue + } + v.updateContainerResource(c, newResource) + } + return +} + +func (v *NativeVerticalUpdate) GenerateResourcePatch(pod *v1.Pod, expectedResources map[string]*v1.ResourceRequirements) []byte { + var patchContainers []string + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + newResource, resourceExists := expectedResources[c.Name] + if !resourceExists { + continue + } + resources := v1.ResourceRequirements{} + for key, quantity := range newResource.Limits { + if !v.CanResourcesResizeInPlace(string(key)) { + continue + } + if resources.Limits == nil { + resources.Limits = make(v1.ResourceList) + } + if pod.Spec.Containers[i].Resources.Limits != nil && pod.Spec.Containers[i].Resources.Limits[key].Equal(quantity) { + continue + } + 
resources.Limits[key] = quantity + } + for key, quantity := range newResource.Requests { + if !v.CanResourcesResizeInPlace(string(key)) { + continue + } + if resources.Requests == nil { + resources.Requests = make(v1.ResourceList) + } + if pod.Spec.Containers[i].Resources.Requests != nil && pod.Spec.Containers[i].Resources.Requests[key].Equal(quantity) { + continue + } + resources.Requests[key] = quantity + } + if len(resources.Limits) > 0 || len(resources.Requests) > 0 { + resourcePatch, _ := json.Marshal(resources) + patchContainers = append(patchContainers, fmt.Sprintf(`{"name":"%s","resources":%s}`, c.Name, string(resourcePatch))) + } + } + if len(patchContainers) == 0 { + return nil + } + return []byte(fmt.Sprintf(`{"spec":{"containers":[%s]}}`, strings.Join(patchContainers, ","))) +} + +func (v *NativeVerticalUpdate) IsUpdateCompleted(pod *v1.Pod) (bool, error) { + containers := make(map[string]*v1.Container, len(pod.Spec.Containers)) + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + containers[c.Name] = c + } + if len(pod.Status.ContainerStatuses) != len(containers) { + return false, fmt.Errorf("some container status is not reported") + } + for _, cs := range pod.Status.ContainerStatuses { + if !v.isContainerUpdateCompleted(containers[cs.Name], &cs) { + return false, fmt.Errorf("container %s resources not changed", cs.Name) + } + } + return true, nil +} + +// only cpu and memory are allowed to be inplace updated +var allowedResizeResourceKey = map[string]bool{ + string(v1.ResourceCPU): true, + string(v1.ResourceMemory): true, +} + +func (v *NativeVerticalUpdate) CanResourcesResizeInPlace(resourceKey string) bool { + allowed, exist := allowedResizeResourceKey[resourceKey] + return exist && allowed +} + +// Internal implementation of vertical updates +// type VerticalUpdateInternal struct{} + +// var _ VerticalUpdateInterface = &VerticalUpdateInternal{} diff --git 
a/pkg/controllers/workload/v1alpha1/utils/kubecontroller/controller_ref_manager.go b/pkg/controllers/workload/v1alpha1/utils/kubecontroller/controller_ref_manager.go new file mode 100644 index 00000000000..e83ce4649ea --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/kubecontroller/controller_ref_manager.go @@ -0,0 +1,275 @@ +/* +Copyright 2016 The Kubernetes Authors. +Copyright 2024 The Fluid Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubecontroller + +import ( + "context" + "encoding/json" + "fmt" + "sync" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" +) + +// BaseControllerRefManager provides common functionality for controller ref managers. +type BaseControllerRefManager struct { + Controller metav1.Object + Selector labels.Selector + + canAdoptErr error + canAdoptOnce sync.Once + CanAdoptFunc func(ctx context.Context) error +} + +// CanAdopt checks whether the controller can adopt the object. +func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error { + m.canAdoptOnce.Do(func() { + if m.CanAdoptFunc != nil { + m.canAdoptErr = m.CanAdoptFunc(ctx) + } + }) + return m.canAdoptErr +} + +// ClaimObject tries to take ownership of an object for this controller. 
+func (m *BaseControllerRefManager) ClaimObject(ctx context.Context, obj metav1.Object, match func(metav1.Object) bool, adopt, release func(context.Context, metav1.Object) error) (bool, error) {
+	controllerRef := metav1.GetControllerOfNoCopy(obj)
+	if controllerRef != nil {
+		// The object already has a controller owner reference.
+		if controllerRef.UID != m.Controller.GetUID() {
+			// Owned by a different controller: never touch it.
+			return false, nil
+		}
+		if match(obj) {
+			// Owned by us and still matching the selector/filters: keep it.
+			return true, nil
+		}
+		// Owned by us but no longer matching: release it — unless this
+		// controller is itself being deleted, in which case we make no changes.
+		if m.Controller.GetDeletionTimestamp() != nil {
+			return false, nil
+		}
+		if err := release(ctx, obj); err != nil {
+			// The object vanished concurrently; treat it as already released.
+			if errors.IsNotFound(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		// Successfully released; no longer ours.
+		return false, nil
+	}
+
+	// Orphan object: adopt only if this controller is not being deleted
+	// and the object matches.
+	if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
+		return false, nil
+	}
+	// Do not adopt an object that is itself being deleted.
+	if obj.GetDeletionTimestamp() != nil {
+		return false, nil
+	}
+
+	// A namespaced controller must not adopt objects from other namespaces.
+	if len(m.Controller.GetNamespace()) > 0 && m.Controller.GetNamespace() != obj.GetNamespace() {
+		return false, nil
+	}
+
+	if err := adopt(ctx, obj); err != nil {
+		// The object was deleted before adoption could complete; ignore it.
+		if errors.IsNotFound(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	// Successfully adopted.
+	return true, nil
+}
+
+// PodControllerRefManager manages the controllerRef of pods.
+type PodControllerRefManager struct {
+	BaseControllerRefManager
+	controllerKind schema.GroupVersionKind
+	podControl PodControlInterface
+	finalizers []string
+}
+
+// NewPodControllerRefManager returns a PodControllerRefManager.
+// canAdopt is evaluated lazily (and only once) before the first adoption;
+// finalizers, if given, are added on adopt and removed on release.
+func NewPodControllerRefManager(
+	podControl PodControlInterface,
+	controller metav1.Object,
+	selector labels.Selector,
+	controllerKind schema.GroupVersionKind,
+	canAdopt func(ctx context.Context) error,
+	finalizers ...string,
+) *PodControllerRefManager {
+	return &PodControllerRefManager{
+		BaseControllerRefManager: BaseControllerRefManager{
+			Controller: controller,
+			Selector: selector,
+			CanAdoptFunc: canAdopt,
+		},
+		controllerKind: controllerKind,
+		podControl: podControl,
+		finalizers: finalizers,
+	}
+}
+
+// ClaimPods tries to take ownership of a list of Pods.
+func (m *PodControllerRefManager) ClaimPods(ctx context.Context, pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) { + var claimed []*v1.Pod + var errlist []error + + match := func(obj metav1.Object) bool { + pod := obj.(*v1.Pod) + if !m.Selector.Matches(labels.Set(pod.Labels)) { + return false + } + for _, filter := range filters { + if !filter(pod) { + return false + } + } + return true + } + adopt := func(ctx context.Context, obj metav1.Object) error { + return m.AdoptPod(ctx, obj.(*v1.Pod)) + } + release := func(ctx context.Context, obj metav1.Object) error { + return m.ReleasePod(ctx, obj.(*v1.Pod)) + } + + for _, pod := range pods { + ok, err := m.ClaimObject(ctx, pod, match, adopt, release) + if err != nil { + errlist = append(errlist, err) + continue + } + if ok { + claimed = append(claimed, pod) + } + } + return claimed, utilerrors.NewAggregate(errlist) +} + +// AdoptPod sends a patch to take control of the pod. +func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) error { + if err := m.CanAdopt(ctx); err != nil { + return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err) + } + addControllerPatch, err := ownerRefControllerPatch(m.Controller, m.controllerKind, pod.UID, m.finalizers...) + if err != nil { + return err + } + return m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, addControllerPatch) +} + +// ReleasePod sends a patch to free the pod from the control of this controller. +func (m *PodControllerRefManager) ReleasePod(ctx context.Context, pod *v1.Pod) error { + logger := klog.FromContext(ctx) + logger.V(2).Info("Patching pod to remove its controllerRef", "pod", klog.KObj(pod), "gvk", m.controllerKind, "controller", m.Controller.GetName()) + patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(pod.UID, []types.UID{m.Controller.GetUID()}, m.finalizers...) 
+ if err != nil { + return err + } + err = m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + if errors.IsInvalid(err) { + return nil + } + } + return err +} + +// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion. +func RecheckDeletionTimestamp(getObject func(context.Context) (metav1.Object, error)) func(context.Context) error { + return func(ctx context.Context) error { + obj, err := getObject(ctx) + if err != nil { + return fmt.Errorf("can't recheck DeletionTimestamp: %v", err) + } + if obj.GetDeletionTimestamp() != nil { + return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp()) + } + return nil + } +} + +type objectForOwnerRefPatch struct { + Metadata objectMetaForPatch `json:"metadata"` +} + +type objectMetaForPatch struct { + OwnerReferences []metav1.OwnerReference `json:"ownerReferences"` + UID types.UID `json:"uid"` + Finalizers []string `json:"finalizers,omitempty"` +} + +func ownerRefControllerPatch(controller metav1.Object, gvk schema.GroupVersionKind, uid types.UID, finalizers ...string) ([]byte, error) { + blockOwnerDeletion := true + isController := true + addControllerPatch := objectForOwnerRefPatch{ + Metadata: objectMetaForPatch{ + UID: uid, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: controller.GetName(), + UID: controller.GetUID(), + Controller: &isController, + BlockOwnerDeletion: &blockOwnerDeletion, + }, + }, + }, + } + if len(finalizers) > 0 { + addControllerPatch.Metadata.Finalizers = finalizers + } + patchBytes, err := json.Marshal(&addControllerPatch) + if err != nil { + return nil, err + } + return patchBytes, nil +} + +// GenerateDeleteOwnerRefStrategicMergeBytes generates the patch bytes to remove +// the owner references with given uids from an object. 
+func GenerateDeleteOwnerRefStrategicMergeBytes(objUID types.UID, ownerUIDs []types.UID, finalizers ...string) ([]byte, error) {
+	// Build one {"uid": ..., "$patch": "delete"} directive per owner UID so
+	// the strategic merge patch removes exactly those ownerReferences entries.
+	var ownerRefs []map[string]interface{}
+	for _, ownerUID := range ownerUIDs {
+		ownerRefs = append(ownerRefs, map[string]interface{}{
+			"uid":    ownerUID,
+			"$patch": "delete",
+		})
+	}
+	// The object's own uid acts as a precondition: the patch does not apply
+	// if the object was deleted and recreated with a different uid.
+	patch := map[string]interface{}{
+		"metadata": map[string]interface{}{
+			"uid":             objUID,
+			"ownerReferences": ownerRefs,
+		},
+	}
+	if len(finalizers) > 0 {
+		// Finalizers form a primitive (string) list, so removal uses the
+		// $deleteFromPrimitiveList directive, which takes the plain string
+		// values to delete — not "$patch": "delete" maps. (The previous
+		// version also built such maps here but never used them; that dead
+		// code has been removed.)
+		patch["metadata"].(map[string]interface{})["$deleteFromPrimitiveList/finalizers"] = finalizers
+	}
+	return json.Marshal(patch)
+}
diff --git a/pkg/controllers/workload/v1alpha1/utils/kubecontroller/pod_control.go b/pkg/controllers/workload/v1alpha1/utils/kubecontroller/pod_control.go new file mode 100644 index 00000000000..68fc029c848 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/kubecontroller/pod_control.go @@ -0,0 +1,216 @@ +/*
+Copyright 2014 The Kubernetes Authors.
+Copyright 2024 The Fluid Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubecontroller provides controller utilities adapted from k8s.io/kubernetes/pkg/controller
+// to avoid deep dependency chains.
+package kubecontroller + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" +) + +const ( + // FailedCreatePodReason is added in an event and in a replica set condition + // when a pod for a replica set is failed to be created. + FailedCreatePodReason = "FailedCreate" + // SuccessfulCreatePodReason is added in an event when a pod for a replica set + // is successfully created. + SuccessfulCreatePodReason = "SuccessfulCreate" + // FailedDeletePodReason is added in an event and in a replica set condition + // when a pod for a replica set is failed to be deleted. + FailedDeletePodReason = "FailedDelete" + // SuccessfulDeletePodReason is added in an event when a pod for a replica set + // is successfully deleted. + SuccessfulDeletePodReason = "SuccessfulDelete" +) + +// PodControlInterface is an interface that knows how to add or delete pods +// created as an interface to allow testing. +type PodControlInterface interface { + // CreatePods creates new pods according to the spec, and sets object as the pod's controller. + CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error + // CreatePodsWithGenerateName creates new pods according to the spec, sets object as the pod's controller and sets pod's generateName. + CreatePodsWithGenerateName(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error + // DeletePod deletes the pod identified by podID. 
+ DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error + // PatchPod patches the pod. + PatchPod(ctx context.Context, namespace, name string, data []byte) error +} + +// RealPodControl is the default implementation of PodControlInterface. +type RealPodControl struct { + KubeClient clientset.Interface + Recorder record.EventRecorder +} + +var _ PodControlInterface = &RealPodControl{} + +func getPodsLabelSet(template *v1.PodTemplateSpec) labels.Set { + desiredLabels := make(labels.Set) + for k, v := range template.Labels { + desiredLabels[k] = v + } + return desiredLabels +} + +func getPodsFinalizers(template *v1.PodTemplateSpec) []string { + desiredFinalizers := make([]string, len(template.Finalizers)) + copy(desiredFinalizers, template.Finalizers) + return desiredFinalizers +} + +func getPodsAnnotationSet(template *v1.PodTemplateSpec) labels.Set { + desiredAnnotations := make(labels.Set) + for k, v := range template.Annotations { + desiredAnnotations[k] = v + } + return desiredAnnotations +} + +func getPodsPrefix(controllerName string) string { + // use the dash (if the name isn't too long) to make the pod name a bit prettier + prefix := fmt.Sprintf("%s-", controllerName) + // simplified: just check length instead of calling k8s validation + if len(prefix) > 253 { + prefix = controllerName + } + return prefix +} + +func validateControllerRef(controllerRef *metav1.OwnerReference) error { + if controllerRef == nil { + return fmt.Errorf("controllerRef is nil") + } + if len(controllerRef.APIVersion) == 0 { + return fmt.Errorf("controllerRef has empty APIVersion") + } + if len(controllerRef.Kind) == 0 { + return fmt.Errorf("controllerRef has empty Kind") + } + if controllerRef.Controller == nil || !*controllerRef.Controller { + return fmt.Errorf("controllerRef.Controller is not set to true") + } + if controllerRef.BlockOwnerDeletion == nil || !*controllerRef.BlockOwnerDeletion { + return fmt.Errorf("controllerRef.BlockOwnerDeletion 
is not set") + } + return nil +} + +func (r RealPodControl) CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error { + return r.CreatePodsWithGenerateName(ctx, namespace, template, controllerObject, controllerRef, "") +} + +func (r RealPodControl) CreatePodsWithGenerateName(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error { + if err := validateControllerRef(controllerRef); err != nil { + return err + } + pod, err := GetPodFromTemplate(template, controllerObject, controllerRef) + if err != nil { + return err + } + if len(generateName) > 0 { + pod.ObjectMeta.GenerateName = generateName + } + return r.createPods(ctx, namespace, pod, controllerObject) +} + +func (r RealPodControl) PatchPod(ctx context.Context, namespace, name string, data []byte) error { + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) + return err +} + +// GetPodFromTemplate creates a pod from a pod template spec. 
+func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *metav1.OwnerReference) (*v1.Pod, error) { + desiredLabels := getPodsLabelSet(template) + desiredFinalizers := getPodsFinalizers(template) + desiredAnnotations := getPodsAnnotationSet(template) + accessor, err := meta.Accessor(parentObject) + if err != nil { + return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) + } + prefix := getPodsPrefix(accessor.GetName()) + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: desiredLabels, + Annotations: desiredAnnotations, + GenerateName: prefix, + Finalizers: desiredFinalizers, + }, + } + if controllerRef != nil { + pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef) + } + pod.Spec = *template.Spec.DeepCopy() + return pod, nil +} + +func (r RealPodControl) createPods(ctx context.Context, namespace string, pod *v1.Pod, object runtime.Object) error { + if len(labels.Set(pod.Labels)) == 0 { + return fmt.Errorf("unable to create pods, no labels") + } + newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + // only send an event if the namespace isn't terminating + if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { + r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) + } + return err + } + logger := klog.FromContext(ctx) + accessor, err := meta.Accessor(object) + if err != nil { + logger.Error(err, "parentObject does not have ObjectMeta") + return nil + } + logger.V(4).Info("Controller created pod", "controller", accessor.GetName(), "pod", klog.KObj(newPod)) + r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) + + return nil +} + +func (r RealPodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error { + accessor, err := meta.Accessor(object) + if err != nil { + return 
fmt.Errorf("object does not have ObjectMeta, %v", err) + } + logger := klog.FromContext(ctx) + logger.V(2).Info("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID)) + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(ctx, podID, metav1.DeleteOptions{}); err != nil { + if apierrors.IsNotFound(err) { + logger.V(4).Info("Pod has already been deleted.", "pod", klog.KRef(namespace, podID)) + return err + } + r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) + return fmt.Errorf("unable to delete pods: %v", err) + } + r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID) + + return nil +} diff --git a/pkg/controllers/workload/v1alpha1/utils/lifecycle/lifecycle_utils.go b/pkg/controllers/workload/v1alpha1/utils/lifecycle/lifecycle_utils.go new file mode 100644 index 00000000000..38908e29d26 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/lifecycle/lifecycle_utils.go @@ -0,0 +1,249 @@ +/* +Copyright 2020 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifecycle + +import ( + "fmt" + "strings" + "time" + + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/podadapter" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/podreadiness" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +const ( + // these keys for MarkPodNotReady Policy of pod lifecycle + preparingDeleteHookKey = "preDeleteHook" + preparingUpdateHookKey = "preUpdateHook" +) + +// Interface for managing pods lifecycle. +type Interface interface { + UpdatePodLifecycle(pod *v1.Pod, state workloadv1alpha1.LifecycleStateType, markPodNotReady bool) (bool, *v1.Pod, error) + UpdatePodLifecycleWithHandler(pod *v1.Pod, state workloadv1alpha1.LifecycleStateType, inPlaceUpdateHandler *workloadv1alpha1.LifecycleHook) (bool, *v1.Pod, error) +} + +type realControl struct { + adp podadapter.Adapter + podReadinessControl podreadiness.Interface +} + +func New(c client.Client) Interface { + adp := &podadapter.AdapterRuntimeClient{Client: c} + return &realControl{ + adp: adp, + podReadinessControl: podreadiness.NewForAdapter(adp), + } +} + +func NewForTypedClient(c clientset.Interface) Interface { + adp := &podadapter.AdapterTypedClient{Client: c} + return &realControl{ + adp: adp, + podReadinessControl: podreadiness.NewForAdapter(adp), + } +} + +func NewForInformer(informer coreinformers.PodInformer) Interface { + adp := &podadapter.AdapterInformer{PodInformer: informer} + return &realControl{ + adp: adp, + podReadinessControl: podreadiness.NewForAdapter(adp), + } +} + +func GetPodLifecycleState(pod *v1.Pod) workloadv1alpha1.LifecycleStateType { + if pod == nil || pod.Labels == nil { + 
return "" + } + return workloadv1alpha1.LifecycleStateType(pod.Labels[workloadv1alpha1.LifecycleStateKey]) +} + +func IsHookMarkPodNotReady(lifecycleHook *workloadv1alpha1.LifecycleHook) bool { + if lifecycleHook == nil { + return false + } + return lifecycleHook.MarkPodNotReady +} + +func IsLifecycleMarkPodNotReady(lifecycle *workloadv1alpha1.Lifecycle) bool { + if lifecycle == nil { + return false + } + return IsHookMarkPodNotReady(lifecycle.PreDelete) || IsHookMarkPodNotReady(lifecycle.InPlaceUpdate) +} + +func SetPodLifecycle(state workloadv1alpha1.LifecycleStateType) func(*v1.Pod) { + return func(pod *v1.Pod) { + if pod == nil { + return + } + if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + pod.Labels[workloadv1alpha1.LifecycleStateKey] = string(state) + pod.Annotations[workloadv1alpha1.LifecycleTimestampKey] = time.Now().Format(time.RFC3339) + } +} + +func (c *realControl) executePodNotReadyPolicy(pod *v1.Pod, state workloadv1alpha1.LifecycleStateType) (err error) { + switch state { + case workloadv1alpha1.LifecycleStatePreparingDelete: + err = c.podReadinessControl.AddNotReadyKey(pod, getReadinessMessage(preparingDeleteHookKey)) + case workloadv1alpha1.LifecycleStatePreparingUpdate: + err = c.podReadinessControl.AddNotReadyKey(pod, getReadinessMessage(preparingUpdateHookKey)) + case workloadv1alpha1.LifecycleStateUpdated: + err = c.podReadinessControl.RemoveNotReadyKey(pod, getReadinessMessage(preparingUpdateHookKey)) + } + + if err != nil { + klog.ErrorS(err, "Failed to set pod Ready/NotReady at lifecycle state", + "pod", client.ObjectKeyFromObject(pod), "state", state) + } + return +} + +func (c *realControl) UpdatePodLifecycle(pod *v1.Pod, state workloadv1alpha1.LifecycleStateType, markPodNotReady bool) (updated bool, gotPod *v1.Pod, err error) { + if markPodNotReady { + if err = c.executePodNotReadyPolicy(pod, state); err != nil { + return false, nil, err + 
} + } + + if GetPodLifecycleState(pod) == state { + return false, pod, nil + } + + pod = pod.DeepCopy() + if adp, ok := c.adp.(podadapter.AdapterWithPatch); ok { + body := fmt.Sprintf( + `{"metadata":{"labels":{"%s":"%s"},"annotations":{"%s":"%s"}}}`, + workloadv1alpha1.LifecycleStateKey, + string(state), + workloadv1alpha1.LifecycleTimestampKey, + time.Now().Format(time.RFC3339), + ) + gotPod, err = adp.PatchPod(pod, client.RawPatch(types.StrategicMergePatchType, []byte(body))) + } else { + SetPodLifecycle(state)(pod) + gotPod, err = c.adp.UpdatePod(pod) + } + + return true, gotPod, err +} + +func (c *realControl) UpdatePodLifecycleWithHandler(pod *v1.Pod, state workloadv1alpha1.LifecycleStateType, inPlaceUpdateHandler *workloadv1alpha1.LifecycleHook) (updated bool, gotPod *v1.Pod, err error) { + if inPlaceUpdateHandler == nil || pod == nil { + return false, pod, nil + } + + if inPlaceUpdateHandler.MarkPodNotReady { + if err = c.executePodNotReadyPolicy(pod, state); err != nil { + return false, nil, err + } + } + + if GetPodLifecycleState(pod) == state { + return false, pod, nil + } + + pod = pod.DeepCopy() + if adp, ok := c.adp.(podadapter.AdapterWithPatch); ok { + var labelsHandler, finalizersHandler string + for k, v := range inPlaceUpdateHandler.LabelsHandler { + labelsHandler = fmt.Sprintf(`%s,"%s":"%s"`, labelsHandler, k, v) + } + for _, v := range inPlaceUpdateHandler.FinalizersHandler { + finalizersHandler = fmt.Sprintf(`%s,"%s"`, finalizersHandler, v) + } + finalizersHandler = fmt.Sprintf(`[%s]`, strings.TrimLeft(finalizersHandler, ",")) + + body := fmt.Sprintf( + `{"metadata":{"labels":{"%s":"%s"%s},"annotations":{"%s":"%s"},"finalizers":%s}}`, + workloadv1alpha1.LifecycleStateKey, + string(state), + labelsHandler, + workloadv1alpha1.LifecycleTimestampKey, + time.Now().Format(time.RFC3339), + finalizersHandler, + ) + gotPod, err = adp.PatchPod(pod, client.RawPatch(types.StrategicMergePatchType, []byte(body))) + } else { + if pod.Labels == nil { + 
pod.Labels = make(map[string]string) + } + for k, v := range inPlaceUpdateHandler.LabelsHandler { + pod.Labels[k] = v + } + pod.Finalizers = append(pod.Finalizers, inPlaceUpdateHandler.FinalizersHandler...) + + SetPodLifecycle(state)(pod) + gotPod, err = c.adp.UpdatePod(pod) + } + + return true, gotPod, err +} + +func IsPodHooked(hook *workloadv1alpha1.LifecycleHook, pod *v1.Pod) bool { + if hook == nil || pod == nil { + return false + } + for _, f := range hook.FinalizersHandler { + if controllerutil.ContainsFinalizer(pod, f) { + return true + } + } + for k, v := range hook.LabelsHandler { + if pod.Labels[k] == v { + return true + } + } + return false +} + +func IsPodAllHooked(hook *workloadv1alpha1.LifecycleHook, pod *v1.Pod) bool { + if hook == nil || pod == nil { + return false + } + for _, f := range hook.FinalizersHandler { + if !controllerutil.ContainsFinalizer(pod, f) { + return false + } + } + for k, v := range hook.LabelsHandler { + if pod.Labels[k] != v { + return false + } + } + return true +} + +func getReadinessMessage(key string) podreadiness.Message { + return podreadiness.Message{UserAgent: "Lifecycle", Key: key} +} diff --git a/pkg/controllers/workload/v1alpha1/utils/podadapter/adapter.go b/pkg/controllers/workload/v1alpha1/utils/podadapter/adapter.go new file mode 100644 index 00000000000..adbf33a3e40 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/podadapter/adapter.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podadapter + +import ( + "context" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Adapter interface { + GetPod(namespace, name string) (*v1.Pod, error) + UpdatePod(pod *v1.Pod) (*v1.Pod, error) + UpdatePodStatus(pod *v1.Pod) error +} + +type AdapterWithPatch interface { + Adapter + PatchPod(pod *v1.Pod, patch client.Patch) (*v1.Pod, error) + PatchPodResource(pod *v1.Pod, patch client.Patch) (*v1.Pod, error) +} + +type AdapterRuntimeClient struct { + client.Client +} + +func (c *AdapterRuntimeClient) GetPod(namespace, name string) (*v1.Pod, error) { + pod := &v1.Pod{} + err := c.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: name}, pod) + return pod, err +} + +func (c *AdapterRuntimeClient) UpdatePod(pod *v1.Pod) (*v1.Pod, error) { + return pod, c.Update(context.TODO(), pod) +} + +func (c *AdapterRuntimeClient) UpdatePodStatus(pod *v1.Pod) error { + return c.Status().Update(context.TODO(), pod) +} + +func (c *AdapterRuntimeClient) PatchPod(pod *v1.Pod, patch client.Patch) (*v1.Pod, error) { + return pod, c.Patch(context.TODO(), pod, patch) +} + +func (c *AdapterRuntimeClient) PatchPodResource(pod *v1.Pod, patch client.Patch) (*v1.Pod, error) { + return pod, c.SubResource("resize").Patch(context.TODO(), pod, patch) +} + +type AdapterTypedClient struct { + Client clientset.Interface +} + +func (c *AdapterTypedClient) GetPod(namespace, name string) (*v1.Pod, error) { + return c.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (c *AdapterTypedClient) UpdatePod(pod *v1.Pod) (*v1.Pod, error) { + return c.Client.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, 
metav1.UpdateOptions{}) +} + +func (c *AdapterTypedClient) UpdatePodStatus(pod *v1.Pod) error { + _, err := c.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) + return err +} + +func (c *AdapterTypedClient) PatchPod(pod *v1.Pod, patch client.Patch) (*v1.Pod, error) { + patchData, err := patch.Data(pod) + if err != nil { + return nil, err + } + return c.Client.CoreV1().Pods(pod.Namespace).Patch(context.TODO(), pod.Name, patch.Type(), patchData, metav1.PatchOptions{}) +} + +func (c *AdapterTypedClient) PatchPodResource(pod *v1.Pod, patch client.Patch) (*v1.Pod, error) { + patchData, err := patch.Data(pod) + if err != nil { + return nil, err + } + return c.Client.CoreV1().Pods(pod.Namespace).Patch(context.TODO(), pod.Name, patch.Type(), patchData, metav1.PatchOptions{}, "resize") +} + +type AdapterInformer struct { + PodInformer coreinformers.PodInformer +} + +func (c *AdapterInformer) GetPod(namespace, name string) (*v1.Pod, error) { + pod, err := c.PodInformer.Lister().Pods(namespace).Get(name) + if err == nil { + return pod.DeepCopy(), nil + } + return nil, err +} + +func (c *AdapterInformer) UpdatePod(pod *v1.Pod) (*v1.Pod, error) { + return pod, c.PodInformer.Informer().GetIndexer().Update(pod) +} + +func (c *AdapterInformer) UpdatePodStatus(pod *v1.Pod) error { + return c.PodInformer.Informer().GetIndexer().Update(pod) +} diff --git a/pkg/controllers/workload/v1alpha1/utils/podreadiness/pod_readiness.go b/pkg/controllers/workload/v1alpha1/utils/podreadiness/pod_readiness.go new file mode 100644 index 00000000000..8609bf7da70 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/podreadiness/pod_readiness.go @@ -0,0 +1,74 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podreadiness + +import ( + "sort" + + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/podadapter" + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util" + v1 "k8s.io/api/core/v1" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +type Interface interface { + ContainsReadinessGate(pod *v1.Pod) bool + AddNotReadyKey(pod *v1.Pod, msg Message) error + RemoveNotReadyKey(pod *v1.Pod, msg Message) error +} + +func NewForAdapter(adp podadapter.Adapter) Interface { + return &commonControl{adp: adp} +} + +type commonControl struct { + adp podadapter.Adapter +} + +func (c *commonControl) ContainsReadinessGate(pod *v1.Pod) bool { + return containsReadinessGate(pod, workloadv1alpha1.KruisePodReadyConditionType) +} + +func (c *commonControl) AddNotReadyKey(pod *v1.Pod, msg Message) error { + return addNotReadyKey(c.adp, pod, msg, workloadv1alpha1.KruisePodReadyConditionType) +} + +func (c *commonControl) RemoveNotReadyKey(pod *v1.Pod, msg Message) error { + return removeNotReadyKey(c.adp, pod, msg, workloadv1alpha1.KruisePodReadyConditionType) +} + +type Message struct { + UserAgent string `json:"userAgent"` + Key string `json:"key"` +} + +type messageList []Message + +func (c messageList) Len() int { return len(c) } +func (c messageList) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c messageList) Less(i, j int) bool { + if c[i].UserAgent == c[j].UserAgent { + return c[i].Key < c[j].Key + } + return c[i].UserAgent < c[j].UserAgent +} + +func (c messageList) dump() string { + 
sort.Sort(c)
+	return util.DumpJSON(c)
+}
diff --git a/pkg/controllers/workload/v1alpha1/utils/podreadiness/pod_readiness_utils.go b/pkg/controllers/workload/v1alpha1/utils/podreadiness/pod_readiness_utils.go
new file mode 100644
index 00000000000..07e5895c19e
--- /dev/null
+++ b/pkg/controllers/workload/v1alpha1/utils/podreadiness/pod_readiness_utils.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2021 The Kruise Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podreadiness
+
+import (
+	"encoding/json"
+
+	"github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/podadapter"
+	"github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/util/retry"
+
+	workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1"
+)
+
+func addNotReadyKey(adp podadapter.Adapter, pod *v1.Pod, msg Message, condType v1.PodConditionType) error {
+	if alreadyHasKey(pod, msg, condType) {
+		return nil
+	}
+
+	if !containsReadinessGate(pod, condType) {
+		return nil
+	}
+
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		newPod, err := adp.GetPod(pod.Namespace, pod.Name)
+		if err != nil {
+			return err
+		}
+
+		condition := getReadinessCondition(newPod, condType)
+		if condition == nil {
+			_, messages := addMessage("", msg)
+			newPod.Status.Conditions = append(newPod.Status.Conditions, v1.PodCondition{
+				Type:               condType,
+				Status:             v1.ConditionFalse,
+				Message:            messages.dump(),
+				LastTransitionTime: metav1.Now(),
+			})
+		} else {
+			changed, messages := addMessage(condition.Message, msg)
+			if !changed {
+				return nil
+			}
+			condition.Status = v1.ConditionFalse
+			condition.Message = messages.dump()
+			condition.LastTransitionTime = metav1.Now()
+		}
+
+		// set pod ready condition to "False"
+		util.SetPodReadyCondition(newPod)
+		return adp.UpdatePodStatus(newPod)
+	})
+	return err
+}
+
+func removeNotReadyKey(adp podadapter.Adapter, pod *v1.Pod, msg Message, condType v1.PodConditionType) error {
+	if !containsReadinessGate(pod, condType) {
+		return nil
+	}
+
+	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		newPod, err := adp.GetPod(pod.Namespace, pod.Name)
+		if err != nil {
+			return err
+		}
+
+		condition := getReadinessCondition(newPod, condType)
+		if condition == nil {
+			return nil
+		}
+		changed, messages := removeMessage(condition.Message, msg)
+		if !changed {
+			return nil
+		}
+		if len(messages) == 0 {
+			condition.Status = v1.ConditionTrue
+		}
+		condition.Message = messages.dump()
+		condition.LastTransitionTime = metav1.Now()
+		return adp.UpdatePodStatus(newPod)
+	})
+}
+
+func addMessage(base string, msg Message) (bool, messageList) {
+	messages := messageList{}
+	if base != "" {
+		_ = json.Unmarshal([]byte(base), &messages)
+	}
+	for _, m := range messages {
+		if m.UserAgent == msg.UserAgent && m.Key == msg.Key {
+			return false, messages
+		}
+	}
+	messages = append(messages, msg)
+	return true, messages
+}
+
+func removeMessage(base string, msg Message) (bool, messageList) {
+	messages := messageList{}
+	if base != "" {
+		_ = json.Unmarshal([]byte(base), &messages)
+	}
+	var removed bool
+	newMessages := messageList{}
+	for _, m := range messages {
+		if m.UserAgent == msg.UserAgent && m.Key == msg.Key {
+			removed = true
+			continue
+		}
+		newMessages = append(newMessages, m)
+	}
+	return removed, newMessages
+}
+
+func GetReadinessCondition(pod *v1.Pod) *v1.PodCondition {
+	return getReadinessCondition(pod, 
workloadv1alpha1.KruisePodReadyConditionType)
+}
+
+func ContainsReadinessGate(pod *v1.Pod) bool {
+	return containsReadinessGate(pod, workloadv1alpha1.KruisePodReadyConditionType)
+}
+
+func getReadinessCondition(pod *v1.Pod, condType v1.PodConditionType) *v1.PodCondition {
+	if pod == nil {
+		return nil
+	}
+	for i := range pod.Status.Conditions {
+		c := &pod.Status.Conditions[i]
+		if c.Type == condType {
+			return c
+		}
+	}
+	return nil
+}
+
+func containsReadinessGate(pod *v1.Pod, condType v1.PodConditionType) bool {
+	for _, g := range pod.Spec.ReadinessGates {
+		if g.ConditionType == condType {
+			return true
+		}
+	}
+	return false
+}
+
+func alreadyHasKey(pod *v1.Pod, msg Message, condType v1.PodConditionType) bool {
+	condition := getReadinessCondition(pod, condType)
+	if condition == nil {
+		return false
+	}
+	if condition.Status == v1.ConditionTrue || condition.Message == "" {
+		return false
+	}
+	messages := messageList{}
+	_ = json.Unmarshal([]byte(condition.Message), &messages)
+	for _, m := range messages {
+		if m.UserAgent == msg.UserAgent && m.Key == msg.Key {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/controllers/workload/v1alpha1/utils/ratelimiter/rate_limiter.go b/pkg/controllers/workload/v1alpha1/utils/ratelimiter/rate_limiter.go
new file mode 100644
index 00000000000..2d26e4530bd
--- /dev/null
+++ b/pkg/controllers/workload/v1alpha1/utils/ratelimiter/rate_limiter.go
@@ -0,0 +1,27 @@
+package ratelimiter
+
+import (
+	"flag"
+	"time"
+
+	"golang.org/x/time/rate"
+	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
+)
+
+func init() {
+	flag.DurationVar(&baseDelay, "rate-limiter-base-delay", time.Millisecond*5, "The base delay for rate limiter. Defaults 5ms")
+	flag.DurationVar(&maxDelay, "rate-limiter-max-delay", time.Second*1000, "The max delay for rate limiter. Defaults 1000s")
+	flag.IntVar(&qps, "rate-limiter-qps", 10, "The qps for rate limiter. Defaults 10")
+	flag.IntVar(&bucketSize, "rate-limiter-bucket-size", 100, "The bucket size for rate limiter. Defaults 100")
+}
+
+var baseDelay, maxDelay time.Duration
+var qps, bucketSize int
+
+func DefaultControllerRateLimiter[T comparable]() ratelimiter.RateLimiter {
+	return workqueue.NewMaxOfRateLimiter(
+		workqueue.NewItemExponentialFailureRateLimiter(baseDelay, maxDelay),
+		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), bucketSize)},
+	)
+}
diff --git a/pkg/controllers/workload/v1alpha1/utils/requeueduration/duration.go b/pkg/controllers/workload/v1alpha1/utils/requeueduration/duration.go
new file mode 100644
index 00000000000..a84d8b2a2af
--- /dev/null
+++ b/pkg/controllers/workload/v1alpha1/utils/requeueduration/duration.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Kruise Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package requeueduration
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+// DurationStore can store a duration map for multiple workloads
+type DurationStore struct {
+	store sync.Map
+}
+
+func (dm *DurationStore) Push(key string, newDuration time.Duration) {
+	value, _ := dm.store.LoadOrStore(key, &Duration{})
+	requeueDuration, ok := value.(*Duration)
+	if !ok {
+		dm.store.Delete(key)
+		return
+	}
+	requeueDuration.Update(newDuration)
+}
+
+func (dm *DurationStore) Pop(key string) time.Duration {
+	value, ok := dm.store.Load(key)
+	if !ok {
+		return 0
+	}
+	defer dm.store.Delete(key)
+	requeueDuration, ok := value.(*Duration)
+	if !ok {
+		return 0
+	}
+	return requeueDuration.Get()
+}
+
+// Duration helps calculate the shortest non-zero duration to requeue
+type Duration struct {
+	sync.Mutex
+	duration time.Duration
+	message  string
+}
+
+func (rd *Duration) Update(newDuration time.Duration) {
+	rd.Lock()
+	defer rd.Unlock()
+	if newDuration > 0 {
+		if rd.duration <= 0 || newDuration < rd.duration {
+			rd.duration = newDuration
+		}
+	}
+}
+
+func (rd *Duration) UpdateWithMsg(newDuration time.Duration, format string, args ...interface{}) {
+	rd.Lock()
+	defer rd.Unlock()
+	if newDuration > 0 {
+		if rd.duration <= 0 || newDuration < rd.duration {
+			rd.duration = newDuration
+			rd.message = fmt.Sprintf(format, args...)
+ } + } +} + +func (rd *Duration) Merge(rd2 *Duration) { + rd2.Lock() + defer rd2.Unlock() + rd.UpdateWithMsg(rd2.duration, rd2.message) +} + +func (rd *Duration) Get() time.Duration { + rd.Lock() + defer rd.Unlock() + return rd.duration +} + +func (rd *Duration) GetWithMsg() (time.Duration, string) { + rd.Lock() + defer rd.Unlock() + return rd.duration, rd.message +} diff --git a/pkg/controllers/workload/v1alpha1/utils/revision/revision.go b/pkg/controllers/workload/v1alpha1/utils/revision/revision.go new file mode 100644 index 00000000000..afc2840085c --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/revision/revision.go @@ -0,0 +1,44 @@ +/* +Copyright 2023 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "strings" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" +) + +// IsPodUpdate return true when Pod controller-revision-hash equals to updateRevision. 
+func IsPodUpdate(pod *v1.Pod, updateRevision string) bool { + return equalToRevisionHash("", pod, updateRevision) +} + +func equalToRevisionHash(s string, pod *v1.Pod, hash string) bool { + objHash := pod.GetLabels()[apps.ControllerRevisionHashLabelKey] + if objHash == hash { + return true + } + return getShortHash(hash) == getShortHash(objHash) +} + +func getShortHash(hash string) string { + // This makes sure the real hash must be the last '-' substring of revision name + // vendor/k8s.io/kubernetes/pkg/controller/history/controller_history.go#82 + list := strings.Split(hash, "-") + return list[len(list)-1] +} diff --git a/pkg/controllers/workload/v1alpha1/utils/revisionadapter/revision_adapter.go b/pkg/controllers/workload/v1alpha1/utils/revisionadapter/revision_adapter.go new file mode 100644 index 00000000000..2c53d313296 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/revisionadapter/revision_adapter.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revisionadapter + +import ( + apps "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Interface interface { + EqualToRevisionHash(controllerKey string, obj metav1.Object, hash string) bool + WriteRevisionHash(obj metav1.Object, hash string) +} + +func NewDefaultImpl() Interface { + return &defaultImpl{} +} + +type defaultImpl struct{} + +func (r *defaultImpl) EqualToRevisionHash(_ string, obj metav1.Object, hash string) bool { + return obj.GetLabels()[apps.ControllerRevisionHashLabelKey] == hash +} + +func (r *defaultImpl) WriteRevisionHash(obj metav1.Object, hash string) { + if obj.GetLabels() == nil { + obj.SetLabels(make(map[string]string, 1)) + } + obj.GetLabels()[apps.ControllerRevisionHashLabelKey] = hash +} diff --git a/pkg/controllers/workload/v1alpha1/utils/specifieddelete/specified_delete.go b/pkg/controllers/workload/v1alpha1/utils/specifieddelete/specified_delete.go new file mode 100644 index 00000000000..fb1dd6d83b1 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/specifieddelete/specified_delete.go @@ -0,0 +1,47 @@ +/* +Copyright 2020 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package specifieddelete + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +func IsSpecifiedDelete(obj metav1.Object) bool { + _, ok := obj.GetLabels()[workloadv1alpha1.SpecifiedDeleteKey] + return ok +} + +func PatchPodSpecifiedDelete(c client.Client, pod *v1.Pod, value string) (bool, error) { + if _, ok := pod.Labels[workloadv1alpha1.SpecifiedDeleteKey]; ok { + return false, nil + } + + body := fmt.Sprintf( + `{"metadata":{"labels":{"%s":"%s"}}}`, + workloadv1alpha1.SpecifiedDeleteKey, + value, + ) + return true, c.Patch(context.TODO(), pod, client.RawPatch(types.StrategicMergePatchType, []byte(body))) +} diff --git a/pkg/controllers/workload/v1alpha1/utils/updatesort/priority_sort.go b/pkg/controllers/workload/v1alpha1/utils/updatesort/priority_sort.go new file mode 100644 index 00000000000..f7e0a24270f --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/updatesort/priority_sort.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package updatesort + +import ( + "sort" + "strconv" + + "github.com/fluid-cloudnative/fluid/pkg/controllers/workload/v1alpha1/utils/util" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +type prioritySort struct { + strategy *workloadv1alpha1.UpdatePriorityStrategy +} + +func NewPrioritySorter(s *workloadv1alpha1.UpdatePriorityStrategy) Sorter { + return &prioritySort{strategy: s} +} + +// Sort helps sort the indexes of pods by UpdatePriorityStrategy. +func (ps *prioritySort) Sort(pods []*v1.Pod, indexes []int) []int { + if ps.strategy == nil || (len(ps.strategy.WeightPriority) == 0 && len(ps.strategy.OrderPriority) == 0) { + return indexes + } + + f := func(i, j int) bool { + podI := pods[indexes[i]] + podJ := pods[indexes[j]] + return ps.compare(podI.Labels, podJ.Labels, i < j) + } + + sort.SliceStable(indexes, f) + return indexes +} + +func (ps *prioritySort) compare(podI, podJ map[string]string, defaultVal bool) bool { + if len(ps.strategy.WeightPriority) > 0 { + if wI, wJ := ps.getPodWeightPriority(podI), ps.getPodWeightPriority(podJ); wI != wJ { + return wI > wJ + } + } else if len(ps.strategy.OrderPriority) > 0 { + levelI, orderI := ps.getPodOrderPriority(podI) + levelJ, orderJ := ps.getPodOrderPriority(podJ) + if levelI != levelJ { + return levelI < levelJ + } else if orderI != orderJ { + return orderI > orderJ + } + } + return defaultVal +} + +func (ps *prioritySort) getPodWeightPriority(podLabels map[string]string) int64 { + var weight int64 + for _, p := range ps.strategy.WeightPriority { + selector, err := util.ValidatedLabelSelectorAsSelector(&p.MatchSelector) + if err != nil { + continue + } + if selector.Matches(labels.Set(podLabels)) { + weight += int64(p.Weight) + } + } + return weight +} + +func (ps *prioritySort) getPodOrderPriority(podLabels map[string]string) (int64, int64) { + for i, p := range ps.strategy.OrderPriority { + if value, ok := 
podLabels[p.OrderedKey]; ok { + return int64(i), getIntFromStringSuffix(value) + } + } + return -1, 0 +} + +func getIntFromStringSuffix(v string) int64 { + startIdx := -1 + for i := len(v) - 1; i >= 0; i-- { + if v[i] >= '0' && v[i] <= '9' { + startIdx = i + } else { + break + } + } + if startIdx < 0 { + return 0 + } + if order, err := strconv.ParseInt(v[startIdx:], 10, 64); err == nil { + return order + } + return 0 +} diff --git a/pkg/controllers/workload/v1alpha1/utils/updatesort/scatter_sort.go b/pkg/controllers/workload/v1alpha1/utils/updatesort/scatter_sort.go new file mode 100644 index 00000000000..965a41c4d0e --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/updatesort/scatter_sort.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package updatesort + +import ( + "math" + "sort" + + v1 "k8s.io/api/core/v1" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +type scatterSort struct { + strategy workloadv1alpha1.UpdateScatterStrategy +} + +func NewScatterSorter(s workloadv1alpha1.UpdateScatterStrategy) Sorter { + return &scatterSort{strategy: s} +} + +func NewScatterSorterV1beta1(s workloadv1alpha1.UpdateScatterStrategy) Sorter { + // Convert v1beta1 to v1alpha1 + v1alpha1Strategy := make(workloadv1alpha1.UpdateScatterStrategy, len(s)) + for i, term := range s { + v1alpha1Strategy[i] = workloadv1alpha1.UpdateScatterTerm{ + Key: term.Key, + Value: term.Value, + } + } + return &scatterSort{strategy: v1alpha1Strategy} +} + +// Sort helps scatter the indexes of pods by ScatterStrategy. +func (ss *scatterSort) Sort(pods []*v1.Pod, indexes []int) []int { + if len(ss.strategy) == 0 || len(indexes) <= 1 { + return indexes + } + + terms := ss.getScatterTerms(pods, indexes) + for _, term := range terms { + indexes = ss.scatterPodsByRule(term, pods, indexes) + } + return indexes +} + +// getScatterTerms returns all scatter terms in current sorting. It will sort all terms by sum of pods matched. 
+func (ss *scatterSort) getScatterTerms(pods []*v1.Pod, indexes []int) []workloadv1alpha1.UpdateScatterTerm { + if len(ss.strategy) == 1 { + return ss.strategy + } + + var termSlice []workloadv1alpha1.UpdateScatterTerm + ruleCounter := map[string]int{} + + termID := func(term workloadv1alpha1.UpdateScatterTerm) string { + return term.Key + ":" + term.Value + } + + for _, term := range ss.strategy { + for _, idx := range indexes { + if val, ok := pods[idx].Labels[term.Key]; ok && val == term.Value { + newTerm := workloadv1alpha1.UpdateScatterTerm{Key: term.Key, Value: val} + id := termID(newTerm) + if count, ok := ruleCounter[id]; !ok { + termSlice = append(termSlice, newTerm) + ruleCounter[id] = 1 + } else { + ruleCounter[id] = count + 1 + } + } + } + } + + sort.SliceStable(termSlice, func(i, j int) bool { + cI := ruleCounter[termID(termSlice[i])] + cJ := ruleCounter[termID(termSlice[j])] + if cI != cJ { + return cI > cJ + } + return i <= j + }) + + return termSlice +} + +// scatterPodsByRule scatters pods by given rule term. +func (ss *scatterSort) scatterPodsByRule(term workloadv1alpha1.UpdateScatterTerm, pods []*v1.Pod, indexes []int) (ret []int) { + + // 1. counts the total number of matched and unmatched pods; find matched and unmatched pods in indexes waiting to update + var matchedIndexes, unmatchedIndexes []int + var totalMatched, totalUnmatched int + + for _, i := range indexes { + if pods[i].Labels[term.Key] == term.Value { + matchedIndexes = append(matchedIndexes, i) + } else { + unmatchedIndexes = append(unmatchedIndexes, i) + } + } + + if len(matchedIndexes) <= 1 || len(unmatchedIndexes) <= 1 { + return indexes + } + + for i := 0; i < len(pods); i++ { + if pods[i] == nil { + continue + } + if pods[i].Labels[term.Key] == term.Value { + totalMatched++ + } else { + totalUnmatched++ + } + } + + // 2. 
keep the last matched one and append to the indexes returned + lastMatchedIndex := matchedIndexes[len(matchedIndexes)-1] + defer func() { + ret = append(ret, lastMatchedIndex) + }() + matchedIndexes = matchedIndexes[:len(matchedIndexes)-1] + totalMatched-- + + // 3. calculate group number and size that pods to update should be combined + group := calculateGroupByDensity(totalMatched, totalUnmatched, len(matchedIndexes), len(unmatchedIndexes)) + newIndexes := make([]int, 0, len(indexes)) + + if group.unmatchedRemainder > 0 { + newIndexes = append(newIndexes, unmatchedIndexes[:group.unmatchedRemainder]...) + unmatchedIndexes = unmatchedIndexes[group.unmatchedRemainder:] + } + + for i := 0; i < group.groupNum; i++ { + matchedIndexes, newIndexes = migrateItems(matchedIndexes, newIndexes, group.matchedGroupSize) + unmatchedIndexes, newIndexes = migrateItems(unmatchedIndexes, newIndexes, group.unmatchedGroupSize) + } + + if len(matchedIndexes) > 0 { + newIndexes = append(newIndexes, matchedIndexes...) + } + if len(unmatchedIndexes) > 0 { + newIndexes = append(newIndexes, unmatchedIndexes...) + } + + return newIndexes +} + +func migrateItems(src, dst []int, size int) ([]int, []int) { + if len(src) == 0 { + return src, dst + } else if len(src) < size { + return []int{}, append(dst, src...) + } + dst = append(dst, src[:size]...) 
+ return src[size:], dst +} + +type scatterGroup struct { + groupNum int + matchedGroupSize int + unmatchedGroupSize int + unmatchedRemainder int +} + +func newScatterGroup(matched, unmatched int) scatterGroup { + sg := scatterGroup{} + if matched < unmatched { + sg.groupNum = matched + } else { + sg.groupNum = unmatched + } + + sg.matchedGroupSize = int(math.Round(float64(matched) / float64(sg.groupNum))) + sg.unmatchedGroupSize = int(math.Round(float64(unmatched) / float64(sg.groupNum))) + return sg +} + +func calculateGroupByDensity(totalMatched, totalUnmatched, updateMatched, updateUnmatched int) scatterGroup { + totalGroup := newScatterGroup(totalMatched, totalUnmatched) + updateGroup := newScatterGroup(updateMatched, updateUnmatched) + + if float32(totalUnmatched)/float32(totalMatched) >= float32(updateUnmatched)/float32(updateMatched) { + return updateGroup + } + + newGroup := scatterGroup{matchedGroupSize: totalGroup.matchedGroupSize, unmatchedGroupSize: totalGroup.unmatchedGroupSize} + if updateMatched/newGroup.matchedGroupSize < updateUnmatched/newGroup.unmatchedGroupSize { + newGroup.groupNum = updateMatched / newGroup.matchedGroupSize + } else { + newGroup.groupNum = updateUnmatched / newGroup.unmatchedGroupSize + } + newGroup.unmatchedRemainder = updateUnmatched - newGroup.groupNum*newGroup.unmatchedGroupSize + return newGroup +} diff --git a/pkg/controllers/workload/v1alpha1/utils/updatesort/sort.go b/pkg/controllers/workload/v1alpha1/utils/updatesort/sort.go new file mode 100644 index 00000000000..37c175dfd2a --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/updatesort/sort.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package updatesort + +import v1 "k8s.io/api/core/v1" + +type Sorter interface { + Sort(pods []*v1.Pod, indexes []int) []int +} diff --git a/pkg/controllers/workload/v1alpha1/utils/util/cache.go b/pkg/controllers/workload/v1alpha1/utils/util/cache.go new file mode 100644 index 00000000000..14057d6e210 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/util/cache.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +var controllerCacheSyncTimeout time.Duration + +func SetControllerCacheSyncTimeout(t time.Duration) { + controllerCacheSyncTimeout = t +} + +func GetControllerCacheSyncTimeout() time.Duration { + return controllerCacheSyncTimeout +} + +// GlobalCache using GVK/namespace/name as key +var GlobalCache = cache.NewStore(func(obj interface{}) (string, error) { + metaObj, ok := obj.(metav1.Object) + if !ok { + return "", fmt.Errorf("failed to convert obj to metav1.Object") + } + namespacedName := fmt.Sprintf("%s/%s", metaObj.GetNamespace(), metaObj.GetName()) + + runtimeObj, ok := obj.(runtime.Object) + if !ok { + return "", fmt.Errorf("failed to convert obj to runtime.Object") + } + key := fmt.Sprintf("%v/%s", runtimeObj.GetObjectKind().GroupVersionKind(), namespacedName) + return key, nil +}) diff --git a/pkg/controllers/workload/v1alpha1/utils/util/image.go b/pkg/controllers/workload/v1alpha1/utils/util/image.go new file mode 100644 index 00000000000..4cd211f14d8 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/util/image.go @@ -0,0 +1,68 @@ +/* +Copyright 2020 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "strings" +) + +// IsImageDigest indicates whether image is digest format, +// for example: docker.io/busybox@sha256:a9286defaba7b3a519d585ba0e37d0b2cbee74ebfe590960b0b1d6a5e97d1e1d +func IsImageDigest(image string) bool { + return strings.Contains(image, "@") +} + +// IsContainerImageEqual indicates whether container images are equal. +// Supports digest-format and tag-format images. +func IsContainerImageEqual(image1, image2 string) bool { + if IsImageDigest(image1) && IsImageDigest(image2) { + // Compare repo + digest + repo1, digest1 := splitImageDigest(image1) + repo2, digest2 := splitImageDigest(image2) + return repo1 == repo2 && digest1 == digest2 + } + if !IsImageDigest(image1) && !IsImageDigest(image2) { + // Compare repo + tag + repo1, tag1 := splitImageTag(image1) + repo2, tag2 := splitImageTag(image2) + return repo1 == repo2 && tag1 == tag2 + } + return false +} + +// splitImageDigest splits an image string into repo and digest parts. +func splitImageDigest(image string) (repo, digest string) { + parts := strings.SplitN(image, "@", 2) + if len(parts) == 2 { + return parts[0], parts[1] + } + return image, "" +} + +// splitImageTag splits an image string into repo and tag parts. +func splitImageTag(image string) (repo, tag string) { + // Remove digest part if present + image = strings.SplitN(image, "@", 2)[0] + // Find the last colon that is a tag separator + // Handle cases like docker.io/library/ubuntu:20.04 + lastColon := strings.LastIndex(image, ":") + lastSlash := strings.LastIndex(image, "/") + if lastColon > lastSlash { + return image[:lastColon], image[lastColon+1:] + } + return image, "latest" +} diff --git a/pkg/controllers/workload/v1alpha1/utils/util/json.go b/pkg/controllers/workload/v1alpha1/utils/util/json.go new file mode 100644 index 00000000000..c83f1560da9 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/util/json.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kruise Authors. 
// DumpJSON returns the JSON encoding of o; on a marshalling error the
// result is the empty string.
func DumpJSON(o interface{}) string {
	encoded, _ := json.Marshal(o)
	return string(encoded)
}

// IsJSONObjectEqual reports whether o1 and o2 are equal when viewed
// through their JSON encodings. It applies progressively more expensive
// checks: direct deep-equality, byte-identical encodings, and finally
// deep-equality of the decoded generic maps (insensitive to encoding
// details such as numeric representation differences between the inputs).
func IsJSONObjectEqual(o1, o2 interface{}) bool {
	if reflect.DeepEqual(o1, o2) {
		return true
	}

	encoded1, _ := json.Marshal(o1)
	encoded2, _ := json.Marshal(o2)
	if string(encoded1) == string(encoded2) {
		return true
	}

	decoded1, decoded2 := map[string]interface{}{}, map[string]interface{}{}
	_ = json.Unmarshal(encoded1, &decoded1)
	_ = json.Unmarshal(encoded2, &decoded2)
	return reflect.DeepEqual(decoded1, decoded2)
}
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "strconv" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + + workloadv1alpha1 "github.com/fluid-cloudnative/fluid/api/workload/v1alpha1" +) + +// GetPodNames returns names of the given Pods array +func GetPodNames(pods []*v1.Pod) sets.String { + set := sets.NewString() + for _, pod := range pods { + set.Insert(pod.Name) + } + return set +} + +// MergePods merges two pods arrays +func MergePods(pods1, pods2 []*v1.Pod) []*v1.Pod { + var ret []*v1.Pod + names := sets.NewString() + + for _, pod := range pods1 { + if !names.Has(pod.Name) { + ret = append(ret, pod) + names.Insert(pod.Name) + } + } + for _, pod := range pods2 { + if !names.Has(pod.Name) { + ret = append(ret, pod) + names.Insert(pod.Name) + } + } + return ret +} + +// DiffPods returns pods in pods1 but not in pods2 +func DiffPods(pods1, pods2 []*v1.Pod) (ret []*v1.Pod) { + names2 := sets.NewString() + for _, pod := range pods2 { + names2.Insert(pod.Name) + } + for _, pod := range pods1 { + if names2.Has(pod.Name) { + continue + } + ret = append(ret, pod) + } + return +} + +func MergeVolumeMounts(container v1.Container, additional []v1.VolumeMount) []v1.VolumeMount { + mountPoints := sets.NewString() + var original []v1.VolumeMount + for _, mount := range container.VolumeMounts { + mountPoints.Insert(mount.MountPath) + original = append(original, mount) + } + for _, mount := range container.VolumeDevices { + mountPoints.Insert(mount.DevicePath) + } + + for _, mount := range additional { + if mountPoints.Has(mount.MountPath) { + continue + } + original = append(original, mount) + mountPoints.Insert(mount.MountPath) + } + return original +} + 
+func MergeVolumeDevices(container v1.Container, additional []v1.VolumeDevice) []v1.VolumeDevice { + mountPoints := sets.NewString() + var original []v1.VolumeDevice + for _, mount := range container.VolumeDevices { + mountPoints.Insert(mount.DevicePath) + original = append(original, mount) + } + for _, mount := range container.VolumeMounts { + mountPoints.Insert(mount.MountPath) + } + + for _, mount := range additional { + if mountPoints.Has(mount.DevicePath) { + continue + } + original = append(original, mount) + mountPoints.Insert(mount.DevicePath) + } + return original +} + +func MergeEnvVar(original []v1.EnvVar, additional []v1.EnvVar) []v1.EnvVar { + exists := sets.NewString() + for _, env := range original { + exists.Insert(env.Name) + } + + for _, env := range additional { + if exists.Has(env.Name) { + continue + } + original = append(original, env) + exists.Insert(env.Name) + } + + return original +} + +func MergeVolumes(original []v1.Volume, additional []v1.Volume) []v1.Volume { + exists := sets.NewString() + for _, volume := range original { + exists.Insert(volume.Name) + } + + for _, volume := range additional { + if exists.Has(volume.Name) { + continue + } + original = append(original, volume) + exists.Insert(volume.Name) + } + + return original +} + +func GetContainerEnvVar(container *v1.Container, key string) *v1.EnvVar { + if container == nil { + return nil + } + for i, e := range container.Env { + if e.Name == key { + return &container.Env[i] + } + } + return nil +} + +func GetContainerEnvValue(container *v1.Container, key string) string { + if container == nil { + return "" + } + for i, e := range container.Env { + if e.Name == key { + return container.Env[i].Value + } + } + return "" +} + +func GetContainerVolumeMount(container *v1.Container, key string) *v1.VolumeMount { + if container == nil { + return nil + } + for i, m := range container.VolumeMounts { + if m.MountPath == key { + return &container.VolumeMounts[i] + } + } + return nil +} + 
+func GetContainer(name string, pod *v1.Pod) *v1.Container { + if pod == nil { + return nil + } + for i := range pod.Spec.InitContainers { + v := &pod.Spec.InitContainers[i] + if v.Name == name { + return v + } + } + + for i := range pod.Spec.Containers { + v := &pod.Spec.Containers[i] + if v.Name == name { + return v + } + } + return nil +} + +func GetContainerStatus(name string, pod *v1.Pod) *v1.ContainerStatus { + if pod == nil { + return nil + } + for i := range pod.Status.ContainerStatuses { + v := &pod.Status.ContainerStatuses[i] + if v.Name == name { + return v + } + } + return nil +} + +func GetPodVolume(pod *v1.Pod, volumeName string) *v1.Volume { + for idx, v := range pod.Spec.Volumes { + if v.Name == volumeName { + return &pod.Spec.Volumes[idx] + } + } + return nil +} + +func IsRunningAndReady(pod *v1.Pod) bool { + return pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod) && pod.DeletionTimestamp.IsZero() +} + +func GetPodContainerImageIDs(pod *v1.Pod) map[string]string { + cImageIDs := make(map[string]string, len(pod.Status.ContainerStatuses)) + for i := range pod.Status.ContainerStatuses { + c := &pod.Status.ContainerStatuses[i] + //ImageID format: docker-pullable://busybox@sha256:a9286defaba7b3a519d585ba0e37d0b2cbee74ebfe590960b0b1d6a5e97d1e1d + imageID := c.ImageID + if strings.Contains(imageID, "://") { + imageID = strings.Split(imageID, "://")[1] + } + cImageIDs[c.Name] = imageID + } + return cImageIDs +} + +func IsPodContainerDigestEqual(containers sets.String, pod *v1.Pod) bool { + cImageIDs := GetPodContainerImageIDs(pod) + + for _, container := range pod.Spec.Containers { + if !containers.Has(container.Name) { + continue + } + // image must be digest format + if !IsImageDigest(container.Image) { + return false + } + imageID, ok := cImageIDs[container.Name] + if !ok { + return false + } + if !IsContainerImageEqual(container.Image, imageID) { + return false + } + } + return true +} + +func MergeVolumeMountsInContainer(origin 
*v1.Container, other v1.Container) { + mountExist := make(map[string]bool) + for _, volume := range origin.VolumeMounts { + mountExist[volume.MountPath] = true + + } + + for _, volume := range other.VolumeMounts { + if mountExist[volume.MountPath] { + continue + } + + origin.VolumeMounts = append(origin.VolumeMounts, volume) + } +} + +func IsPodOwnedByKruise(pod *v1.Pod) bool { + ownerRef := metav1.GetControllerOf(pod) + if ownerRef == nil { + return false + } + gv, _ := schema.ParseGroupVersion(ownerRef.APIVersion) + return gv.Group == workloadv1alpha1.GroupVersion.Group +} + +func InjectReadinessGateToPod(pod *v1.Pod, conditionType v1.PodConditionType) { + for _, g := range pod.Spec.ReadinessGates { + if g.ConditionType == conditionType { + return + } + } + pod.Spec.ReadinessGates = append(pod.Spec.ReadinessGates, v1.PodReadinessGate{ConditionType: conditionType}) +} + +func ContainsObjectRef(slice []v1.ObjectReference, obj v1.ObjectReference) bool { + for _, o := range slice { + if o.UID == obj.UID { + return true + } + } + return false +} + +func GetCondition(pod *v1.Pod, cType v1.PodConditionType) *v1.PodCondition { + if pod == nil { + return nil + } + for _, c := range pod.Status.Conditions { + if c.Type == cType { + return &c + } + } + return nil +} + +func SetPodCondition(pod *v1.Pod, condition v1.PodCondition) { + for i, c := range pod.Status.Conditions { + if c.Type == condition.Type { + if c.Status != condition.Status { + pod.Status.Conditions[i] = condition + } + return + } + } + pod.Status.Conditions = append(pod.Status.Conditions, condition) +} + +func SetPodConditionIfMsgChanged(pod *v1.Pod, condition v1.PodCondition) { + for i, c := range pod.Status.Conditions { + if c.Type == condition.Type { + if c.Status != condition.Status || c.Message != condition.Message { + pod.Status.Conditions[i] = condition + } + return + } + } + pod.Status.Conditions = append(pod.Status.Conditions, condition) +} + +func SetPodReadyCondition(pod *v1.Pod) { + podReady := 
GetCondition(pod, v1.PodReady) + if podReady == nil { + return + } + + containersReady := GetCondition(pod, v1.ContainersReady) + if containersReady == nil || containersReady.Status != v1.ConditionTrue { + return + } + + var unreadyMessages []string + for _, rg := range pod.Spec.ReadinessGates { + c := GetCondition(pod, rg.ConditionType) + if c == nil { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType))) + } else if c.Status != v1.ConditionTrue { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", string(rg.ConditionType), c.Status)) + } + } + + newPodReady := v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionTrue, + LastTransitionTime: metav1.Now(), + } + // Set "Ready" condition to "False" if any readiness gate is not ready. + if len(unreadyMessages) != 0 { + unreadyMessage := strings.Join(unreadyMessages, ", ") + newPodReady = v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionFalse, + Reason: "ReadinessGatesNotReady", + Message: unreadyMessage, + } + } + + SetPodCondition(pod, newPodReady) +} + +func ExtractPort(param intstr.IntOrString, container v1.Container) (int, error) { + port := -1 + var err error + switch param.Type { + case intstr.Int: + port = param.IntValue() + case intstr.String: + if port, err = findPortByName(container, param.StrVal); err != nil { + // Last ditch effort - maybe it was an int stored as string? + klog.ErrorS(err, "failed to find port by name") + if port, err = strconv.Atoi(param.StrVal); err != nil { + return port, err + } + } + default: + return port, fmt.Errorf("intOrString had no kind: %+v", param) + } + if port > 0 && port < 65536 { + return port, nil + } + return port, fmt.Errorf("invalid port number: %v", port) +} + +// findPortByName is a helper function to look up a port in a container by name. 
+func findPortByName(container v1.Container, portName string) (int, error) { + for _, port := range container.Ports { + if port.Name == portName { + return int(port.ContainerPort), nil + } + } + return 0, fmt.Errorf("port %s not found", portName) +} + +func GetPodContainerByName(cName string, pod *v1.Pod) *v1.Container { + for _, container := range pod.Spec.Containers { + if cName == container.Name { + return &container + } + } + + return nil +} + +// IsRestartableInitContainer returns true if the initContainer has +// ContainerRestartPolicyAlways. +func IsRestartableInitContainer(initContainer *v1.Container) bool { + if initContainer.RestartPolicy == nil { + return false + } + + return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways +} + +func HasPodScheduled(pod *v1.Pod) bool { + return len(pod.Spec.NodeName) > 0 +} diff --git a/pkg/controllers/workload/v1alpha1/utils/util/selector.go b/pkg/controllers/workload/v1alpha1/utils/util/selector.go new file mode 100644 index 00000000000..03e674e2c71 --- /dev/null +++ b/pkg/controllers/workload/v1alpha1/utils/util/selector.go @@ -0,0 +1,279 @@ +/* +Copyright 2020 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/

package util

import (
	"fmt"
	"reflect"
	"unsafe"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/kubernetes/pkg/util/slice"
)

// IsSelectorOverlapping indicates whether selector overlaps, the criteria:
// if exist one same key has different value and not overlap, then it is judged non-overlap, for examples:
// - a=b and a=c
// - a in [b,c] and a not in [b,c...]
// - a not in [b] and a not exist
// - a=b,c=d,e=f and a=x,c=d,e=f
//
// then others is overlap:
// - a=b and c=d
func IsSelectorOverlapping(selector1, selector2 *metav1.LabelSelector) bool {
	// Two selectors overlap iff neither excludes the other.
	return !(isDisjoint(selector1, selector2) || isDisjoint(selector2, selector1))
}

// isDisjoint reports whether selector2 contains a requirement that
// provably excludes everything selector1 can match. It is directional:
// IsSelectorOverlapping calls it both ways.
func isDisjoint(selector1, selector2 *metav1.LabelSelector) bool {
	// label -> values
	// a=b convert to a -> [b]
	// a in [b,c] convert to a -> [b,c]
	// a exist convert to a -> [ALL]
	//
	// Note: only In and Exists expressions of selector1 are recorded here;
	// the "ALL" string is a sentinel meaning "any value".
	matchedLabels1 := make(map[string][]string)
	for key, value := range selector1.MatchLabels {
		matchedLabels1[key] = []string{value}
	}
	for _, req := range selector1.MatchExpressions {
		switch req.Operator {
		case metav1.LabelSelectorOpIn:
			for _, value := range req.Values {
				matchedLabels1[req.Key] = append(matchedLabels1[req.Key], value)
			}
		case metav1.LabelSelectorOpExists:
			matchedLabels1[req.Key] = []string{"ALL"}
		}
	}

	// An equality in selector2 whose value selector1 cannot produce for
	// the same key proves disjointness.
	for key, value := range selector2.MatchLabels {
		values, ok := matchedLabels1[key]
		if ok {
			if !slice.ContainsString(values, "ALL", nil) && !slice.ContainsString(values, value, nil) {
				return true
			}
		}
	}
	for _, req := range selector2.MatchExpressions {
		values, ok := matchedLabels1[req.Key]

		switch req.Operator {
		case metav1.LabelSelectorOpIn:
			// selector1 pins the key to values selector2 does not accept.
			if ok && !slice.ContainsString(values, "ALL", nil) && !sliceOverlaps(values, req.Values) {
				return true
			}
		case metav1.LabelSelectorOpNotIn:
			// Every value selector1 can produce is forbidden here.
			if ok && sliceContains(req.Values, values) {
				return true
			}
		case metav1.LabelSelectorOpExists:
			// selector2 requires the key which selector1 never sets.
			if !ok {
				return true
			}
		case metav1.LabelSelectorOpDoesNotExist:
			// selector2 forbids a key selector1 always sets.
			if ok {
				return true
			}
		}
	}

	return false
}

// sliceOverlaps reports whether a and b share at least one element.
func sliceOverlaps(a, b []string) bool {
	keyExist := make(map[string]bool, len(a))
	for _, key := range a {
		keyExist[key] = true
	}
	for _, key := range b {
		if keyExist[key] {
			return true
		}
	}
	return false
}

// a contains b
func sliceContains(a, b []string) bool {
	keyExist := make(map[string]bool, len(a))
	for _, key := range a {
		keyExist[key] = true
	}
	for _, key := range b {
		if !keyExist[key] {
			return false
		}
	}
	return true
}

// ValidatedLabelSelectorAsSelector is faster than native `metav1.LabelSelectorAsSelector` for the newRequirement function
// performs no validation. MAKE SURE the `ps` param is validated with `metav1.LabelSelectorAsSelector` before.
func ValidatedLabelSelectorAsSelector(ps *metav1.LabelSelector) (labels.Selector, error) {
	if ps == nil {
		return labels.Nothing(), nil
	}
	if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {
		return labels.Everything(), nil
	}

	selector := labels.NewSelector()
	for k, v := range ps.MatchLabels {
		r, err := newRequirement(k, selection.Equals, []string{v})
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	for _, expr := range ps.MatchExpressions {
		var op selection.Operator
		switch expr.Operator {
		case metav1.LabelSelectorOpIn:
			op = selection.In
		case metav1.LabelSelectorOpNotIn:
			op = selection.NotIn
		case metav1.LabelSelectorOpExists:
			op = selection.Exists
		case metav1.LabelSelectorOpDoesNotExist:
			op = selection.DoesNotExist
		default:
			return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator)
		}
		// Values are copied so the Requirement does not alias the caller's slice.
		r, err := newRequirement(expr.Key, op, append([]string(nil), expr.Values...))
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	return selector, nil
}

// newRequirement builds a labels.Requirement without running the
// validation labels.NewRequirement performs, by writing the unexported
// fields "key", "operator" and "strValues" through reflect + unsafe.
//
// NOTE(review): this silently depends on the private field layout of
// labels.Requirement in k8s.io/apimachinery — confirm these field names
// still exist whenever that dependency is upgraded, since FieldByName on
// a missing field would panic via UnsafeAddr.
func newRequirement(key string, op selection.Operator, vals []string) (*labels.Requirement, error) {
	sel := &labels.Requirement{}
	selVal := reflect.ValueOf(sel)
	val := reflect.Indirect(selVal)

	keyField := val.FieldByName("key")
	keyFieldPtr := (*string)(unsafe.Pointer(keyField.UnsafeAddr()))
	*keyFieldPtr = key

	opField := val.FieldByName("operator")
	opFieldPtr := (*selection.Operator)(unsafe.Pointer(opField.UnsafeAddr()))
	*opFieldPtr = op

	if len(vals) > 0 {
		valuesField := val.FieldByName("strValues")
		valuesFieldPtr := (*[]string)(unsafe.Pointer(valuesField.UnsafeAddr()))
		*valuesFieldPtr = vals
	}

	return sel, nil
}

// IsSelectorLooseOverlap indicates whether selectors overlap (indicates that selector1, selector2 have same key, and there is a certain intersection)
// 1. when selector1、selector2 don't have same key, it is considered non-overlap, e.g. selector1(a=b) and selector2(c=d)
// 2. when selector1、selector2 have same key, and matchLabels & matchExps are intersection, it is considered overlap.
// For examples:
//
// a In [b,c] And a Exist
// a In [b,...] [c,...] [Include any b,c,...]
// a NotIn [a,...] [b,....] [c,....] [All other cases are allowed except for the inclusion of both b,c...] [b,c,e]
// a Exist And a Exist
// a In [x,y,Any,...]
// a NotIn [a,b,Any...]
// a NotIn [b,c] And a Exist
// a NotExist
// a NotIn [a,b,Any...]
// a In [a,b] [a,c] [e,f] [Any,...] other than [b],[c],[b,c]
// a NotExist And a NotExist
// a NotIn [Any,...]
// When selector1 and selector2 contain the same key, except for the above case, they are considered non-overlap
func IsSelectorLooseOverlap(selector1, selector2 *metav1.LabelSelector) bool {
	matchExp1 := convertSelectorToMatchExpressions(selector1)
	matchExp2 := convertSelectorToMatchExpressions(selector2)

	// Every key of selector1 must exist in selector2 and pairwise overlap...
	for k, exp1 := range matchExp1 {
		exp2, ok := matchExp2[k]
		if !ok {
			return false
		}

		if !isMatchExpOverlap(exp1, exp2) {
			return false
		}
	}

	// ...and symmetrically in the other direction.
	for k, exp2 := range matchExp2 {
		exp1, ok := matchExp1[k]
		if !ok {
			return false
		}

		if !isMatchExpOverlap(exp2, exp1) {
			return false
		}
	}

	return true
}

// isMatchExpOverlap reports whether two requirements on the same key can
// be satisfied by at least one common label value, per the operator table
// documented on IsSelectorLooseOverlap.
func isMatchExpOverlap(matchExp1, matchExp2 metav1.LabelSelectorRequirement) bool {
	switch matchExp1.Operator {
	case metav1.LabelSelectorOpIn:
		if matchExp2.Operator == metav1.LabelSelectorOpExists {
			return true
		} else if matchExp2.Operator == metav1.LabelSelectorOpIn && sliceOverlaps(matchExp2.Values, matchExp1.Values) {
			return true
		} else if matchExp2.Operator == metav1.LabelSelectorOpNotIn && !sliceContains(matchExp2.Values, matchExp1.Values) {
			return true
		}
	case metav1.LabelSelectorOpExists:
		if matchExp2.Operator == metav1.LabelSelectorOpIn || matchExp2.Operator == metav1.LabelSelectorOpNotIn ||
			matchExp2.Operator == metav1.LabelSelectorOpExists {
			return true
		}
	case metav1.LabelSelectorOpNotIn:
		if matchExp2.Operator == metav1.LabelSelectorOpExists || matchExp2.Operator == metav1.LabelSelectorOpDoesNotExist ||
			matchExp2.Operator == metav1.LabelSelectorOpNotIn {
			return true
		} else if matchExp2.Operator == metav1.LabelSelectorOpIn && !sliceContains(matchExp1.Values, matchExp2.Values) {
			return true
		}
	case metav1.LabelSelectorOpDoesNotExist:
		if matchExp2.Operator == metav1.LabelSelectorOpDoesNotExist || matchExp2.Operator == metav1.LabelSelectorOpNotIn {
			return true
		}
	}

	return false
}

// convertSelectorToMatchExpressions flattens a selector into one
// requirement per key; MatchLabels entries become In requirements with a
// single value. MatchLabels overwrite a MatchExpressions entry on the
// same key.
//
// NOTE(review): the synthesized requirements leave the Key field unset —
// only the map key carries it; callers here never read exp.Key, but
// confirm before reusing elsewhere.
func convertSelectorToMatchExpressions(selector *metav1.LabelSelector) map[string]metav1.LabelSelectorRequirement {
	matchExps := map[string]metav1.LabelSelectorRequirement{}
	for _, exp := range selector.MatchExpressions {
		matchExps[exp.Key] = exp
	}

	for k, v := range selector.MatchLabels {
		matchExps[k] = metav1.LabelSelectorRequirement{
			Operator: metav1.LabelSelectorOpIn,
			Values:   []string{v},
		}
	}

	return matchExps
}
+type ComponentStatusInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ComponentStatusLister +} + +type componentStatusInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ComponentStatuses().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ComponentStatuses().Watch(context.TODO(), options) + }, + }, + &corev1.ComponentStatus{}, + resyncPeriod, + indexers, + ) +} + +func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.ComponentStatus{}, f.defaultInformer) +} + +func (f *componentStatusInformer) Lister() v1.ComponentStatusLister { + return v1.NewComponentStatusLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/vendor/k8s.io/client-go/informers/core/v1/configmap.go new file mode 100644 index 00000000000..6253581784e --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ConfigMapInformer provides access to a shared informer and lister for +// ConfigMaps. +type ConfigMapInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ConfigMapLister +} + +type configMapInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ConfigMaps(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.ConfigMap{}, + resyncPeriod, + indexers, + ) +} + +func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *configMapInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.ConfigMap{}, f.defaultInformer) +} + +func (f *configMapInformer) Lister() v1.ConfigMapLister { + return v1.NewConfigMapLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go new file mode 100644 index 00000000000..cd0f25b7f70 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// EndpointsInformer provides access to a shared informer and lister for +// Endpoints. +type EndpointsInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EndpointsLister +} + +type endpointsInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Endpoints(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Endpoints(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.Endpoints{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointsInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Endpoints{}, f.defaultInformer) +} + +func (f *endpointsInformer) Lister() v1.EndpointsLister { + return v1.NewEndpointsLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/event.go b/vendor/k8s.io/client-go/informers/core/v1/event.go new file mode 100644 index 00000000000..8825e9b7a49 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// EventInformer provides access to a shared informer and lister for +// Events. +type EventInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EventLister +} + +type eventInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Events(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Events(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.Event{}, + resyncPeriod, + indexers, + ) +} + +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *eventInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Event{}, f.defaultInformer) +} + +func (f *eventInformer) Lister() v1.EventLister { + return v1.NewEventLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/interface.go b/vendor/k8s.io/client-go/informers/core/v1/interface.go new file mode 100644 index 00000000000..b2216a05c88 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/interface.go @@ -0,0 +1,150 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ComponentStatuses returns a ComponentStatusInformer. + ComponentStatuses() ComponentStatusInformer + // ConfigMaps returns a ConfigMapInformer. + ConfigMaps() ConfigMapInformer + // Endpoints returns a EndpointsInformer. + Endpoints() EndpointsInformer + // Events returns a EventInformer. + Events() EventInformer + // LimitRanges returns a LimitRangeInformer. + LimitRanges() LimitRangeInformer + // Namespaces returns a NamespaceInformer. + Namespaces() NamespaceInformer + // Nodes returns a NodeInformer. + Nodes() NodeInformer + // PersistentVolumes returns a PersistentVolumeInformer. + PersistentVolumes() PersistentVolumeInformer + // PersistentVolumeClaims returns a PersistentVolumeClaimInformer. + PersistentVolumeClaims() PersistentVolumeClaimInformer + // Pods returns a PodInformer. + Pods() PodInformer + // PodTemplates returns a PodTemplateInformer. + PodTemplates() PodTemplateInformer + // ReplicationControllers returns a ReplicationControllerInformer. + ReplicationControllers() ReplicationControllerInformer + // ResourceQuotas returns a ResourceQuotaInformer. + ResourceQuotas() ResourceQuotaInformer + // Secrets returns a SecretInformer. + Secrets() SecretInformer + // Services returns a ServiceInformer. + Services() ServiceInformer + // ServiceAccounts returns a ServiceAccountInformer. 
+ ServiceAccounts() ServiceAccountInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ComponentStatuses returns a ComponentStatusInformer. +func (v *version) ComponentStatuses() ComponentStatusInformer { + return &componentStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ConfigMaps returns a ConfigMapInformer. +func (v *version) ConfigMaps() ConfigMapInformer { + return &configMapInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Endpoints returns a EndpointsInformer. +func (v *version) Endpoints() EndpointsInformer { + return &endpointsInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Events returns a EventInformer. +func (v *version) Events() EventInformer { + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// LimitRanges returns a LimitRangeInformer. +func (v *version) LimitRanges() LimitRangeInformer { + return &limitRangeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Namespaces returns a NamespaceInformer. +func (v *version) Namespaces() NamespaceInformer { + return &namespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Nodes returns a NodeInformer. +func (v *version) Nodes() NodeInformer { + return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PersistentVolumes returns a PersistentVolumeInformer. 
+func (v *version) PersistentVolumes() PersistentVolumeInformer { + return &persistentVolumeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PersistentVolumeClaims returns a PersistentVolumeClaimInformer. +func (v *version) PersistentVolumeClaims() PersistentVolumeClaimInformer { + return &persistentVolumeClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Pods returns a PodInformer. +func (v *version) Pods() PodInformer { + return &podInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodTemplates returns a PodTemplateInformer. +func (v *version) PodTemplates() PodTemplateInformer { + return &podTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicationControllers returns a ReplicationControllerInformer. +func (v *version) ReplicationControllers() ReplicationControllerInformer { + return &replicationControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceQuotas returns a ResourceQuotaInformer. +func (v *version) ResourceQuotas() ResourceQuotaInformer { + return &resourceQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Secrets returns a SecretInformer. +func (v *version) Secrets() SecretInformer { + return &secretInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Services returns a ServiceInformer. +func (v *version) Services() ServiceInformer { + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServiceAccounts returns a ServiceAccountInformer. 
+func (v *version) ServiceAccounts() ServiceAccountInformer { + return &serviceAccountInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go new file mode 100644 index 00000000000..4cbfda1f7a6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// LimitRangeInformer provides access to a shared informer and lister for +// LimitRanges. +type LimitRangeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.LimitRangeLister +} + +type limitRangeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLimitRangeInformer constructs a new informer for LimitRange type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().LimitRanges(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().LimitRanges(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.LimitRange{}, + resyncPeriod, + indexers, + ) +} + +func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.LimitRange{}, f.defaultInformer) +} + +func (f *limitRangeInformer) Lister() v1.LimitRangeLister { + return 
v1.NewLimitRangeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/vendor/k8s.io/client-go/informers/core/v1/namespace.go new file mode 100644 index 00000000000..506f930a7d5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// NamespaceInformer provides access to a shared informer and lister for +// Namespaces. +type NamespaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NamespaceLister +} + +type namespaceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Namespaces().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Namespaces().Watch(context.TODO(), options) + }, + }, + &corev1.Namespace{}, + resyncPeriod, + indexers, + ) +} + +func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *namespaceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Namespace{}, f.defaultInformer) +} + +func (f *namespaceInformer) Lister() v1.NamespaceLister { + return v1.NewNamespaceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/node.go b/vendor/k8s.io/client-go/informers/core/v1/node.go new file mode 100644 index 00000000000..9939fc2cb6c --- /dev/null +++ 
b/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// NodeInformer provides access to a shared informer and lister for +// Nodes. +type NodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NodeLister +} + +type nodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNodeInformer constructs a new informer for Node type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Nodes().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Nodes().Watch(context.TODO(), options) + }, + }, + &corev1.Node{}, + resyncPeriod, + indexers, + ) +} + +func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *nodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Node{}, f.defaultInformer) +} + +func (f *nodeInformer) Lister() v1.NodeLister { + return v1.NewNodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go new file mode 100644 index 00000000000..c82445997ca --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeInformer provides access to a shared informer and lister for +// PersistentVolumes. +type PersistentVolumeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PersistentVolumeLister +} + +type persistentVolumeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumes().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumes().Watch(context.TODO(), options) + }, + }, + &corev1.PersistentVolume{}, + resyncPeriod, + indexers, + ) +} + +func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.PersistentVolume{}, f.defaultInformer) +} + +func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister { + return v1.NewPersistentVolumeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go new file mode 100644 index 00000000000..7a7df1cff81 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeClaimInformer provides access to a shared informer and lister for +// PersistentVolumeClaims. +type PersistentVolumeClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PersistentVolumeClaimLister +} + +type persistentVolumeClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumeClaims(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.PersistentVolumeClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.PersistentVolumeClaim{}, f.defaultInformer) +} + +func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister { + return v1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/pod.go b/vendor/k8s.io/client-go/informers/core/v1/pod.go new file mode 100644 index 00000000000..5c713a9b6f4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// PodInformer provides access to a shared informer and lister for +// Pods. +type PodInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodLister +} + +type podInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Pods(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Pods(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.Pod{}, + resyncPeriod, + indexers, + ) +} + +func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Pod{}, f.defaultInformer) +} + +func (f *podInformer) Lister() v1.PodLister { + return v1.NewPodLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go new file mode 100644 index 00000000000..2a16e910dbe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// PodTemplateInformer provides access to a shared informer and lister for +// PodTemplates. +type PodTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodTemplateLister +} + +type podTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodTemplateInformer constructs a new informer for PodTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PodTemplates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PodTemplates(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.PodTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.PodTemplate{}, f.defaultInformer) +} + +func (f *podTemplateInformer) Lister() v1.PodTemplateLister { + return v1.NewPodTemplateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go new file mode 100644 index 00000000000..930beb4cd5f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ReplicationControllerInformer provides access to a shared informer and lister for +// ReplicationControllers. +type ReplicationControllerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ReplicationControllerLister +} + +type replicationControllerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicationControllerInformer constructs a new informer for ReplicationController type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ReplicationControllers(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.ReplicationController{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.ReplicationController{}, f.defaultInformer) +} + +func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister { + return v1.NewReplicationControllerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go new file mode 100644 index 00000000000..619262a6128 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceQuotaInformer provides access to a shared informer and lister for +// ResourceQuotas. +type ResourceQuotaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ResourceQuotaLister +} + +type resourceQuotaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ResourceQuotas(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ResourceQuotas(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.ResourceQuota{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.ResourceQuota{}, f.defaultInformer) +} + +func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister { + return v1.NewResourceQuotaLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/secret.go b/vendor/k8s.io/client-go/informers/core/v1/secret.go new file mode 100644 index 00000000000..a6be0706930 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// SecretInformer provides access to a shared informer and lister for +// Secrets. +type SecretInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.SecretLister +} + +type secretInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSecretInformer constructs a new informer for Secret type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Secrets(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.Secret{}, + resyncPeriod, + indexers, + ) +} + +func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *secretInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Secret{}, f.defaultInformer) +} + +func (f *secretInformer) Lister() v1.SecretLister { + return v1.NewSecretLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/service.go b/vendor/k8s.io/client-go/informers/core/v1/service.go new file mode 100644 index 00000000000..3d9ecc6e954 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ServiceInformer provides access to a shared informer and lister for +// Services. +type ServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceLister +} + +type serviceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Services(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Services(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.Service{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.Service{}, f.defaultInformer) +} + +func (f *serviceInformer) Lister() v1.ServiceLister { + return v1.NewServiceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go new file mode 100644 index 00000000000..44371c9fa4f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ServiceAccountInformer provides access to a shared informer and lister for +// ServiceAccounts. +type ServiceAccountInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceAccountLister +} + +type serviceAccountInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ServiceAccounts(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ServiceAccounts(namespace).Watch(context.TODO(), options) + }, + }, + &corev1.ServiceAccount{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1.ServiceAccount{}, f.defaultInformer) +} + +func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister { + return v1.NewServiceAccountLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000000..b00ed70cfdb --- /dev/null +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + kubernetes "k8s.io/client-go/kubernetes" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes kubernetes.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go new file mode 100644 index 00000000000..9e2f973746b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +// All objects returned here must be treated as read-only. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. 
+func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +// All objects returned here must be treated as read-only. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. +type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. 
+func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) + } + return obj.(*v1.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go new file mode 100644 index 00000000000..061959e3daf --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +// All objects returned here must be treated as read-only. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. 
+type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +// All objects returned here must be treated as read-only. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. 
+func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("daemonset"), name) + } + return obj.(*v1.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go new file mode 100644 index 00000000000..667d6fb88ea --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. +type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. 
+// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) { + var selector labels.Selector + var daemonSet *apps.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // This object has an invalid selector, it does not match the pod + continue + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. 
+func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + // This object has an invalid selector, it does not match the history + continue + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go new file mode 100644 index 00000000000..77040341729 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +// All objects returned here must be treated as read-only. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. +func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +// All objects returned here must be treated as read-only. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("deployment"), name) + } + return obj.(*v1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go new file mode 100644 index 00000000000..0c357589d07 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface{} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go new file mode 100644 index 00000000000..3ca7757eb94 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. +// All objects returned here must be treated as read-only. +type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +// All objects returned here must be treated as read-only. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. 
+func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicaset"), name) + } + return obj.(*v1.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go new file mode 100644 index 00000000000..8e093de0a01 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go @@ -0,0 +1,74 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. 
+type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + // This object has an invalid selector, it does not match the pod + continue + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go new file mode 100644 index 00000000000..f6899d5ff9b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +// All objects returned here must be treated as read-only. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. +func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. 
+// All objects returned here must be treated as read-only. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. +func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("statefulset"), name) + } + return obj.(*v1.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go new file mode 100644 index 00000000000..e79f8a2b46a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. +func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + // This object has an invalid selector, it does not match the pod + continue + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go new file mode 100644 index 00000000000..5fcdac3c764 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ComponentStatusLister helps list ComponentStatuses. +// All objects returned here must be treated as read-only. +type ComponentStatusLister interface { + // List lists all ComponentStatuses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) + // Get retrieves the ComponentStatus from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.ComponentStatus, error) + ComponentStatusListerExpansion +} + +// componentStatusLister implements the ComponentStatusLister interface. +type componentStatusLister struct { + indexer cache.Indexer +} + +// NewComponentStatusLister returns a new ComponentStatusLister. +func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { + return &componentStatusLister{indexer: indexer} +} + +// List lists all ComponentStatuses in the indexer. +func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ComponentStatus)) + }) + return ret, err +} + +// Get retrieves the ComponentStatus from the index for a given name. +func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("componentstatus"), name) + } + return obj.(*v1.ComponentStatus), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go new file mode 100644 index 00000000000..6a410e47c4a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ConfigMapLister helps list ConfigMaps. +// All objects returned here must be treated as read-only. +type ConfigMapLister interface { + // List lists all ConfigMaps in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // ConfigMaps returns an object that can list and get ConfigMaps. + ConfigMaps(namespace string) ConfigMapNamespaceLister + ConfigMapListerExpansion +} + +// configMapLister implements the ConfigMapLister interface. +type configMapLister struct { + indexer cache.Indexer +} + +// NewConfigMapLister returns a new ConfigMapLister. +func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { + return &configMapLister{indexer: indexer} +} + +// List lists all ConfigMaps in the indexer. +func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// ConfigMaps returns an object that can list and get ConfigMaps. +func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { + return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigMapNamespaceLister helps list and get ConfigMaps. +// All objects returned here must be treated as read-only. +type ConfigMapNamespaceLister interface { + // List lists all ConfigMaps in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // Get retrieves the ConfigMap from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.ConfigMap, error) + ConfigMapNamespaceListerExpansion +} + +// configMapNamespaceLister implements the ConfigMapNamespaceLister +// interface. +type configMapNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ConfigMaps in the indexer for a given namespace. +func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// Get retrieves the ConfigMap from the indexer for a given namespace and name. +func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("configmap"), name) + } + return obj.(*v1.ConfigMap), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go new file mode 100644 index 00000000000..4759ce808fe --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointsLister helps list Endpoints. +// All objects returned here must be treated as read-only. +type EndpointsLister interface { + // List lists all Endpoints in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Endpoints returns an object that can list and get Endpoints. + Endpoints(namespace string) EndpointsNamespaceLister + EndpointsListerExpansion +} + +// endpointsLister implements the EndpointsLister interface. +type endpointsLister struct { + indexer cache.Indexer +} + +// NewEndpointsLister returns a new EndpointsLister. +func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { + return &endpointsLister{indexer: indexer} +} + +// List lists all Endpoints in the indexer. +func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Endpoints returns an object that can list and get Endpoints. +func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { + return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointsNamespaceLister helps list and get Endpoints. +// All objects returned here must be treated as read-only. +type EndpointsNamespaceLister interface { + // List lists all Endpoints in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Get retrieves the Endpoints from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.Endpoints, error) + EndpointsNamespaceListerExpansion +} + +// endpointsNamespaceLister implements the EndpointsNamespaceLister +// interface. +type endpointsNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Endpoints in the indexer for a given namespace. +func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Get retrieves the Endpoints from the indexer for a given namespace and name. +func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("endpoints"), name) + } + return obj.(*v1.Endpoints), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go new file mode 100644 index 00000000000..4416e20120b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +// All objects returned here must be treated as read-only. +type EventLister interface { + // List lists all Events in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. +func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +// All objects returned here must be treated as read-only. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. 
+type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("event"), name) + } + return obj.(*v1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go new file mode 100644 index 00000000000..2168a7f4834 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go @@ -0,0 +1,123 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ComponentStatusListerExpansion allows custom methods to be added to +// ComponentStatusLister. +type ComponentStatusListerExpansion interface{} + +// ConfigMapListerExpansion allows custom methods to be added to +// ConfigMapLister. 
+type ConfigMapListerExpansion interface{} + +// ConfigMapNamespaceListerExpansion allows custom methods to be added to +// ConfigMapNamespaceLister. +type ConfigMapNamespaceListerExpansion interface{} + +// EndpointsListerExpansion allows custom methods to be added to +// EndpointsLister. +type EndpointsListerExpansion interface{} + +// EndpointsNamespaceListerExpansion allows custom methods to be added to +// EndpointsNamespaceLister. +type EndpointsNamespaceListerExpansion interface{} + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaceLister. +type EventNamespaceListerExpansion interface{} + +// LimitRangeListerExpansion allows custom methods to be added to +// LimitRangeLister. +type LimitRangeListerExpansion interface{} + +// LimitRangeNamespaceListerExpansion allows custom methods to be added to +// LimitRangeNamespaceLister. +type LimitRangeNamespaceListerExpansion interface{} + +// NamespaceListerExpansion allows custom methods to be added to +// NamespaceLister. +type NamespaceListerExpansion interface{} + +// NodeListerExpansion allows custom methods to be added to +// NodeLister. +type NodeListerExpansion interface{} + +// PersistentVolumeListerExpansion allows custom methods to be added to +// PersistentVolumeLister. +type PersistentVolumeListerExpansion interface{} + +// PersistentVolumeClaimListerExpansion allows custom methods to be added to +// PersistentVolumeClaimLister. +type PersistentVolumeClaimListerExpansion interface{} + +// PersistentVolumeClaimNamespaceListerExpansion allows custom methods to be added to +// PersistentVolumeClaimNamespaceLister. +type PersistentVolumeClaimNamespaceListerExpansion interface{} + +// PodListerExpansion allows custom methods to be added to +// PodLister. 
+type PodListerExpansion interface{} + +// PodNamespaceListerExpansion allows custom methods to be added to +// PodNamespaceLister. +type PodNamespaceListerExpansion interface{} + +// PodTemplateListerExpansion allows custom methods to be added to +// PodTemplateLister. +type PodTemplateListerExpansion interface{} + +// PodTemplateNamespaceListerExpansion allows custom methods to be added to +// PodTemplateNamespaceLister. +type PodTemplateNamespaceListerExpansion interface{} + +// ResourceQuotaListerExpansion allows custom methods to be added to +// ResourceQuotaLister. +type ResourceQuotaListerExpansion interface{} + +// ResourceQuotaNamespaceListerExpansion allows custom methods to be added to +// ResourceQuotaNamespaceLister. +type ResourceQuotaNamespaceListerExpansion interface{} + +// SecretListerExpansion allows custom methods to be added to +// SecretLister. +type SecretListerExpansion interface{} + +// SecretNamespaceListerExpansion allows custom methods to be added to +// SecretNamespaceLister. +type SecretNamespaceListerExpansion interface{} + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface{} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} + +// ServiceAccountListerExpansion allows custom methods to be added to +// ServiceAccountLister. +type ServiceAccountListerExpansion interface{} + +// ServiceAccountNamespaceListerExpansion allows custom methods to be added to +// ServiceAccountNamespaceLister. +type ServiceAccountNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go new file mode 100644 index 00000000000..d8fa569cd38 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// LimitRangeLister helps list LimitRanges. +// All objects returned here must be treated as read-only. +type LimitRangeLister interface { + // List lists all LimitRanges in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // LimitRanges returns an object that can list and get LimitRanges. + LimitRanges(namespace string) LimitRangeNamespaceLister + LimitRangeListerExpansion +} + +// limitRangeLister implements the LimitRangeLister interface. +type limitRangeLister struct { + indexer cache.Indexer +} + +// NewLimitRangeLister returns a new LimitRangeLister. +func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { + return &limitRangeLister{indexer: indexer} +} + +// List lists all LimitRanges in the indexer. +func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// LimitRanges returns an object that can list and get LimitRanges. 
+func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { + return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// LimitRangeNamespaceLister helps list and get LimitRanges. +// All objects returned here must be treated as read-only. +type LimitRangeNamespaceLister interface { + // List lists all LimitRanges in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // Get retrieves the LimitRange from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.LimitRange, error) + LimitRangeNamespaceListerExpansion +} + +// limitRangeNamespaceLister implements the LimitRangeNamespaceLister +// interface. +type limitRangeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all LimitRanges in the indexer for a given namespace. +func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// Get retrieves the LimitRange from the indexer for a given namespace and name. +func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("limitrange"), name) + } + return obj.(*v1.LimitRange), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go new file mode 100644 index 00000000000..454aa1a0a23 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NamespaceLister helps list Namespaces. +// All objects returned here must be treated as read-only. +type NamespaceLister interface { + // List lists all Namespaces in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Namespace, err error) + // Get retrieves the Namespace from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Namespace, error) + NamespaceListerExpansion +} + +// namespaceLister implements the NamespaceLister interface. +type namespaceLister struct { + indexer cache.Indexer +} + +// NewNamespaceLister returns a new NamespaceLister. +func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { + return &namespaceLister{indexer: indexer} +} + +// List lists all Namespaces in the indexer. +func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Namespace)) + }) + return ret, err +} + +// Get retrieves the Namespace from the index for a given name. 
+func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("namespace"), name) + } + return obj.(*v1.Namespace), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go new file mode 100644 index 00000000000..596049857f1 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NodeLister helps list Nodes. +// All objects returned here must be treated as read-only. +type NodeLister interface { + // List lists all Nodes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Node, err error) + // Get retrieves the Node from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Node, error) + NodeListerExpansion +} + +// nodeLister implements the NodeLister interface. +type nodeLister struct { + indexer cache.Indexer +} + +// NewNodeLister returns a new NodeLister. 
+func NewNodeLister(indexer cache.Indexer) NodeLister { + return &nodeLister{indexer: indexer} +} + +// List lists all Nodes in the indexer. +func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Node)) + }) + return ret, err +} + +// Get retrieves the Node from the index for a given name. +func (s *nodeLister) Get(name string) (*v1.Node, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("node"), name) + } + return obj.(*v1.Node), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go new file mode 100644 index 00000000000..e7dfd4ac9f6 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeLister helps list PersistentVolumes. +// All objects returned here must be treated as read-only. +type PersistentVolumeLister interface { + // List lists all PersistentVolumes in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) + // Get retrieves the PersistentVolume from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PersistentVolume, error) + PersistentVolumeListerExpansion +} + +// persistentVolumeLister implements the PersistentVolumeLister interface. +type persistentVolumeLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeLister returns a new PersistentVolumeLister. +func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { + return &persistentVolumeLister{indexer: indexer} +} + +// List lists all PersistentVolumes in the indexer. +func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolume)) + }) + return ret, err +} + +// Get retrieves the PersistentVolume from the index for a given name. +func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("persistentvolume"), name) + } + return obj.(*v1.PersistentVolume), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go new file mode 100644 index 00000000000..fc71bb5a1fd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeClaimLister helps list PersistentVolumeClaims. +// All objects returned here must be treated as read-only. +type PersistentVolumeClaimLister interface { + // List lists all PersistentVolumeClaims in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. + PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister + PersistentVolumeClaimListerExpansion +} + +// persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. +type persistentVolumeClaimLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. +func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { + return &persistentVolumeClaimLister{indexer: indexer} +} + +// List lists all PersistentVolumeClaims in the indexer. +func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. 
+func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { + return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. +// All objects returned here must be treated as read-only. +type PersistentVolumeClaimNamespaceLister interface { + // List lists all PersistentVolumeClaims in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PersistentVolumeClaim, error) + PersistentVolumeClaimNamespaceListerExpansion +} + +// persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister +// interface. +type persistentVolumeClaimNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PersistentVolumeClaims in the indexer for a given namespace. +func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. 
+func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("persistentvolumeclaim"), name) + } + return obj.(*v1.PersistentVolumeClaim), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go new file mode 100644 index 00000000000..ab8f0946c3d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodLister helps list Pods. +// All objects returned here must be treated as read-only. +type PodLister interface { + // List lists all Pods in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Pods returns an object that can list and get Pods. + Pods(namespace string) PodNamespaceLister + PodListerExpansion +} + +// podLister implements the PodLister interface. +type podLister struct { + indexer cache.Indexer +} + +// NewPodLister returns a new PodLister. 
+func NewPodLister(indexer cache.Indexer) PodLister { + return &podLister{indexer: indexer} +} + +// List lists all Pods in the indexer. +func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Pods returns an object that can list and get Pods. +func (s *podLister) Pods(namespace string) PodNamespaceLister { + return podNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodNamespaceLister helps list and get Pods. +// All objects returned here must be treated as read-only. +type PodNamespaceLister interface { + // List lists all Pods in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Get retrieves the Pod from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Pod, error) + PodNamespaceListerExpansion +} + +// podNamespaceLister implements the PodNamespaceLister +// interface. +type podNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Pods in the indexer for a given namespace. +func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Get retrieves the Pod from the indexer for a given namespace and name. 
+func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("pod"), name) + } + return obj.(*v1.Pod), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go new file mode 100644 index 00000000000..6c310045b71 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodTemplateLister helps list PodTemplates. +// All objects returned here must be treated as read-only. +type PodTemplateLister interface { + // List lists all PodTemplates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // PodTemplates returns an object that can list and get PodTemplates. + PodTemplates(namespace string) PodTemplateNamespaceLister + PodTemplateListerExpansion +} + +// podTemplateLister implements the PodTemplateLister interface. 
+type podTemplateLister struct { + indexer cache.Indexer +} + +// NewPodTemplateLister returns a new PodTemplateLister. +func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { + return &podTemplateLister{indexer: indexer} +} + +// List lists all PodTemplates in the indexer. +func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// PodTemplates returns an object that can list and get PodTemplates. +func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { + return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodTemplateNamespaceLister helps list and get PodTemplates. +// All objects returned here must be treated as read-only. +type PodTemplateNamespaceLister interface { + // List lists all PodTemplates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // Get retrieves the PodTemplate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PodTemplate, error) + PodTemplateNamespaceListerExpansion +} + +// podTemplateNamespaceLister implements the PodTemplateNamespaceLister +// interface. +type podTemplateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodTemplates in the indexer for a given namespace. +func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// Get retrieves the PodTemplate from the indexer for a given namespace and name. 
+func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podtemplate"), name) + } + return obj.(*v1.PodTemplate), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go new file mode 100644 index 00000000000..e28e2ef768e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicationControllerLister helps list ReplicationControllers. +// All objects returned here must be treated as read-only. +type ReplicationControllerLister interface { + // List lists all ReplicationControllers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // ReplicationControllers returns an object that can list and get ReplicationControllers. 
+ ReplicationControllers(namespace string) ReplicationControllerNamespaceLister + ReplicationControllerListerExpansion +} + +// replicationControllerLister implements the ReplicationControllerLister interface. +type replicationControllerLister struct { + indexer cache.Indexer +} + +// NewReplicationControllerLister returns a new ReplicationControllerLister. +func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { + return &replicationControllerLister{indexer: indexer} +} + +// List lists all ReplicationControllers in the indexer. +func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// ReplicationControllers returns an object that can list and get ReplicationControllers. +func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { + return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicationControllerNamespaceLister helps list and get ReplicationControllers. +// All objects returned here must be treated as read-only. +type ReplicationControllerNamespaceLister interface { + // List lists all ReplicationControllers in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // Get retrieves the ReplicationController from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ReplicationController, error) + ReplicationControllerNamespaceListerExpansion +} + +// replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister +// interface. 
+type replicationControllerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicationControllers in the indexer for a given namespace. +func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// Get retrieves the ReplicationController from the indexer for a given namespace and name. +func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicationcontroller"), name) + } + return obj.(*v1.ReplicationController), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go new file mode 100644 index 00000000000..b031d52173a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicationControllerListerExpansion allows custom methods to be added to +// ReplicationControllerLister. +type ReplicationControllerListerExpansion interface { + GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) +} + +// ReplicationControllerNamespaceListerExpansion allows custom methods to be added to +// ReplicationControllerNamespaceLister. +type ReplicationControllerNamespaceListerExpansion interface{} + +// GetPodControllers returns a list of ReplicationControllers that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicationControllers are found. +func (s *replicationControllerLister) GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) + } + + items, err := s.ReplicationControllers(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var controllers []*v1.ReplicationController + for i := range items { + rc := items[i] + selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() + + // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + controllers = append(controllers, rc) + } + + if len(controllers) == 0 { + return nil, fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return controllers, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go new file mode 100644 index 00000000000..9c00b49d4f5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceQuotaLister helps list ResourceQuotas. +// All objects returned here must be treated as read-only. +type ResourceQuotaLister interface { + // List lists all ResourceQuotas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // ResourceQuotas returns an object that can list and get ResourceQuotas. + ResourceQuotas(namespace string) ResourceQuotaNamespaceLister + ResourceQuotaListerExpansion +} + +// resourceQuotaLister implements the ResourceQuotaLister interface. 
+type resourceQuotaLister struct { + indexer cache.Indexer +} + +// NewResourceQuotaLister returns a new ResourceQuotaLister. +func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { + return &resourceQuotaLister{indexer: indexer} +} + +// List lists all ResourceQuotas in the indexer. +func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// ResourceQuotas returns an object that can list and get ResourceQuotas. +func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { + return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceQuotaNamespaceLister helps list and get ResourceQuotas. +// All objects returned here must be treated as read-only. +type ResourceQuotaNamespaceLister interface { + // List lists all ResourceQuotas in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // Get retrieves the ResourceQuota from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ResourceQuota, error) + ResourceQuotaNamespaceListerExpansion +} + +// resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister +// interface. +type resourceQuotaNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceQuotas in the indexer for a given namespace. 
+func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// Get retrieves the ResourceQuota from the indexer for a given namespace and name. +func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("resourcequota"), name) + } + return obj.(*v1.ResourceQuota), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go new file mode 100644 index 00000000000..d386d4d5cbd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// SecretLister helps list Secrets. +// All objects returned here must be treated as read-only. +type SecretLister interface { + // List lists all Secrets in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.Secret, err error) + // Secrets returns an object that can list and get Secrets. + Secrets(namespace string) SecretNamespaceLister + SecretListerExpansion +} + +// secretLister implements the SecretLister interface. +type secretLister struct { + indexer cache.Indexer +} + +// NewSecretLister returns a new SecretLister. +func NewSecretLister(indexer cache.Indexer) SecretLister { + return &secretLister{indexer: indexer} +} + +// List lists all Secrets in the indexer. +func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Secrets returns an object that can list and get Secrets. +func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { + return secretNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// SecretNamespaceLister helps list and get Secrets. +// All objects returned here must be treated as read-only. +type SecretNamespaceLister interface { + // List lists all Secrets in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Secret, err error) + // Get retrieves the Secret from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Secret, error) + SecretNamespaceListerExpansion +} + +// secretNamespaceLister implements the SecretNamespaceLister +// interface. +type secretNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Secrets in the indexer for a given namespace. 
+func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Get retrieves the Secret from the indexer for a given namespace and name. +func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("secret"), name) + } + return obj.(*v1.Secret), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go new file mode 100644 index 00000000000..51026d7b4b6 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceLister helps list Services. +// All objects returned here must be treated as read-only. +type ServiceLister interface { + // List lists all Services in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. +func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +// All objects returned here must be treated as read-only. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. 
+func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. +func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("service"), name) + } + return obj.(*v1.Service), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go new file mode 100644 index 00000000000..aa9554d8bb5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceAccountLister helps list ServiceAccounts. +// All objects returned here must be treated as read-only. +type ServiceAccountLister interface { + // List lists all ServiceAccounts in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // ServiceAccounts returns an object that can list and get ServiceAccounts. + ServiceAccounts(namespace string) ServiceAccountNamespaceLister + ServiceAccountListerExpansion +} + +// serviceAccountLister implements the ServiceAccountLister interface. +type serviceAccountLister struct { + indexer cache.Indexer +} + +// NewServiceAccountLister returns a new ServiceAccountLister. +func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { + return &serviceAccountLister{indexer: indexer} +} + +// List lists all ServiceAccounts in the indexer. +func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// ServiceAccounts returns an object that can list and get ServiceAccounts. +func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { + return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceAccountNamespaceLister helps list and get ServiceAccounts. +// All objects returned here must be treated as read-only. +type ServiceAccountNamespaceLister interface { + // List lists all ServiceAccounts in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // Get retrieves the ServiceAccount from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ServiceAccount, error) + ServiceAccountNamespaceListerExpansion +} + +// serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister +// interface. 
+type serviceAccountNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServiceAccounts in the indexer for a given namespace. +func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// Get retrieves the ServiceAccount from the indexer for a given namespace and name. +func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("serviceaccount"), name) + } + return obj.(*v1.ServiceAccount), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go new file mode 100644 index 00000000000..400d001e7f2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath supplies methods for extracting fields from objects +// given a path to a field. 
+package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath" diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go new file mode 100644 index 00000000000..f8466d40348 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/validation" +) + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + // output with keys in sorted order to provide stable output + keys := make([]string, 0, len(m)) + var grow int + for k, v := range m { + keys = append(keys, k) + // why add 4: (for =, \n, " and ") + grow += len(k) + len(v) + 4 + } + sort.Strings(keys) + // allocate space to avoid expansion + dst := make([]byte, 0, grow) + for _, key := range keys { + if len(dst) > 0 { + dst = append(dst, '\n') + } + dst = append(dst, key...) + dst = append(dst, '=') + dst = strconv.AppendQuote(dst, m[key]) + } + return string(dst) +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. 
+func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). 
+// +// Example inputs and outputs: +// +// "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// "metadata.labels['']" --> ("metadata.labels", "", true) +// "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go new file mode 100644 index 00000000000..0962e5cfb5b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go @@ -0,0 +1,32 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hash + +import ( + "fmt" + "hash" + + "k8s.io/apimachinery/pkg/util/dump" +) + +// DeepHashObject writes specified object to hash using the spew library +// which follows pointers and prints actual values of the nested objects +// ensuring the hash does not change when a pointer changes. 
+func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { + hasher.Reset() + fmt.Fprintf(hasher, "%v", dump.ForHash(objectToWrite)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go new file mode 100644 index 00000000000..872fbdcad65 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go @@ -0,0 +1,75 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slice provides utility methods for common operations on slices. +package slice + +import ( + "sort" +) + +// CopyStrings copies the contents of the specified string slice +// into a new slice. +func CopyStrings(s []string) []string { + if s == nil { + return nil + } + c := make([]string, len(s)) + copy(c, s) + return c +} + +// SortStrings sorts the specified string slice in place. It returns the same +// slice that was provided in order to facilitate method chaining. +func SortStrings(s []string) []string { + sort.Strings(s) + return s +} + +// ContainsString checks if a given slice of strings contains the provided string. +// If a modifier func is provided, it is called with the slice item before the comparation. 
+func ContainsString(slice []string, s string, modifier func(s string) string) bool { + for _, item := range slice { + if item == s { + return true + } + if modifier != nil && modifier(item) == s { + return true + } + } + return false +} + +// RemoveString returns a newly created []string that contains all items from slice that +// are not equal to s and modifier(s) in case modifier func is provided. +func RemoveString(slice []string, s string, modifier func(s string) string) []string { + newSlice := make([]string, 0) + for _, item := range slice { + if item == s { + continue + } + if modifier != nil && modifier(item) == s { + continue + } + newSlice = append(newSlice, item) + } + if len(newSlice) == 0 { + // Sanitize for unit tests so we don't need to distinguish empty array + // and nil. + newSlice = nil + } + return newSlice +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 437d7783273..ca0cca3325b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -656,6 +656,8 @@ k8s.io/client-go/applyconfigurations/storage/v1alpha1 k8s.io/client-go/applyconfigurations/storage/v1beta1 k8s.io/client-go/discovery k8s.io/client-go/dynamic +k8s.io/client-go/informers/core/v1 +k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1 @@ -709,6 +711,8 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/listers/apps/v1 +k8s.io/client-go/listers/core/v1 k8s.io/client-go/metadata k8s.io/client-go/openapi k8s.io/client-go/pkg/apis/clientauthentication @@ -784,8 +788,11 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kubernetes/pkg/api/v1/pod k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/features +k8s.io/kubernetes/pkg/fieldpath k8s.io/kubernetes/pkg/registry/core/service/allocator 
k8s.io/kubernetes/pkg/registry/core/service/portallocator +k8s.io/kubernetes/pkg/util/hash +k8s.io/kubernetes/pkg/util/slice # k8s.io/utils v0.0.0-20230726121419-3b25d923346b ## explicit; go 1.18 k8s.io/utils/buffer