diff --git a/controllers/idler/idler_controller.go b/controllers/idler/idler_controller.go
index 0c3a0cb3..141bc1f1 100644
--- a/controllers/idler/idler_controller.go
+++ b/controllers/idler/idler_controller.go
@@ -13,7 +13,6 @@ import (
 	"github.com/codeready-toolchain/toolchain-common/pkg/condition"
 	notify "github.com/codeready-toolchain/toolchain-common/pkg/notification"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/scale"
@@ -42,8 +41,6 @@ const (
 	vmSubresourceURLFmt = "/apis/subresources.kubevirt.io/%s"
 )
 
-var vmGVR = schema.GroupVersionResource{Group: "kubevirt.io", Version: "v1", Resource: "virtualmachines"}
-
 // SetupWithManager sets up the controller with the Manager.
 func (r *Reconciler) SetupWithManager(mgr manager.Manager, allNamespaceCluster runtimeCluster.Cluster) error {
 	return ctrl.NewControllerManagedBy(mgr).
@@ -70,11 +67,12 @@ type Reconciler struct {
 //+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=idlers/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=idlers/finalizers,verbs=update
 
-//+kubebuilder:rbac:groups="",resources=pods;replicationcontrollers,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=pods;replicationcontrollers;persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=apps.openshift.io,resources=deploymentconfigs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines;virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=serving.kserve.io,resources=inferenceservices;servingruntimes,verbs=get;list;watch;create;update;patch;delete
 
 // needed to stop the VMs - we need to make a PUT request for the "stop" subresource. Kubernetes internally classifies these as either create or update
diff --git a/controllers/idler/idler_controller_test.go b/controllers/idler/idler_controller_test.go
index c23b4227..0e58b5ca 100644
--- a/controllers/idler/idler_controller_test.go
+++ b/controllers/idler/idler_controller_test.go
@@ -198,6 +198,9 @@ func TestEnsureIdling(t *testing.T) {
 				JobDoesNotExist(podsRunningForTooLong.job).
 				JobExists(podsTooEarlyToKill.job).
 				JobExists(noise.job).
+				DataVolumeDoesNotExist(podsRunningForTooLong.dataVolume).
+				DataVolumeExists(podsTooEarlyToKill.dataVolume).
+				DataVolumeExists(noise.dataVolume).
 				DeploymentScaledDown(podsRunningForTooLong.deployment).
 				ScaleSubresourceScaledDown(podsRunningForTooLong.integration).
 				ScaleSubresourceScaledDown(podsRunningForTooLong.kameletBinding).
@@ -321,6 +324,7 @@
 				PodsDoNotExist(toKill.standalonePods).
 				DaemonSetDoesNotExist(toKill.daemonSet).
 				JobDoesNotExist(toKill.job).
+				DataVolumeDoesNotExist(toKill.dataVolume).
 				DeploymentScaledDown(toKill.deployment).
 				ScaleSubresourceScaledDown(toKill.integration).
 				ScaleSubresourceScaledDown(toKill.kameletBinding).
@@ -536,6 +540,7 @@ func TestEnsureIdlingFailed(t *testing.T) {
 				PodsDoNotExist(toKill.standalonePods).
 				DaemonSetDoesNotExist(toKill.daemonSet).
 				JobDoesNotExist(toKill.job).
+				DataVolumeDoesNotExist(toKill.dataVolume).
 				ReplicaSetScaledDown(toKill.replicaSet).
 				DeploymentScaledDown(toKill.deployment).
 				ScaleSubresourceScaledDown(toKill.integration).
@@ -871,6 +876,7 @@ type payloads struct {
 	deploymentConfig       *openshiftappsv1.DeploymentConfig
 	replicationController  *corev1.ReplicationController
 	job                    *batchv1.Job
+	dataVolume             *unstructured.Unstructured
 	virtualmachine         *unstructured.Unstructured
 	vmStopCallCounter      *int
 	virtualmachineinstance *unstructured.Unstructured
@@ -982,6 +988,21 @@ func preparePayloads(t *testing.T, clients *memberoperatortest.FakeClientSet, na
 	createObjectWithDynamicClient(t, clients.DynamicClient, job)
 	controlledPods = createPods(t, clients.AllNamespacesClient, job, sTime, controlledPods, noRestart())
 
+	// DataVolume
+	dv := &unstructured.Unstructured{}
+	dv.SetAPIVersion("cdi.kubevirt.io/v1beta1")
+	dv.SetKind("DataVolume")
+	dv.SetName(fmt.Sprintf("%s%s-datavolume", namePrefix, namespace))
+	dv.SetNamespace(namespace)
+	createObjectWithDynamicClient(t, clients.DynamicClient, dv)
+	// PersistentVolumeClaim owned by DataVolume
+	dvPvc := &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pvc", dv.GetName()), Namespace: namespace},
+	}
+	require.NoError(t, controllerutil.SetControllerReference(dv, dvPvc, scheme.Scheme))
+	createObjectWithDynamicClient(t, clients.DynamicClient, dvPvc)
+	controlledPods = createPods(t, clients.AllNamespacesClient, dvPvc, sTime, controlledPods, noRestart())
+
 	// StatefulSet
 	sts := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s%s-statefulset", namePrefix, namespace), Namespace: namespace},
@@ -1107,6 +1128,7 @@
 		deploymentConfig:      dc,
 		replicationController: standaloneRC,
 		job:                   job,
+		dataVolume:            dv,
 		virtualmachine:        vm,
 		vmStopCallCounter:     stopCallCounter,
 		virtualmachineinstance: vmi,
diff --git a/controllers/idler/owners.go b/controllers/idler/owners.go
index 9d7c099e..7b5f2a1c 100644
--- a/controllers/idler/owners.go
+++ b/controllers/idler/owners.go
@@ -60,7 +60,7 @@ func (i *ownerIdler) scaleOwnerToZero(ctx context.Context, pod *corev1.Pod) (str
 	switch ownerKind {
 	case "Deployment", "ReplicaSet", "Integration", "KameletBinding", "StatefulSet", "ReplicationController":
 		err = i.scaleToZero(ctx, ownerWithGVR)
-	case "DaemonSet", "Job":
+	case "DaemonSet", "Job", "DataVolume":
 		err = i.deleteResource(ctx, ownerWithGVR) // Nothing to scale down. Delete instead.
 	case "DeploymentConfig":
 		err = i.scaleDeploymentConfigToZero(ctx, ownerWithGVR)
diff --git a/controllers/idler/owners_test.go b/controllers/idler/owners_test.go
index 88e51733..53386e7c 100644
--- a/controllers/idler/owners_test.go
+++ b/controllers/idler/owners_test.go
@@ -157,6 +157,20 @@ var testConfigs = map[string]createTestConfigFunc{
 			},
 		}
 	},
+	"DataVolume": func(plds payloads) payloadTestConfig {
+		return payloadTestConfig{
+			// We are testing the case with nested controllers (DataVolume -> PersistentVolumeClaim -> Pod) here,
+			// so the pod's owner is PersistentVolumeClaim but the expected scaled app is the parent DataVolume.
+			podOwnerName:    fmt.Sprintf("%s-pvc", plds.dataVolume.GetName()),
+			expectedAppName: plds.dataVolume.GetName(),
+			ownerScaledUp: func(assertion *test.IdleablePayloadAssertion) {
+				assertion.DataVolumeExists(plds.dataVolume)
+			},
+			ownerScaledDown: func(assertion *test.IdleablePayloadAssertion) {
+				assertion.DataVolumeDoesNotExist(plds.dataVolume)
+			},
+		}
+	},
 	"VirtualMachine": func(plds payloads) payloadTestConfig {
 		return payloadTestConfig{
 			podOwnerName: plds.virtualmachineinstance.GetName(),
@@ -428,12 +442,18 @@ func noAAPResourceList(t *testing.T) []*metav1.APIResourceList {
 	require.NoError(t, apis.AddToScheme(scheme.Scheme))
 	noAAPResources := []*metav1.APIResourceList{
 		{
-			GroupVersion: vmGVR.GroupVersion().String(),
+			GroupVersion: "kubevirt.io/v1",
 			APIResources: []metav1.APIResource{
 				{Name: "virtualmachineinstances", Namespaced: true, Kind: "VirtualMachineInstance"},
 				{Name: "virtualmachines", Namespaced: true, Kind: "VirtualMachine"},
 			},
 		},
+		{
+			GroupVersion: "cdi.kubevirt.io/v1beta1",
+			APIResources: []metav1.APIResource{
+				{Name: "datavolumes", Namespaced: true, Kind: "DataVolume"},
+			},
+		},
 	}
 	for gvk := range scheme.Scheme.AllKnownTypes() {
 		resource, _ := meta.UnsafeGuessKindToResource(gvk)
diff --git a/test/idler_assertion.go b/test/idler_assertion.go
index 03eea2b7..7b7da326 100644
--- a/test/idler_assertion.go
+++ b/test/idler_assertion.go
@@ -298,6 +298,22 @@ func (a *IdleablePayloadAssertion) JobDoesNotExist(job *batchv1.Job) *IdleablePa
 	return a
 }
 
+var dataVolumeGVR = schema.GroupVersionResource{Group: "cdi.kubevirt.io", Version: "v1beta1", Resource: "datavolumes"}
+
+func (a *IdleablePayloadAssertion) DataVolumeExists(dataVolume *unstructured.Unstructured) *IdleablePayloadAssertion {
+	_, err := a.dynamicClient.
+		Resource(dataVolumeGVR).
+		Namespace(dataVolume.GetNamespace()).
+		Get(context.TODO(), dataVolume.GetName(), metav1.GetOptions{})
+	require.NoError(a.t, err)
+	return a
+}
+
+func (a *IdleablePayloadAssertion) DataVolumeDoesNotExist(dataVolume *unstructured.Unstructured) *IdleablePayloadAssertion {
+	a.assertResourceDeleted(dataVolumeGVR, dataVolume.GetNamespace(), dataVolume.GetName())
+	return a
+}
+
 func (a *IdleablePayloadAssertion) StatefulSetScaledDown(statefulSet *appsv1.StatefulSet) *IdleablePayloadAssertion {
 	s := &appsv1.StatefulSet{}
 	gvr := appsv1.SchemeGroupVersion.WithResource("statefulsets")