Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions controllers/idler/idler_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ import (
"github.com/codeready-toolchain/toolchain-common/pkg/condition"
notify "github.com/codeready-toolchain/toolchain-common/pkg/notification"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/scale"
Expand Down Expand Up @@ -42,8 +41,6 @@ const (
vmSubresourceURLFmt = "/apis/subresources.kubevirt.io/%s"
)

var vmGVR = schema.GroupVersionResource{Group: "kubevirt.io", Version: "v1", Resource: "virtualmachines"}

// SetupWithManager sets up the controller with the Manager.
func (r *Reconciler) SetupWithManager(mgr manager.Manager, allNamespaceCluster runtimeCluster.Cluster) error {
return ctrl.NewControllerManagedBy(mgr).
Expand All @@ -70,11 +67,12 @@ type Reconciler struct {
//+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=idlers/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=idlers/finalizers,verbs=update

//+kubebuilder:rbac:groups="",resources=pods;replicationcontrollers,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=pods;replicationcontrollers;persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps.openshift.io,resources=deploymentconfigs,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines;virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=serving.kserve.io,resources=inferenceservices;servingruntimes,verbs=get;list;watch;create;update;patch;delete

// needed to stop the VMs - we need to make a PUT request for the "stop" subresource. Kubernetes internally classifies these as either create or update
Expand Down
22 changes: 22 additions & 0 deletions controllers/idler/idler_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,9 @@ func TestEnsureIdling(t *testing.T) {
JobDoesNotExist(podsRunningForTooLong.job).
JobExists(podsTooEarlyToKill.job).
JobExists(noise.job).
DataVolumeDoesNotExist(podsRunningForTooLong.dataVolume).
DataVolumeExists(podsTooEarlyToKill.dataVolume).
DataVolumeExists(noise.dataVolume).
DeploymentScaledDown(podsRunningForTooLong.deployment).
ScaleSubresourceScaledDown(podsRunningForTooLong.integration).
ScaleSubresourceScaledDown(podsRunningForTooLong.kameletBinding).
Expand Down Expand Up @@ -321,6 +324,7 @@ func TestEnsureIdling(t *testing.T) {
PodsDoNotExist(toKill.standalonePods).
DaemonSetDoesNotExist(toKill.daemonSet).
JobDoesNotExist(toKill.job).
DataVolumeDoesNotExist(toKill.dataVolume).
DeploymentScaledDown(toKill.deployment).
ScaleSubresourceScaledDown(toKill.integration).
ScaleSubresourceScaledDown(toKill.kameletBinding).
Expand Down Expand Up @@ -536,6 +540,7 @@ func TestEnsureIdlingFailed(t *testing.T) {
PodsDoNotExist(toKill.standalonePods).
DaemonSetDoesNotExist(toKill.daemonSet).
JobDoesNotExist(toKill.job).
DataVolumeDoesNotExist(toKill.dataVolume).
ReplicaSetScaledDown(toKill.replicaSet).
DeploymentScaledDown(toKill.deployment).
ScaleSubresourceScaledDown(toKill.integration).
Expand Down Expand Up @@ -871,6 +876,7 @@ type payloads struct {
deploymentConfig *openshiftappsv1.DeploymentConfig
replicationController *corev1.ReplicationController
job *batchv1.Job
dataVolume *unstructured.Unstructured
virtualmachine *unstructured.Unstructured
vmStopCallCounter *int
virtualmachineinstance *unstructured.Unstructured
Expand Down Expand Up @@ -982,6 +988,21 @@ func preparePayloads(t *testing.T, clients *memberoperatortest.FakeClientSet, na
createObjectWithDynamicClient(t, clients.DynamicClient, job)
controlledPods = createPods(t, clients.AllNamespacesClient, job, sTime, controlledPods, noRestart())

// DataVolume
dv := &unstructured.Unstructured{}
dv.SetAPIVersion("cdi.kubevirt.io/v1beta1")
dv.SetKind("DataVolume")
dv.SetName(fmt.Sprintf("%s%s-datavolume", namePrefix, namespace))
dv.SetNamespace(namespace)
createObjectWithDynamicClient(t, clients.DynamicClient, dv)
// PersistentVolumeClaim owned by DataVolume
dvPvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pvc", dv.GetName()), Namespace: namespace},
}
require.NoError(t, controllerutil.SetControllerReference(dv, dvPvc, scheme.Scheme))
createObjectWithDynamicClient(t, clients.DynamicClient, dvPvc)
controlledPods = createPods(t, clients.AllNamespacesClient, dvPvc, sTime, controlledPods, noRestart())

// StatefulSet
sts := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s%s-statefulset", namePrefix, namespace), Namespace: namespace},
Expand Down Expand Up @@ -1107,6 +1128,7 @@ func preparePayloads(t *testing.T, clients *memberoperatortest.FakeClientSet, na
deploymentConfig: dc,
replicationController: standaloneRC,
job: job,
dataVolume: dv,
virtualmachine: vm,
vmStopCallCounter: stopCallCounter,
virtualmachineinstance: vmi,
Expand Down
2 changes: 1 addition & 1 deletion controllers/idler/owners.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ func (i *ownerIdler) scaleOwnerToZero(ctx context.Context, pod *corev1.Pod) (str
switch ownerKind {
case "Deployment", "ReplicaSet", "Integration", "KameletBinding", "StatefulSet", "ReplicationController":
err = i.scaleToZero(ctx, ownerWithGVR)
case "DaemonSet", "Job":
case "DaemonSet", "Job", "DataVolume":
err = i.deleteResource(ctx, ownerWithGVR) // Nothing to scale down. Delete instead.
case "DeploymentConfig":
err = i.scaleDeploymentConfigToZero(ctx, ownerWithGVR)
Expand Down
22 changes: 21 additions & 1 deletion controllers/idler/owners_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,20 @@ var testConfigs = map[string]createTestConfigFunc{
},
}
},
"DataVolume": func(plds payloads) payloadTestConfig {
return payloadTestConfig{
// We are testing the case with nested controllers (DataVolume -> PersistentVolumeClaim -> Pod) here,
// so the pod's owner is PersistentVolumeClaim but the expected scaled app is the parent DataVolume.
podOwnerName: fmt.Sprintf("%s-pvc", plds.dataVolume.GetName()),
expectedAppName: plds.dataVolume.GetName(),
ownerScaledUp: func(assertion *test.IdleablePayloadAssertion) {
assertion.DataVolumeExists(plds.dataVolume)
},
ownerScaledDown: func(assertion *test.IdleablePayloadAssertion) {
assertion.DataVolumeDoesNotExist(plds.dataVolume)
},
}
},
"VirtualMachine": func(plds payloads) payloadTestConfig {
return payloadTestConfig{
podOwnerName: plds.virtualmachineinstance.GetName(),
Expand Down Expand Up @@ -428,12 +442,18 @@ func noAAPResourceList(t *testing.T) []*metav1.APIResourceList {
require.NoError(t, apis.AddToScheme(scheme.Scheme))
noAAPResources := []*metav1.APIResourceList{
{
GroupVersion: vmGVR.GroupVersion().String(),
GroupVersion: "kubevirt.io/v1",
APIResources: []metav1.APIResource{
{Name: "virtualmachineinstances", Namespaced: true, Kind: "VirtualMachineInstance"},
{Name: "virtualmachines", Namespaced: true, Kind: "VirtualMachine"},
},
},
{
GroupVersion: "cdi.kubevirt.io/v1beta1",
APIResources: []metav1.APIResource{
{Name: "datavolumes", Namespaced: true, Kind: "DataVolume"},
},
},
}
for gvk := range scheme.Scheme.AllKnownTypes() {
resource, _ := meta.UnsafeGuessKindToResource(gvk)
Expand Down
16 changes: 16 additions & 0 deletions test/idler_assertion.go
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,22 @@ func (a *IdleablePayloadAssertion) JobDoesNotExist(job *batchv1.Job) *IdleablePa
return a
}

// dataVolumeGVR identifies the KubeVirt CDI DataVolume custom resource
// (group cdi.kubevirt.io, version v1beta1, resource "datavolumes") for
// lookups via the dynamic client in the assertions below.
var dataVolumeGVR = schema.GroupVersionResource{Group: "cdi.kubevirt.io", Version: "v1beta1", Resource: "datavolumes"}

// DataVolumeExists asserts that the given DataVolume can still be retrieved
// through the dynamic client, i.e. it has not been deleted by the idler.
// It returns the assertion to allow call chaining.
func (a *IdleablePayloadAssertion) DataVolumeExists(dataVolume *unstructured.Unstructured) *IdleablePayloadAssertion {
	nsClient := a.dynamicClient.Resource(dataVolumeGVR).Namespace(dataVolume.GetNamespace())
	_, err := nsClient.Get(context.TODO(), dataVolume.GetName(), metav1.GetOptions{})
	require.NoError(a.t, err)
	return a
}

// DataVolumeDoesNotExist asserts that the given DataVolume has been deleted,
// delegating to the shared deleted-resource assertion helper.
// It returns the assertion to allow call chaining.
func (a *IdleablePayloadAssertion) DataVolumeDoesNotExist(dataVolume *unstructured.Unstructured) *IdleablePayloadAssertion {
	name, namespace := dataVolume.GetName(), dataVolume.GetNamespace()
	a.assertResourceDeleted(dataVolumeGVR, namespace, name)
	return a
}

func (a *IdleablePayloadAssertion) StatefulSetScaledDown(statefulSet *appsv1.StatefulSet) *IdleablePayloadAssertion {
s := &appsv1.StatefulSet{}
gvr := appsv1.SchemeGroupVersion.WithResource("statefulsets")
Expand Down
Loading