Skip to content

Commit 58feb82

Browse files
authored
test(databackup): migrate controller tests to Ginkgo/Gomega (#5713)
* test(databackup): migrate controller tests to Ginkgo/Gomega
  - Add suite_test.go with package-level Ginkgo bootstrap (TestAPIs)
  - Migrate status_handler_test.go from stdlib testing to Ginkgo/Gomega
  - Add databackup_controller_test.go: NewDataBackupReconciler, Build, ControllerName
  - Add implement_test.go: covers all dataBackupOperation methods including GetOperationObject, GetChartsDirectory, HasPrecedingOperation, Validate, UpdateStatusInfoForCompleted, GetTTL, GetStatusHandler, GetTargetDataset, UpdateOperationApiStatus, GetOperationType, GetPossibleTargetDatasetNamespacedNames, GetReleaseNameSpacedName, GetParallelTaskNumber, SetTargetDatasetStatusInProgress, RemoveTargetDatasetStatusInProgress

  Coverage: 73.2% -> 77.5% (exceeds the 75% gate)

  Signed-off-by: Harsh <harshmastic@gmail.com>

* test(databackup): assert LastTransitionTime in status handler test

  Strengthen the "should use conditions LastTransitionTime" test to actually verify the transition-time value, not just the condition count. Uses BeTemporally to handle metav1.Time second-precision truncation.

  Signed-off-by: Harsh <harshmastic@gmail.com>

---------

Signed-off-by: Harsh <harshmastic@gmail.com>
1 parent 242a0bd commit 58feb82

4 files changed

Lines changed: 554 additions & 84 deletions

File tree

Lines changed: 63 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,83 @@
11
/*
2-
Copyright 2023 The Fluid Authors.
2+
Copyright 2026 The Fluid Authors.
33
44
Licensed under the Apache License, Version 2.0 (the "License");
55
you may not use this file except in compliance with the License.
66
You may obtain a copy of the License at
77
8-
http://www.apache.org/licenses/LICENSE-2.0
8+
http://www.apache.org/licenses/LICENSE-2.0
99
1010
Unless required by applicable law or agreed to in writing, software
1111
distributed under the License is distributed on an "AS IS" BASIS,
1212
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1313
See the License for the specific language governing permissions and
1414
limitations under the License.
15-
1615
*/
1716

1817
package databackup
1918

2019
import (
21-
"testing"
20+
"k8s.io/apimachinery/pkg/runtime"
21+
"k8s.io/client-go/tools/record"
22+
ctrl "sigs.k8s.io/controller-runtime"
23+
24+
. "github.com/onsi/ginkgo/v2"
25+
. "github.com/onsi/gomega"
26+
27+
datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
28+
"github.com/fluid-cloudnative/fluid/pkg/utils/fake"
2229
)
2330

24-
func TestAPIs(t *testing.T) {
31+
var _ = Describe("DataBackupReconciler", func() {
32+
33+
Describe("ControllerName", func() {
34+
It("should return the constant controller name", func() {
35+
r := &DataBackupReconciler{}
36+
Expect(r.ControllerName()).To(Equal(controllerName))
37+
})
38+
})
39+
40+
Describe("NewDataBackupReconciler", func() {
41+
It("should initialize reconciler with all required fields set", func() {
42+
s := runtime.NewScheme()
43+
_ = datav1alpha1.AddToScheme(s)
44+
fakeClient := fake.NewFakeClientWithScheme(s)
45+
log := ctrl.Log.WithName("test")
46+
recorder := record.NewFakeRecorder(10)
47+
48+
r := NewDataBackupReconciler(fakeClient, log, s, recorder)
49+
Expect(r).NotTo(BeNil())
50+
Expect(r.Scheme).To(Equal(s))
51+
Expect(r.OperationReconciler).NotTo(BeNil())
52+
})
53+
})
54+
55+
Describe("Build", func() {
56+
It("should return a dataBackupOperation for a valid DataBackup object", func() {
57+
s := runtime.NewScheme()
58+
_ = datav1alpha1.AddToScheme(s)
59+
fakeClient := fake.NewFakeClientWithScheme(s)
60+
log := ctrl.Log.WithName("test")
61+
recorder := record.NewFakeRecorder(10)
62+
r := NewDataBackupReconciler(fakeClient, log, s, recorder)
63+
64+
dataBackup := &datav1alpha1.DataBackup{}
65+
op, err := r.Build(dataBackup)
66+
Expect(err).NotTo(HaveOccurred())
67+
Expect(op).NotTo(BeNil())
68+
})
69+
70+
It("should return an error for a non-DataBackup object", func() {
71+
s := runtime.NewScheme()
72+
fakeClient := fake.NewFakeClientWithScheme(s)
73+
log := ctrl.Log.WithName("test")
74+
recorder := record.NewFakeRecorder(10)
75+
r := NewDataBackupReconciler(fakeClient, log, s, recorder)
2576

26-
}
77+
dataset := &datav1alpha1.Dataset{}
78+
op, err := r.Build(dataset)
79+
Expect(err).To(HaveOccurred())
80+
Expect(op).To(BeNil())
81+
})
82+
})
83+
})
Lines changed: 290 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,290 @@
1+
/*
2+
Copyright 2026 The Fluid Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package databackup
18+
19+
import (
20+
. "github.com/onsi/ginkgo/v2"
21+
. "github.com/onsi/gomega"
22+
23+
datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
24+
"github.com/fluid-cloudnative/fluid/pkg/common"
25+
cdatabackup "github.com/fluid-cloudnative/fluid/pkg/databackup"
26+
"github.com/fluid-cloudnative/fluid/pkg/dataoperation"
27+
cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
28+
"github.com/fluid-cloudnative/fluid/pkg/utils/fake"
29+
corev1 "k8s.io/api/core/v1"
30+
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31+
"k8s.io/apimachinery/pkg/runtime"
32+
"k8s.io/apimachinery/pkg/types"
33+
"k8s.io/client-go/tools/record"
34+
ctrl "sigs.k8s.io/controller-runtime"
35+
)
36+
37+
var _ = Describe("dataBackupOperation", func() {
38+
var (
39+
testScheme *runtime.Scheme
40+
dataBackup *datav1alpha1.DataBackup
41+
op *dataBackupOperation
42+
)
43+
44+
BeforeEach(func() {
45+
testScheme = runtime.NewScheme()
46+
_ = datav1alpha1.AddToScheme(testScheme)
47+
_ = corev1.AddToScheme(testScheme)
48+
49+
dataBackup = &datav1alpha1.DataBackup{
50+
ObjectMeta: v1.ObjectMeta{
51+
Name: "test-backup",
52+
Namespace: "default",
53+
},
54+
Spec: datav1alpha1.DataBackupSpec{
55+
Dataset: "test-dataset",
56+
BackupPath: "pvc://test-pvc/path",
57+
},
58+
}
59+
60+
fakeClient := fake.NewFakeClientWithScheme(testScheme, dataBackup)
61+
log := ctrl.Log.WithName("test")
62+
recorder := record.NewFakeRecorder(10)
63+
64+
op = &dataBackupOperation{
65+
Client: fakeClient,
66+
Log: log,
67+
Recorder: recorder,
68+
dataBackup: dataBackup,
69+
}
70+
})
71+
72+
Describe("GetOperationObject", func() {
73+
It("should return the dataBackup object", func() {
74+
obj := op.GetOperationObject()
75+
Expect(obj).To(Equal(dataBackup))
76+
})
77+
})
78+
79+
Describe("GetChartsDirectory", func() {
80+
It("should contain the DatabackupChart constant", func() {
81+
dir := op.GetChartsDirectory()
82+
Expect(dir).To(ContainSubstring(cdatabackup.DatabackupChart))
83+
})
84+
})
85+
86+
Describe("HasPrecedingOperation", func() {
87+
It("should return false when RunAfter is nil", func() {
88+
dataBackup.Spec.RunAfter = nil
89+
Expect(op.HasPrecedingOperation()).To(BeFalse())
90+
})
91+
92+
It("should return true when RunAfter is set", func() {
93+
dataBackup.Spec.RunAfter = &datav1alpha1.OperationRef{}
94+
Expect(op.HasPrecedingOperation()).To(BeTrue())
95+
})
96+
})
97+
98+
Describe("GetOperationType", func() {
99+
It("should return DataBackupType", func() {
100+
Expect(op.GetOperationType()).To(Equal(dataoperation.DataBackupType))
101+
})
102+
})
103+
104+
Describe("GetPossibleTargetDatasetNamespacedNames", func() {
105+
It("should return a single NamespacedName matching the dataBackup", func() {
106+
names := op.GetPossibleTargetDatasetNamespacedNames()
107+
Expect(names).To(HaveLen(1))
108+
Expect(names[0]).To(Equal(types.NamespacedName{
109+
Namespace: "default",
110+
Name: "test-backup",
111+
}))
112+
})
113+
})
114+
115+
Describe("GetReleaseNameSpacedName", func() {
116+
It("should return NamespacedName with the release name derived from the backup name", func() {
117+
nsn := op.GetReleaseNameSpacedName()
118+
Expect(nsn.Namespace).To(Equal("default"))
119+
Expect(nsn.Name).NotTo(BeEmpty())
120+
})
121+
})
122+
123+
Describe("GetStatusHandler", func() {
124+
It("should return an OnceHandler", func() {
125+
handler := op.GetStatusHandler()
126+
Expect(handler).NotTo(BeNil())
127+
_, ok := handler.(*OnceHandler)
128+
Expect(ok).To(BeTrue())
129+
})
130+
})
131+
132+
Describe("GetTTL", func() {
133+
It("should return nil when TTLSecondsAfterFinished is not set", func() {
134+
dataBackup.Spec.TTLSecondsAfterFinished = nil
135+
ttl, err := op.GetTTL()
136+
Expect(err).NotTo(HaveOccurred())
137+
Expect(ttl).To(BeNil())
138+
})
139+
140+
It("should return the TTL value when set", func() {
141+
ttlVal := int32(300)
142+
dataBackup.Spec.TTLSecondsAfterFinished = &ttlVal
143+
ttl, err := op.GetTTL()
144+
Expect(err).NotTo(HaveOccurred())
145+
Expect(ttl).NotTo(BeNil())
146+
Expect(*ttl).To(Equal(int32(300)))
147+
})
148+
})
149+
150+
Describe("GetParallelTaskNumber", func() {
151+
It("should return 1", func() {
152+
Expect(op.GetParallelTaskNumber()).To(Equal(int32(1)))
153+
})
154+
})
155+
156+
Describe("SetTargetDatasetStatusInProgress", func() {
157+
It("should not panic and be a no-op", func() {
158+
dataset := &datav1alpha1.Dataset{}
159+
Expect(func() { op.SetTargetDatasetStatusInProgress(dataset) }).NotTo(Panic())
160+
})
161+
})
162+
163+
Describe("RemoveTargetDatasetStatusInProgress", func() {
164+
It("should not panic and be a no-op", func() {
165+
dataset := &datav1alpha1.Dataset{}
166+
Expect(func() { op.RemoveTargetDatasetStatusInProgress(dataset) }).NotTo(Panic())
167+
})
168+
})
169+
170+
Describe("Validate", func() {
171+
It("should return nil conditions and no error for a valid pvc:// path", func() {
172+
dataBackup.Spec.BackupPath = "pvc://my-pvc/path"
173+
ctx := cruntime.ReconcileRequestContext{Log: fake.NullLogger()}
174+
conditions, err := op.Validate(ctx)
175+
Expect(err).NotTo(HaveOccurred())
176+
Expect(conditions).To(BeNil())
177+
})
178+
179+
It("should return nil conditions and no error for a valid local:// path", func() {
180+
dataBackup.Spec.BackupPath = "local:///tmp/backup"
181+
ctx := cruntime.ReconcileRequestContext{Log: fake.NullLogger()}
182+
conditions, err := op.Validate(ctx)
183+
Expect(err).NotTo(HaveOccurred())
184+
Expect(conditions).To(BeNil())
185+
})
186+
187+
It("should return error and Failed condition for an unsupported path format", func() {
188+
dataBackup.Spec.BackupPath = "s3://my-bucket/path"
189+
ctx := cruntime.ReconcileRequestContext{Log: fake.NullLogger()}
190+
conditions, err := op.Validate(ctx)
191+
Expect(err).To(HaveOccurred())
192+
Expect(conditions).To(HaveLen(1))
193+
Expect(conditions[0].Type).To(Equal(common.Failed))
194+
Expect(conditions[0].Reason).To(Equal("PathNotSupported"))
195+
})
196+
})
197+
198+
Describe("UpdateStatusInfoForCompleted", func() {
199+
It("should set BackupLocationPath and BackupLocationNodeName=NA for pvc path", func() {
200+
dataBackup.Spec.BackupPath = "pvc://my-pvc/path"
201+
infos := map[string]string{}
202+
err := op.UpdateStatusInfoForCompleted(infos)
203+
Expect(err).NotTo(HaveOccurred())
204+
Expect(infos[cdatabackup.BackupLocationPath]).To(Equal("pvc://my-pvc/path"))
205+
Expect(infos[cdatabackup.BackupLocationNodeName]).To(Equal("NA"))
206+
})
207+
208+
It("should set BackupLocationNodeName from pod for local:// path", func() {
209+
podName := dataBackup.GetName() + "-pod"
210+
backupPod := &corev1.Pod{
211+
ObjectMeta: v1.ObjectMeta{
212+
Name: podName,
213+
Namespace: "default",
214+
},
215+
Spec: corev1.PodSpec{
216+
NodeName: "node-1",
217+
},
218+
}
219+
testScheme2 := runtime.NewScheme()
220+
_ = datav1alpha1.AddToScheme(testScheme2)
221+
_ = corev1.AddToScheme(testScheme2)
222+
dataBackup2 := dataBackup.DeepCopy()
223+
dataBackup2.Spec.BackupPath = "local:///tmp/backup"
224+
fakeClient2 := fake.NewFakeClientWithScheme(testScheme2, dataBackup2, backupPod)
225+
op2 := &dataBackupOperation{
226+
Client: fakeClient2,
227+
Log: ctrl.Log.WithName("test"),
228+
Recorder: record.NewFakeRecorder(10),
229+
dataBackup: dataBackup2,
230+
}
231+
232+
infos := map[string]string{}
233+
err := op2.UpdateStatusInfoForCompleted(infos)
234+
Expect(err).NotTo(HaveOccurred())
235+
Expect(infos[cdatabackup.BackupLocationPath]).To(Equal("local:///tmp/backup"))
236+
Expect(infos[cdatabackup.BackupLocationNodeName]).To(Equal("node-1"))
237+
})
238+
})
239+
240+
Describe("UpdateOperationApiStatus", func() {
241+
It("should update the dataBackup status without error", func() {
242+
testScheme4 := runtime.NewScheme()
243+
_ = datav1alpha1.AddToScheme(testScheme4)
244+
_ = corev1.AddToScheme(testScheme4)
245+
fakeClient4 := fake.NewFakeClientWithScheme(testScheme4, dataBackup)
246+
op4 := &dataBackupOperation{
247+
Client: fakeClient4,
248+
Log: ctrl.Log.WithName("test"),
249+
Recorder: record.NewFakeRecorder(10),
250+
dataBackup: dataBackup,
251+
}
252+
opStatus := &datav1alpha1.OperationStatus{
253+
Phase: "Complete",
254+
}
255+
err := op4.UpdateOperationApiStatus(opStatus)
256+
Expect(err).NotTo(HaveOccurred())
257+
})
258+
})
259+
260+
Describe("GetTargetDataset", func() {
261+
It("should return error when dataset does not exist", func() {
262+
_, err := op.GetTargetDataset()
263+
Expect(err).To(HaveOccurred())
264+
})
265+
266+
It("should return dataset when it exists", func() {
267+
dataset := &datav1alpha1.Dataset{
268+
ObjectMeta: v1.ObjectMeta{
269+
Name: dataBackup.Spec.Dataset,
270+
Namespace: "default",
271+
},
272+
}
273+
testScheme3 := runtime.NewScheme()
274+
_ = datav1alpha1.AddToScheme(testScheme3)
275+
_ = corev1.AddToScheme(testScheme3)
276+
fakeClient3 := fake.NewFakeClientWithScheme(testScheme3, dataBackup, dataset)
277+
op3 := &dataBackupOperation{
278+
Client: fakeClient3,
279+
Log: ctrl.Log.WithName("test"),
280+
Recorder: record.NewFakeRecorder(10),
281+
dataBackup: dataBackup,
282+
}
283+
284+
got, err := op3.GetTargetDataset()
285+
Expect(err).NotTo(HaveOccurred())
286+
Expect(got).NotTo(BeNil())
287+
Expect(got.Name).To(Equal(dataBackup.Spec.Dataset))
288+
})
289+
})
290+
})

0 commit comments

Comments (0)