Skip to content

Commit f3bd7bd

Browse files
author
玖宇
committed
disable sync schedule info in thinruntime
Signed-off-by: 玖宇 <guotongyu.gty@alibaba-inc.com>
1 parent 8a73ea1 commit f3bd7bd

2 files changed

Lines changed: 1 addition & 272 deletions

File tree

pkg/ddc/thin/node.go

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -16,15 +16,6 @@
1616

1717
package thin
1818

19-
import (
20-
"github.com/fluid-cloudnative/fluid/pkg/utils/dataset/lifecycle"
21-
)
22-
2319
func (t ThinEngine) SyncScheduleInfoToCacheNodes() (err error) {
24-
err = lifecycle.SyncScheduleInfoToCacheNodes(t.runtimeInfo, t.Client)
25-
if err != nil {
26-
return
27-
}
28-
29-
return
20+
return nil
3021
}

pkg/ddc/thin/node_test.go

Lines changed: 0 additions & 262 deletions
Original file line numberDiff line numberDiff line change
@@ -17,22 +17,10 @@
1717
package thin
1818

1919
import (
20-
"context"
21-
"fmt"
22-
"reflect"
23-
"testing"
24-
2520
datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
2621
"github.com/fluid-cloudnative/fluid/pkg/common"
27-
ctrlhelper "github.com/fluid-cloudnative/fluid/pkg/ctrl"
2822
"github.com/fluid-cloudnative/fluid/pkg/ddc/base"
2923
"github.com/fluid-cloudnative/fluid/pkg/utils/fake"
30-
appsv1 "k8s.io/api/apps/v1"
31-
v1 "k8s.io/api/core/v1"
32-
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
33-
"k8s.io/apimachinery/pkg/labels"
34-
"k8s.io/apimachinery/pkg/runtime"
35-
"k8s.io/utils/ptr"
3624
"sigs.k8s.io/controller-runtime/pkg/client"
3725
)
3826

@@ -51,253 +39,3 @@ func getTestThinEngineNode(client client.Client, name string, namespace string,
5139
}
5240
return engine
5341
}
54-
55-
func TestSyncScheduleInfoToCacheNodes(t *testing.T) {
56-
type fields struct {
57-
worker *appsv1.StatefulSet
58-
pods []*v1.Pod
59-
ds *appsv1.DaemonSet
60-
nodes []*v1.Node
61-
name string
62-
namespace string
63-
}
64-
testcases := []struct {
65-
name string
66-
fields fields
67-
nodeNames []string
68-
}{
69-
{
70-
name: "create",
71-
fields: fields{
72-
name: "spark",
73-
namespace: "big-data",
74-
worker: &appsv1.StatefulSet{
75-
TypeMeta: metav1.TypeMeta{
76-
Kind: "StatefulSet",
77-
APIVersion: "apps/v1",
78-
},
79-
ObjectMeta: metav1.ObjectMeta{
80-
Name: "spark-worker",
81-
Namespace: "big-data",
82-
UID: "uid1",
83-
},
84-
Spec: appsv1.StatefulSetSpec{
85-
Selector: &metav1.LabelSelector{
86-
MatchLabels: map[string]string{
87-
"app": "thin",
88-
"role": "thin-worker",
89-
"release": "spark",
90-
},
91-
},
92-
},
93-
},
94-
pods: []*v1.Pod{{
95-
ObjectMeta: metav1.ObjectMeta{
96-
Name: "spark-worker-0",
97-
Namespace: "big-data",
98-
OwnerReferences: []metav1.OwnerReference{{
99-
Kind: "StatefulSet",
100-
APIVersion: "apps/v1",
101-
Name: "spark-worker",
102-
UID: "uid1",
103-
Controller: ptr.To(true),
104-
}},
105-
Labels: map[string]string{
106-
"app": "thin",
107-
"role": "thin-worker",
108-
"release": "spark",
109-
"fluid.io/dataset": "big-data-spark",
110-
},
111-
},
112-
Spec: v1.PodSpec{
113-
NodeName: "node1",
114-
},
115-
}},
116-
nodes: []*v1.Node{{
117-
ObjectMeta: metav1.ObjectMeta{
118-
Name: "node1",
119-
},
120-
}},
121-
},
122-
nodeNames: []string{"node1"},
123-
},
124-
{
125-
name: "add",
126-
fields: fields{
127-
name: "hbase",
128-
namespace: "big-data",
129-
worker: &appsv1.StatefulSet{
130-
TypeMeta: metav1.TypeMeta{
131-
Kind: "StatefulSet",
132-
APIVersion: "apps/v1",
133-
},
134-
ObjectMeta: metav1.ObjectMeta{
135-
Name: "hbase-worker",
136-
Namespace: "big-data",
137-
UID: "uid2",
138-
},
139-
Spec: appsv1.StatefulSetSpec{
140-
Selector: &metav1.LabelSelector{
141-
MatchLabels: map[string]string{
142-
"app": "thin",
143-
"role": "thin-worker",
144-
"release": "hbase",
145-
},
146-
},
147-
},
148-
},
149-
pods: []*v1.Pod{
150-
{
151-
ObjectMeta: metav1.ObjectMeta{
152-
Name: "hbase-worker-0",
153-
Namespace: "big-data",
154-
OwnerReferences: []metav1.OwnerReference{{
155-
Kind: "StatefulSet",
156-
APIVersion: "apps/v1",
157-
Name: "hbase-worker",
158-
UID: "uid2",
159-
Controller: ptr.To(true),
160-
}},
161-
Labels: map[string]string{
162-
"app": "thin",
163-
"role": "thin-worker",
164-
"release": "hbase",
165-
"fluid.io/dataset": "big-data-hbase",
166-
},
167-
},
168-
Spec: v1.PodSpec{NodeName: "node3"},
169-
},
170-
},
171-
nodes: []*v1.Node{{
172-
ObjectMeta: metav1.ObjectMeta{
173-
Name: "node3",
174-
},
175-
}, {
176-
ObjectMeta: metav1.ObjectMeta{
177-
Name: "node2",
178-
Labels: map[string]string{"fluid.io/s-default-hbase": "true"},
179-
},
180-
}},
181-
},
182-
nodeNames: []string{"node3"},
183-
},
184-
{
185-
name: "noController",
186-
fields: fields{
187-
name: "hbase-a",
188-
namespace: "big-data",
189-
worker: &appsv1.StatefulSet{
190-
TypeMeta: metav1.TypeMeta{
191-
Kind: "StatefulSet",
192-
APIVersion: "apps/v1",
193-
},
194-
ObjectMeta: metav1.ObjectMeta{
195-
Name: "hbase-a-worker",
196-
Namespace: "big-data",
197-
UID: "uid3",
198-
},
199-
Spec: appsv1.StatefulSetSpec{
200-
Selector: &metav1.LabelSelector{
201-
MatchLabels: map[string]string{
202-
"app": "thin",
203-
"role": "thin-worker",
204-
"release": "hbase-a",
205-
},
206-
},
207-
},
208-
},
209-
pods: []*v1.Pod{
210-
{
211-
ObjectMeta: metav1.ObjectMeta{
212-
Name: "hbase-a-worker-0",
213-
Namespace: "big-data",
214-
Labels: map[string]string{
215-
"app": "thin",
216-
"role": "thin-worker",
217-
"release": "hbase-a",
218-
"fluid.io/dataset": "big-data-hbase-a",
219-
},
220-
},
221-
Spec: v1.PodSpec{NodeName: "node5"},
222-
},
223-
},
224-
nodes: []*v1.Node{{
225-
ObjectMeta: metav1.ObjectMeta{
226-
Name: "node5",
227-
},
228-
}, {
229-
ObjectMeta: metav1.ObjectMeta{
230-
Name: "node4",
231-
Labels: map[string]string{
232-
"fluid.io/s-default-hbase-a": "true",
233-
},
234-
},
235-
}},
236-
},
237-
nodeNames: []string{},
238-
},
239-
}
240-
241-
runtimeObjs := []runtime.Object{}
242-
243-
for _, testcase := range testcases {
244-
runtimeObjs = append(runtimeObjs, testcase.fields.worker)
245-
246-
if testcase.fields.ds != nil {
247-
runtimeObjs = append(runtimeObjs, testcase.fields.ds)
248-
}
249-
for _, pod := range testcase.fields.pods {
250-
runtimeObjs = append(runtimeObjs, pod)
251-
}
252-
253-
for _, node := range testcase.fields.nodes {
254-
runtimeObjs = append(runtimeObjs, node)
255-
}
256-
// runtimeObjs = append(runtimeObjs, testcase.fields.pods)
257-
}
258-
c := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)
259-
260-
for _, testcase := range testcases {
261-
engine := getTestThinEngineNode(c, testcase.fields.name, testcase.fields.namespace, true)
262-
runtimeInfo, err := base.BuildRuntimeInfo(testcase.fields.name,
263-
testcase.fields.namespace,
264-
common.ThinRuntime)
265-
if err != nil {
266-
t.Errorf("BuildRuntimeInfo() error = %v", err)
267-
}
268-
269-
engine.Helper = ctrlhelper.BuildHelper(runtimeInfo, c, engine.Log)
270-
err = engine.SyncScheduleInfoToCacheNodes()
271-
if err != nil {
272-
t.Errorf("Got error %t.", err)
273-
}
274-
275-
nodeList := &v1.NodeList{}
276-
datasetLabels, err := labels.Parse(fmt.Sprintf("%s=true", engine.runtimeInfo.GetRuntimeLabelName()))
277-
if err != nil {
278-
return
279-
}
280-
281-
err = c.List(context.TODO(), nodeList, &client.ListOptions{
282-
LabelSelector: datasetLabels,
283-
})
284-
285-
if err != nil {
286-
t.Errorf("Got error %t.", err)
287-
}
288-
289-
nodeNames := []string{}
290-
for _, node := range nodeList.Items {
291-
nodeNames = append(nodeNames, node.Name)
292-
}
293-
294-
if len(testcase.nodeNames) == 0 && len(nodeNames) == 0 {
295-
continue
296-
}
297-
298-
if !reflect.DeepEqual(testcase.nodeNames, nodeNames) {
299-
t.Errorf("test case %v fail to sync node labels, wanted %v, got %v", testcase.name, testcase.nodeNames, nodeNames)
300-
}
301-
302-
}
303-
}

0 commit comments

Comments (0)