diff --git a/.github/scripts/build-all-images.sh b/.github/scripts/build-all-images.sh
index e205253bb3e..8e4b43e07fc 100755
--- a/.github/scripts/build-all-images.sh
+++ b/.github/scripts/build-all-images.sh
@@ -15,7 +15,6 @@ function build_images() {
${IMG_REPO}/application-controller:${IMAGE_TAG}
${IMG_REPO}/alluxioruntime-controller:${IMAGE_TAG}
${IMG_REPO}/jindoruntime-controller:${IMAGE_TAG}
- ${IMG_REPO}/goosefsruntime-controller:${IMAGE_TAG}
${IMG_REPO}/juicefsruntime-controller:${IMAGE_TAG}
${IMG_REPO}/thinruntime-controller:${IMAGE_TAG}
${IMG_REPO}/efcruntime-controller:${IMAGE_TAG}
diff --git a/Makefile b/Makefile
index e723548ab3b..89c5d0a56d5 100644
--- a/Makefile
+++ b/Makefile
@@ -36,7 +36,6 @@ DATASET_CONTROLLER_IMG ?= ${IMG_REPO}/dataset-controller
APPLICATION_CONTROLLER_IMG ?= ${IMG_REPO}/application-controller
ALLUXIORUNTIME_CONTROLLER_IMG ?= ${IMG_REPO}/alluxioruntime-controller
JINDORUNTIME_CONTROLLER_IMG ?= ${IMG_REPO}/jindoruntime-controller
-GOOSEFSRUNTIME_CONTROLLER_IMG ?= ${IMG_REPO}/goosefsruntime-controller
JUICEFSRUNTIME_CONTROLLER_IMG ?= ${IMG_REPO}/juicefsruntime-controller
THINRUNTIME_CONTROLLER_IMG ?= ${IMG_REPO}/thinruntime-controller
EFCRUNTIME_CONTROLLER_IMG ?= ${IMG_REPO}/efcruntime-controller
@@ -52,7 +51,6 @@ DATASET_DOCKERFILE ?= docker/Dockerfile.dataset
APPLICATION_DOCKERFILE ?= docker/Dockerfile.application
ALLUXIORUNTIME_DOCKERFILE ?= docker/Dockerfile.alluxioruntime
JINDORUNTIME_DOCKERFILE ?= docker/Dockerfile.jindoruntime
-GOOSEFSRUNTIME_DOCKERFILE ?= docker/Dockerfile.goosefsruntime
JUICEFSRUNTIME_DOCKERFILE ?= docker/Dockerfile.juicefsruntime
THINRUNTIME_DOCKERFILE ?= docker/Dockerfile.thinruntime
EFCRUNTIME_DOCKERFILE ?= docker/Dockerfile.efcruntime
@@ -69,7 +67,6 @@ DATASET_BINARY ?= bin/dataset-controller
APPLICATION_BINARY ?= bin/fluidapp-controller
ALLUXIORUNTIME_BINARY ?= bin/alluxioruntime-controller
JINDORUNTIME_BINARY ?= bin/jindoruntime-controller
-GOOSEFSRUNTIME_BINARY ?= bin/goosefsruntime-controller
JUICEFSRUNTIME_BINARY ?= bin/juicefsruntime-controller
THINRUNTIME_BINARY ?= bin/thinruntime-controller
EFCRUNTIME_BINARY ?= bin/efcruntime-controller
@@ -99,7 +96,6 @@ DOCKER_BUILD := docker-build-dataset-controller
DOCKER_BUILD += docker-build-application-controller
DOCKER_BUILD += docker-build-alluxioruntime-controller
DOCKER_BUILD += docker-build-jindoruntime-controller
-DOCKER_BUILD += docker-build-goosefsruntime-controller
DOCKER_BUILD += docker-build-csi
DOCKER_BUILD += docker-build-webhook
DOCKER_BUILD += docker-build-juicefsruntime-controller
@@ -117,7 +113,6 @@ DOCKER_PUSH += docker-push-alluxioruntime-controller
DOCKER_PUSH += docker-push-jindoruntime-controller
DOCKER_PUSH += docker-push-csi
DOCKER_PUSH += docker-push-webhook
-DOCKER_PUSH += docker-push-goosefsruntime-controller
DOCKER_PUSH += docker-push-juicefsruntime-controller
DOCKER_PUSH += docker-push-thinruntime-controller
DOCKER_PUSH += docker-push-efcruntime-controller
@@ -132,7 +127,6 @@ DOCKER_BUILDX_PUSH := docker-buildx-push-dataset-controller
DOCKER_BUILDX_PUSH += docker-buildx-push-application-controller
DOCKER_BUILDX_PUSH += docker-buildx-push-alluxioruntime-controller
DOCKER_BUILDX_PUSH += docker-buildx-push-jindoruntime-controller
-DOCKER_BUILDX_PUSH += docker-buildx-push-goosefsruntime-controller
DOCKER_BUILDX_PUSH += docker-buildx-push-csi
DOCKER_BUILDX_PUSH += docker-buildx-push-webhook
DOCKER_BUILDX_PUSH += docker-buildx-push-juicefsruntime-controller
@@ -226,10 +220,6 @@ alluxioruntime-controller-build:
jindoruntime-controller-build:
CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${ARCH} GO111MODULE=${GO_MODULE} go build ${GC_FLAGS} -a -o ${JINDORUNTIME_BINARY} -ldflags '${LDFLAGS}' cmd/jindo/main.go
-.PHONY: goosefsruntime-controller-build
-goosefsruntime-controller-build:
- CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${ARCH} GO111MODULE=${GO_MODULE} go build ${GC_FLAGS} -a -o ${GOOSEFSRUNTIME_BINARY} -ldflags '${LDFLAGS}' cmd/goosefs/main.go
-
.PHONY: juicefsruntime-controller-build
juicefsruntime-controller-build:
CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${ARCH} GO111MODULE=${GO_MODULE} go build ${GC_FLAGS} -a -o ${JUICEFSRUNTIME_BINARY} -ldflags '-s -w ${LDFLAGS}' cmd/juicefs/main.go
@@ -271,10 +261,6 @@ docker-build-alluxioruntime-controller:
docker-build-jindoruntime-controller:
docker build ${DOCKER_NO_CACHE_OPTION} --build-arg TARGETARCH=${ARCH} ${DOCKER_BUILD_ARGS} . -f ${JINDORUNTIME_DOCKERFILE} -t ${JINDORUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
-.PHONY: docker-build-goosefsruntime-controller
-docker-build-goosefsruntime-controller:
- docker build ${DOCKER_NO_CACHE_OPTION} --build-arg TARGETARCH=${ARCH} ${DOCKER_BUILD_ARGS} . -f ${GOOSEFSRUNTIME_DOCKERFILE} -t ${GOOSEFSRUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
-
.PHONY: docker-build-juicefsruntime-controller
docker-build-juicefsruntime-controller:
docker build ${DOCKER_NO_CACHE_OPTION} --build-arg TARGETARCH=${ARCH} ${DOCKER_BUILD_ARGS} . -f ${JUICEFSRUNTIME_DOCKERFILE} -t ${JUICEFSRUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
@@ -328,10 +314,6 @@ docker-push-alluxioruntime-controller: docker-build-alluxioruntime-controller
docker-push-jindoruntime-controller: docker-build-jindoruntime-controller
docker push ${JINDORUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
-.PHONY: docker-push-goosefsruntime-controller
-docker-push-goosefsruntime-controller: docker-build-goosefsruntime-controller
- docker push ${GOOSEFSRUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
-
.PHONY: docker-push-juicefsruntime-controller
docker-push-juicefsruntime-controller: docker-build-juicefsruntime-controller
docker push ${JUICEFSRUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
@@ -385,10 +367,6 @@ docker-buildx-push-alluxioruntime-controller:
docker-buildx-push-jindoruntime-controller:
docker buildx build --push ${DOCKER_BUILD_ARGS} --platform ${DOCKER_PLATFORM} ${DOCKER_NO_CACHE_OPTION} . -f ${JINDORUNTIME_DOCKERFILE} -t ${JINDORUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
-.PHONY: docker-buildx-push-goosefsruntime-controller
-docker-buildx-push-goosefsruntime-controller:
- docker buildx build --push ${DOCKER_BUILD_ARGS} --platform ${DOCKER_PLATFORM} ${DOCKER_NO_CACHE_OPTION} . -f ${GOOSEFSRUNTIME_DOCKERFILE} -t ${GOOSEFSRUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
-
.PHONY: docker-buildx-push-juicefsruntime-controller
docker-buildx-push-juicefsruntime-controller:
docker buildx build --push ${DOCKER_BUILD_ARGS} --platform ${DOCKER_PLATFORM} ${DOCKER_NO_CACHE_OPTION} . -f ${JUICEFSRUNTIME_DOCKERFILE} -t ${JUICEFSRUNTIME_CONTROLLER_IMG}:${GIT_VERSION}
diff --git a/PROJECT b/PROJECT
index c72cc8c709b..3018a9439f2 100644
--- a/PROJECT
+++ b/PROJECT
@@ -49,15 +49,6 @@ resources:
kind: JuiceFSRuntime
path: github.com/fluid-cloudnative/fluid/api/v1alpha1
version: v1alpha1
-- api:
- crdVersion: v1
- namespaced: true
- controller: true
- domain: fluid.io
- group: data
- kind: GooseFSRuntime
- path: github.com/fluid-cloudnative/fluid/api/v1alpha1
- version: v1alpha1
- api:
crdVersion: v1
namespaced: true
diff --git a/api/v1alpha1/goosefsruntime_types.go b/api/v1alpha1/goosefsruntime_types.go
deleted file mode 100644
index ff65e124018..00000000000
--- a/api/v1alpha1/goosefsruntime_types.go
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
-Copyright 2021 The Fluid Author.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// GooseFSCompTemplateSpec is a description of the GooseFS commponents
-type GooseFSCompTemplateSpec struct {
- // Replicas is the desired number of replicas of the given template.
- // If unspecified, defaults to 1.
- // +kubebuilder:validation:Minimum=1
- // replicas is the min replicas of dataset in the cluster
- // +optional
- Replicas int32 `json:"replicas,omitempty"`
-
- // Options for JVM
- JvmOptions []string `json:"jvmOptions,omitempty"`
-
- // Configurable properties for the GOOSEFS component.
- // Refer to GOOSEFS Configuration Properties for more info
- // +optional
- Properties map[string]string `json:"properties,omitempty"`
-
- // Ports used by GooseFS(e.g. rpc: 19998 for master)
- // +optional
- Ports map[string]int `json:"ports,omitempty"`
-
- // Resources that will be requested by the GooseFS component.
- //
- // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
- // already allocated to the pod.
- // +optional
- Resources corev1.ResourceRequirements `json:"resources,omitempty"`
-
- // Environment variables that will be used by GooseFS component.
- Env map[string]string `json:"env,omitempty"`
-
- // Enabled or Disabled for the components. For now, only API Gateway is enabled or disabled.
- // +optional
- Enabled bool `json:"enabled,omitempty"`
-
- // NodeSelector is a selector which must be true for the master to fit on a node
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Annotations is an unstructured key value map stored with a resource that may be
- // set by external tools to store and retrieve arbitrary metadata. They are not
- // queryable and should be preserved when modifying objects.
- // More info: http://kubernetes.io/docs/user-guide/annotations
- // +optional
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-// GooseFSFuseSpec is a description of the GooseFS Fuse
-type GooseFSFuseSpec struct {
-
- // Image for GooseFS Fuse(e.g. goosefs/goosefs-fuse)
- Image string `json:"image,omitempty"`
-
- // Image Tag for GooseFS Fuse(e.g. v1.0.1)
- ImageTag string `json:"imageTag,omitempty"`
-
- // One of the three policies: `Always`, `IfNotPresent`, `Never`
- ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
-
- // Options for JVM
- JvmOptions []string `json:"jvmOptions,omitempty"`
-
- // Configurable properties for the GOOSEFS component.
- // Refer to GOOSEFS Configuration Properties for more info
- Properties map[string]string `json:"properties,omitempty"`
-
- // Environment variables that will be used by GooseFS Fuse
- Env map[string]string `json:"env,omitempty"`
-
- // Resources that will be requested by GooseFS Fuse.
- //
- // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
- // already allocated to the pod.
- // +optional
- Resources corev1.ResourceRequirements `json:"resources,omitempty"`
-
- // Arguments that will be passed to GooseFS Fuse
- Args []string `json:"args,omitempty"`
-
- // NodeSelector is a selector which must be true for the fuse client to fit on a node,
- // this option only effect when global is enabled
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // CleanPolicy decides when to clean GooseFS Fuse pods.
- // Currently Fluid supports two policies: OnDemand and OnRuntimeDeleted
- // OnDemand cleans fuse pod once th fuse pod on some node is not needed
- // OnRuntimeDeleted cleans fuse pod only when the cache runtime is deleted
- // Defaults to OnRuntimeDeleted
- // +optional
- CleanPolicy FuseCleanPolicy `json:"cleanPolicy,omitempty"`
-
- // Annotations is an unstructured key value map stored with a resource that may be
- // set by external tools to store and retrieve arbitrary metadata. They are not
- // queryable and should be preserved when modifying objects.
- // More info: http://kubernetes.io/docs/user-guide/annotations
- // +optional
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-// GooseFSRuntimeSpec defines the desired state of GooseFSRuntime
-type GooseFSRuntimeSpec struct {
- // The version information that instructs fluid to orchestrate a particular version of GooseFS.
- GooseFSVersion VersionSpec `json:"goosefsVersion,omitempty"`
-
- // The component spec of GooseFS master
- Master GooseFSCompTemplateSpec `json:"master,omitempty"`
-
- // The component spec of GooseFS job master
- JobMaster GooseFSCompTemplateSpec `json:"jobMaster,omitempty"`
-
- // The component spec of GooseFS worker
- Worker GooseFSCompTemplateSpec `json:"worker,omitempty"`
-
- // The component spec of GooseFS job Worker
- JobWorker GooseFSCompTemplateSpec `json:"jobWorker,omitempty"`
-
- // The component spec of GooseFS API Gateway
- APIGateway GooseFSCompTemplateSpec `json:"apiGateway,omitempty"`
-
- // The spec of init users
- InitUsers InitUsersSpec `json:"initUsers,omitempty"`
-
- // The component spec of GooseFS Fuse
- Fuse GooseFSFuseSpec `json:"fuse,omitempty"`
-
- // Configurable properties for the GOOSEFS component.
- // Refer to GOOSEFS Configuration Properties for more info
- Properties map[string]string `json:"properties,omitempty"`
-
- // Options for JVM
- JvmOptions []string `json:"jvmOptions,omitempty"`
-
- // Tiered storage used by GooseFS
- TieredStore TieredStore `json:"tieredstore,omitempty"`
-
- // Management strategies for the dataset to which the runtime is bound
- Data Data `json:"data,omitempty"`
-
- // The replicas of the worker, need to be specified
- Replicas int32 `json:"replicas,omitempty"`
-
- // Manage the user to run GooseFS Runtime
- // GooseFS support POSIX-ACL and Apache Ranger to manager authorization
- // TODO(chrisydxie@tencent.com) Support Apache Ranger.
- RunAs *User `json:"runAs,omitempty"`
-
- // Disable monitoring for GooseFS Runtime
- // Prometheus is enabled by default
- // +optional
- DisablePrometheus bool `json:"disablePrometheus,omitempty"`
-
- // Name of the configMap used to support HDFS configurations when using HDFS as GooseFS's UFS. The configMap
- // must be in the same namespace with the GooseFSRuntime. The configMap should contain user-specific HDFS conf files in it.
- // For now, only "hdfs-site.xml" and "core-site.xml" are supported. It must take the filename of the conf file as the key and content
- // of the file as the value.
- // +optional
- HadoopConfig string `json:"hadoopConfig,omitempty"`
-
- // CleanCachePolicy defines cleanCache Policy
- // +optional
- CleanCachePolicy CleanCachePolicy `json:"cleanCachePolicy,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
-// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.currentWorkerNumberScheduled,selectorpath=.status.selector
-// +kubebuilder:printcolumn:name="Ready Masters",type="integer",JSONPath=`.status.masterNumberReady`,priority=10
-// +kubebuilder:printcolumn:name="Desired Masters",type="integer",JSONPath=`.status.desiredMasterNumberScheduled`,priority=10
-// +kubebuilder:printcolumn:name="Master Phase",type="string",JSONPath=`.status.masterPhase`,priority=0
-// +kubebuilder:printcolumn:name="Ready Workers",type="integer",JSONPath=`.status.workerNumberReady`,priority=10
-// +kubebuilder:printcolumn:name="Desired Workers",type="integer",JSONPath=`.status.desiredWorkerNumberScheduled`,priority=10
-// +kubebuilder:printcolumn:name="Worker Phase",type="string",JSONPath=`.status.workerPhase`,priority=0
-// +kubebuilder:printcolumn:name="Ready Fuses",type="integer",JSONPath=`.status.fuseNumberReady`,priority=10
-// +kubebuilder:printcolumn:name="Desired Fuses",type="integer",JSONPath=`.status.desiredFuseNumberScheduled`,priority=10
-// +kubebuilder:printcolumn:name="Fuse Phase",type="string",JSONPath=`.status.fusePhase`,priority=0
-// +kubebuilder:printcolumn:name="API Gateway",type="string",JSONPath=`.status.apiGateway.endpoint`,priority=10
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,priority=0
-// +kubebuilder:resource:scope=Namespaced
-// +kubebuilder:resource:categories={fluid},shortName=goose
-// +genclient
-
-// GooseFSRuntime is the Schema for the goosefsruntimes API
-type GooseFSRuntime struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec GooseFSRuntimeSpec `json:"spec,omitempty"`
- Status RuntimeStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:scope=Namespaced
-
-// GooseFSRuntimeList contains a list of GooseFSRuntime
-type GooseFSRuntimeList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []GooseFSRuntime `json:"items"`
-}
-
-func init() {
- SchemeBuilder.Register(&GooseFSRuntime{}, &GooseFSRuntimeList{})
-}
-
-// Replicas gets the replicas of runtime worker
-func (runtime *GooseFSRuntime) Replicas() int32 {
- return runtime.Spec.Replicas
-}
-
-func (runtime *GooseFSRuntime) GetStatus() *RuntimeStatus {
- return &runtime.Status
-}
diff --git a/api/v1alpha1/openapi_generated.go b/api/v1alpha1/openapi_generated.go
index 418350ba21b..b523a1a9f66 100644
--- a/api/v1alpha1/openapi_generated.go
+++ b/api/v1alpha1/openapi_generated.go
@@ -83,11 +83,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/fluid-cloudnative/fluid/api/v1alpha1.ExternalEndpointSpec": schema_fluid_cloudnative_fluid_api_v1alpha1_ExternalEndpointSpec(ref),
"github.com/fluid-cloudnative/fluid/api/v1alpha1.ExternalStorage": schema_fluid_cloudnative_fluid_api_v1alpha1_ExternalStorage(ref),
"github.com/fluid-cloudnative/fluid/api/v1alpha1.ExtraResourcesComponentDependency": schema_fluid_cloudnative_fluid_api_v1alpha1_ExtraResourcesComponentDependency(ref),
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec": schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSCompTemplateSpec(ref),
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSFuseSpec": schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSFuseSpec(ref),
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntime": schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSRuntime(ref),
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntimeList": schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSRuntimeList(ref),
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntimeSpec": schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSRuntimeSpec(ref),
"github.com/fluid-cloudnative/fluid/api/v1alpha1.HCFSStatus": schema_fluid_cloudnative_fluid_api_v1alpha1_HCFSStatus(ref),
"github.com/fluid-cloudnative/fluid/api/v1alpha1.HeadlessRuntimeComponentService": schema_fluid_cloudnative_fluid_api_v1alpha1_HeadlessRuntimeComponentService(ref),
"github.com/fluid-cloudnative/fluid/api/v1alpha1.InitFuseSpec": schema_fluid_cloudnative_fluid_api_v1alpha1_InitFuseSpec(ref),
@@ -3866,527 +3861,6 @@ func schema_fluid_cloudnative_fluid_api_v1alpha1_ExtraResourcesComponentDependen
}
}
-func schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSCompTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "GooseFSCompTemplateSpec is a description of the GooseFS commponents",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "replicas": {
- SchemaProps: spec.SchemaProps{
- Description: "Replicas is the desired number of replicas of the given template. If unspecified, defaults to 1. replicas is the min replicas of dataset in the cluster",
- Type: []string{"integer"},
- Format: "int32",
- },
- },
- "jvmOptions": {
- SchemaProps: spec.SchemaProps{
- Description: "Options for JVM",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "properties": {
- SchemaProps: spec.SchemaProps{
- 						Description: "Configurable properties for the GOOSEFS component. \n\nRefer to GOOSEFS Configuration Properties for more info",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "ports": {
- SchemaProps: spec.SchemaProps{
- Description: "Ports used by GooseFS(e.g. rpc: 19998 for master)",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: 0,
- Type: []string{"integer"},
- Format: "int32",
- },
- },
- },
- },
- },
- "resources": {
- SchemaProps: spec.SchemaProps{
- 						Description: "Resources that will be requested by the GooseFS component. \n\nResources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
- },
- },
- "env": {
- SchemaProps: spec.SchemaProps{
- 						Description: "Environment variables that will be used by GooseFS component. \n",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "enabled": {
- SchemaProps: spec.SchemaProps{
- Description: "Enabled or Disabled for the components. For now, only API Gateway is enabled or disabled.",
- Type: []string{"boolean"},
- Format: "",
- },
- },
- "nodeSelector": {
- SchemaProps: spec.SchemaProps{
- Description: "NodeSelector is a selector which must be true for the master to fit on a node",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "annotations": {
- SchemaProps: spec.SchemaProps{
- Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- },
- },
- },
- Dependencies: []string{
- "k8s.io/api/core/v1.ResourceRequirements"},
- }
-}
-
-func schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSFuseSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "GooseFSFuseSpec is a description of the GooseFS Fuse",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "image": {
- SchemaProps: spec.SchemaProps{
- Description: "Image for GooseFS Fuse(e.g. goosefs/goosefs-fuse)",
- Type: []string{"string"},
- Format: "",
- },
- },
- "imageTag": {
- SchemaProps: spec.SchemaProps{
- Description: "Image Tag for GooseFS Fuse(e.g. v1.0.1)",
- Type: []string{"string"},
- Format: "",
- },
- },
- "imagePullPolicy": {
- SchemaProps: spec.SchemaProps{
- Description: "One of the three policies: `Always`, `IfNotPresent`, `Never`",
- Type: []string{"string"},
- Format: "",
- },
- },
- "jvmOptions": {
- SchemaProps: spec.SchemaProps{
- Description: "Options for JVM",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "properties": {
- SchemaProps: spec.SchemaProps{
- 						Description: "Configurable properties for the GOOSEFS component. \n\nRefer to GOOSEFS Configuration Properties for more info",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "env": {
- SchemaProps: spec.SchemaProps{
- Description: "Environment variables that will be used by GooseFS Fuse",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "resources": {
- SchemaProps: spec.SchemaProps{
- 						Description: "Resources that will be requested by GooseFS Fuse. \n\nResources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
- },
- },
- "args": {
- SchemaProps: spec.SchemaProps{
- Description: "Arguments that will be passed to GooseFS Fuse",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "nodeSelector": {
- SchemaProps: spec.SchemaProps{
- Description: "NodeSelector is a selector which must be true for the fuse client to fit on a node, this option only effect when global is enabled",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "cleanPolicy": {
- SchemaProps: spec.SchemaProps{
- Description: "CleanPolicy decides when to clean GooseFS Fuse pods. Currently Fluid supports two policies: OnDemand and OnRuntimeDeleted OnDemand cleans fuse pod once th fuse pod on some node is not needed OnRuntimeDeleted cleans fuse pod only when the cache runtime is deleted Defaults to OnRuntimeDeleted",
- Type: []string{"string"},
- Format: "",
- },
- },
- "annotations": {
- SchemaProps: spec.SchemaProps{
- Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- },
- },
- },
- Dependencies: []string{
- "k8s.io/api/core/v1.ResourceRequirements"},
- }
-}
-
-func schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSRuntime(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "GooseFSRuntime is the Schema for the goosefsruntimes API",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
- },
- },
- "spec": {
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntimeSpec"),
- },
- },
- "status": {
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.RuntimeStatus"),
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntimeSpec", "github.com/fluid-cloudnative/fluid/api/v1alpha1.RuntimeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
- }
-}
-
-func schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSRuntimeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "GooseFSRuntimeList contains a list of GooseFSRuntime",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
- },
- },
- "items": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntime"),
- },
- },
- },
- },
- },
- },
- Required: []string{"items"},
- },
- },
- Dependencies: []string{
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSRuntime", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
- }
-}
-
-func schema_fluid_cloudnative_fluid_api_v1alpha1_GooseFSRuntimeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "GooseFSRuntimeSpec defines the desired state of GooseFSRuntime",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "goosefsVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "The version information that instructs fluid to orchestrate a particular version of GooseFS.",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.VersionSpec"),
- },
- },
- "master": {
- SchemaProps: spec.SchemaProps{
- Description: "The component spec of GooseFS master",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec"),
- },
- },
- "jobMaster": {
- SchemaProps: spec.SchemaProps{
- Description: "The component spec of GooseFS job master",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec"),
- },
- },
- "worker": {
- SchemaProps: spec.SchemaProps{
- Description: "The component spec of GooseFS worker",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec"),
- },
- },
- "jobWorker": {
- SchemaProps: spec.SchemaProps{
- Description: "The component spec of GooseFS job Worker",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec"),
- },
- },
- "apiGateway": {
- SchemaProps: spec.SchemaProps{
- Description: "The component spec of GooseFS API Gateway",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec"),
- },
- },
- "initUsers": {
- SchemaProps: spec.SchemaProps{
- Description: "The spec of init users",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.InitUsersSpec"),
- },
- },
- "fuse": {
- SchemaProps: spec.SchemaProps{
- Description: "The component spec of GooseFS Fuse",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSFuseSpec"),
- },
- },
- "properties": {
- SchemaProps: spec.SchemaProps{
- 						Description: "Configurable properties for the GOOSEFS component. \n\nRefer to GOOSEFS Configuration Properties for more info",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "jvmOptions": {
- SchemaProps: spec.SchemaProps{
- Description: "Options for JVM",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "tieredstore": {
- SchemaProps: spec.SchemaProps{
- Description: "Tiered storage used by GooseFS",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.TieredStore"),
- },
- },
- "data": {
- SchemaProps: spec.SchemaProps{
- Description: "Management strategies for the dataset to which the runtime is bound",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.Data"),
- },
- },
- "replicas": {
- SchemaProps: spec.SchemaProps{
- Description: "The replicas of the worker, need to be specified",
- Type: []string{"integer"},
- Format: "int32",
- },
- },
- "runAs": {
- SchemaProps: spec.SchemaProps{
- Description: "Manage the user to run GooseFS Runtime GooseFS support POSIX-ACL and Apache Ranger to manager authorization",
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.User"),
- },
- },
- "disablePrometheus": {
- SchemaProps: spec.SchemaProps{
- Description: "Disable monitoring for GooseFS Runtime Prometheus is enabled by default",
- Type: []string{"boolean"},
- Format: "",
- },
- },
- "hadoopConfig": {
- SchemaProps: spec.SchemaProps{
- Description: "Name of the configMap used to support HDFS configurations when using HDFS as GooseFS's UFS. The configMap must be in the same namespace with the GooseFSRuntime. The configMap should contain user-specific HDFS conf files in it. For now, only \"hdfs-site.xml\" and \"core-site.xml\" are supported. It must take the filename of the conf file as the key and content of the file as the value.",
- Type: []string{"string"},
- Format: "",
- },
- },
- "cleanCachePolicy": {
- SchemaProps: spec.SchemaProps{
- Description: "CleanCachePolicy defines cleanCache Policy",
- Default: map[string]interface{}{},
- Ref: ref("github.com/fluid-cloudnative/fluid/api/v1alpha1.CleanCachePolicy"),
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/fluid-cloudnative/fluid/api/v1alpha1.CleanCachePolicy", "github.com/fluid-cloudnative/fluid/api/v1alpha1.Data", "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSCompTemplateSpec", "github.com/fluid-cloudnative/fluid/api/v1alpha1.GooseFSFuseSpec", "github.com/fluid-cloudnative/fluid/api/v1alpha1.InitUsersSpec", "github.com/fluid-cloudnative/fluid/api/v1alpha1.TieredStore", "github.com/fluid-cloudnative/fluid/api/v1alpha1.User", "github.com/fluid-cloudnative/fluid/api/v1alpha1.VersionSpec"},
- }
-}
-
func schema_fluid_cloudnative_fluid_api_v1alpha1_HCFSStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index aa6630eb318..bc05909d9a2 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1639,218 +1639,6 @@ func (in *ExtraResourcesComponentDependency) DeepCopy() *ExtraResourcesComponent
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GooseFSCompTemplateSpec) DeepCopyInto(out *GooseFSCompTemplateSpec) {
- *out = *in
- if in.JvmOptions != nil {
- in, out := &in.JvmOptions, &out.JvmOptions
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Properties != nil {
- in, out := &in.Properties, &out.Properties
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Ports != nil {
- in, out := &in.Ports, &out.Ports
- *out = make(map[string]int, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- in.Resources.DeepCopyInto(&out.Resources)
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GooseFSCompTemplateSpec.
-func (in *GooseFSCompTemplateSpec) DeepCopy() *GooseFSCompTemplateSpec {
- if in == nil {
- return nil
- }
- out := new(GooseFSCompTemplateSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GooseFSFuseSpec) DeepCopyInto(out *GooseFSFuseSpec) {
- *out = *in
- if in.JvmOptions != nil {
- in, out := &in.JvmOptions, &out.JvmOptions
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Properties != nil {
- in, out := &in.Properties, &out.Properties
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- in.Resources.DeepCopyInto(&out.Resources)
- if in.Args != nil {
- in, out := &in.Args, &out.Args
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Annotations != nil {
- in, out := &in.Annotations, &out.Annotations
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GooseFSFuseSpec.
-func (in *GooseFSFuseSpec) DeepCopy() *GooseFSFuseSpec {
- if in == nil {
- return nil
- }
- out := new(GooseFSFuseSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GooseFSRuntime) DeepCopyInto(out *GooseFSRuntime) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GooseFSRuntime.
-func (in *GooseFSRuntime) DeepCopy() *GooseFSRuntime {
- if in == nil {
- return nil
- }
- out := new(GooseFSRuntime)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *GooseFSRuntime) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GooseFSRuntimeList) DeepCopyInto(out *GooseFSRuntimeList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]GooseFSRuntime, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GooseFSRuntimeList.
-func (in *GooseFSRuntimeList) DeepCopy() *GooseFSRuntimeList {
- if in == nil {
- return nil
- }
- out := new(GooseFSRuntimeList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *GooseFSRuntimeList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GooseFSRuntimeSpec) DeepCopyInto(out *GooseFSRuntimeSpec) {
- *out = *in
- out.GooseFSVersion = in.GooseFSVersion
- in.Master.DeepCopyInto(&out.Master)
- in.JobMaster.DeepCopyInto(&out.JobMaster)
- in.Worker.DeepCopyInto(&out.Worker)
- in.JobWorker.DeepCopyInto(&out.JobWorker)
- in.APIGateway.DeepCopyInto(&out.APIGateway)
- in.InitUsers.DeepCopyInto(&out.InitUsers)
- in.Fuse.DeepCopyInto(&out.Fuse)
- if in.Properties != nil {
- in, out := &in.Properties, &out.Properties
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.JvmOptions != nil {
- in, out := &in.JvmOptions, &out.JvmOptions
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- in.TieredStore.DeepCopyInto(&out.TieredStore)
- out.Data = in.Data
- if in.RunAs != nil {
- in, out := &in.RunAs, &out.RunAs
- *out = new(User)
- (*in).DeepCopyInto(*out)
- }
- in.CleanCachePolicy.DeepCopyInto(&out.CleanCachePolicy)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GooseFSRuntimeSpec.
-func (in *GooseFSRuntimeSpec) DeepCopy() *GooseFSRuntimeSpec {
- if in == nil {
- return nil
- }
- out := new(GooseFSRuntimeSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HCFSStatus) DeepCopyInto(out *HCFSStatus) {
*out = *in
diff --git a/charts/fluid-databackup/goosefs/.helmignore b/charts/fluid-databackup/goosefs/.helmignore
deleted file mode 100644
index 0e8a0eb36f4..00000000000
--- a/charts/fluid-databackup/goosefs/.helmignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/charts/fluid-databackup/goosefs/CHANGELOG.md b/charts/fluid-databackup/goosefs/CHANGELOG.md
deleted file mode 100644
index 310fcefa6d1..00000000000
--- a/charts/fluid-databackup/goosefs/CHANGELOG.md
+++ /dev/null
@@ -1,4 +0,0 @@
-### 0.1.0
-
-- Support parallel prefetch pod
-- Support configurations by setting values
diff --git a/charts/fluid-databackup/goosefs/Chart.yaml b/charts/fluid-databackup/goosefs/Chart.yaml
deleted file mode 100644
index bb52d560c9f..00000000000
--- a/charts/fluid-databackup/goosefs/Chart.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: v2
-name: fluid-databackup
-description: A Helm chart for Fluid GooseFS Engine to backup data
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
-type: application
-
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
-
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: 0.1.0
-
-dependencies:
-- name: library
- version: "0.2.0"
- repository: "file://../../library"
diff --git a/charts/fluid-databackup/goosefs/README.md b/charts/fluid-databackup/goosefs/README.md
deleted file mode 100644
index d7d03572b90..00000000000
--- a/charts/fluid-databackup/goosefs/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# fluid-databackup
-
-## Prerequisite
-- Dataset deployed
-- GooseFS Runtime deployed
-- Dataset mountPoint mounted
-- Dataset-related PV, PVC created
-
-## Install
-1. Install fluid-databackup
-
-```shell script
-helm install charts/fluid-databackup
-```
-
-You will see something like this:
-```
-helm install charts/fluid-databackup
-NAME: test
-LAST DEPLOYED: Fri Jan 15 09:18:02 2021
-NAMESPACE: default
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
-```
-
-one datbackup pod will be launched. You will see one pod running on the node:
-```shell script
-kubectl get pods -databackup-pod -o wide
-```
-
-Once the pod completes, you can check filed backuped:
-```shell script
-$ ls
-hbase-default.yaml metadata-backup-hbase-default.gz
-```
-
-## Uninstall
-```
-helm del test
-```
diff --git a/charts/fluid-databackup/goosefs/charts/library b/charts/fluid-databackup/goosefs/charts/library
deleted file mode 120000
index ba40d2f56b5..00000000000
--- a/charts/fluid-databackup/goosefs/charts/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library
\ No newline at end of file
diff --git a/charts/fluid-databackup/goosefs/templates/configmap.yaml b/charts/fluid-databackup/goosefs/templates/configmap.yaml
deleted file mode 100644
index 5300250105d..00000000000
--- a/charts/fluid-databackup/goosefs/templates/configmap.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ .Values.dataBackup.name }}-script
- labels:
- {{- include "library.fluid.labels" . | nindent 4 }}
-data:
- databackuper.goosefs: |
- #!/bin/bash
- dataset=$DATASET_NAME
- namespace=$DATASET_NAMESPACE
- path=$BACKUP_PATH
-
- if [ $BACKUP_PVC ];
- then
- targetPath="/pvc${path}"
- mkdir -p ${targetPath}
- else
- targetPath="/host/"
- fi
-
- result=$(goosefs fsadmin backup | sed -n '2p')
-
- metadatafile=${targetPath}metadata-backup-${dataset}-${namespace}.gz
- metainfofile=${targetPath}${dataset}-${namespace}.yaml
-
- mv ${result##*Backup URI : } ${metadatafile}
-
- result=$(goosefs fs count / | sed -n '2p')
- arr=($result)
- if [ ${#arr[@]} -ne "3" ]; then
- exit 1
- else
- ufstotal=${arr[2]}
- filenum=${arr[0]}
- fi
-
- echo -e "dataset: ${dataset}\nnamespace: ${namespace}\nufstotal: ${ufstotal}\nfilenum: ${filenum}" > ${metainfofile}
-
- if [ ! -f "${metadatafile}" ]; then
- echo "${metadatafile} backup failed"
- exit 1
- fi
- if [ ! -f "${metainfofile}" ]; then
- echo "${metainfofile} backup failed"
- exit 1
- fi
diff --git a/charts/fluid-databackup/goosefs/templates/databackup.yaml b/charts/fluid-databackup/goosefs/templates/databackup.yaml
deleted file mode 100644
index 005803ab10f..00000000000
--- a/charts/fluid-databackup/goosefs/templates/databackup.yaml
+++ /dev/null
@@ -1,133 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: {{ .Values.dataBackup.name }}-pod
- {{- if .Values.dataBackup.namespace }}
- namespace: {{ .Values.dataBackup.namespace }}
- {{- end }}
- labels:
- {{- include "library.fluid.labels" . | nindent 4 }}
-spec:
- {{- if .Values.dataBackup.nodeName }}
- nodeName: {{ .Values.dataBackup.nodeName }}
- {{- end }}
- {{- with .Values.dataBackup.imagePullSecrets }}
- imagePullSecrets:
- {{- toYaml . | nindent 4 }}
- {{- end }}
- {{ if .Values.initUsers.enabled -}}
- initContainers:
- - name: init-users
- image: {{ .Values.initUsers.image }}:{{ .Values.initUsers.imageTag }}
- imagePullPolicy: {{ .Values.initUsers.imagePullPolicy }}
- command: ["/entrypoint.sh"]
- args:
- - "init_users"
- env:
- {{- if .Values.initUsers.envUsers }}
- - name: FLUID_INIT_USERS
- value: {{.Values.initUsers.envUsers | quote }}
- {{- end }}
- volumeMounts:
- - name: dir
- mountPath: /tmp
- - name: user
- mountPath: /tmp/passwd
- - name: group
- mountPath: /tmp/group
- {{- end }}
- containers:
- - name: tool
- command: ["/bin/sh", "-c"]
- args:
- - "/scripts/databackup.sh"
- image: {{ .Values.dataBackup.image }}
- imagePullPolicy: IfNotPresent
- securityContext:
- runAsUser: {{ .Values.user }}
- runAsGroup: {{ .Values.group }}
- env:
- {{- if .Values.dataBackup.javaEnv }}
- - name: GOOSEFS_JAVA_OPTS
- value: {{ .Values.dataBackup.javaEnv | quote }}
- {{- end }}
- {{- if .Values.dataBackup.namespace }}
- - name: DATASET_NAMESPACE
- value: {{ .Values.dataBackup.namespace | quote }}
- {{- end }}
- {{- if .Values.dataBackup.dataset }}
- - name: DATASET_NAME
- value: {{ .Values.dataBackup.dataset | quote }}
- {{- end }}
- {{- if .Values.dataBackup.pvcName }}
- - name: BACKUP_PVC
- value: {{ .Values.dataBackup.pvcName | quote }}
- {{- end }}
- {{- if .Values.dataBackup.path }}
- - name: BACKUP_PATH
- value: {{ .Values.dataBackup.path | quote }}
- {{- end }}
- volumeMounts:
- {{- if .Values.dataBackup.workdir }}
- - name: backup
- mountPath: /goosefs_backups
- {{- end }}
- - mountPath: /scripts
- name: script
- {{- if .Values.dataBackup.pvcName }}
- - mountPath: /pvc
- name: pvc
- {{- else }}
- - mountPath: /host
- name: host
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end}}
- restartPolicy: Never
- volumes:
- {{- if .Values.dataBackup.workdir }}
- - name: backup
- hostPath:
- {{- if .Values.dataBackup.namespace }}
- path: {{ .Values.dataBackup.workdir }}/goosefs-backup/{{ .Values.dataBackup.namespace }}/{{ .Values.dataBackup.dataset }}
- {{- else }}
- path: {{ .Values.dataBackup.workdir }}/goosefs-backup/default/{{ .Values.dataBackup.dataset }}
- {{- end }}
- type: DirectoryOrCreate
- {{- end }}
- {{- if .Values.dataBackup.pvcName }}
- - name: pvc
- persistentVolumeClaim:
- claimName: {{ .Values.dataBackup.pvcName }}
- {{- else }}
- - name: host
- hostPath:
- path: {{ .Values.dataBackup.path }}
- {{- end }}
- - name: script
- configMap:
- name: {{ .Values.dataBackup.name }}-script
- items:
- - key: databackuper.goosefs
- path: databackup.sh
- mode: 365
- {{- if .Values.initUsers.enabled }}
- - name: dir
- hostPath:
- path: {{ .Values.initUsers.dir }}
- type: DirectoryOrCreate
- - name: user
- hostPath:
- path: {{ .Values.initUsers.dir }}/passwd
- type: FileOrCreate
- - name: group
- hostPath:
- path: {{ .Values.initUsers.dir }}/group
- type: FileOrCreate
- {{- end }}
\ No newline at end of file
diff --git a/charts/fluid-databackup/goosefs/values.yaml b/charts/fluid-databackup/goosefs/values.yaml
deleted file mode 100644
index bfc23d47e94..00000000000
--- a/charts/fluid-databackup/goosefs/values.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Default values for fluid-dataloader.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-dataBackup:
- # Optional
- # Default: default
- # Description: the namespace of the dataset and dataBackup
- namespace: #
-
- # Required
- # Description: the dataset that this DataBackup targets
- dataset: #
-
- # Required
- # Description: the name of DataBackup
- name: #
-
- # Optional
- # Description: the node to run the backup pod
- nodeName: #
-
- # Required
- # Description: the backup pod image
- image: ccr.ccs.tencentyun.com/goosefs/goosefs:v1.0.1
-
- # Required
- # Description: the java Env to find the master
- javaEnv:
-
- # Required
- # Description: the work path in node
- workdir: /tmp
-
- # Required
- # Description: the path to save data
- path: /
-
- # Optional
- # Description: the pvc to save data
- # if it is null, will backup in local
- # pvcName: test
-
- # Optional
- # Description: optional image pull secrets on DataLoad pods
- imagePullSecrets: []
-
-initUsers:
- enabled: false
- image: registry.cn-hangzhou.aliyuncs.com/fluid/init-users
- imageTag: v0.3.0-1467caa
- imagePullPolicy: IfNotPresent
- args: "1000"
- envUsers: ""
- dir: ""
-
-# Security Context
-user: 1000
-group: 1000
-fsGroup: 1000
diff --git a/charts/fluid-dataloader/goosefs/CHANGELOG.md b/charts/fluid-dataloader/goosefs/CHANGELOG.md
deleted file mode 100644
index 7147742637c..00000000000
--- a/charts/fluid-dataloader/goosefs/CHANGELOG.md
+++ /dev/null
@@ -1,16 +0,0 @@
-### 0.1.0
-
-- Support parallel prefetch job
-- Support configurations by setting values
-
-### 0.9.0
-- Support for setting affinity,tolerations,nodeSelector,schedulerName
-
-### 0.10.0
-- Support cron dataload
-
-### 0.10.3
-- Fix incorrect indentation of cron dataload template
-
-### 0.10.4
-- Refactor environment variable handling
\ No newline at end of file
diff --git a/charts/fluid-dataloader/goosefs/Chart.yaml b/charts/fluid-dataloader/goosefs/Chart.yaml
deleted file mode 100644
index a8e1a343d5c..00000000000
--- a/charts/fluid-dataloader/goosefs/Chart.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: v2
-name: fluid-dataloader
-description: A Helm chart for Fluid to prefetch data
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
-type: application
-
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.10.4
-
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: 0.1.0
-
-dependencies:
-- name: library
- version: "0.2.0"
- repository: "file://../../library"
diff --git a/charts/fluid-dataloader/goosefs/README.md b/charts/fluid-dataloader/goosefs/README.md
deleted file mode 100644
index 120f6ecd0dd..00000000000
--- a/charts/fluid-dataloader/goosefs/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# fluid-dataloader
-
-## Prerequisite
-- Dataset deployed
-- GooseFS Runtime deployed
-- Dataset mountPoint mounted
-- Dataset-related PV, PVC created
-
-## Install
-1. get dataset-related PVC name
-```shell script
-$ kubectl get pvc
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- Bound 100Gi RWX 4h5m
-```
-Say `` is the name of your dataset-related PVC, usually it's the same name as your dataset.
-
-2. get num of GooseFS workers
-```shell script
-kubectl get pod -l release= | grep -c "worker"
-```
-
-3. Install fluid-dataloader
-
-```shell script
-helm install \
- --set dataloader.numWorker= \
- --set dataloader.threads=2 \
- -load charts/fluid-dataloader
-```
-
-You will see something like this:
-```
-helm install hbase-load charts/fluid-dataloader/
-NAME: -load
-LAST DEPLOYED: Fri Jul 31 19:52:11 2020
-NAMESPACE: default
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
-```
-
-Some dataloader jobs will be launched. You will see multiple jobs running on different nodes:
-```shell script
-kubectl get pod -o wide -l role=goosefs-dataloader
-```
-
-Once some job completes, you can check time consumed during data prefetch:
-```shell script
-kubectl logs -loader-xxxxx
-```
-and see something like this:
-```
-THREADS=2
-DATAPATH=/data/*
-python multithread_read_benchmark.py --threads=2 --path=/data/*
-/data/* contains 15 items
-/data/* processing 15 items with 2 threads uses 32.6712441444s, avg 0.459119338513/s, avg 8743748.5924B/s, avg 8.33868846169MiB/s
-```
-
-Now then, all data should be cached, reinstall it:
-```shell script
-helm del
-
-helm install \
- --set dataloader.numWorker= \
- --set dataloader.threads=2 \
- -load charts/fluid-dataloader
-```
-
-check again, and this time should be much faster:
-```shell script
-kubectl logs -loader-yyyyy
-```
-```
-THREADS=2
-DATAPATH=/data/*
-python multithread_read_benchmark.py --threads=2 --path=/data/*
-/data/* contains 15 items
-/data/* processing 15 items with 2 threads uses 0.308158159256s, avg 48.6763032211/s, avg 927021194.862B/s, avg 884.076304304MiB/s
-```
-
-## Uninstall
-```
-helm del
-```
diff --git a/charts/fluid-dataloader/goosefs/charts/library b/charts/fluid-dataloader/goosefs/charts/library
deleted file mode 120000
index ba40d2f56b5..00000000000
--- a/charts/fluid-dataloader/goosefs/charts/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library
\ No newline at end of file
diff --git a/charts/fluid-dataloader/goosefs/templates/configmap.yaml b/charts/fluid-dataloader/goosefs/templates/configmap.yaml
deleted file mode 100644
index c96d3214ccb..00000000000
--- a/charts/fluid-dataloader/goosefs/templates/configmap.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ printf "%s-data-load-script" .Release.Name }}
- labels:
- release: {{ .Release.Name }}
- role: dataload-job
- {{- include "library.fluid.labels" . | nindent 4 }}
-data:
- dataloader.goosefs.init: |
- #!/usr/bin/env bash
- set -xe
- goosefs_env_vars=(
- GOOSEFS_CLASSPATH
- GOOSEFS_HOSTNAME
- GOOSEFS_JARS
- GOOSEFS_JAVA_OPTS
- GOOSEFS_MASTER_JAVA_OPTS
- GOOSEFS_PROXY_JAVA_OPTS
- GOOSEFS_RAM_FOLDER
- GOOSEFS_USER_JAVA_OPTS
- GOOSEFS_WORKER_JAVA_OPTS
- GOOSEFS_JOB_MASTER_JAVA_OPTS
- GOOSEFS_JOB_WORKER_JAVA_OPTS
- )
- GOOSEFS_HOME=/opt/goosefs
- function public::goosefs::init_conf() {
- for key in "${goosefs_env_vars[@]}"; do
- if [[ -v $key ]]; then
- echo "export ${key}=\"${!key}\"" >> $GOOSEFS_HOME/conf/goosefs-env.sh
- fi
- done
- }
- main() {
- public::goosefs::init_conf
- }
- main
- dataloader.goosefs.distributedLoad: |
- #!/usr/bin/env bash
- set -xe
-
- function checkPathExistence() {
- local path=$1
- local checkPathResult=$(timeout 30s goosefs fs ls "$path" |& tail -1)
- local strUnexistence="does not exist"
- if [[ $checkPathResult =~ $strUnexistence ]]; then
- echo -e "dataLoad failed because some paths not exist."
- exit 1
- fi
- }
-
- function distributedLoad() {
- local path=$1
- local replica=$2
- checkPathExistence "$path"
- goosefs fs setReplication --max $replica -R $path
- if [[ $needLoadMetadata == 'true' ]]; then
- time goosefs fs distributedLoad -Dgoosefs.user.file.metadata.sync.interval=0 --replication $replica $path
- elif [[ $enableAtomicCache == 'true' ]]; then
- time goosefs fs distributedLoad -A --replication $replica $path --expire-time $atomicCacheTTL
- else
- time goosefs fs distributedLoad --replication $replica $path
- fi
- }
-
- function main() {
- needLoadMetadata="$NEED_LOAD_METADATA"
- enableAtomicCache="$ENABLE_ATOMIC_CACHE"
- atomicCacheTTL="$ATOMIC_CACHE_TTL"
- if [[ $enableAtomicCache == 'true' ]]; then
- needLoadMetadata="false"
- echo -e "when enable atomiccache, can't load metadata"
- fi
-
- if [[ $needLoadMetadata == 'true' ]]; then
- if [[ -d "/data" ]]; then
- du -sh "/data"
- fi
- fi
- paths="$DATA_PATH"
- paths=(${paths//:/ })
- replicas="$PATH_REPLICAS"
- replicas=(${replicas//:/ })
- for((i=0;i<${#paths[@]};i++)) do
- local path="${paths[i]}"
- local replica="${replicas[i]}"
- echo -e "distributedLoad on $path starts"
- distributedLoad ${paths[i]} ${replicas[i]}
- echo -e "distributedLoad on $path ends"
- done
- }
-
- main "$@"
diff --git a/charts/fluid-dataloader/goosefs/templates/cronjob.yaml b/charts/fluid-dataloader/goosefs/templates/cronjob.yaml
deleted file mode 100644
index 0fe4c456ac5..00000000000
--- a/charts/fluid-dataloader/goosefs/templates/cronjob.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
- {{/* {{ $datasetName := "" -}}*/}}
- {{/* {{- $randomSuffix := "" -}}*/}}
- {{/* {{- if regexMatch "^[A-Za-z0-9._-]+-load-[A-Za-z0-9]{5}$" .Release.Name -}}*/}}
- {{/* {{- $arr := regexSplit "-load-" .Release.Name -1 -}}*/}}
- {{/* {{- $datasetName = first $arr -}}*/}}
- {{/* {{- $randomSuffix = last $arr -}}*/}}
- {{/* {{- else -}}*/}}
- {{/* {{- printf "Illegal release name. Should be like -load-. Current name: %s" .Release.Name | fail -}}*/}}
- {{/* {{- end }}*/}}
-{{- if eq (lower .Values.dataloader.policy) "cron" }}
-apiVersion: {{ ternary "batch/v1" "batch/v1beta1" (.Capabilities.APIVersions.Has "batch/v1/CronJob") }}
-kind: CronJob
-metadata:
- name: {{ printf "%s-job" .Release.Name }}
- labels:
- release: {{ .Release.Name }}
- role: dataload-cronjob
- app: goosefs
- targetDataset: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}
- dataload: {{ .Values.name }}
- fluid.io/jobPolicy: cron
- {{- include "library.fluid.labels" . | nindent 4 }}
- ownerReferences:
- {{- if .Values.owner.enabled }}
- - apiVersion: {{ .Values.owner.apiVersion }}
- blockOwnerDeletion: {{ .Values.owner.blockOwnerDeletion }}
- controller: {{ .Values.owner.controller }}
- kind: {{ .Values.owner.kind }}
- name: {{ .Values.owner.name }}
- uid: {{ .Values.owner.uid }}
- {{- end }}
-spec:
- schedule: "{{ .Values.dataloader.schedule }}"
- jobTemplate:
- spec:
- backoffLimit: {{ .Values.dataloader.backoffLimit | default "3" }}
- completions: 1
- parallelism: 1
- template:
- metadata:
- name: {{ printf "%s-loader" .Release.Name }}
- annotations:
- sidecar.istio.io/inject: "false"
- labels:
- release: {{ .Release.Name }}
- role: dataload-pod
- app: goosefs
- cronjob: {{ printf "%s-job" .Release.Name }}
- targetDataset: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}
- {{- include "library.fluid.labels" . | nindent 12 }}
- spec:
- {{- include "library.fluid.dataload.cronJobCommonTemplateSpec" . | nindent 10 }}
- containers:
- - name: dataloader
- image: {{ required "Dataloader image should be set" .Values.dataloader.image }}
- imagePullPolicy: IfNotPresent
- command: ["/bin/sh", "-c"]
- args: ["/scripts/env_init.sh && /scripts/dataload.sh"]
- {{- $targetPaths := "" }}
- {{- range .Values.dataloader.targetPaths }}
- {{- $targetPaths = cat $targetPaths (required "Path must be set" .path) ":" }}
- {{- end }}
- {{- $targetPaths = $targetPaths | nospace | trimSuffix ":" }}
-
- {{- $pathReplicas := ""}}
- {{- range .Values.dataloader.targetPaths }}
- {{- $pathReplicas = cat $pathReplicas ( default 1 .replicas ) ":"}}
- {{- end }}
- {{- $pathReplicas = $pathReplicas | nospace | trimSuffix ":"}}
- env:
- - name: GOOSEFS_CLIENT_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- - name: GOOSEFS_CLIENT_JAVA_OPTS
- value: " -Dgoosefs.user.hostname=${GOOSEFS_CLIENT_HOSTNAME}"
- - name: NEED_LOAD_METADATA
- value: {{ default false .Values.dataloader.loadMetadata | quote }}
- - name: DATA_PATH
- value: {{ $targetPaths | quote }}
- - name: PATH_REPLICAS
- value: {{ $pathReplicas | quote }}
- {{- range $key, $val := .Values.dataloader.options }}
- {{- if eq $key "atomicCache" }}
- - name: ENABLE_ATOMIC_CACHE
- value: {{ default false $val | quote }}
- {{- else if eq $key "expireTime" }}
- - name: ATOMIC_CACHE_TTL
- value: {{ default 43200000 $val | quote }}
- {{- end }}
- {{- end }}
- envFrom:
- - configMapRef:
- name: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}-config
- volumeMounts:
- - mountPath: /scripts
- name: data-load-script
- {{- range .Values.dataloader.targetPaths }}
- {{- if .fluidNative }}
- - mountPath: {{ .path | trimAll "/" | replace "/" "-" | printf "/data/%s"}}
- name: {{ .path | trimAll "/" | replace "/" "-" | printf "native-%s"}}
- {{- end }}
- {{- end }}
- volumes:
- - name: data-load-script
- configMap:
- name: {{ printf "%s-data-load-script" .Release.Name }}
- items:
- - key: dataloader.goosefs.init
- path: env_init.sh
- mode: 365
- - key: dataloader.goosefs.distributedLoad
- path: dataload.sh
- mode: 365
- {{- range .Values.dataloader.targetPaths }}
- {{- if .fluidNative }}
- - name: {{ .path | trimAll "/" | replace "/" "-" | printf "native-%s"}}
- hostPath:
- path: {{ .path }}
- {{- end }}
- {{- end }}
-
-{{- end }}
\ No newline at end of file
diff --git a/charts/fluid-dataloader/goosefs/templates/job.yaml b/charts/fluid-dataloader/goosefs/templates/job.yaml
deleted file mode 100644
index 0ec468a2ac8..00000000000
--- a/charts/fluid-dataloader/goosefs/templates/job.yaml
+++ /dev/null
@@ -1,136 +0,0 @@
- {{/* {{ $datasetName := "" -}}*/}}
- {{/* {{- $randomSuffix := "" -}}*/}}
- {{/* {{- if regexMatch "^[A-Za-z0-9._-]+-load-[A-Za-z0-9]{5}$" .Release.Name -}}*/}}
- {{/* {{- $arr := regexSplit "-load-" .Release.Name -1 -}}*/}}
- {{/* {{- $datasetName = first $arr -}}*/}}
- {{/* {{- $randomSuffix = last $arr -}}*/}}
- {{/* {{- else -}}*/}}
- {{/* {{- printf "Illegal release name. Should be like -load-. Current name: %s" .Release.Name | fail -}}*/}}
- {{/* {{- end }}*/}}
-{{- if or (eq (lower .Values.dataloader.policy) "") (eq (lower .Values.dataloader.policy) "once") }}
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: {{ printf "%s-job" .Release.Name }}
- labels:
- release: {{ .Release.Name }}
- role: dataload-job
- app: goosefs
- targetDataset: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}
- {{- include "library.fluid.labels" . | nindent 4 }}
- ownerReferences:
- {{- if .Values.owner.enabled }}
- - apiVersion: {{ .Values.owner.apiVersion }}
- blockOwnerDeletion: {{ .Values.owner.blockOwnerDeletion }}
- controller: {{ .Values.owner.controller }}
- kind: {{ .Values.owner.kind }}
- name: {{ .Values.owner.name }}
- uid: {{ .Values.owner.uid }}
- {{- end }}
-spec:
- backoffLimit: {{ .Values.dataloader.backoffLimit | default "3" }}
- completions: 1
- parallelism: 1
- template:
- metadata:
- name: {{ printf "%s-loader" .Release.Name }}
- annotations:
- sidecar.istio.io/inject: "false"
- labels:
- release: {{ .Release.Name }}
- role: dataload-pod
- app: goosefs
- targetDataset: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}
- {{- include "library.fluid.labels" . | nindent 8 }}
- spec:
- {{- if .Values.dataloader.schedulerName }}
- schedulerName: {{ .Values.dataloader.schedulerName }}
- {{- end }}
- {{- with .Values.dataloader.nodeSelector }}
- nodeSelector:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.dataloader.affinity }}
- affinity:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.dataloader.tolerations }}
- tolerations:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- restartPolicy: Never
- {{- with .Values.dataloader.imagePullSecrets }}
- imagePullSecrets:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- containers:
- - name: dataloader
- image: {{ required "Dataloader image should be set" .Values.dataloader.image }}
- imagePullPolicy: IfNotPresent
- command: ["/bin/sh", "-c"]
- args: ["/scripts/env_init.sh && /scripts/dataload.sh"]
- {{- $targetPaths := "" }}
- {{- range .Values.dataloader.targetPaths }}
- {{- $targetPaths = cat $targetPaths (required "Path must be set" .path) ":" }}
- {{- end }}
- {{- $targetPaths = $targetPaths | nospace | trimSuffix ":" }}
-
- {{- $pathReplicas := ""}}
- {{- range .Values.dataloader.targetPaths }}
- {{- $pathReplicas = cat $pathReplicas ( default 1 .replicas ) ":"}}
- {{- end }}
- {{- $pathReplicas = $pathReplicas | nospace | trimSuffix ":"}}
- env:
- - name: GOOSEFS_CLIENT_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- - name: GOOSEFS_CLIENT_JAVA_OPTS
- value: " -Dgoosefs.user.hostname=${GOOSEFS_CLIENT_HOSTNAME}"
- - name: NEED_LOAD_METADATA
- value: {{ default false .Values.dataloader.loadMetadata | quote }}
- - name: DATA_PATH
- value: {{ $targetPaths | quote }}
- - name: PATH_REPLICAS
- value: {{ $pathReplicas | quote }}
- {{- range $key, $val := .Values.dataloader.options }}
- {{- if eq $key "atomicCache" }}
- - name: ENABLE_ATOMIC_CACHE
- value: {{ default false $val | quote }}
- {{- else if eq $key "expireTime" }}
- - name: ATOMIC_CACHE_TTL
- value: {{ default 43200000 $val | quote }}
- {{- end }}
- {{- end }}
- envFrom:
- - configMapRef:
- name: {{ required "targetDataset should be set" .Values.dataloader.targetDataset }}-config
- volumeMounts:
- - mountPath: /scripts
- name: data-load-script
- {{- range .Values.dataloader.targetPaths }}
- {{- if .fluidNative }}
- - mountPath: {{ .path | trimAll "/" | replace "/" "-" | printf "/data/%s"}}
- name: {{ .path | trimAll "/" | replace "/" "-" | printf "native-%s"}}
- {{- end }}
- {{- end }}
- volumes:
- - name: data-load-script
- configMap:
- name: {{ printf "%s-data-load-script" .Release.Name }}
- items:
- - key: dataloader.goosefs.init
- path: env_init.sh
- mode: 365
- - key: dataloader.goosefs.distributedLoad
- path: dataload.sh
- mode: 365
- {{- range .Values.dataloader.targetPaths }}
- {{- if .fluidNative }}
- - name: {{ .path | trimAll "/" | replace "/" "-" | printf "native-%s"}}
- hostPath:
- path: {{ .path }}
- {{- end }}
- {{- end }}
-
-{{- end }}
\ No newline at end of file
diff --git a/charts/fluid-dataloader/goosefs/values.yaml b/charts/fluid-dataloader/goosefs/values.yaml
deleted file mode 100644
index fbfdbb1e378..00000000000
--- a/charts/fluid-dataloader/goosefs/values.yaml
+++ /dev/null
@@ -1,106 +0,0 @@
-# Default values for fluid-dataloader.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-name:
-
-owner:
- enabled: false
- name: ""
- kind: ""
- uid: ""
- apiVersion: ""
- blockOwnerDeletion: false
- controller: false
-
-dataloader:
- # Required
- # Default: once
- # Description: policy of data load
- policy: ""
-
- # Optional
- # Description: schedule for cron policy
- schedule:
-
- # Optional
- # Default: 3
- # Description: how many times the prefetch job can fail, i.e. `Job.spec.backoffLimit`
- backoffLimit: 3
-
- # Required
- # Description: the dataset that this DataLoad targets
- # targetDataset: imagenet
- targetDataset: ""
-
- # Optional
- # Default: false
- # Description: should load metadata from UFS when doing data load
- loadMetadata: false
-
- # Optional
- # Options
- # atomicCache: false
- # Default: false
- # Description: when load path is still in cos but not in goosefs, if it not loaded in cache 100%, it will be invisible.
-
- # expireTime: 43200000
- # Default: 43200000
- # Description: when enable atomiccache and failed, ttl will avoid space waste, default is 12h, unit is ms.
-
- # Optional
- # Default: (path: "/", replicas: 1, fluidNative: false)
- # Description: which paths should the DataLoad load
- targetPaths:
- - path: "/"
- replicas: 1
- fluidNative: false
-
- # Required
- # Description: the image that the DataLoad job uses
- image:
-
- # Optional
- # Description: optional image pull secrets on DataLoad pods
- imagePullSecrets: []
-
- # Optional
- # Description: optional pod affinity
- # affinity:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: topology.kubernetes.io/zone
- # operator: In
- # values:
- # - antarctica-east1
- # - antarctica-west1
- # preferredDuringSchedulingIgnoredDuringExecution:
- # - weight: 1
- # preference:
- # matchExpressions:
- # - key: another-node-label-key
- # operator: In
- # values:
- # - another-node-label-value
- affinity: {}
-
- # Optional
- # Description: optional pod Tolerations
- #tolerations:
- # - key: "example-key"
- # operator: "Exists"
- # effect: "NoSchedule"
- tolerations: []
-
- # Optional
- # Description: optional pod scheduler definition
- #schedulerName: "scheduler"
- schedulerName: ""
-
- # Optional
- # Description: optional pod node selector
- #nodeSelector:
- # diskType: "ssd"
- nodeSelector: {}
diff --git a/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml b/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml
deleted file mode 100644
index c5f4dd06bf6..00000000000
--- a/charts/fluid/fluid/crds/data.fluid.io_goosefsruntimes.yaml
+++ /dev/null
@@ -1,1590 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.19.0
- name: goosefsruntimes.data.fluid.io
-spec:
- group: data.fluid.io
- names:
- categories:
- - fluid
- kind: GooseFSRuntime
- listKind: GooseFSRuntimeList
- plural: goosefsruntimes
- shortNames:
- - goose
- singular: goosefsruntime
- scope: Namespaced
- versions:
- - additionalPrinterColumns:
- - jsonPath: .status.masterNumberReady
- name: Ready Masters
- priority: 10
- type: integer
- - jsonPath: .status.desiredMasterNumberScheduled
- name: Desired Masters
- priority: 10
- type: integer
- - jsonPath: .status.masterPhase
- name: Master Phase
- type: string
- - jsonPath: .status.workerNumberReady
- name: Ready Workers
- priority: 10
- type: integer
- - jsonPath: .status.desiredWorkerNumberScheduled
- name: Desired Workers
- priority: 10
- type: integer
- - jsonPath: .status.workerPhase
- name: Worker Phase
- type: string
- - jsonPath: .status.fuseNumberReady
- name: Ready Fuses
- priority: 10
- type: integer
- - jsonPath: .status.desiredFuseNumberScheduled
- name: Desired Fuses
- priority: 10
- type: integer
- - jsonPath: .status.fusePhase
- name: Fuse Phase
- type: string
- - jsonPath: .status.apiGateway.endpoint
- name: API Gateway
- priority: 10
- type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1alpha1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- properties:
- apiGateway:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- cleanCachePolicy:
- properties:
- gracePeriodSeconds:
- default: 60
- format: int32
- type: integer
- maxRetryAttempts:
- default: 3
- format: int32
- type: integer
- type: object
- data:
- properties:
- pin:
- type: boolean
- replicas:
- format: int32
- type: integer
- type: object
- disablePrometheus:
- type: boolean
- fuse:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- args:
- items:
- type: string
- type: array
- cleanPolicy:
- type: string
- env:
- additionalProperties:
- type: string
- type: object
- image:
- type: string
- imagePullPolicy:
- type: string
- imageTag:
- type: string
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- goosefsVersion:
- properties:
- image:
- type: string
- imagePullPolicy:
- type: string
- imageTag:
- type: string
- type: object
- hadoopConfig:
- type: string
- initUsers:
- properties:
- env:
- additionalProperties:
- type: string
- type: object
- image:
- type: string
- imagePullPolicy:
- type: string
- imageTag:
- type: string
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- jobMaster:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- jobWorker:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- master:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- type: integer
- runAs:
- properties:
- gid:
- format: int64
- type: integer
- group:
- type: string
- uid:
- format: int64
- type: integer
- user:
- type: string
- required:
- - gid
- - group
- - uid
- - user
- type: object
- tieredstore:
- properties:
- levels:
- items:
- properties:
- high:
- type: string
- low:
- type: string
- mediumtype:
- enum:
- - MEM
- - SSD
- - HDD
- type: string
- path:
- minLength: 1
- type: string
- quota:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- quotaList:
- pattern: ^((\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+)))),)+((\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$
- type: string
- volumeSource:
- properties:
- awsElasticBlockStore:
- properties:
- fsType:
- type: string
- partition:
- format: int32
- type: integer
- readOnly:
- type: boolean
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- azureDisk:
- properties:
- cachingMode:
- type: string
- diskName:
- type: string
- diskURI:
- type: string
- fsType:
- type: string
- kind:
- type: string
- readOnly:
- type: boolean
- required:
- - diskName
- - diskURI
- type: object
- azureFile:
- properties:
- readOnly:
- type: boolean
- secretName:
- type: string
- shareName:
- type: string
- required:
- - secretName
- - shareName
- type: object
- cephfs:
- properties:
- monitors:
- items:
- type: string
- type: array
- path:
- type: string
- readOnly:
- type: boolean
- secretFile:
- type: string
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- user:
- type: string
- required:
- - monitors
- type: object
- cinder:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- configMap:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- name:
- type: string
- optional:
- type: boolean
- type: object
- x-kubernetes-map-type: atomic
- csi:
- properties:
- driver:
- type: string
- fsType:
- type: string
- nodePublishSecretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- readOnly:
- type: boolean
- volumeAttributes:
- additionalProperties:
- type: string
- type: object
- required:
- - driver
- type: object
- downwardAPI:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
- properties:
- fieldRef:
- properties:
- apiVersion:
- type: string
- fieldPath:
- type: string
- required:
- - fieldPath
- type: object
- x-kubernetes-map-type: atomic
- mode:
- format: int32
- type: integer
- path:
- type: string
- resourceFieldRef:
- properties:
- containerName:
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- type: string
- required:
- - resource
- type: object
- x-kubernetes-map-type: atomic
- required:
- - path
- type: object
- type: array
- type: object
- emptyDir:
- properties:
- medium:
- type: string
- sizeLimit:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- ephemeral:
- properties:
- volumeClaimTemplate:
- properties:
- metadata:
- type: object
- spec:
- properties:
- accessModes:
- items:
- type: string
- type: array
- dataSource:
- properties:
- apiGroup:
- type: string
- kind:
- type: string
- name:
- type: string
- required:
- - kind
- - name
- type: object
- x-kubernetes-map-type: atomic
- dataSourceRef:
- properties:
- apiGroup:
- type: string
- kind:
- type: string
- name:
- type: string
- namespace:
- type: string
- required:
- - kind
- - name
- type: object
- resources:
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- selector:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- type: object
- type: object
- x-kubernetes-map-type: atomic
- storageClassName:
- type: string
- volumeAttributesClassName:
- type: string
- volumeMode:
- type: string
- volumeName:
- type: string
- type: object
- required:
- - spec
- type: object
- type: object
- fc:
- properties:
- fsType:
- type: string
- lun:
- format: int32
- type: integer
- readOnly:
- type: boolean
- targetWWNs:
- items:
- type: string
- type: array
- wwids:
- items:
- type: string
- type: array
- type: object
- flexVolume:
- properties:
- driver:
- type: string
- fsType:
- type: string
- options:
- additionalProperties:
- type: string
- type: object
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- required:
- - driver
- type: object
- flocker:
- properties:
- datasetName:
- type: string
- datasetUUID:
- type: string
- type: object
- gcePersistentDisk:
- properties:
- fsType:
- type: string
- partition:
- format: int32
- type: integer
- pdName:
- type: string
- readOnly:
- type: boolean
- required:
- - pdName
- type: object
- gitRepo:
- properties:
- directory:
- type: string
- repository:
- type: string
- revision:
- type: string
- required:
- - repository
- type: object
- glusterfs:
- properties:
- endpoints:
- type: string
- path:
- type: string
- readOnly:
- type: boolean
- required:
- - endpoints
- - path
- type: object
- hostPath:
- properties:
- path:
- type: string
- type:
- type: string
- required:
- - path
- type: object
- iscsi:
- properties:
- chapAuthDiscovery:
- type: boolean
- chapAuthSession:
- type: boolean
- fsType:
- type: string
- initiatorName:
- type: string
- iqn:
- type: string
- iscsiInterface:
- type: string
- lun:
- format: int32
- type: integer
- portals:
- items:
- type: string
- type: array
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- targetPortal:
- type: string
- required:
- - iqn
- - lun
- - targetPortal
- type: object
- nfs:
- properties:
- path:
- type: string
- readOnly:
- type: boolean
- server:
- type: string
- required:
- - path
- - server
- type: object
- persistentVolumeClaim:
- properties:
- claimName:
- type: string
- readOnly:
- type: boolean
- required:
- - claimName
- type: object
- photonPersistentDisk:
- properties:
- fsType:
- type: string
- pdID:
- type: string
- required:
- - pdID
- type: object
- portworxVolume:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- projected:
- properties:
- defaultMode:
- format: int32
- type: integer
- sources:
- items:
- properties:
- clusterTrustBundle:
- properties:
- labelSelector:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- type: object
- type: object
- x-kubernetes-map-type: atomic
- name:
- type: string
- optional:
- type: boolean
- path:
- type: string
- signerName:
- type: string
- required:
- - path
- type: object
- configMap:
- properties:
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- name:
- type: string
- optional:
- type: boolean
- type: object
- x-kubernetes-map-type: atomic
- downwardAPI:
- properties:
- items:
- items:
- properties:
- fieldRef:
- properties:
- apiVersion:
- type: string
- fieldPath:
- type: string
- required:
- - fieldPath
- type: object
- x-kubernetes-map-type: atomic
- mode:
- format: int32
- type: integer
- path:
- type: string
- resourceFieldRef:
- properties:
- containerName:
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- type: string
- required:
- - resource
- type: object
- x-kubernetes-map-type: atomic
- required:
- - path
- type: object
- type: array
- type: object
- secret:
- properties:
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- name:
- type: string
- optional:
- type: boolean
- type: object
- x-kubernetes-map-type: atomic
- serviceAccountToken:
- properties:
- audience:
- type: string
- expirationSeconds:
- format: int64
- type: integer
- path:
- type: string
- required:
- - path
- type: object
- type: object
- type: array
- type: object
- quobyte:
- properties:
- group:
- type: string
- readOnly:
- type: boolean
- registry:
- type: string
- tenant:
- type: string
- user:
- type: string
- volume:
- type: string
- required:
- - registry
- - volume
- type: object
- rbd:
- properties:
- fsType:
- type: string
- image:
- type: string
- keyring:
- type: string
- monitors:
- items:
- type: string
- type: array
- pool:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- user:
- type: string
- required:
- - image
- - monitors
- type: object
- scaleIO:
- properties:
- fsType:
- type: string
- gateway:
- type: string
- protectionDomain:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- sslEnabled:
- type: boolean
- storageMode:
- type: string
- storagePool:
- type: string
- system:
- type: string
- volumeName:
- type: string
- required:
- - gateway
- - secretRef
- - system
- type: object
- secret:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- optional:
- type: boolean
- secretName:
- type: string
- type: object
- storageos:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- volumeName:
- type: string
- volumeNamespace:
- type: string
- type: object
- vsphereVolume:
- properties:
- fsType:
- type: string
- storagePolicyID:
- type: string
- storagePolicyName:
- type: string
- volumePath:
- type: string
- required:
- - volumePath
- type: object
- type: object
- volumeType:
- default: hostPath
- enum:
- - hostPath
- - emptyDir
- type: string
- required:
- - mediumtype
- type: object
- type: array
- type: object
- worker:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- type: object
- status:
- properties:
- apiGateway:
- properties:
- endpoint:
- type: string
- type: object
- cacheAffinity:
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- items:
- properties:
- preference:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- x-kubernetes-map-type: atomic
- weight:
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- properties:
- nodeSelectorTerms:
- items:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- x-kubernetes-map-type: atomic
- type: array
- required:
- - nodeSelectorTerms
- type: object
- x-kubernetes-map-type: atomic
- type: object
- cacheStates:
- additionalProperties:
- type: string
- type: object
- conditions:
- items:
- properties:
- lastProbeTime:
- format: date-time
- type: string
- lastTransitionTime:
- format: date-time
- type: string
- message:
- type: string
- reason:
- type: string
- status:
- type: string
- type:
- type: string
- required:
- - status
- - type
- type: object
- type: array
- currentFuseNumberScheduled:
- format: int32
- type: integer
- currentMasterNumberScheduled:
- format: int32
- type: integer
- currentWorkerNumberScheduled:
- format: int32
- type: integer
- desiredFuseNumberScheduled:
- format: int32
- type: integer
- desiredMasterNumberScheduled:
- format: int32
- type: integer
- desiredWorkerNumberScheduled:
- format: int32
- type: integer
- fuseNumberAvailable:
- format: int32
- type: integer
- fuseNumberReady:
- format: int32
- type: integer
- fuseNumberUnavailable:
- format: int32
- type: integer
- fusePhase:
- type: string
- fuseReason:
- type: string
- masterNumberReady:
- format: int32
- type: integer
- masterPhase:
- type: string
- masterReason:
- type: string
- mountTime:
- format: date-time
- type: string
- mounts:
- items:
- properties:
- encryptOptions:
- items:
- properties:
- name:
- type: string
- valueFrom:
- properties:
- secretKeyRef:
- properties:
- key:
- type: string
- name:
- type: string
- required:
- - name
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- mountPoint:
- minLength: 5
- type: string
- name:
- minLength: 0
- type: string
- options:
- additionalProperties:
- type: string
- type: object
- path:
- type: string
- readOnly:
- type: boolean
- shared:
- type: boolean
- required:
- - mountPoint
- type: object
- type: array
- selector:
- type: string
- setupDuration:
- type: string
- valueFile:
- type: string
- workerNumberAvailable:
- format: int32
- type: integer
- workerNumberReady:
- format: int32
- type: integer
- workerNumberUnavailable:
- format: int32
- type: integer
- workerPhase:
- type: string
- workerReason:
- type: string
- required:
- - currentFuseNumberScheduled
- - currentMasterNumberScheduled
- - currentWorkerNumberScheduled
- - desiredFuseNumberScheduled
- - desiredMasterNumberScheduled
- - desiredWorkerNumberScheduled
- - fuseNumberReady
- - fusePhase
- - masterNumberReady
- - masterPhase
- - valueFile
- - workerNumberReady
- - workerPhase
- type: object
- type: object
- served: true
- storage: true
- subresources:
- scale:
- labelSelectorPath: .status.selector
- specReplicasPath: .spec.replicas
- statusReplicasPath: .status.currentWorkerNumberScheduled
- status: {}
diff --git a/charts/fluid/fluid/templates/controller/goosefsruntime_controller.yaml b/charts/fluid/fluid/templates/controller/goosefsruntime_controller.yaml
deleted file mode 100644
index d0b5ed34a85..00000000000
--- a/charts/fluid/fluid/templates/controller/goosefsruntime_controller.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: goosefsruntime-controller
- namespace: {{ include "fluid.namespace" . }}
- labels:
- control-plane: goosefsruntime-controller
-spec:
- selector:
- matchLabels:
- control-plane: goosefsruntime-controller
- {{ if .Values.runtime.goosefs.enabled -}}
- replicas: {{ .Values.runtime.goosefs.replicas }}
- {{- else }}
- replicas: 0
- {{- end }}
- template:
- metadata:
- labels:
- control-plane: goosefsruntime-controller
- annotations:
- {{ if gt (.Values.runtime.goosefs.replicas | int) 1 -}}
- controller.runtime.fluid.io/replicas: {{ .Values.runtime.goosefs.replicas | quote }}
- {{- end }}
- spec:
- {{- with .Values.image.imagePullSecrets }}
- imagePullSecrets:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- serviceAccountName: goosefsruntime-controller
- {{ include "fluid.controlplane.affinity" . | nindent 6}}
- {{- if .Values.runtime.goosefs.tolerations }}
- tolerations:
-{{ toYaml .Values.runtime.goosefs.tolerations | indent 6 }}
- {{- end }}
- #hostNetwork: true
- containers:
- - image: {{ include "fluid.controlplane.imageTransform" (list .Values.runtime.goosefs.controller.imagePrefix .Values.runtime.goosefs.controller.imageName .Values.runtime.goosefs.controller.imageTag . ) }}
- imagePullPolicy: IfNotPresent
- name: manager
- command: ["goosefsruntime-controller", "start"]
- args:
- - --development=false
- - --runtime-node-port-range={{ .Values.runtime.goosefs.portRange }}
- - --runtime-workers={{ .Values.runtime.goosefs.runtimeWorkers }}
- - --kube-api-qps={{ .Values.runtime.goosefs.kubeClientQPS }}
- - --kube-api-burst={{ .Values.runtime.goosefs.kubeClientBurst }}
- - --pprof-addr=:6060
- - --enable-leader-election
- - --leader-election-namespace={{ include "fluid.namespace" . }}
- - --port-allocate-policy={{ .Values.runtime.goosefs.portAllocatePolicy }}
- env:
- {{- if .Values.workdir }}
- - name: FLUID_WORKDIR
- value: {{ .Values.workdir | quote }}
- {{- end }}
- {{- if .Values.runtime.goosefs.init.imageName }}
- - name: DEFAULT_INIT_IMAGE_ENV
- value: {{ include "fluid.runtime.imageTransform" (list .Values.runtime.goosefs.init.imagePrefix .Values.runtime.goosefs.init.imageName .Values.runtime.goosefs.init.imageTag . ) }}
- {{- end }}
- {{- if .Values.runtime.goosefs.runtime.imageName }}
- - name: GOOSEFS_RUNTIME_IMAGE_ENV
- value: {{ include "fluid.runtime.imageTransform" (list .Values.runtime.goosefs.runtime.imagePrefix .Values.runtime.goosefs.runtime.imageName .Values.runtime.goosefs.runtime.imageTag . ) }}
- {{- end }}
- {{- if .Values.runtime.goosefs.fuse.imageName }}
- - name: GOOSEFS_FUSE_IMAGE_ENV
- value: {{ include "fluid.runtime.imageTransform" (list .Values.runtime.goosefs.fuse.imagePrefix .Values.runtime.goosefs.fuse.imageName .Values.runtime.goosefs.fuse.imageTag . ) }}
- {{- end }}
- {{- if .Values.runtime.mountRoot }}
- - name: MOUNT_ROOT
- value: {{ .Values.runtime.mountRoot | quote }}
- {{- end }}
- {{- if .Values.runtime.criticalFusePod }}
- - name: CRITICAL_FUSE_POD
- value: {{ ternary "true" "false" (semverCompare ">=1.16.0-0" .Capabilities.KubeVersion.Version) | quote }}
- {{- end }}
- {{- if .Values.runtime.syncRetryDuration }}
- - name: FLUID_SYNC_RETRY_DURATION
- value: {{ .Values.runtime.syncRetryDuration | quote }}
- {{- end }}
- {{- include "fluid.controllers.envs.syncScheduleInfoNodeExcludeSelector" . | nindent 10 }}
- {{- if .Values.runtime.goosefs.env }}
- {{ toYaml .Values.runtime.goosefs.env | nindent 10 }}
- {{- end }}
- ports:
- - containerPort: 8080
- name: metrics
- protocol: TCP
- resources:
- {{- include "fluid.controlplane.resources" (list $ .Values.runtime.goosefs.resources) | nindent 10 }}
- terminationGracePeriodSeconds: 10
diff --git a/charts/fluid/fluid/templates/role/csi/rbac.yaml b/charts/fluid/fluid/templates/role/csi/rbac.yaml
index 30af401c622..19ec04df3ad 100644
--- a/charts/fluid/fluid/templates/role/csi/rbac.yaml
+++ b/charts/fluid/fluid/templates/role/csi/rbac.yaml
@@ -14,7 +14,6 @@ rules:
resources:
- alluxioruntimes
- jindoruntimes
- - goosefsruntimes
- juicefsruntimes
- thinruntimes
- efcruntimes
@@ -22,7 +21,6 @@ rules:
- vineyardruntimes
- alluxioruntimes/status
- jindoruntimes/status
- - goosefsruntimes/status
- juicefsruntimes/status
- thinruntimes/status
- efcruntimes/status
diff --git a/charts/fluid/fluid/templates/role/dataset/rbac.yaml b/charts/fluid/fluid/templates/role/dataset/rbac.yaml
index 3d12ec5d23f..bce70e3ea66 100644
--- a/charts/fluid/fluid/templates/role/dataset/rbac.yaml
+++ b/charts/fluid/fluid/templates/role/dataset/rbac.yaml
@@ -93,8 +93,6 @@ rules:
- alluxioruntimes/status
- jindoruntimes
- jindoruntimes/status
- - goosefsruntimes
- - goosefsruntimes/status
- juicefsruntimes
- juicefsruntimes/status
- thinruntimes
diff --git a/charts/fluid/fluid/templates/role/goosefs/rbac.yaml b/charts/fluid/fluid/templates/role/goosefs/rbac.yaml
deleted file mode 100644
index 557c88c70dc..00000000000
--- a/charts/fluid/fluid/templates/role/goosefs/rbac.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: goosefsruntime-controller
-rules:
- - apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - configmaps
- verbs:
- - get
- - list
- - watch
- - create
- - update
- - delete
- - apiGroups:
- - ""
- resources:
- - persistentvolumeclaims
- verbs:
- - get
- - list
- - watch
- - create
- - update
- - delete
- - apiGroups:
- - ""
- resources:
- - persistentvolumes
- verbs:
- - get
- - list
- - watch
- - create
- - delete
- - apiGroups:
- - ""
- resources:
- - pods
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - pods/exec
- verbs:
- - create
- - apiGroups:
- - ""
- resources:
- - nodes
- verbs:
- - get
- - list
- - watch
- - patch
- - apiGroups:
- - ""
- resources:
- - events
- verbs:
- - create
- - patch
- - apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - list
- - watch
- - get
- - create
- - delete
- - update
- - apiGroups:
- - ""
- resources:
- - services
- verbs:
- - create
- - delete
- - list
- - watch
- - get
- - apiGroups:
- - data.fluid.io
- resources:
- - goosefsdataloads
- - goosefsruntimes
- - datasets
- - goosefsdataloads/status
- - goosefsruntimes/status
- - datasets/status
- verbs:
- - get
- - list
- - watch
- - create
- - update
- - patch
- - delete
- - apiGroups:
- - apps
- resources:
- - daemonsets
- - statefulsets
- - daemonsets/status
- - statefulsets/status
- verbs:
- - get
- - list
- - watch
- - create
- - update
- - patch
- - delete
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- namespace: {{ include "fluid.namespace" . }}
- name: goosefsruntime-controller
-rules:
- - apiGroups:
- - coordination.k8s.io
- resources:
- - leases
- verbs:
- - create
- - apiGroups:
- - coordination.k8s.io
- resources:
- - leases
- resourceNames:
- - goosefs.data.fluid.io
- verbs:
- - get
- - list
- - watch
- - update
- - patch
- - delete
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: goosefsruntime-clusterrolebinding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: goosefsruntime-controller
-subjects:
- - kind: ServiceAccount
- name: goosefsruntime-controller
- namespace: {{ include "fluid.namespace" . }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: goosefsruntime-rolebinding
- namespace: {{ include "fluid.namespace" . }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: goosefsruntime-controller
-subjects:
- - kind: ServiceAccount
- name: goosefsruntime-controller
- namespace: {{ include "fluid.namespace" . }}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: goosefsruntime-controller
- namespace: {{ include "fluid.namespace" . }}
diff --git a/charts/fluid/fluid/templates/role/thin/rbac.yaml b/charts/fluid/fluid/templates/role/thin/rbac.yaml
index 6e4d941ee31..0f9e79719ac 100644
--- a/charts/fluid/fluid/templates/role/thin/rbac.yaml
+++ b/charts/fluid/fluid/templates/role/thin/rbac.yaml
@@ -94,7 +94,6 @@ rules:
- alluxioruntimes
- jindoruntimes
- juicefsruntimes
- - goosefsruntimes
- efcruntimes
- thinruntimes
- thinruntimeprofiles
diff --git a/charts/fluid/fluid/templates/role/webhook/rabc.yaml b/charts/fluid/fluid/templates/role/webhook/rabc.yaml
index 9c56c036341..44edf39bc9f 100644
--- a/charts/fluid/fluid/templates/role/webhook/rabc.yaml
+++ b/charts/fluid/fluid/templates/role/webhook/rabc.yaml
@@ -61,7 +61,6 @@ rules:
- alluxioruntimes
- jindoruntimes
- juicefsruntimes
- - goosefsruntimes
- thinruntimes
- efcruntimes
- vineyardruntimes
diff --git a/charts/fluid/fluid/values.yaml b/charts/fluid/fluid/values.yaml
index eed20b17db5..fc4b39f4224 100644
--- a/charts/fluid/fluid/values.yaml
+++ b/charts/fluid/fluid/values.yaml
@@ -74,7 +74,7 @@ csi:
kubeConfigFile: /etc/kubernetes/kubelet.conf
certDir: /var/lib/kubelet/pki
rootDir: /var/lib/kubelet
- pruneFs: fuse.alluxio-fuse,fuse.jindofs-fuse,fuse.juicefs,fuse.goosefs-fuse,ossfs,alifuse.aliyun-alinas-efc
+ pruneFs: fuse.alluxio-fuse,fuse.jindofs-fuse,fuse.juicefs,ossfs,alifuse.aliyun-alinas-efc
recoverWarningThreshold: 50
# default method is "bindMount", "symlink" is also support
# Notice: if use nodePublishMethod symlink, fuse recovery is not support
@@ -167,43 +167,6 @@ runtime:
imagePrefix: *defaultImagePrefix
imageName: init-users
imageTag: v0.9.0
- goosefs:
- replicas: 1
- env: []
- tolerations:
- - operator: Exists
- resources: ~
- # resources:
- # requests:
- # cpu: 500m
- # memory: 256Mi
- # limits:
- # cpu: 1000m
- # memory: 512Mi
- runtimeWorkers: 3
- kubeClientQPS: 20
- kubeClientBurst: 30
- workQueueQPS: 10
- workQueueBurst: 100
- portRange: 26000-32000
- portAllocatePolicy: random
- enabled: false
- init:
- imagePrefix: *defaultImagePrefix
- imageName: init-users
- imageTag: v0.9.0
- controller:
- imagePrefix: *defaultImagePrefix
- imageName: goosefsruntime-controller
- imageTag: *defaultVersion
- runtime:
- imagePrefix: ccr.ccs.tencentyun.com/qcloud
- imageName: goosefs
- imageTag: v1.2.0
- fuse:
- imagePrefix: ccr.ccs.tencentyun.com/qcloud
- imageName: goosefs-fuse
- imageTag: v1.2.0
juicefs:
replicas: 1
env: []
diff --git a/charts/goosefs/.helmignore b/charts/goosefs/.helmignore
deleted file mode 100644
index f0c13194444..00000000000
--- a/charts/goosefs/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/goosefs/CHANGELOG.md b/charts/goosefs/CHANGELOG.md
deleted file mode 100644
index ccded0fa3d4..00000000000
--- a/charts/goosefs/CHANGELOG.md
+++ /dev/null
@@ -1,13 +0,0 @@
-1.0.1
-- Support Kubernetes Orchestration via Fluid
-
-1.1.0
-- Support fuse lazy start
-- Support fuse critical pod
-- Change worker from Daemonset to Statefulset
-
-1.1.1
-- Make fuse tolerate any taint
-
-1.1.2
-- Add `sidecar.istio.io/inject` to components annotation
diff --git a/charts/goosefs/Chart.yaml b/charts/goosefs/Chart.yaml
deleted file mode 100644
index 82251f20f60..00000000000
--- a/charts/goosefs/Chart.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-name: goosefs
-apiVersion: v2
-description: FileSystem on the cloud based on TencentCloud Object Storage aimed for data acceleration.
-version: 1.1.2
-home: https://cloud.tencent.com/document/product/436/56412
-maintainers:
-- name: Yuandong Xie
- email: chrisydxie@tencent.com
-dependencies:
-- name: library
- version: "0.2.0"
- repository: "file://../library"
diff --git a/charts/goosefs/README.md b/charts/goosefs/README.md
deleted file mode 100644
index 345211fb7dc..00000000000
--- a/charts/goosefs/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-数据湖加速器(Data Lake Accelerator Goose FileSystem,GooseFS),是由腾讯云推出的高可靠、高可用、弹性的数据湖加速服务。依靠对象存储(Cloud Object Storage,COS)作为数据湖存储底座的成本优势,为数据湖生态中的计算应用提供统一的数据湖入口,加速海量数据分析、机器学习、人工智能等业务访问存储的性能;采用了分布式集群架构,具备弹性、高可靠、高可用等特性,为上层计算应用提供统一的命名空间和访问协议,方便用户在不同的存储系统管理和流转数据。
-
-## 产品功能
-
-GooseFS 旨在提供一站式的缓存解决方案,在利用数据本地性和高速缓存,统一存储访问语义等方面具有天然的优势;GooseFS 在腾讯云数据湖生态中扮演着“上承计算,下启存储”的核心角色,如下图所示。
-
-
-
-GooseFS 提供了以下功能:
-
-1. 缓存加速和数据本地化(Locality):GooseFS 可以与计算节点混合部署提高数据本地性,利用高速缓存功能解决存储性能问题,提高写入对象存储 COS 的带宽;
-2. 融合存储语义:GooseFS 提供 UFS(Unified FileSystem)的语义,可以支持 COS、Hadoop、S3、K8S CSI、 FUSE 等多个存储语义,使用于多种生态和应用场景;
-3. 统一的腾讯云相关生态服务:包括日志、鉴权、监控,实现了与 COS 操作统一;
-4. 提供 Namespace 管理能力,针对不同业务、不同的Under File System,提供不同的读写缓存策略以及生命周期(TTL)管理;
-6. 感知 Table 元数据功能:对于大数据场景下数据 Table,提供 GooseFS Catalog 用于感知元数据 Table ,提供 Table 级别的 Cache 预热。
-
-
-## 产品优势
-
-GooseFS 在数据湖场景中具有如下几点明显的优势:
-
-### 数据 I/O 性能
-
-GooseFS 部署提供近计算端的分布式共享缓存,上层计算应用可以透明地、高效地从远端存储将需要频繁访问的热数据缓存到近计算端,加速数据 I/O 性能。GooseFS 提供了元数据缓存功能,可以加速大数据场景下查询文件数据以及列出文件列表等元数据操作的性能。配合大数据存储桶使用,还可进一步加速重命名文件的操作性能。此外,业务可以按需选择 MEM、SSD、NVME 以及 HDD 盘等不同的存储介质,平衡业务成本和数据访问性能。
-
-### 存储一体化
-
-GooseFS 提供了统一的命名空间,不仅支持了对象存储 COS 存储语义,也支持 HDFS、K8S CSI 以及 FUSE 等语义,为上层业务提供了一体化的融合存储方案,简化业务侧运维配置。存储一体化能够打通不同数据底座的壁垒,方便上层应用管理和流转数据,提升数据利用的效率。
-
-### 生态亲和性
-
-GooseFS 全兼容腾讯云大数据平台框架,也支持客户侧自定义的本地部署,具备优秀的生态亲和性。业务侧不仅可以在腾讯云弹性 MapReduce 产品中使用 GooseFS 加速大数据业务,也可以便捷地将 GooseFS 本地化部署在公有云 CVM 或者自建 IDC 内。此外,GooseFS 支持透明加速能力,对于已经使用腾讯云 COSN 和 CHDFS 的用户,只需做简单的配置修改,即可实现不修改任何业务代码和访问路径的前提下,自动使用GooseFS 加速 COSN 和 CHDFS 的业务访问。
\ No newline at end of file
diff --git a/charts/goosefs/charts/library b/charts/goosefs/charts/library
deleted file mode 120000
index 494d3c39e3a..00000000000
--- a/charts/goosefs/charts/library
+++ /dev/null
@@ -1 +0,0 @@
-../../library
\ No newline at end of file
diff --git a/charts/goosefs/docker/check_port.sh b/charts/goosefs/docker/check_port.sh
deleted file mode 100644
index d2144d4c6b5..00000000000
--- a/charts/goosefs/docker/check_port.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-set -x
-
-function printUsage() {
- echo -e "Usage: Run command with related environment variable set"
- echo
- echo -e 'Environment Variable "PORTS_TO_CHECK" is set:'
- echo -e " PORT1:PORT2:PORT3..."
-}
-
-function check_port() {
- ports=$1
- for port in "${ports[@]}"; do
- # ignore grep not found
- netstat -ntp | awk '{print $4,"\t",$6,"\t",$7}' | grep "$port"
- if [[ $? -eq 0 ]]; then
- # Found any port is in use
- return 1
- fi
- echo
- done
-
- # No port in use
- return 0
-}
-
-function main() {
- # The shell scripts only reports the usage status of the ports.
- # If any port is in use, no err will be returned.
- ports="$PORTS_TO_CHECK"
- ports=(${ports//:/ })
- if [[ "${#ports[*]}" -eq 0 ]]; then
- printUsage
- exit 1
- fi
-
- # Timeout for 60 * 10s(10min)
- count=1
- while [[ count -lt 61 ]]; do
- echo
- echo "Retry to check port usage for the $count time"
- check_port $ports
- if [[ $? == 0 ]]; then
- echo "No port conflict found. Exiting..."
- exit 0
- fi
-
- count=`expr $count + 1`
- sleep 10
- done
-
- echo "Timeout for port conflicts"
- exit 1
-}
-
-main "$@"
\ No newline at end of file
diff --git a/charts/goosefs/docker/init-users/Dockerfile b/charts/goosefs/docker/init-users/Dockerfile
deleted file mode 100644
index f8cc252d584..00000000000
--- a/charts/goosefs/docker/init-users/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-# FROM centos:centos8.2.2004
-FROM centos:centos8.2.2004@sha256:4062bbdd1bb0801b0aa38e0f83dece70fb7a5e9bce223423a68de2d8b784b43b
-
-RUN yum install -y net-tools
-
-COPY *.sh /
-
-RUN chmod +x /*.sh
-
-ENTRYPOINT ["/entrypoint.sh"]
diff --git a/charts/goosefs/docker/init-users/chmod_fuse_mountpoint.sh b/charts/goosefs/docker/init-users/chmod_fuse_mountpoint.sh
deleted file mode 100644
index 646880c1079..00000000000
--- a/charts/goosefs/docker/init-users/chmod_fuse_mountpoint.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-function printUsage() {
- echo -e "Usage: Run command with related environment variable set"
- echo
- echo -e 'Environment Variable "$FLUID_FUSE_MOUNTPOINT" is set:'
- echo -e " PATH1:PATH2:PATH3..."
-
-}
-
-function main() {
- if [[ -z "$FLUID_FUSE_MOUNTPOINT" ]]; then
- printUsage
- exit 1
- fi
- chmod -R 0777 $FLUID_FUSE_MOUNTPOINT
-}
-
-main "$@"
diff --git a/charts/goosefs/docker/init-users/chmod_tierpath.sh b/charts/goosefs/docker/init-users/chmod_tierpath.sh
deleted file mode 100755
index 42975a0b264..00000000000
--- a/charts/goosefs/docker/init-users/chmod_tierpath.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-set -xe
-
-function printUsage() {
- echo -e "Usage: Run command with related environment variable set"
- echo
- echo -e 'Environment Variable "$FLUID_TIERSTORE_PATHS" is set:'
- echo -e " PATH1:PATH2:PATH3..."
-
-}
-
-function main() {
- paths="$FLUID_TIERSTORE_PATHS"
- paths=(${paths//:/ })
- if [[ "${#paths[*]}" -eq 0 ]]; then
- printUsage
- exit 1
- fi
- for path in "${paths[@]}"; do
- chmod -R 0777 $path
- done
-}
-
-main "$@"
diff --git a/charts/goosefs/docker/init-users/entrypoint.sh b/charts/goosefs/docker/init-users/entrypoint.sh
deleted file mode 100755
index c3882330103..00000000000
--- a/charts/goosefs/docker/init-users/entrypoint.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-function printUsage() {
- echo "Usage: COMMAND [COMMAND_OPTIONS]"
- echo
- echo "COMMAND is one of:"
- echo -e " init_users"
- echo -e " chmod_tierpath"
- echo -e " chmod_fuse_mountpoint"
- echo -e " check_port"
-}
-
-function main() {
- if [[ "$#" -eq 0 ]]; then
- printUsage
- exit 1
- fi
- while [[ ! "$#" -eq 0 ]]; do
- case "${1}" in
- init_users)
- sh -c ./init_users.sh
- ;;
- chmod_tierpath)
- sh -c ./chmod_tierpath.sh
- ;;
- chmod_fuse_mountpoint)
- sh -c ./chmod_fuse_mountpoint.sh
- ;;
- check_port)
- sh -c ./check_port.sh
- ;;
- *)
- printUsage
- ;;
- esac
- shift
- done
-}
-
-main "$@"
diff --git a/charts/goosefs/docker/init-users/init_users.sh b/charts/goosefs/docker/init-users/init_users.sh
deleted file mode 100755
index df4b122020e..00000000000
--- a/charts/goosefs/docker/init-users/init_users.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-set -xe
-
-function printUsage() {
- echo -e "Usage: Run command with related environment variable set"
- echo
- echo -e 'Environment Variable "$FLUID_INIT_USERS" is set:'
- echo -e " UID:UserName:GID,GroupID1:GroupName1..."
-}
-
-function main() {
- args="$FLUID_INIT_USERS"
- args=(${args//,/ })
- if [[ "${#args[*]}" -lt 2 ]]; then
- printUsage
- exit 1
- fi
-
- user=${args[0]}
- user_kv=(${user//:/ })
- uid=${user_kv[0]}
- username=${user_kv[1]}
- gid=${user_kv[2]}
-
- # create groups
- $(>temp)
- echo -n "useradd -m -u ${uid} -g ${gid} -G 0," >>temp
- for ((num = 1; num < ${#args[*]}; num++)); do
- group="${args[${num}]}"
- group_kv=(${group//:/ })
- groupid=${group_kv[0]}
- groupname=${group_kv[1]}
- echo -n "${groupid}" >>temp
- if [[ num -ne $((${#args[*]} - 1)) ]]; then
- echo -n "," >>temp
- fi
- groupadd -f -g ${groupid} ${groupname}
- done
-
- # create user and bind to group
- echo -n " ${username}" >>temp
- temp=$(cat temp)
- groups=${temp}
- eval $groups
- cat /etc/passwd >/tmp/passwd
- cat /etc/group >/tmp/group
-}
-
-main "$@"
diff --git a/charts/goosefs/templates/_helpers.tpl b/charts/goosefs/templates/_helpers.tpl
deleted file mode 100644
index 8ff9508256f..00000000000
--- a/charts/goosefs/templates/_helpers.tpl
+++ /dev/null
@@ -1,357 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "goosefs.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "goosefs.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "goosefs.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{- define "goosefs.jobWorker.resources" -}}
-resources:
- limits:
- {{- if .Values.jobWorker.resources.limits }}
- {{- if .Values.jobWorker.resources.limits.cpu }}
- cpu: {{ .Values.jobWorker.resources.limits.cpu }}
- {{- end }}
- {{- if .Values.jobWorker.resources.limits.memory }}
- memory: {{ .Values.jobWorker.resources.limits.memory }}
- {{- end }}
- {{- end }}
- requests:
- {{- if .Values.jobWorker.resources.requests }}
- {{- if .Values.jobWorker.resources.requests.cpu }}
- cpu: {{ .Values.jobWorker.resources.requests.cpu }}
- {{- end }}
- {{- if .Values.jobWorker.resources.requests.memory }}
- memory: {{ .Values.jobWorker.resources.requests.memory }}
- {{- end }}
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.worker.resources" -}}
-resources:
- limits:
- {{- if .Values.worker.resources.limits }}
- {{- if .Values.worker.resources.limits.cpu }}
- cpu: {{ .Values.worker.resources.limits.cpu }}
- {{- end }}
- {{- if .Values.worker.resources.limits.memory }}
- memory: {{ .Values.worker.resources.limits.memory }}
- {{- end }}
- {{- end }}
- requests:
- {{- if .Values.worker.resources.requests }}
- {{- if .Values.worker.resources.requests.cpu }}
- cpu: {{ .Values.worker.resources.requests.cpu }}
- {{- end }}
- {{- if .Values.worker.resources.requests.memory }}
- memory: {{ .Values.worker.resources.requests.memory }}
- {{- end }}
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.apiGateway.resources" -}}
-resources:
- limits:
- {{- if .Values.apiGateway.resources.limits }}
- {{- if .Values.apiGateway.resources.limits.cpu }}
- cpu: {{ .Values.apiGateway.resources.limits.cpu }}
- {{- end }}
- {{- if .Values.apiGateway.resources.limits.memory }}
- memory: {{ .Values.apiGateway.resources.limits.memory }}
- {{- end }}
- {{- end }}
- requests:
- {{- if .Values.apiGateway.resources.requests }}
- {{- if .Values.apiGateway.resources.requests.cpu }}
- cpu: {{ .Values.apiGateway.resources.requests.cpu }}
- {{- end }}
- {{- if .Values.apiGateway.resources.requests.memory }}
- memory: {{ .Values.apiGateway.resources.requests.memory }}
- {{- end }}
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.master.resources" -}}
-resources:
- limits:
- {{- if .Values.master.resources.limits }}
- {{- if .Values.master.resources.limits.cpu }}
- cpu: {{ .Values.master.resources.limits.cpu }}
- {{- end }}
- {{- if .Values.master.resources.limits.memory }}
- memory: {{ .Values.master.resources.limits.memory }}
- {{- end }}
- {{- end }}
- requests:
- {{- if .Values.master.resources.requests }}
- {{- if .Values.master.resources.requests.cpu }}
- cpu: {{ .Values.master.resources.requests.cpu }}
- {{- end }}
- {{- if .Values.master.resources.requests.memory }}
- memory: {{ .Values.master.resources.requests.memory }}
- {{- end }}
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.jobMaster.resources" -}}
-resources:
- limits:
- {{- if .Values.jobMaster.resources.limits }}
- {{- if .Values.jobMaster.resources.limits.cpu }}
- cpu: {{ .Values.jobMaster.resources.limits.cpu }}
- {{- end }}
- {{- if .Values.jobMaster.resources.limits.memory }}
- memory: {{ .Values.jobMaster.resources.limits.memory }}
- {{- end }}
- {{- end }}
- requests:
- {{- if .Values.jobMaster.resources.requests }}
- {{- if .Values.jobMaster.resources.requests.cpu }}
- cpu: {{ .Values.jobMaster.resources.requests.cpu }}
- {{- end }}
- {{- if .Values.jobMaster.resources.requests.memory }}
- memory: {{ .Values.jobMaster.resources.requests.memory }}
- {{- end }}
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.journal.format.resources" -}}
-resources:
- limits:
- {{- if .Values.journal.format.resources.limits }}
- {{- if .Values.journal.format.resources.limits.cpu }}
- cpu: {{ .Values.journal.format.resources.limits.cpu }}
- {{- end }}
- {{- if .Values.journal.format.resources.limits.memory }}
- memory: {{ .Values.journal.format.resources.limits.memory }}
- {{- end }}
- {{- end }}
- requests:
- {{- if .Values.journal.format.resources.requests }}
- {{- if .Values.journal.format.resources.requests.cpu }}
- cpu: {{ .Values.journal.format.resources.requests.cpu }}
- {{- end }}
- {{- if .Values.journal.format.resources.requests.memory }}
- memory: {{ .Values.journal.format.resources.requests.memory }}
- {{- end }}
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.master.secretVolumeMounts" -}}
- {{- range $key, $val := .Values.secrets.master }}
- - name: secret-{{ $key }}-volume
- mountPath: /secrets/{{ $val }}
- readOnly: true
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.worker.secretVolumeMounts" -}}
- {{- range $key, $val := .Values.secrets.worker }}
- - name: secret-{{ $key }}-volume
- mountPath: /secrets/{{ $val }}
- readOnly: true
- {{- end -}}
-{{- end -}}
-
-{{- define "goosefs.worker.tieredstoreVolumeMounts" -}}
- {{- if .Values.tieredstore.levels }}
- {{- range .Values.tieredstore.levels }}
- {{- /* The mediumtype can have multiple parts like MEM,SSD */}}
- {{- if .mediumtype }}
- {{- /* Mount each part */}}
- {{- if contains "," .mediumtype }}
- {{- $type := .type }}
- {{- $path := .path }}
- {{- $parts := splitList "," .mediumtype }}
- {{- range $i, $val := $parts }}
- {{- /* Example: For path="/tmp/mem,/tmp/ssd", mountPath resolves to /tmp/mem and /tmp/ssd */}}
- - mountPath: {{ index ($path | splitList ",") $i }}
- name: {{ $val | lower }}-{{ $i }}
- {{- end}}
- {{- /* The mediumtype is a single value. */}}
- {{- else}}
- - mountPath: {{ .path }}
- name: {{ .mediumtype | replace "," "-" | lower }}
- {{- end}}
- {{- end}}
- {{- end}}
- {{- end}}
-{{- end -}}
-
-{{- define "goosefs.worker.otherVolumeMounts" -}}
- {{- range .Values.mounts }}
- - name: "{{ .name }}"
- mountPath: "{{ .path }}"
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.worker.tieredstoreVolumes" -}}
- {{- if .Values.tieredstore.levels }}
- {{- range .Values.tieredstore.levels }}
- {{- if .mediumtype }}
- {{- /* The mediumtype can have multiple parts like MEM,SSD */}}
- {{- if contains "," .mediumtype }}
- {{- $parts := splitList "," .mediumtype }}
- {{- $type := .type }}
- {{- $path := .path }}
- {{- $volumeName := .name }}
- {{- /* A volume will be generated for each part */}}
- {{- range $i, $val := $parts }}
- {{- /* Example: For mediumtype="MEM,SSD", mediumName resolves to mem-0 and ssd-1 */}}
- {{- $mediumName := printf "%v-%v" (lower $val) $i }}
- {{- if eq $type "hostPath"}}
- - hostPath:
- path: {{ index ($path | splitList ",") $i }}
- type: DirectoryOrCreate
- name: {{ $mediumName }}
- {{- else if eq $type "persistentVolumeClaim" }}
- - name: {{ $mediumName }}
- persistentVolumeClaim:
- {{- /* Example: For volumeName="/tmp/mem,/tmp/ssd", claimName resolves to /tmp/mem and /tmp/ssd */}}
- claimName: {{ index ($volumeName | splitList ",") $i }}
- {{- else }}
- - name: {{ $mediumName }}
- emptyDir:
- medium: "Memory"
- {{- if .quota }}
- sizeLimit: {{ .quota }}
- {{- end}}
- {{- end}}
- {{- end}}
- {{- /* The mediumtype is a single value like MEM. */}}
- {{- else}}
- {{- $mediumName := .mediumtype | lower }}
- {{- if eq .type "hostPath"}}
- - hostPath:
- path: {{ .path }}
- type: DirectoryOrCreate
- name: {{ $mediumName }}
- {{- else if eq .type "persistentVolumeClaim" }}
- - name: {{ $mediumName }}
- persistentVolumeClaim:
- claimName: {{ .name }}
- {{- else }}
- - name: {{ $mediumName }}
- emptyDir:
- medium: "Memory"
- {{- if .quota }}
- sizeLimit: {{ .quota }}
- {{- end}}
- {{- end}}
- {{- end}}
- {{- end}}
- {{- end}}
- {{- end}}
-{{- end -}}
-
-{{- define "goosefs.worker.secretVolumes" -}}
- {{- range $key, $val := .Values.secrets.worker }}
- - name: secret-{{ $key }}-volume
- secret:
- secretName: {{ $key }}
- defaultMode: 256
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.worker.shortCircuit.volume" -}}
- {{- if eq .Values.shortCircuit.volumeType "hostPath" }}
- - name: goosefs-domain
- hostPath:
- path: {{ .Values.shortCircuit.hostPath }}
- type: DirectoryOrCreate
- {{- else }}
- - name: goosefs-domain
- persistentVolumeClaim:
- claimName: "{{ .Values.shortCircuit.pvcName }}"
- {{- end }}
-{{- end -}}
-
-{{- define "goosefs.master.readinessProbe" -}}
-readinessProbe:
- exec:
- command: ["goosefs-monitor.sh", "master"]
-{{- end -}}
-
-{{- define "goosefs.jobMaster.readinessProbe" -}}
-readinessProbe:
- exec:
- command: ["goosefs-monitor.sh", "job_master"]
-{{- end -}}
-
-{{- define "goosefs.worker.readinessProbe" -}}
-readinessProbe:
- exec:
- command: ["goosefs-monitor.sh", "worker"]
-{{- end -}}
-
-{{- define "goosefs.jobWorker.readinessProbe" -}}
-readinessProbe:
- exec:
- command: ["goosefs-monitor.sh", "job_worker"]
-{{- end -}}
-
-{{- define "goosefs.master.livenessProbe" -}}
-livenessProbe:
- exec:
- command: ["goosefs-monitor.sh", "master"]
- initialDelaySeconds: 15
- periodSeconds: 30
- timeoutSeconds: 5
- failureThreshold: 2
-{{- end -}}
-
-{{- define "goosefs.jobMaster.livenessProbe" -}}
-livenessProbe:
- exec:
- command: ["goosefs-monitor.sh", "job_master"]
- initialDelaySeconds: 15
- periodSeconds: 30
- timeoutSeconds: 5
- failureThreshold: 2
-{{- end -}}
-
-{{- define "goosefs.worker.livenessProbe" -}}
-livenessProbe:
- exec:
- command: ["goosefs-monitor.sh", "worker"]
- initialDelaySeconds: 15
- periodSeconds: 30
- timeoutSeconds: 5
- failureThreshold: 2
-{{- end -}}
-
-{{- define "goosefs.jobWorker.livenessProbe" -}}
-livenessProbe:
- exec:
- command: ["goosefs-monitor.sh", "job_worker"]
- initialDelaySeconds: 15
- periodSeconds: 30
- timeoutSeconds: 5
- failureThreshold: 2
-{{- end -}}
diff --git a/charts/goosefs/templates/config/goosefs-conf.yaml b/charts/goosefs/templates/config/goosefs-conf.yaml
deleted file mode 100644
index fb760b1ade6..00000000000
--- a/charts/goosefs/templates/config/goosefs-conf.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-{{ $masterCount := int .Values.master.replicaCount }}
-{{- $defaultMasterName := "master-0" }}
-{{- $isSingleMaster := eq $masterCount 1 }}
-{{- $isHaEmbedded := and (eq .Values.journal.type "EMBEDDED") (gt $masterCount 1) }}
-{{- $release := .Release }}
-{{- $name := include "goosefs.name" . }}
-{{- $fullName := include "goosefs.fullname" . }}
-{{- $chart := include "goosefs.chart" . }}
-
-{{- /* ===================================== */}}
-{{- /* GOOSEFS_JAVA_OPTS */}}
-{{- /* ===================================== */}}
-{{- $goosefsJavaOpts := list }}
-{{- /* Specify master hostname if single master */}}
-{{- if $isSingleMaster }}
- {{- $goosefsJavaOpts = printf "-Dgoosefs.master.hostname=%v-%v" $fullName $defaultMasterName | append $goosefsJavaOpts }}
-{{- end }}
-{{ if .Values.fuse.enabled -}}
- {{- $goosefsJavaOpts = print "-Dgoosefs.user.hostname=${GOOSEFS_CLIENT_HOSTNAME}" | append $goosefsJavaOpts }}
-{{- end }}
-{{- $goosefsJavaOpts = printf "-Dgoosefs.master.journal.type=%v" .Values.journal.type | append $goosefsJavaOpts }}
-{{- $goosefsJavaOpts = printf "-Dgoosefs.master.journal.folder=%v" .Values.journal.folder | append $goosefsJavaOpts }}
-
-{{- /* Tiered store configuration */}}
-{{- if .Values.tieredstore }}
- {{- $goosefsJavaOpts = printf "-Dgoosefs.worker.tieredstore.levels=%v" (len .Values.tieredstore.levels) | append $goosefsJavaOpts }}
- {{- range .Values.tieredstore.levels }}
- {{- $tierName := printf "-Dgoosefs.worker.tieredstore.level%v" .level }}
- {{- if .alias }}
- {{- $goosefsJavaOpts = printf "%v.alias=%v" $tierName .alias | append $goosefsJavaOpts }}
- {{- end}}
- {{- $goosefsJavaOpts = printf "%v.dirs.mediumtype=%v" $tierName .mediumtype | append $goosefsJavaOpts }}
- {{- if .path }}
- {{- $goosefsJavaOpts = printf "%v.dirs.path=%v" $tierName .path | append $goosefsJavaOpts }}
- {{- end}}
- {{- if .quota }}
- {{- $goosefsJavaOpts = printf "%v.dirs.quota=%v" $tierName .quota | append $goosefsJavaOpts }}
- {{- end}}
- {{- if .high }}
- {{- $goosefsJavaOpts = printf "%v.watermark.high.ratio=%v" $tierName .high | append $goosefsJavaOpts }}
- {{- end}}
- {{- if .low }}
- {{- $goosefsJavaOpts = printf "%v.watermark.low.ratio=%v" $tierName .low | append $goosefsJavaOpts }}
- {{- end}}
- {{- end}}
-{{- end }}
-
-{{- /* Generate HA embedded journal address for masters */}}
-{{- if $isHaEmbedded }}
- {{- $embeddedJournalAddresses := "-Dgoosefs.master.embedded.journal.addresses=" }}
- {{- range $i := until $masterCount }}
- {{- $embeddedJournalAddresses = printf "%v,%v-master-%v:19200" $embeddedJournalAddresses $fullName $i }}
- {{- end }}
- {{- $goosefsJavaOpts = append $goosefsJavaOpts $embeddedJournalAddresses }}
-{{- end }}
-{{- range $key, $val := .Values.properties }}
- {{- $goosefsJavaOpts = printf "-D%v=%v" $key $val | append $goosefsJavaOpts }}
-{{- end }}
-{{- if .Values.jvmOptions }}
- {{- $goosefsJavaOpts = concat $goosefsJavaOpts .Values.jvmOptions }}
-{{- end }}
-
-{{- /* ===================================== */}}
-{{- /* GOOSEFS_MASTER_JAVA_OPTS */}}
-{{- /* ===================================== */}}
-{{- $masterJavaOpts := list }}
-{{- $masterJavaOpts = print "-Dgoosefs.master.hostname=${GOOSEFS_MASTER_HOSTNAME}" | append $masterJavaOpts }}
-{{- range $key, $val := .Values.master.properties }}
- {{- $masterJavaOpts = printf "-D%v=%v" $key $val | append $masterJavaOpts }}
-{{- end }}
-{{- if .Values.master.jvmOptions }}
- {{- $masterJavaOpts = concat $masterJavaOpts .Values.master.jvmOptions }}
-{{- end }}
-
-{{- /* ===================================== */}}
-{{- /* GOOSEFS_JOB_MASTER_JAVA_OPTS */}}
-{{- /* ===================================== */}}
-{{- $jobMasterJavaOpts := list }}
-{{- $jobMasterJavaOpts = print "-Dgoosefs.master.hostname=${GOOSEFS_MASTER_HOSTNAME}" | append $jobMasterJavaOpts }}
-{{- range $key, $val := .Values.jobMaster.properties }}
- {{- $jobMasterJavaOpts = printf "-D%v=%v" $key $val | append $jobMasterJavaOpts }}
-{{- end }}
-{{- if .Values.jobMaster.jvmOptions }}
- {{- $jobMasterJavaOpts = concat $jobMasterJavaOpts .Values.jobMaster.jvmOptions }}
-{{- end }}
-
-{{- /* ===================================== */}}
-{{- /* GOOSEFS_WORKER_JAVA_OPTS */}}
-{{- /* ===================================== */}}
-{{- $workerJavaOpts := list }}
-{{- $workerJavaOpts = print "-Dgoosefs.worker.hostname=${GOOSEFS_WORKER_HOSTNAME}" | append $workerJavaOpts }}
-{{- $workerJavaOpts = printf "-Dgoosefs.worker.rpc.port=%v" .Values.worker.ports.rpc | append $workerJavaOpts }}
-{{- $workerJavaOpts = printf "-Dgoosefs.worker.web.port=%v" .Values.worker.ports.web | append $workerJavaOpts }}
-
-{{- /* Short circuit configuration */}}
-{{- if eq .Values.shortCircuit.enabled false}}
- {{- $workerJavaOpts = print "-Dgoosefs.user.short.circuit.enabled=false" | append $workerJavaOpts }}
-{{- end }}
-{{- if and .Values.shortCircuit.enabled (eq .Values.shortCircuit.policy "uuid") }}
- {{- $workerJavaOpts = print "-Dgoosefs.worker.data.server.domain.socket.address=/opt/domain" | append $workerJavaOpts }}
- {{- $workerJavaOpts = print "-Dgoosefs.worker.data.server.domain.socket.as.uuid=true" | append $workerJavaOpts }}
-{{- end}}
-{{- /* Record container hostname if not using host network */}}
-{{- if not .Values.worker.hostNetwork }}
- {{- $workerJavaOpts = print "-Dgoosefs.worker.container.hostname=${GOOSEFS_WORKER_CONTAINER_HOSTNAME}" | append $workerJavaOpts }}
-{{- end}}
-
-{{- /* Resource configuration */}}
-{{- if .Values.worker.resources }}
- {{- if .Values.worker.resources.requests }}
- {{- if .Values.worker.resources.requests.memory }}
- {{- $workerJavaOpts = printf "-Dgoosefs.worker.memory.size=%v" .Values.worker.resources.requests.memory | append $workerJavaOpts }}
- {{- end}}
- {{- end}}
-{{- end}}
-
-{{- range $key, $val := .Values.worker.properties }}
- {{- $workerJavaOpts = printf "-D%v=%v" $key $val | append $workerJavaOpts }}
-{{- end }}
-{{- if .Values.worker.jvmOptions }}
- {{- $workerJavaOpts = concat $workerJavaOpts .Values.worker.jvmOptions }}
-{{- end }}
-
-{{- /* ===================================== */}}
-{{- /* GOOSEFS_JOB_WORKER_JAVA_OPTS */}}
-{{- /* ===================================== */}}
-{{- $jobWorkerJavaOpts := list }}
-{{- $jobWorkerJavaOpts = print "-Dgoosefs.worker.hostname=${GOOSEFS_WORKER_HOSTNAME}" | append $jobWorkerJavaOpts }}
-{{- $jobWorkerJavaOpts = printf "-Dgoosefs.job.worker.rpc.port=%v" .Values.jobWorker.ports.rpc | append $jobWorkerJavaOpts }}
-{{- $jobWorkerJavaOpts = printf "-Dgoosefs.job.worker.data.port=%v" .Values.jobWorker.ports.data | append $jobWorkerJavaOpts }}
-{{- $jobWorkerJavaOpts = printf "-Dgoosefs.job.worker.web.port=%v" .Values.jobWorker.ports.web | append $jobWorkerJavaOpts }}
-{{- range $key, $val := .Values.jobWorker.properties }}
- {{- $jobWorkerJavaOpts = printf "-D%v=%v" $key $val | append $jobWorkerJavaOpts }}
-{{- end }}
-{{- if .Values.jobWorker.jvmOptions }}
- {{- $jobWorkerJavaOpts = concat $jobWorkerJavaOpts .Values.jobWorker.jvmOptions }}
-{{- end }}
-
-{{- /* ===================================== */}}
-{{- /* GOOSEFS_FUSE_JAVA_OPTS */}}
-{{- /* ===================================== */}}
-{{- $fuseJavaOpts := list }}
-{{- range $key, $val := .Values.fuse.properties }}
- {{- $fuseJavaOpts = printf "-D%v=%v" $key $val | append $fuseJavaOpts }}
-{{- end }}
-{{- if .Values.fuse.jvmOptions }}
- {{- $fuseJavaOpts = concat $fuseJavaOpts .Values.fuse.jvmOptions }}
-{{- end }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- annotations:
- "helm.sh/hook": "pre-install"
- "helm.sh/hook-delete-policy": before-hook-creation
- name: {{ $fullName }}-config
- labels:
- name: {{ $fullName }}-config
- app: {{ $name }}
- chart: {{ $chart }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- {{- include "library.fluid.labels" . | nindent 4 }}
-data:
- GOOSEFS_JAVA_OPTS: |-
- {{- /* Format GOOSEFS_JAVA_OPTS list to one line */}}
- {{ range $key := $goosefsJavaOpts }}{{ printf "%v " $key }}{{ end }}
- GOOSEFS_MASTER_JAVA_OPTS: |-
- {{- /* Format GOOSEFS_MASTER_JAVA_OPTS list to one line */}}
- {{ range $key := $masterJavaOpts }}{{ printf "%v " $key }}{{ end }}
- GOOSEFS_JOB_MASTER_JAVA_OPTS: |-
- {{- /* Format GOOSEFS_JOB_MASTER_JAVA_OPTS list to one line */}}
- {{ range $key := $jobMasterJavaOpts }}{{ printf "%v " $key }}{{ end }}
- GOOSEFS_WORKER_JAVA_OPTS: |-
- {{- /* Format GOOSEFS_WORKER_JAVA_OPTS list to one line */}}
- {{ range $key := $workerJavaOpts }}{{ printf "%v " $key }}{{ end }}
- GOOSEFS_JOB_WORKER_JAVA_OPTS: |-
- {{- /* Format GOOSEFS_JOB_WORKER_JAVA_OPTS list to one line */}}
- {{ range $key := $jobWorkerJavaOpts }}{{ printf "%v " $key }}{{ end }}
- GOOSEFS_FUSE_JAVA_OPTS: |-
- {{- /* Format GOOSEFS_FUSE_JAVA_OPTS list to one line */}}
- {{ range $key := $fuseJavaOpts }}{{ printf "%v " $key }}{{ end }}
- GOOSEFS_WORKER_TIEREDSTORE_LEVEL0_DIRS_PATH: "${GOOSEFS_WORKER_TIEREDSTORE_LEVEL0_DIRS_PATH}"
diff --git a/charts/goosefs/templates/fuse/client-daemonset.yaml b/charts/goosefs/templates/fuse/client-daemonset.yaml
deleted file mode 100644
index 18262d07512..00000000000
--- a/charts/goosefs/templates/fuse/client-daemonset.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-{{ if .Values.fuse.clientEnabled -}}
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: {{ template "goosefs.fullname" . }}-fuse-client
- labels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-fuse-client
- {{- include "library.fluid.labels" . | nindent 4 }}
-spec:
- selector:
- matchLabels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-fuse-client
- template:
- metadata:
- labels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-fuse-client
- {{- include "library.fluid.labels" . | nindent 8 }}
- spec:
- tolerations:
- - operator: Exists
- containers:
- - name: goosefs-fuse-client
- image: alpine:latest
- resources:
- requests:
- cpu: "0.5"
- memory: "500M"
- limits:
- cpu: "1"
- memory: "500M"
- command: ["/bin/sh", "-c", "--"]
- args: ["while true; do sleep 30 && echo .; done;"]
- volumeMounts:
- - name: goosefs-fuse-mount
- mountPath: {{ .Values.fuse.mountPath | dir }}
- restartPolicy: Always
- volumes:
- - name: goosefs-fuse-mount
- hostPath:
- path: {{ .Values.fuse.mountPath | dir }}
- type: Directory
-{{- end }}
diff --git a/charts/goosefs/templates/fuse/daemonset.yaml b/charts/goosefs/templates/fuse/daemonset.yaml
deleted file mode 100644
index 5c1760163a0..00000000000
--- a/charts/goosefs/templates/fuse/daemonset.yaml
+++ /dev/null
@@ -1,170 +0,0 @@
-{{ if .Values.fuse.enabled -}}
-{{- $shortCircuitEnabled := .Values.shortCircuit.enabled -}}
-{{- $needDomainSocketVolume := and $shortCircuitEnabled (eq .Values.shortCircuit.policy "uuid") }}
-
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: {{ template "goosefs.fullname" . }}-fuse
- labels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-fuse
- {{- include "library.fluid.labels" . | nindent 4 }}
-spec:
- selector:
- matchLabels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-fuse
- template:
- metadata:
- annotations:
- sidecar.istio.io/inject: "false"
- {{- if .Values.fuse.annotations }}
-{{ toYaml .Values.fuse.annotations | trim | indent 8 }}
- {{- end }}
- labels:
- sidecar.istio.io/inject: "false"
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-fuse
- {{- include "library.fluid.labels" . | nindent 8 }}
- spec:
- {{- if .Values.fuse.criticalPod }}
- priorityClassName: system-node-critical
- {{- end }}
- enableServiceLinks: false
- hostNetwork: {{ .Values.fuse.hostNetwork }}
- hostPID: {{ .Values.fuse.hostPID }}
- dnsPolicy: {{ .Values.fuse.dnsPolicy }}
- nodeSelector:
- {{- if .Values.fuse.nodeSelector }}
-{{ toYaml .Values.fuse.nodeSelector | trim | indent 8 }}
- {{- else if .Values.nodeSelector }}
-{{ toYaml .Values.nodeSelector | trim | indent 8 }}
- {{- end }}
- tolerations:
- - operator: Exists
- securityContext:
- fsGroup: {{ .Values.fuse.fsGroup }}
- initContainers:
- {{ if .Values.initUsers.enabled -}}
- - name: init-fuse
- image: {{ .Values.initUsers.image }}:{{ .Values.initUsers.imageTag }}
- imagePullPolicy: {{ .Values.initUsers.imagePullPolicy }}
- command: ["/entrypoint.sh"]
- args:
- - "chmod_fuse_mountpoint"
- env:
- - name: FLUID_FUSE_MOUNTPOINT
- value: {{ .Values.fuse.mountPath | dir | quote }}
- securityContext:
- runAsUser: 0
- runAsGroup: 0
- volumeMounts:
- - name: goosefs-fuse-mount
- mountPath: {{ .Values.fuse.mountPath | dir }}
- {{- end }}
- containers:
- - name: goosefs-fuse
- image: {{ .Values.fuse.image }}:{{ .Values.fuse.imageTag }}
- imagePullPolicy: {{ .Values.fuse.imagePullPolicy }}
- {{- if .Values.fuse.resources }}
- resources:
- {{- if .Values.fuse.resources.limits }}
- limits:
- cpu: {{ .Values.fuse.resources.limits.cpu }}
- memory: {{ .Values.fuse.resources.limits.memory }}
- {{- end }}
- {{- if .Values.fuse.resources.requests }}
- requests:
- cpu: {{ .Values.fuse.resources.requests.cpu }}
- memory: {{ .Values.fuse.resources.requests.memory }}
- {{- end }}
- {{- end }}
- command: ["/entrypoint.sh"]
- {{- if .Values.fuse.args }}
- args:
-{{ toYaml .Values.fuse.args | trim | indent 12 }}
- {{- end }}
- env:
- - name: GOOSEFS_CLIENT_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.hostIP
- - name: GOOSEFS_CLIENT_JAVA_OPTS
- value: " -Dgoosefs.user.hostname=${GOOSEFS_CLIENT_HOSTNAME} "
- {{- range $key, $value := .Values.fuse.env }}
- - name: "{{ $key }}"
- value: "{{ $value }}"
- {{- end }}
- securityContext:
- runAsUser: {{ .Values.user }}
- runAsGroup: {{ .Values.group }}
- privileged: true
- lifecycle:
- preStop:
- exec:
- command: ["/opt/goosefs/integration/fuse/bin/goosefs-fuse", "unmount", "{{ .Values.fuse.mountPath }}"]
- envFrom:
- - configMapRef:
- name: {{ template "goosefs.fullname" . }}-config
- volumeMounts:
- {{ if .Values.initUsers.enabled -}}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end }}
- - name: goosefs-fuse-device
- mountPath: /dev/fuse
- - name: goosefs-fuse-mount
- mountPath: {{ .Values.fuse.mountPath | dir }}
- mountPropagation: Bidirectional
- {{- if $shortCircuitEnabled}}
- {{- if eq .Values.shortCircuit.policy "uuid" }}
- - name: goosefs-domain
- mountPath: /opt/domain
- {{- end }}
- {{- if eq .Values.shortCircuit.policy "local" }}
-{{- include "goosefs.worker.tieredstoreVolumeMounts" . }}
- {{- end }}
- {{- end }}
- restartPolicy: Always
- volumes:
- {{ if .Values.initUsers.enabled -}}
- - name: user
- hostPath:
- path: {{ .Values.initUsers.dir }}/passwd
- type: FileOrCreate
- - name: group
- hostPath:
- path: {{ .Values.initUsers.dir }}/group
- type: FileOrCreate
- {{- end }}
- - name: goosefs-fuse-device
- hostPath:
- path: /dev/fuse
- type: CharDevice
- - name: goosefs-fuse-mount
- hostPath:
- path: {{ .Values.fuse.mountPath | dir }}
- type: DirectoryOrCreate
- {{- if $shortCircuitEnabled}}
- {{- if eq .Values.shortCircuit.policy "uuid" }}
-{{- include "goosefs.worker.shortCircuit.volume" . }}
- {{- end }}
- {{- if eq .Values.shortCircuit.policy "local" }}
-{{- include "goosefs.worker.tieredstoreVolumes" . }}
- {{- end }}
- {{- end }}
-{{- end }}
diff --git a/charts/goosefs/templates/master/service.yaml b/charts/goosefs/templates/master/service.yaml
deleted file mode 100644
index 18cf24a3093..00000000000
--- a/charts/goosefs/templates/master/service.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-{{- $masterCount := int .Values.master.replicaCount }}
-{{- $isEmbedded := (eq .Values.journal.type "EMBEDDED") }}
-{{- $isHaEmbedded := and $isEmbedded (gt $masterCount 1) }}
-{{- $isMonitored := (.Values.monitoring) }}
-{{- $metricsLabel := .Values.monitoring }}
-{{- $release := .Release }}
-{{- $masterRpcPort := .Values.master.ports.rpc }}
-{{- $masterWebPort := .Values.master.ports.web }}
-{{- $jobMasterRpcPort := .Values.jobMaster.ports.rpc }}
-{{- $jobMasterWebPort := .Values.jobMaster.ports.web }}
-{{- $masterEmbeddedPort := .Values.master.ports.embedded }}
-{{- $jobMasterEmbeddedPort := .Values.jobMaster.ports.embedded }}
-{{- $isAPIGatewayEnabled := .Values.apiGateway.enabled }}
-{{- $apiGatewayPort := .Values.apiGateway.ports.rest }}
-{{- $name := include "goosefs.name" . }}
-{{- $fullName := include "goosefs.fullname" . }}
-{{- $chart := include "goosefs.chart" . }}
-{{- $labels := include "library.fluid.labels" . }}
-{{- range $i := until $masterCount }}
- {{- $masterName := printf "master-%v" $i }}
- {{- $masterJavaOpts := printf " -Dgoosefs.master.hostname=%v-%v " $fullName $masterName }}
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ $fullName }}-{{ $masterName }}
- labels:
- app: {{ $name }}
- chart: {{ $chart }}
- release: {{ $release.Name }}
- heritage: {{ $release.Service }}
- role: goosefs-master
- {{- $labels | nindent 4}}
- {{- if $isMonitored }}
- monitor: {{ $metricsLabel }}
- {{- end }}
-spec:
- ports:
- - port: {{ $masterRpcPort }}
- name: rpc
- - port: {{ $masterWebPort }}
- name: web
- - port: {{ $jobMasterRpcPort }}
- name: job-rpc
- - port: {{ $jobMasterWebPort }}
- name: job-web
- {{- if $isAPIGatewayEnabled }}
- - port: {{ $apiGatewayPort }}
- name: rest
- {{- end }}
- {{- if $isHaEmbedded }}
- - port: {{ $masterEmbeddedPort }}
- name: embedded
- - port: {{ $jobMasterEmbeddedPort }}
- name: job-embedded
- {{- end }}
- clusterIP: None
- selector:
- role: goosefs-master
- app: {{ $name }}
- release: {{ $release.Name }}
- statefulset.kubernetes.io/pod-name: {{ $fullName }}-{{ $masterName }}
----
-{{- end }}
diff --git a/charts/goosefs/templates/master/statefulset.yaml b/charts/goosefs/templates/master/statefulset.yaml
deleted file mode 100644
index 1f71aa1fe5e..00000000000
--- a/charts/goosefs/templates/master/statefulset.yaml
+++ /dev/null
@@ -1,448 +0,0 @@
-{{- $masterCount := int .Values.master.replicaCount }}
-{{- $isSingleMaster := eq $masterCount 1 }}
-{{- $isEmbedded := (eq .Values.journal.type "EMBEDDED") }}
-{{- $isHaEmbedded := and $isEmbedded (gt $masterCount 1) }}
-{{- $isUfsLocal := and (eq .Values.journal.type "UFS") (eq .Values.journal.ufsType "local") }}
-{{- $needJournalVolume := or $isEmbedded $isUfsLocal }}
-{{- $hostNetwork := .Values.master.hostNetwork }}
-{{- $name := include "goosefs.name" . }}
-{{- $fullName := include "goosefs.fullname" . }}
-{{- $chart := include "goosefs.chart" . }}
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: {{ $fullName }}-master
- labels:
- name: {{ $fullName }}-master
- app: {{ $name }}
- chart: {{ $chart }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-master
- {{- include "library.fluid.labels" . | nindent 4 }}
-spec:
- selector:
- matchLabels:
- app: {{ $name }}
- role: goosefs-master
- name: {{ $fullName }}-master
- serviceName: {{ $fullName }}-master
- replicas: {{ $masterCount }}
- template:
- metadata:
- annotations:
- sidecar.istio.io/inject: "false"
- {{- if .Values.master.annotations }}
-{{ toYaml .Values.master.annotations | trim | indent 8 }}
- {{- end }}
- labels:
- sidecar.istio.io/inject: "false"
- name: {{ $fullName }}-master
- app: {{ $name }}
- chart: {{ $chart }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-master
- {{- include "library.fluid.labels" . | nindent 8 }}
- spec:
- enableServiceLinks: false
- hostNetwork: {{ $hostNetwork }}
- dnsPolicy: {{ .Values.master.dnsPolicy | default ($hostNetwork | ternary "ClusterFirstWithHostNet" "ClusterFirst") }}
- nodeSelector:
- {{- if .Values.master.nodeSelector }}
-{{ toYaml .Values.master.nodeSelector | trim | indent 8 }}
- {{- else if .Values.nodeSelector }}
-{{ toYaml .Values.nodeSelector | trim | indent 8 }}
- {{- end }}
- securityContext:
- fsGroup: {{ .Values.fsGroup }}
- {{- if .Values.tolerations }}
- tolerations:
-{{ toYaml .Values.tolerations | indent 8 }}
- {{- end }}
- affinity:
- {{- if .Values.master.affinity }}
-{{ toYaml .Values.master.affinity | indent 8 }}
- {{- end }}
- initContainers:
- {{ if .Values.initUsers.enabled -}}
- - name: init-users
- image: {{ .Values.initUsers.image }}:{{ .Values.initUsers.imageTag }}
- imagePullPolicy: {{ .Values.initUsers.imagePullPolicy }}
- command: ["/entrypoint.sh"]
- args:
- - "init_users"
- env:
- {{- if .Values.initUsers.envUsers }}
- - name: FLUID_INIT_USERS
- value: {{.Values.initUsers.envUsers | quote }}
- {{- end }}
- {{- if .Values.initUsers.envTieredPaths }}
- - name: FLUID_TIERSTORE_PATHS
- value: {{ .Values.initUsers.envTieredPaths | quote }}
- {{- end }}
- volumeMounts:
- - name: dir
- mountPath: /tmp
- - name: user
- mountPath: /tmp/passwd
- - name: group
- mountPath: /tmp/group
- {{- end }}
- {{- if .Values.journal.format.runFormat }}
- - name: journal-format
- image: {{ .Values.image }}:{{ .Values.imageTag }}
- imagePullPolicy: {{ .Values.imagePullPolicy }}
- securityContext:
- runAsUser: {{ .Values.user }}
- command: ["goosefs","formatJournal"]
- volumeMounts:
- {{ if .Values.initUsers.enabled -}}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end}}
- - name: goosefs-journal
- mountPath: /journal
- {{- end}}
- containers:
- {{- if .Values.apiGateway.enabled }}
- - name: api-gateway
- image: {{ .Values.image }}:{{ .Values.imageTag }}
- imagePullPolicy: {{ .Values.imagePullPolicy }}
- {{- if .Values.apiGateway.resources }}
-{{ include "goosefs.apiGateway.resources" . | indent 10 }}
- {{- end }}
- command: ["/entrypoint.sh"]
- {{- if .Values.apiGateway.args }}
- args:
-{{ toYaml .Values.apiGateway.args | trim | indent 12 }}
- {{- end }}
- env:
- - name: GOOSEFS_MASTER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- envFrom:
- - configMapRef:
- name: {{ $fullName }}-config
- ports:
- - containerPort: {{ .Values.apiGateway.ports.rest }}
- name: rest
- {{- end }}
- - name: goosefs-master
- image: {{ .Values.image }}:{{ .Values.imageTag }}
- imagePullPolicy: {{ .Values.imagePullPolicy }}
- securityContext:
- runAsUser: {{ .Values.user }}
- runAsGroup: {{ .Values.group }}
- {{- if .Values.master.resources }}
-{{ include "goosefs.master.resources" . | indent 10 }}
- {{- end }}
- command: ["/entrypoint.sh"]
- {{- if .Values.master.args }}
- args:
-{{ toYaml .Values.master.args | trim | indent 12 }}
- {{- end }}
- {{- if $isHaEmbedded }}
- env:
- - name: GOOSEFS_MASTER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- {{- else if $isSingleMaster }}
- env:
- - name: GOOSEFS_MASTER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- {{- end }}
- {{- range $key, $value := .Values.master.env }}
- - name: "{{ $key }}"
- value: "{{ $value }}"
- {{- end }}
- envFrom:
- - configMapRef:
- name: {{ $fullName }}-config
- ports:
- - containerPort: {{ .Values.master.ports.rpc }}
- name: rpc
- - containerPort: {{ .Values.master.ports.web }}
- name: web
- {{- if $isHaEmbedded }}
- - containerPort: {{ .Values.master.ports.embedded }}
- name: embedded
- {{- end }}
- volumeMounts:
- {{ if .Values.master.restore.enabled -}}
- {{- if .Values.master.restore.pvcName }}
- - name: pvc
- mountPath: /pvc
- {{- else }}
- - name: host
- mountPath: /host
- {{- end }}
- {{- end }}
- {{- if .Values.master.backupPath }}
- - name: backup
- mountPath: /goosefs_backups
- {{- end }}
- {{- if .Values.hadoopConfig }}
- {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }}
- - name: hdfs-confs
- mountPath: /hdfs-config
- readOnly: true
- {{- end }}
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end }}
- {{- if $needJournalVolume }}
- - name: goosefs-journal
- mountPath: /journal
- {{- end }}
- {{- if .Values.ufsPaths }}
- {{- range .Values.ufsPaths }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsVolumes }}
- {{- range .Values.ufsVolumes }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- if .subPath }}
- subPath: {{ .subPath }}
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.metastore }}
- - name: goosefs-metastore
- mountPath: {{ .Values.metastore.mountPath }}
- {{- end }}
- {{- if .Values.secrets }}
- {{- if .Values.secrets.master }}
-{{- include "goosefs.master.secretVolumeMounts" . }}
- {{- end }}
- {{- end }}
- {{- if .Values.mounts }}
- {{- range .Values.mounts }}
- - name: "{{ .name }}"
- mountPath: "{{ .path }}"
- {{- end }}
- {{- end }}
- - name: goosefs-job-master
- image: {{ .Values.image }}:{{ .Values.imageTag }}
- imagePullPolicy: {{ .Values.imagePullPolicy }}
- securityContext:
- runAsUser: {{ .Values.user }}
- runAsGroup: {{ .Values.group }}
- {{- if .Values.jobMaster.resources }}
-{{ include "goosefs.jobMaster.resources" . | indent 10 }}
- {{- end }}
- command: ["/entrypoint.sh"]
- {{- if .Values.jobMaster.args }}
- args:
-{{ toYaml .Values.jobMaster.args | trim | indent 12 }}
- {{- end }}
- {{- if $isHaEmbedded }}
- env:
- - name: GOOSEFS_MASTER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- {{- else if $isSingleMaster }}
- env:
- - name: GOOSEFS_MASTER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- {{- end }}
- {{- range $key, $value := .Values.jobMaster.env }}
- - name: "{{ $key }}"
- value: "{{ $value }}"
- {{- end }}
- envFrom:
- - configMapRef:
- name: {{ $fullName }}-config
- ports:
- - containerPort: {{ .Values.jobMaster.ports.rpc }}
- name: job-rpc
- - containerPort: {{ .Values.jobMaster.ports.web }}
- name: job-web
- {{- if $isHaEmbedded }}
- - containerPort: {{ .Values.jobMaster.ports.embedded }}
- name: job-embedded
- {{- end }}
- volumeMounts:
- {{- if .Values.hadoopConfig }}
- {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }}
- - name: hdfs-confs
- mountPath: /hdfs-config
- readOnly: true
- {{- end }}
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end }}
- {{- if .Values.secrets }}
- {{- if .Values.secrets.master }}
-{{- include "goosefs.master.secretVolumeMounts" . }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsPaths }}
- {{- range .Values.ufsPaths }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsVolumes }}
- {{- range .Values.ufsVolumes }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- if .subPath }}
- subPath: {{ .subPath }}
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.mounts }}
- {{- range .Values.mounts }}
- - name: "{{ .name }}"
- mountPath: "{{ .path }}"
- {{- end }}
- {{- end }}
- restartPolicy: Always
- volumes:
- {{ if .Values.master.restore.enabled -}}
- {{- if .Values.master.restore.pvcName }}
- - name: pvc
- persistentVolumeClaim:
- claimName: {{ .Values.master.restore.pvcName }}
- {{- else }}
- - name: host
- hostPath:
- path: {{ .Values.master.restore.path }}
- type: DirectoryOrCreate
- {{- end }}
- {{- end }}
- {{- if .Values.master.backupPath }}
- - name: backup
- hostPath:
- path: {{ .Values.master.backupPath }}
- type: DirectoryOrCreate
- {{- end }}
- {{- if .Values.hadoopConfig }}
- {{- if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite }}
- - name: hdfs-confs
- configMap:
- name: {{ .Values.hadoopConfig.configMap }}
- items:
- {{ if .Values.hadoopConfig.includeHdfsSite -}}
- - key: hdfs-site.xml
- path: hdfs-site.xml
- {{- end }}
- {{- if .Values.hadoopConfig.includeCoreSite }}
- - key: core-site.xml
- path: core-site.xml
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: dir
- hostPath:
- path: {{ .Values.initUsers.dir }}
- type: DirectoryOrCreate
- - name: user
- hostPath:
- path: {{ .Values.initUsers.dir }}/passwd
- type: FileOrCreate
- - name: group
- hostPath:
- path: {{ .Values.initUsers.dir }}/group
- type: FileOrCreate
- {{- end }}
- {{- if .Values.secrets }}
- {{- if .Values.secrets.master }}
- {{- range $key, $val := .Values.secrets.master }}
- - name: secret-{{ $key }}-volume
- secret:
- secretName: {{ $key }}
- defaultMode: 256
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.mounts }}
- {{- range .Values.mounts }}
- - name: "{{ .name }}"
- persistentVolumeClaim:
- claimName: "{{ .name }}"
- {{- end }}
- {{- end }}
- {{- if .Values.metastore }}
- {{- if eq .Values.metastore.volumeType "emptyDir" }}
- - name: goosefs-metastore
- emptyDir:
- medium: {{ .Values.metastore.medium }}
- sizeLimit: {{ .Values.metastore.size | quote }}
- {{- end }}
- {{- end}}
- {{- if and $needJournalVolume (eq .Values.journal.volumeType "emptyDir") }}
- - name: goosefs-journal
- emptyDir:
- medium: {{ .Values.journal.medium }}
- sizeLimit: {{ .Values.journal.size | quote }}
- {{- end }}
- {{- if .Values.ufsPaths }}
- {{- range .Values.ufsPaths }}
- - hostPath:
- path: {{ .hostPath }}
- type: ""
- name: {{ .name }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsVolumes }}
- {{- range .Values.ufsVolumes }}
- - name: "{{ .name }}"
- persistentVolumeClaim:
- claimName: "{{ .name }}"
- {{- end }}
- {{- end }}
- volumeClaimTemplates:
- {{- if and $needJournalVolume (eq .Values.journal.volumeType "persistentVolumeClaim") }}
- - metadata:
- name: goosefs-journal
- spec:
- storageClassName: {{ .Values.journal.storageClass }}
- accessModes:
-{{ toYaml .Values.journal.accessModes | indent 8 }}
- resources:
- requests:
- storage: {{ .Values.journal.size }}
- {{- end }}
- {{- if .Values.metastore }}
- {{- if eq .Values.metastore.volumeType "persistentVolumeClaim" }}
- - metadata:
- name: goosefs-metastore
- spec:
- storageClassName: {{ .Values.metastore.storageClass }}
- accessModes:
-{{ toYaml .Values.metastore.accessModes | indent 8 }}
- resources:
- requests:
- storage: {{ .Values.metastore.size }}
- {{- end }}
- {{- end }}
diff --git a/charts/goosefs/templates/worker/domain-socket-pvc.yaml b/charts/goosefs/templates/worker/domain-socket-pvc.yaml
deleted file mode 100644
index 2e0b6a5dfc2..00000000000
--- a/charts/goosefs/templates/worker/domain-socket-pvc.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-{{ $shortCircuitEnabled := .Values.shortCircuit.enabled -}}
-{{ $needDomainSocketVolume := and (and $shortCircuitEnabled (eq .Values.shortCircuit.policy "uuid")) (eq .Values.shortCircuit.volumeType "persistentVolumeClaim") -}}
-{{ if $needDomainSocketVolume -}}
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: {{ .Values.shortCircuit.pvcName }}
- labels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-worker
- {{- include "library.fluid.labels" . | nindent 4 }}
-spec:
- volumeMode: Filesystem
- resources:
- requests:
- storage: {{ .Values.shortCircuit.size }}
- storageClassName: {{ .Values.shortCircuit.storageClass }}
- accessModes:
-{{ toYaml .Values.shortCircuit.accessModes | trim | indent 4 }}
- selector:
- matchLabels:
- app: {{ template "goosefs.name" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-worker
-{{- end -}}
diff --git a/charts/goosefs/templates/worker/statefulset.yaml b/charts/goosefs/templates/worker/statefulset.yaml
deleted file mode 100644
index 7cf8a592eef..00000000000
--- a/charts/goosefs/templates/worker/statefulset.yaml
+++ /dev/null
@@ -1,321 +0,0 @@
-{{- $shortCircuitEnabled := .Values.shortCircuit.enabled }}
-{{- $needDomainSocketVolume := and $shortCircuitEnabled (eq .Values.shortCircuit.policy "uuid") }}
-{{- $hostNetwork := .Values.worker.hostNetwork }}
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: {{ template "goosefs.fullname" . }}-worker
- labels:
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-worker
- fluid.io/dataset: {{ .Values.ownerDatasetId }}
- fluid.io/dataset-placement: {{ .Values.placement }}
- {{- include "library.fluid.labels" . | nindent 4 }}
-spec:
- selector:
- matchLabels:
- app: {{ template "goosefs.name" . }}
- release: {{ .Release.Name }}
- role: goosefs-worker
- serviceName: {{ template "goosefs.fullname" . }}-worker
- replicas: {{ .Values.worker.replicaCount }}
- template:
- metadata:
- annotations:
- sidecar.istio.io/inject: "false"
- {{- if .Values.worker.annotations }}
-{{ toYaml .Values.worker.annotations | trim | indent 8 }}
- {{- end }}
- labels:
- sidecar.istio.io/inject: "false"
- app: {{ template "goosefs.name" . }}
- chart: {{ template "goosefs.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- role: goosefs-worker
- fluid.io/dataset: {{ .Values.ownerDatasetId }}
- fluid.io/dataset-placement: {{ .Values.placement }}
- {{- include "library.fluid.labels" . | nindent 8 }}
- spec:
- enableServiceLinks: false
- hostNetwork: {{ $hostNetwork }}
- dnsPolicy: {{ .Values.worker.dnsPolicy | default ($hostNetwork | ternary "ClusterFirstWithHostNet" "ClusterFirst") }}
- securityContext:
- fsGroup: {{ .Values.fsGroup }}
- nodeSelector:
- {{- if .Values.worker.nodeSelector }}
-{{ toYaml .Values.worker.nodeSelector | trim | indent 8 }}
- {{- else if .Values.nodeSelector }}
-{{ toYaml .Values.nodeSelector | trim | indent 8 }}
- {{- end }}
- {{- if .Values.tolerations }}
- tolerations:
-{{ toYaml .Values.tolerations | indent 8 }}
- {{- end }}
- initContainers:
- {{ if .Values.initUsers.enabled -}}
- - name: init-users
- image: {{ .Values.initUsers.image }}:{{ .Values.initUsers.imageTag }}
- imagePullPolicy: {{ .Values.initUsers.imagePullPolicy }}
- command: ["/entrypoint.sh"]
- args:
- - "init_users"
- - "chmod_tierpath"
- env:
- {{- if .Values.initUsers.envUsers }}
- - name: FLUID_INIT_USERS
- value: {{.Values.initUsers.envUsers | quote }}
- {{- end }}
- {{- if .Values.initUsers.envTieredPaths }}
- - name: FLUID_TIERSTORE_PATHS
- value: {{ .Values.initUsers.envTieredPaths | quote }}
- {{- end }}
- volumeMounts:
- - name: dir
- mountPath: /tmp
- - name: user
- mountPath: /tmp/passwd
- - name: group
- mountPath: /tmp/group
- {{- if .Values.tieredstore -}}
-{{- include "goosefs.worker.tieredstoreVolumeMounts" . }}
- {{- end -}}
- {{- end }}
- containers:
- - name: goosefs-worker
- image: {{ .Values.image }}:{{ .Values.imageTag }}
- imagePullPolicy: {{ .Values.imagePullPolicy }}
- securityContext:
- runAsUser: {{ .Values.user }}
- runAsGroup: {{ .Values.group }}
- {{- if .Values.worker.resources }}
-{{ include "goosefs.worker.resources" . | indent 10 }}
- {{- end }}
- command: ["/entrypoint.sh"]
- {{- if .Values.worker.args }}
- args:
-{{ toYaml .Values.worker.args | trim | indent 12 }}
- {{- end }}
- env:
- - name: GOOSEFS_WORKER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.hostIP
- {{- if not .Values.worker.hostNetwork }}
- - name: GOOSEFS_WORKER_CONTAINER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- {{- end }}
- {{- range $key, $value := .Values.worker.env }}
- - name: "{{ $key }}"
- value: "{{ $value }}"
- {{- end }}
- envFrom:
- - configMapRef:
- name: {{ template "goosefs.fullname" . }}-config
- ports:
- - containerPort: {{ .Values.worker.ports.rpc }}
- name: rpc
- - containerPort: {{ .Values.worker.ports.web }}
- name: web
- volumeMounts:
- {{ if .Values.hadoopConfig -}}
- {{ if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite -}}
- - name: hdfs-confs
- mountPath: /hdfs-config
- readOnly: true
- {{- end }}
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end}}
- {{- if $needDomainSocketVolume }}
- - name: goosefs-domain
- mountPath: /opt/domain
- {{- end }}
- {{- if .Values.ufsPaths }}
- {{- range .Values.ufsPaths }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsVolumes }}
- {{- range .Values.ufsVolumes }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- if .subPath }}
- subPath: {{ .subPath }}
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.secrets -}}
- {{- if .Values.secrets.worker -}}
-{{- include "goosefs.worker.secretVolumeMounts" . }}
- {{- end -}}
- {{- end -}}
- {{- if .Values.tieredstore -}}
-{{- include "goosefs.worker.tieredstoreVolumeMounts" . }}
- {{- end -}}
- {{- if .Values.mounts -}}
-{{- include "goosefs.worker.otherVolumeMounts" . }}
- {{- end }}
- - name: goosefs-job-worker
- image: {{ .Values.image }}:{{ .Values.imageTag }}
- securityContext:
- runAsUser: {{ .Values.user }}
- runAsGroup: {{ .Values.group }}
- imagePullPolicy: {{ .Values.imagePullPolicy }}
- {{- if .Values.jobWorker.resources }}
-{{ include "goosefs.jobWorker.resources" . | indent 10 }}
- {{- end }}
- command: ["/entrypoint.sh"]
- {{- if .Values.jobWorker.args }}
- args:
-{{ toYaml .Values.jobWorker.args | trim | indent 12 }}
- {{- end }}
- env:
- - name: GOOSEFS_WORKER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.hostIP
- {{- if not .Values.worker.hostNetwork }}
- - name: GOOSEFS_WORKER_CONTAINER_HOSTNAME
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- {{- end }}
- {{- range $key, $value := .Values.jobWorker.env }}
- - name: "{{ $key }}"
- value: "{{ $value }}"
- {{- end }}
- envFrom:
- - configMapRef:
- name: {{ template "goosefs.fullname" . }}-config
- ports:
- - containerPort: {{ .Values.jobWorker.ports.rpc }}
- name: job-rpc
- - containerPort: {{ .Values.jobWorker.ports.data }}
- name: job-data
- - containerPort: {{ .Values.jobWorker.ports.web }}
- name: job-web
- volumeMounts:
- {{ if .Values.hadoopConfig -}}
- {{ if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite -}}
- - name: hdfs-confs
- mountPath: /hdfs-config
- readOnly: true
- {{- end }}
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: user
- mountPath: /etc/passwd
- readOnly: true
- - name: group
- mountPath: /etc/group
- readOnly: true
- {{- end}}
- {{- if $needDomainSocketVolume }}
- - name: goosefs-domain
- mountPath: /opt/domain
- {{- end }}
- {{- if .Values.ufsPaths }}
- {{- range .Values.ufsPaths }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsVolumes }}
- {{- range .Values.ufsVolumes }}
- - mountPath: {{ .containerPath }}
- name: {{ .name }}
- {{- if .subPath }}
- subPath: {{ .subPath }}
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.secrets }}
- {{- if .Values.secrets.worker }}
-{{- include "goosefs.worker.secretVolumeMounts" . }}
- {{- end -}}
- {{- end }}
- {{- if .Values.tieredstore }}
-{{- include "goosefs.worker.tieredstoreVolumeMounts" . }}
- {{- end }}
- {{- if .Values.mounts }}
-{{- include "goosefs.worker.otherVolumeMounts" . }}
- {{- end }}
- restartPolicy: Always
- volumes:
- {{ if .Values.hadoopConfig -}}
- {{ if or .Values.hadoopConfig.includeCoreSite .Values.hadoopConfig.includeHdfsSite -}}
- - name: hdfs-confs
- configMap:
- name: {{ .Values.hadoopConfig.configMap }}
- items:
- {{ if .Values.hadoopConfig.includeHdfsSite -}}
- - key: hdfs-site.xml
- path: hdfs-site.xml
- {{- end }}
- {{- if .Values.hadoopConfig.includeCoreSite }}
- - key: core-site.xml
- path: core-site.xml
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.initUsers.enabled }}
- - name: dir
- hostPath:
- path: {{ .Values.initUsers.dir }}
- type: DirectoryOrCreate
- - name: user
- hostPath:
- path: {{ .Values.initUsers.dir }}/passwd
- type: FileOrCreate
- - name: group
- hostPath:
- path: {{ .Values.initUsers.dir }}/group
- type: FileOrCreate
- {{- end }}
- {{- if $needDomainSocketVolume }}
-{{- include "goosefs.worker.shortCircuit.volume" . }}
- {{- end }}
- {{- if .Values.secrets }}
- {{- if .Values.secrets.worker }}
-{{- include "goosefs.worker.secretVolumes" . }}
- {{- end }}
- {{- end }}
- {{- if .Values.tieredstore }}
-{{- include "goosefs.worker.tieredstoreVolumes" . }}
- {{- end}}
- {{- if .Values.mounts }}
- {{- range .Values.mounts }}
- - name: "{{ .name }}"
- persistentVolumeClaim:
- claimName: "{{ .name }}"
- {{- end }}
- {{- end }}
- {{- if .Values.ufsPaths }}
- {{- range .Values.ufsPaths }}
- - hostPath:
- path: {{ .hostPath }}
- type: ""
- name: {{ .name }}
- {{- end }}
- {{- end }}
- {{- if .Values.ufsVolumes }}
- {{- range .Values.ufsVolumes }}
- - name: "{{ .name }}"
- persistentVolumeClaim:
- claimName: "{{ .name }}"
- {{- end }}
- {{- end }}
diff --git a/charts/goosefs/values.yaml b/charts/goosefs/values.yaml
deleted file mode 100644
index c8939c794ac..00000000000
--- a/charts/goosefs/values.yaml
+++ /dev/null
@@ -1,366 +0,0 @@
-# Default values for GooseFS.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-fullnameOverride: goosefs
-
-
-## Common ##
-
-# Docker Image
-image: ccr.ccs.tencentyun.com/goosefs/goosefs
-imageTag: v1.0.1
-imagePullPolicy: IfNotPresent
-
-# Create User Docker Image // TODO make it configurable
-initUsers:
- enabled: false
- image: registry.aliyuncs.com/fluid/init-users
- imageTag: v0.6.0-6a86b3f
- imagePullPolicy: IfNotPresent
- args: "1000"
- envUsers: ""
- envTieredPaths: ""
- dir: ""
-
-# Security Context
-user: 1000
-group: 1000
-fsGroup: 1000
-
-# Site properties for all the components
-properties:
- # goosefs.user.metrics.collection.enabled: 'true'
- goosefs.security.stale.channel.purge.interval: 365d
-
-# Recommended JVM Heap options for running in Docker
-# Ref: https://developers.redhat.com/blog/2017/03/14/java-inside-docker/
-# These JVM options are common to all GooseFS services
-# jvmOptions:
-# - "-XX:+UnlockExperimentalVMOptions"
-# - "-XX:+UseCGroupMemoryLimitForHeap"
-# - "-XX:MaxRAMFraction=2"
-
-# Mount Persistent Volumes to all components
-# mounts:
-# - name:
-# path:
-
-# Use labels to run GooseFS on a subset of the K8s nodes
-# nodeSelector: {}
-
-##
-## Example:
-## - key: "toleration=key"
-## operator: "Equal"
-## value: "value"
-## effect: "NoSchedule"
-##
-## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
-tolerations: []
-
-## Master ##
-
-master:
- replicaCount: 1 # Controls the number of StatefulSets. For multiMaster mode increase this to >1.
- env:
- # Extra environment variables for the master pod
- # Example:
- # JAVA_HOME: /opt/java
- args: # Arguments to Docker entrypoint
- - master-only
- - --no-format
- # Properties for the master component
- properties:
- # Example: use ROCKS DB instead of Heap
- # goosefs.master.metastore: ROCKS
- # goosefs.master.metastore.dir: /metastore
- resources:
- # The default xmx is 8G
- limits:
-# cpu: "4"
-# memory: "8G"
- requests:
-# cpu: "1"
-# memory: "1G"
- ports:
- embedded: 19200
- rpc: 19998
- web: 19999
- hostNetwork: false
- # dnsPolicy will be ClusterFirstWithHostNet if hostNetwork: true
- # and ClusterFirst if hostNetwork: false
- # You can specify dnsPolicy here to override this inference
- # dnsPolicy: ClusterFirst
- # JVM options specific to the master container
- jvmOptions:
- nodeSelector: {}
- # backupPath is the path in the host to save metadata and metadata info
- backupPath: #
- # restore is the location to restore metadata and metadata info from
- restore:
- enabled: false
- # path: /subpath/
- # pvcName: pvc-local
- annotations: {}
-
-
-jobMaster:
- args:
- - job-master
- # Properties for the jobMaster component
- properties:
- resources:
- limits:
-# cpu: "4"
-# memory: "8G"
- requests:
-# cpu: "1"
-# memory: "1G"
- ports:
- embedded: 20003
- rpc: 20001
- web: 20002
- # JVM options specific to the jobMaster container
- jvmOptions:
-
-# GooseFS supports journal type of UFS and EMBEDDED
-# UFS journal with HDFS example
-# journal:
-# type: "UFS"
-# folder: "hdfs://{$hostname}:{$hostport}/journal"
-
-# EMBEDDED journal to local path /journal example
-# journal:
-# type: "EMBEDDED"
-# folder: "/journal"
-
- # EMBEDDED journal to persistentVolumeClaim example
-journal:
- # Setting journal type, "UFS" or "EMBEDDED"
- # In commit 2c125dee25f8d4566ae23646a999c16c530ffbc9, in order to support master raft embeeded journal mode, change default value from UFS to EMBEDDED
- # TODO (@xieydd), will support journalClaimTemplates configure the journal setting
- type: "EMBEDDED"
- ufsType: "local" # Ignored if type is "EMBEDDED". "local" or "HDFS"
- folder: "/journal" # Master journal folder
- # volumeType controls the type of journal volume.
- # It can be "persistentVolumeClaim" or "emptyDir"
- volumeType: persistentVolumeClaim
- size: 1Gi
- # Attributes to use when the journal is persistentVolumeClaim
- storageClass: "standard"
- accessModes:
- - ReadWriteOnce
- # Attributes to use when the journal is emptyDir
- medium: ""
- # Configuration for journal formatting job
- format:
- runFormat: false # Change to true to format journal
-
-apiGateway:
- enabled: false
- args:
- - proxy
- properties:
- resources:
- limits:
-# cpu: "4"
-# memory: "4G"
- requests:
-# cpu: "1"
-# memory: "2G"
- ports:
- rest: 39999
-
-# You can enable metastore to use ROCKS DB instead of Heap
-# metastore:
-# volumeType: persistentVolumeClaim # Options: "persistentVolumeClaim" or "emptyDir"
-# size: 1Gi
-# mountPath: /metastore
-# # Attributes to use when the metastore is persistentVolumeClaim
-# storageClass: "standard"
-# accessModes:
-# - ReadWriteOnce
-# # Attributes to use when the metastore is emptyDir
-# medium: ""
-
-
-## Worker ##
-
-worker:
- env:
- # Extra environment variables for the worker pod
- # Example:
- # JAVA_HOME: /opt/java
- args:
- - worker-only
- - --no-format
- # Properties for the worker component
- properties:
- resources:
- limits:
-# cpu: "4"
-# memory: "4G"
- requests:
-# cpu: "1"
-# memory: "2G"
- ports:
- rpc: 29999
- web: 30000
- hostNetwork: false
- # dnsPolicy will be ClusterFirstWithHostNet if hostNetwork: true
- # and ClusterFirst if hostNetwork: false
- # You can specify dnsPolicy here to override this inference
- # dnsPolicy: ClusterFirst
- # JVM options specific to the worker container
- jvmOptions:
- nodeSelector: {}
- replicaCount: 0
- annotations: {}
-
-jobWorker:
- args:
- - job-worker
- # Properties for the jobWorker component
- properties:
- resources:
- limits:
-# cpu: "4"
-# memory: "4G"
- requests:
-# cpu: "1"
-# memory: "1G"
- ports:
- rpc: 30001
- data: 30002
- web: 30003
- # JVM options specific to the jobWorker container
- jvmOptions:
-
-# Tiered Storage
-# emptyDir example
-# - level: 0
-# alias: MEM
-# mediumtype: MEM
-# path: /dev/shm
-# type: emptyDir
-# quota: 1G
-#
-# hostPath example
-# - level: 0
-# alias: MEM
-# mediumtype: MEM
-# path: /dev/shm
-# type: hostPath
-# quota: 1G
-#
-# persistentVolumeClaim example
-# - level: 1
-# alias: SSD
-# mediumtype: SSD
-# type: persistentVolumeClaim
-# name: goosefs-ssd
-# path: /dev/ssd
-# quota: 10G
-#
-# multi-part mediumtype example
-# - level: 1
-# alias: SSD,HDD
-# mediumtype: SSD,HDD
-# type: persistentVolumeClaim
-# name: goosefs-ssd,goosefs-hdd
-# path: /dev/ssd,/dev/hdd
-# quota: 10G,10G
-tieredstore:
- levels:
- - level: 0
- alias: MEM
- mediumtype: MEM
- path: /dev/shm
- type: emptyDir
- quota: 1G
- high: 0.95
- low: 0.7
-
-# Short circuit related properties
-shortCircuit:
- enabled: true
- # The policy for short circuit can be "local" or "uuid",
- # local means the cache directory is in the same mount namespace,
- # uuid means interact with domain socket
- policy: uuid
- # volumeType controls the type of shortCircuit volume.
- # It can be "persistentVolumeClaim" or "hostPath"
- volumeType: persistentVolumeClaim
- size: 1Mi
- # Attributes to use if the domain socket volume is PVC
- pvcName: goosefs-worker-domain-socket
- accessModes:
- - ReadWriteOnce
- storageClass: standard
- # Attributes to use if the domain socket volume is hostPath
- hostPath: "/tmp/goosefs-domain" # The hostPath directory to use
-
-
-## FUSE ##
-
-fuse:
- criticalPod: false
- env:
- # Extra environment variables for the fuse pod
- # Example:
- # JAVA_HOME: /opt/java
- image: goose/goose-fuse
- imageTag: 2.4.0-SNAPSHOT
- imagePullPolicy: IfNotPresent
- # Change both to true to deploy FUSE
- enabled: false
- clientEnabled: false
- # Change global to true to enable fuse client deploy in global mode, which will deploy fuse client in all the nodes
- # not consider the the worker and fuse affinity
- global: false
- nodeSelector: {}
- # Properties for the jobWorker component
- properties:
- # Customize the MaxDirectMemorySize
- # These options are specific to the FUSE daemon
- jvmOptions:
- - "-XX:MaxDirectMemorySize=2g"
- hostNetwork: true
- # hostPID requires escalated privileges
- hostPID: false
- dnsPolicy: ClusterFirstWithHostNet
- user: 0
- group: 0
- fsGroup: 0
- args:
- - fuse
- - --fuse-opts=allow_other
- # Mount path in the host
- mountPath: /mnt/goosefs-fuse
- resources:
- requests:
-# cpu: "0.5"
-# memory: "1G"
- limits:
-# cpu: "4"
-# memory: "4G"
- annotations: {}
-
-## HDFS Configuraions ##
-# hadoopConfig:
-# configMap: hdfs-configmap
-# includeHdfsSite: true
-# includeCoreSite: true
-
-## Secrets ##
-
-# Format: (:):
-# secrets:
-# master: # Shared by master and jobMaster containers
-# goosefs-hdfs-config: hdfsConfig
-# worker: # Shared by worker and jobWorker containers
-# goosefs-hdfs-config: hdfsConfig
-
-## Monitor ##
-# monitoring: "goosefs_runtime_metrics"
diff --git a/cmd/csi/app/csi.go b/cmd/csi/app/csi.go
index b676ef77b84..5bbc0e1e144 100644
--- a/cmd/csi/app/csi.go
+++ b/cmd/csi/app/csi.go
@@ -83,7 +83,7 @@ func init() {
ErrorAndExit(err)
}
- startCmd.Flags().StringSliceVarP(&pruneFs, "prune-fs", "", []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "fuse.juicefs", "fuse.goosefs-fuse", "ossfs"}, "Prune fs to add in /etc/updatedb.conf, separated by comma")
+ startCmd.Flags().StringSliceVarP(&pruneFs, "prune-fs", "", []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "fuse.juicefs", "ossfs"}, "Prune fs to add in /etc/updatedb.conf, separated by comma")
startCmd.Flags().StringVarP(&prunePath, "prune-path", "", "/runtime-mnt", "Prune path to add in /etc/updatedb.conf")
startCmd.Flags().StringVarP(&metricsAddr, "metrics-addr", "", ":8080", "The address the metrics endpoint binds to.")
startCmd.Flags().StringVarP(&pprofAddr, "pprof-addr", "", "", "The address for pprof to use while exporting profiling results")
diff --git a/cmd/goosefs/app/goosefs.go b/cmd/goosefs/app/goosefs.go
deleted file mode 100644
index f6f9fc26fd2..00000000000
--- a/cmd/goosefs/app/goosefs.go
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package app
-
-import (
- "os"
-
- "github.com/fluid-cloudnative/fluid"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/controllers"
- goosefsctl "github.com/fluid-cloudnative/fluid/pkg/controllers/v1alpha1/goosefs"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/spf13/cobra"
- zapOpt "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/net"
- clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/log/zap"
- metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
-)
-
-var (
- scheme = runtime.NewScheme()
- setupLog = ctrl.Log.WithName("setup")
- // Use compiler to check if the struct implements all the interface
- _ base.Implement = (*goosefs.GooseFSEngine)(nil)
-
- metricsAddr string
- enableLeaderElection bool
- leaderElectionNamespace string
- development bool
- portRange string
- maxConcurrentReconciles int
- pprofAddr string
- portAllocatePolicy string
-
- kubeClientQPS float32
- kubeClientBurst int
-)
-
-var cmd = &cobra.Command{
- Use: "goosefsruntime-controller",
- Short: "Controller for goosefsruntime",
-}
-
-var startCmd = &cobra.Command{
- Use: "start",
- Short: "start goosefsruntime-controller in Kubernetes",
- Run: func(cmd *cobra.Command, args []string) {
- handle()
- },
-}
-
-func init() {
- _ = clientgoscheme.AddToScheme(scheme)
- _ = datav1alpha1.AddToScheme(scheme)
-
- startCmd.Flags().StringVarP(&metricsAddr, "metrics-addr", "", ":8080", "The address the metric endpoint binds to.")
- startCmd.Flags().BoolVarP(&enableLeaderElection, "enable-leader-election", "", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
- startCmd.Flags().StringVarP(&leaderElectionNamespace, "leader-election-namespace", "", "fluid-system", "The namespace in which the leader election resource will be created.")
- startCmd.Flags().BoolVarP(&development, "development", "", true, "Enable development mode for fluid controller.")
- startCmd.Flags().StringVar(&portRange, "runtime-node-port-range", "20000-25000", "Set available port range for GooseFS")
- startCmd.Flags().StringVar(&portAllocatePolicy, "port-allocate-policy", "random", "Set port allocating policy, available choice is bitmap or random(default random).")
- startCmd.Flags().StringVarP(&pprofAddr, "pprof-addr", "", "", "The address for pprof to use while exporting profiling results")
- startCmd.Flags().IntVar(&maxConcurrentReconciles, "runtime-workers", 3, "Set max concurrent workers for GooseFSRuntime controller")
- startCmd.Flags().Float32VarP(&kubeClientQPS, "kube-api-qps", "", 20, "QPS to use while talking with kubernetes apiserver.") // 20 is the default qps in controller-runtime
- startCmd.Flags().IntVarP(&kubeClientBurst, "kube-api-burst", "", 30, "Burst to use while talking with kubernetes apiserver.") // 30 is the default burst in controller-runtime
- cmd.AddCommand(startCmd)
-}
-
-func handle() {
- fluid.LogVersion()
-
- ctrl.SetLogger(zap.New(func(o *zap.Options) {
- o.Development = development
- }, func(o *zap.Options) {
- o.ZapOpts = append(o.ZapOpts, zapOpt.AddCaller())
- }, func(o *zap.Options) {
- if !development {
- encCfg := zapOpt.NewProductionEncoderConfig()
- encCfg.EncodeLevel = zapcore.CapitalLevelEncoder
- encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
- o.Encoder = zapcore.NewConsoleEncoder(encCfg)
- }
- }))
-
- utils.NewPprofServer(setupLog, pprofAddr, development)
-
- // the default webhook server port is 9443, no need to set
- mgr, err := ctrl.NewManager(controllers.GetConfigOrDieWithQPSAndBurst(kubeClientQPS, kubeClientBurst), ctrl.Options{
- Scheme: scheme,
- Metrics: metricsserver.Options{
- BindAddress: metricsAddr,
- },
- LeaderElection: enableLeaderElection,
- LeaderElectionNamespace: leaderElectionNamespace,
- LeaderElectionID: "goosefs.data.fluid.io",
- })
- if err != nil {
- setupLog.Error(err, "unable to start goosefsruntime manager")
- os.Exit(1)
- }
-
- controllerOptions := controller.Options{
- MaxConcurrentReconciles: maxConcurrentReconciles,
- }
-
- if err = (goosefsctl.NewRuntimeReconciler(mgr.GetClient(),
- ctrl.Log.WithName("goosefsctl").WithName("GooseFSRuntime"),
- mgr.GetScheme(),
- mgr.GetEventRecorderFor("GooseFSRuntime"),
- )).SetupWithManager(mgr, controllerOptions); err != nil {
- setupLog.Error(err, "unable to create controller", "controller", "GooseFSRuntime")
- os.Exit(1)
- }
-
- pr, err := net.ParsePortRange(portRange)
- if err != nil {
- setupLog.Error(err, "can't parse port range. Port range must be like -")
- os.Exit(1)
- }
- setupLog.Info("port range parsed", "port range", pr.String())
-
- err = portallocator.SetupRuntimePortAllocator(mgr.GetClient(), pr, portAllocatePolicy, goosefs.GetReservedPorts)
- if err != nil {
- setupLog.Error(err, "failed to setup runtime port allocator")
- os.Exit(1)
- }
- setupLog.Info("Set up runtime port allocator", "policy", portAllocatePolicy)
-
- setupLog.Info("starting goosefsruntime-controller")
- if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
- setupLog.Error(err, "problem goosefsruntime-controller")
- os.Exit(1)
- }
-
-}
diff --git a/cmd/goosefs/app/init.go b/cmd/goosefs/app/init.go
deleted file mode 100644
index 18dda732db7..00000000000
--- a/cmd/goosefs/app/init.go
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package app
-
-import "github.com/spf13/cobra"
-
-func NewGooseFSCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "goosefsruntime-controller",
- Short: "Controller for goosefsruntime",
- }
-
- cmd.AddCommand(versionCmd, startCmd)
- return cmd
-}
diff --git a/cmd/goosefs/app/version.go b/cmd/goosefs/app/version.go
deleted file mode 100644
index c1003351628..00000000000
--- a/cmd/goosefs/app/version.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package app
-
-import (
- "github.com/fluid-cloudnative/fluid"
- "github.com/spf13/cobra"
-)
-
-var (
- short bool
-)
-
-var versionCmd = &cobra.Command{
- Use: "version",
- Short: "print version information",
- Run: func(cmd *cobra.Command, args []string) {
- fluid.PrintVersion(short)
- },
-}
-
-func init() {
- versionCmd.Flags().BoolVar(&short, "short", false, "print just the short version info")
-}
diff --git a/cmd/goosefs/main.go b/cmd/goosefs/main.go
deleted file mode 100644
index 4b1a2b78298..00000000000
--- a/cmd/goosefs/main.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "os"
-
- "github.com/fluid-cloudnative/fluid/cmd/goosefs/app"
-)
-
-func main() {
- command := app.NewGooseFSCommand()
- if err := command.Execute(); err != nil {
- os.Exit(0)
- }
-}
diff --git a/config/crd/bases/data.fluid.io_goosefsruntimes.yaml b/config/crd/bases/data.fluid.io_goosefsruntimes.yaml
deleted file mode 100644
index c5f4dd06bf6..00000000000
--- a/config/crd/bases/data.fluid.io_goosefsruntimes.yaml
+++ /dev/null
@@ -1,1590 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.19.0
- name: goosefsruntimes.data.fluid.io
-spec:
- group: data.fluid.io
- names:
- categories:
- - fluid
- kind: GooseFSRuntime
- listKind: GooseFSRuntimeList
- plural: goosefsruntimes
- shortNames:
- - goose
- singular: goosefsruntime
- scope: Namespaced
- versions:
- - additionalPrinterColumns:
- - jsonPath: .status.masterNumberReady
- name: Ready Masters
- priority: 10
- type: integer
- - jsonPath: .status.desiredMasterNumberScheduled
- name: Desired Masters
- priority: 10
- type: integer
- - jsonPath: .status.masterPhase
- name: Master Phase
- type: string
- - jsonPath: .status.workerNumberReady
- name: Ready Workers
- priority: 10
- type: integer
- - jsonPath: .status.desiredWorkerNumberScheduled
- name: Desired Workers
- priority: 10
- type: integer
- - jsonPath: .status.workerPhase
- name: Worker Phase
- type: string
- - jsonPath: .status.fuseNumberReady
- name: Ready Fuses
- priority: 10
- type: integer
- - jsonPath: .status.desiredFuseNumberScheduled
- name: Desired Fuses
- priority: 10
- type: integer
- - jsonPath: .status.fusePhase
- name: Fuse Phase
- type: string
- - jsonPath: .status.apiGateway.endpoint
- name: API Gateway
- priority: 10
- type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1alpha1
- schema:
- openAPIV3Schema:
- properties:
- apiVersion:
- type: string
- kind:
- type: string
- metadata:
- type: object
- spec:
- properties:
- apiGateway:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- cleanCachePolicy:
- properties:
- gracePeriodSeconds:
- default: 60
- format: int32
- type: integer
- maxRetryAttempts:
- default: 3
- format: int32
- type: integer
- type: object
- data:
- properties:
- pin:
- type: boolean
- replicas:
- format: int32
- type: integer
- type: object
- disablePrometheus:
- type: boolean
- fuse:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- args:
- items:
- type: string
- type: array
- cleanPolicy:
- type: string
- env:
- additionalProperties:
- type: string
- type: object
- image:
- type: string
- imagePullPolicy:
- type: string
- imageTag:
- type: string
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- goosefsVersion:
- properties:
- image:
- type: string
- imagePullPolicy:
- type: string
- imageTag:
- type: string
- type: object
- hadoopConfig:
- type: string
- initUsers:
- properties:
- env:
- additionalProperties:
- type: string
- type: object
- image:
- type: string
- imagePullPolicy:
- type: string
- imageTag:
- type: string
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- jobMaster:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- jobWorker:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- master:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- type: integer
- runAs:
- properties:
- gid:
- format: int64
- type: integer
- group:
- type: string
- uid:
- format: int64
- type: integer
- user:
- type: string
- required:
- - gid
- - group
- - uid
- - user
- type: object
- tieredstore:
- properties:
- levels:
- items:
- properties:
- high:
- type: string
- low:
- type: string
- mediumtype:
- enum:
- - MEM
- - SSD
- - HDD
- type: string
- path:
- minLength: 1
- type: string
- quota:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- quotaList:
- pattern: ^((\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+)))),)+((\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$
- type: string
- volumeSource:
- properties:
- awsElasticBlockStore:
- properties:
- fsType:
- type: string
- partition:
- format: int32
- type: integer
- readOnly:
- type: boolean
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- azureDisk:
- properties:
- cachingMode:
- type: string
- diskName:
- type: string
- diskURI:
- type: string
- fsType:
- type: string
- kind:
- type: string
- readOnly:
- type: boolean
- required:
- - diskName
- - diskURI
- type: object
- azureFile:
- properties:
- readOnly:
- type: boolean
- secretName:
- type: string
- shareName:
- type: string
- required:
- - secretName
- - shareName
- type: object
- cephfs:
- properties:
- monitors:
- items:
- type: string
- type: array
- path:
- type: string
- readOnly:
- type: boolean
- secretFile:
- type: string
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- user:
- type: string
- required:
- - monitors
- type: object
- cinder:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- configMap:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- name:
- type: string
- optional:
- type: boolean
- type: object
- x-kubernetes-map-type: atomic
- csi:
- properties:
- driver:
- type: string
- fsType:
- type: string
- nodePublishSecretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- readOnly:
- type: boolean
- volumeAttributes:
- additionalProperties:
- type: string
- type: object
- required:
- - driver
- type: object
- downwardAPI:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
- properties:
- fieldRef:
- properties:
- apiVersion:
- type: string
- fieldPath:
- type: string
- required:
- - fieldPath
- type: object
- x-kubernetes-map-type: atomic
- mode:
- format: int32
- type: integer
- path:
- type: string
- resourceFieldRef:
- properties:
- containerName:
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- type: string
- required:
- - resource
- type: object
- x-kubernetes-map-type: atomic
- required:
- - path
- type: object
- type: array
- type: object
- emptyDir:
- properties:
- medium:
- type: string
- sizeLimit:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- ephemeral:
- properties:
- volumeClaimTemplate:
- properties:
- metadata:
- type: object
- spec:
- properties:
- accessModes:
- items:
- type: string
- type: array
- dataSource:
- properties:
- apiGroup:
- type: string
- kind:
- type: string
- name:
- type: string
- required:
- - kind
- - name
- type: object
- x-kubernetes-map-type: atomic
- dataSourceRef:
- properties:
- apiGroup:
- type: string
- kind:
- type: string
- name:
- type: string
- namespace:
- type: string
- required:
- - kind
- - name
- type: object
- resources:
- properties:
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- selector:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- type: object
- type: object
- x-kubernetes-map-type: atomic
- storageClassName:
- type: string
- volumeAttributesClassName:
- type: string
- volumeMode:
- type: string
- volumeName:
- type: string
- type: object
- required:
- - spec
- type: object
- type: object
- fc:
- properties:
- fsType:
- type: string
- lun:
- format: int32
- type: integer
- readOnly:
- type: boolean
- targetWWNs:
- items:
- type: string
- type: array
- wwids:
- items:
- type: string
- type: array
- type: object
- flexVolume:
- properties:
- driver:
- type: string
- fsType:
- type: string
- options:
- additionalProperties:
- type: string
- type: object
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- required:
- - driver
- type: object
- flocker:
- properties:
- datasetName:
- type: string
- datasetUUID:
- type: string
- type: object
- gcePersistentDisk:
- properties:
- fsType:
- type: string
- partition:
- format: int32
- type: integer
- pdName:
- type: string
- readOnly:
- type: boolean
- required:
- - pdName
- type: object
- gitRepo:
- properties:
- directory:
- type: string
- repository:
- type: string
- revision:
- type: string
- required:
- - repository
- type: object
- glusterfs:
- properties:
- endpoints:
- type: string
- path:
- type: string
- readOnly:
- type: boolean
- required:
- - endpoints
- - path
- type: object
- hostPath:
- properties:
- path:
- type: string
- type:
- type: string
- required:
- - path
- type: object
- iscsi:
- properties:
- chapAuthDiscovery:
- type: boolean
- chapAuthSession:
- type: boolean
- fsType:
- type: string
- initiatorName:
- type: string
- iqn:
- type: string
- iscsiInterface:
- type: string
- lun:
- format: int32
- type: integer
- portals:
- items:
- type: string
- type: array
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- targetPortal:
- type: string
- required:
- - iqn
- - lun
- - targetPortal
- type: object
- nfs:
- properties:
- path:
- type: string
- readOnly:
- type: boolean
- server:
- type: string
- required:
- - path
- - server
- type: object
- persistentVolumeClaim:
- properties:
- claimName:
- type: string
- readOnly:
- type: boolean
- required:
- - claimName
- type: object
- photonPersistentDisk:
- properties:
- fsType:
- type: string
- pdID:
- type: string
- required:
- - pdID
- type: object
- portworxVolume:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- projected:
- properties:
- defaultMode:
- format: int32
- type: integer
- sources:
- items:
- properties:
- clusterTrustBundle:
- properties:
- labelSelector:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- type: object
- type: object
- x-kubernetes-map-type: atomic
- name:
- type: string
- optional:
- type: boolean
- path:
- type: string
- signerName:
- type: string
- required:
- - path
- type: object
- configMap:
- properties:
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- name:
- type: string
- optional:
- type: boolean
- type: object
- x-kubernetes-map-type: atomic
- downwardAPI:
- properties:
- items:
- items:
- properties:
- fieldRef:
- properties:
- apiVersion:
- type: string
- fieldPath:
- type: string
- required:
- - fieldPath
- type: object
- x-kubernetes-map-type: atomic
- mode:
- format: int32
- type: integer
- path:
- type: string
- resourceFieldRef:
- properties:
- containerName:
- type: string
- divisor:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- resource:
- type: string
- required:
- - resource
- type: object
- x-kubernetes-map-type: atomic
- required:
- - path
- type: object
- type: array
- type: object
- secret:
- properties:
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- name:
- type: string
- optional:
- type: boolean
- type: object
- x-kubernetes-map-type: atomic
- serviceAccountToken:
- properties:
- audience:
- type: string
- expirationSeconds:
- format: int64
- type: integer
- path:
- type: string
- required:
- - path
- type: object
- type: object
- type: array
- type: object
- quobyte:
- properties:
- group:
- type: string
- readOnly:
- type: boolean
- registry:
- type: string
- tenant:
- type: string
- user:
- type: string
- volume:
- type: string
- required:
- - registry
- - volume
- type: object
- rbd:
- properties:
- fsType:
- type: string
- image:
- type: string
- keyring:
- type: string
- monitors:
- items:
- type: string
- type: array
- pool:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- user:
- type: string
- required:
- - image
- - monitors
- type: object
- scaleIO:
- properties:
- fsType:
- type: string
- gateway:
- type: string
- protectionDomain:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- sslEnabled:
- type: boolean
- storageMode:
- type: string
- storagePool:
- type: string
- system:
- type: string
- volumeName:
- type: string
- required:
- - gateway
- - secretRef
- - system
- type: object
- secret:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
- properties:
- key:
- type: string
- mode:
- format: int32
- type: integer
- path:
- type: string
- required:
- - key
- - path
- type: object
- type: array
- optional:
- type: boolean
- secretName:
- type: string
- type: object
- storageos:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- x-kubernetes-map-type: atomic
- volumeName:
- type: string
- volumeNamespace:
- type: string
- type: object
- vsphereVolume:
- properties:
- fsType:
- type: string
- storagePolicyID:
- type: string
- storagePolicyName:
- type: string
- volumePath:
- type: string
- required:
- - volumePath
- type: object
- type: object
- volumeType:
- default: hostPath
- enum:
- - hostPath
- - emptyDir
- type: string
- required:
- - mediumtype
- type: object
- type: array
- type: object
- worker:
- properties:
- annotations:
- additionalProperties:
- type: string
- type: object
- enabled:
- type: boolean
- env:
- additionalProperties:
- type: string
- type: object
- jvmOptions:
- items:
- type: string
- type: array
- nodeSelector:
- additionalProperties:
- type: string
- type: object
- ports:
- additionalProperties:
- type: integer
- type: object
- properties:
- additionalProperties:
- type: string
- type: object
- replicas:
- format: int32
- minimum: 1
- type: integer
- resources:
- properties:
- claims:
- items:
- properties:
- name:
- type: string
- required:
- - name
- type: object
- type: array
- x-kubernetes-list-map-keys:
- - name
- x-kubernetes-list-type: map
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- type: object
- type: object
- type: object
- status:
- properties:
- apiGateway:
- properties:
- endpoint:
- type: string
- type: object
- cacheAffinity:
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- items:
- properties:
- preference:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- x-kubernetes-map-type: atomic
- weight:
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- properties:
- nodeSelectorTerms:
- items:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchFields:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- type: object
- x-kubernetes-map-type: atomic
- type: array
- required:
- - nodeSelectorTerms
- type: object
- x-kubernetes-map-type: atomic
- type: object
- cacheStates:
- additionalProperties:
- type: string
- type: object
- conditions:
- items:
- properties:
- lastProbeTime:
- format: date-time
- type: string
- lastTransitionTime:
- format: date-time
- type: string
- message:
- type: string
- reason:
- type: string
- status:
- type: string
- type:
- type: string
- required:
- - status
- - type
- type: object
- type: array
- currentFuseNumberScheduled:
- format: int32
- type: integer
- currentMasterNumberScheduled:
- format: int32
- type: integer
- currentWorkerNumberScheduled:
- format: int32
- type: integer
- desiredFuseNumberScheduled:
- format: int32
- type: integer
- desiredMasterNumberScheduled:
- format: int32
- type: integer
- desiredWorkerNumberScheduled:
- format: int32
- type: integer
- fuseNumberAvailable:
- format: int32
- type: integer
- fuseNumberReady:
- format: int32
- type: integer
- fuseNumberUnavailable:
- format: int32
- type: integer
- fusePhase:
- type: string
- fuseReason:
- type: string
- masterNumberReady:
- format: int32
- type: integer
- masterPhase:
- type: string
- masterReason:
- type: string
- mountTime:
- format: date-time
- type: string
- mounts:
- items:
- properties:
- encryptOptions:
- items:
- properties:
- name:
- type: string
- valueFrom:
- properties:
- secretKeyRef:
- properties:
- key:
- type: string
- name:
- type: string
- required:
- - name
- type: object
- type: object
- required:
- - name
- type: object
- type: array
- mountPoint:
- minLength: 5
- type: string
- name:
- minLength: 0
- type: string
- options:
- additionalProperties:
- type: string
- type: object
- path:
- type: string
- readOnly:
- type: boolean
- shared:
- type: boolean
- required:
- - mountPoint
- type: object
- type: array
- selector:
- type: string
- setupDuration:
- type: string
- valueFile:
- type: string
- workerNumberAvailable:
- format: int32
- type: integer
- workerNumberReady:
- format: int32
- type: integer
- workerNumberUnavailable:
- format: int32
- type: integer
- workerPhase:
- type: string
- workerReason:
- type: string
- required:
- - currentFuseNumberScheduled
- - currentMasterNumberScheduled
- - currentWorkerNumberScheduled
- - desiredFuseNumberScheduled
- - desiredMasterNumberScheduled
- - desiredWorkerNumberScheduled
- - fuseNumberReady
- - fusePhase
- - masterNumberReady
- - masterPhase
- - valueFile
- - workerNumberReady
- - workerPhase
- type: object
- type: object
- served: true
- storage: true
- subresources:
- scale:
- labelSelectorPath: .status.selector
- specReplicasPath: .spec.replicas
- statusReplicasPath: .status.currentWorkerNumberScheduled
- status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index bcc99314160..fc3f620e6fe 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -7,7 +7,6 @@ resources:
- bases/data.fluid.io_databackups.yaml
- bases/data.fluid.io_dataloads.yaml
- bases/data.fluid.io_jindoruntimes.yaml
-- bases/data.fluid.io_goosefsruntimes.yaml
- bases/data.fluid.io_juicefsruntimes.yaml
- bases/data.fluid.io_thinruntimes.yaml
- bases/data.fluid.io_thinruntimeprofiles.yaml
diff --git a/config/crd/patches/cainjection_in_goosefsruntimes.yaml b/config/crd/patches/cainjection_in_goosefsruntimes.yaml
deleted file mode 100644
index 88975356b97..00000000000
--- a/config/crd/patches/cainjection_in_goosefsruntimes.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# The following patch adds a directive for certmanager to inject CA into the CRD
-# CRD conversion requires k8s 1.13 or later.
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
- name: goosefsruntime.data.fluid.io
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 71d7693e653..6c18f8cec18 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -90,26 +90,6 @@ rules:
- get
- patch
- update
-- apiGroups:
- - data.fluid.io
- resources:
- - goosefsruntimes
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - data.fluid.io
- resources:
- - goosefsruntimes/status
- verbs:
- - get
- - patch
- - update
- apiGroups:
- data.fluid.io
resources:
diff --git a/docker/Dockerfile.goosefsruntime b/docker/Dockerfile.goosefsruntime
deleted file mode 100644
index 597120cceb9..00000000000
--- a/docker/Dockerfile.goosefsruntime
+++ /dev/null
@@ -1,38 +0,0 @@
-# Build the goosefsruntime-controller manager binary
-# golang:1.24.12-bookworm
-FROM golang:1.24.12-bookworm@sha256:1c64c586e1cf9dc4c394c5896ec574659c792a0840f4fa0eb54a88de146e978b as builder
-
-WORKDIR /go/src/github.com/fluid-cloudnative/fluid
-COPY . .
-
-ARG FLUID_VERSION
-RUN make goosefsruntime-controller-build && \
- cp bin/goosefsruntime-controller /go/bin/goosefsruntime-controller
-RUN bash hack/helm/pin_runtime_chart_version.sh "${FLUID_VERSION}"
-
-# Debug
-RUN go install github.com/go-delve/delve/cmd/dlv@v1.8.2
-
-# alpine:3.23.3
-FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659
-RUN apk add --update bash curl wget iproute2 libc6-compat tzdata vim && \
- rm -rf /var/cache/apk/* && \
- cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
- echo "Asia/Shanghai" > /etc/timezone
-
-ARG TARGETARCH
-ARG HELM_VERSION
-RUN wget -O helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz https://github.com/fluid-cloudnative/helm/releases/download/${HELM_VERSION}/helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz && \
- tar -xvf helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz && \
- mv linux-${TARGETARCH}/helm /usr/local/bin/ddc-helm && \
- chmod u+x /usr/local/bin/ddc-helm && \
- rm -f ${HELM_VERSION}-linux-${TARGETARCH}.tar.gz
-
-COPY --from=builder /go/src/github.com/fluid-cloudnative/fluid/charts/ /charts
-
-COPY --from=builder /go/bin/goosefsruntime-controller /usr/local/bin/goosefsruntime-controller
-COPY --from=builder /go/bin/dlv /usr/local/bin/dlv
-RUN chmod -R u+x /usr/local/bin/
-
-CMD ["goosefsruntime-controller", "start"]
-
diff --git a/integration/prometheus/fluid-prometheus-grafana-monitor-goosefs.json b/integration/prometheus/fluid-prometheus-grafana-monitor-goosefs.json
deleted file mode 100644
index 4bcf4188d8b..00000000000
--- a/integration/prometheus/fluid-prometheus-grafana-monitor-goosefs.json
+++ /dev/null
@@ -1,2691 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": 13467,
- "graphTooltip": 0,
- "id": 4,
- "iteration": 1618217355560,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "datasource": "Prometheus",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 2,
- "panels": [],
- "repeat": null,
- "title": "GooseFS IO Key Metrics",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": true,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 6,
- "x": 0,
- "y": 1
- },
- "hiddenSeries": false,
- "id": 8,
- "legend": {
- "avg": true,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "repeat": null,
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_BytesReadLocal{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "Bytes Read Local",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Read Local (Total)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": true,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 6,
- "x": 6,
- "y": 1
- },
- "hiddenSeries": false,
- "id": 106,
- "legend": {
- "avg": true,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_BytesReadAlluxio{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "Bytes Read Local",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Read Remote (Total)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 6,
- "x": 12,
- "y": 1
- },
- "hiddenSeries": false,
- "id": 6,
- "legend": {
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_BytesReadUfsAll{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "Bytes Read UFS",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Read UFS(Total)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "align": null,
- "filterable": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 12,
- "w": 5,
- "x": 18,
- "y": 1
- },
- "id": 105,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "sum(Cluster_Workers{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"})",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "",
- "refId": "B"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Live Workers",
- "type": "stat"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 6,
- "x": 0,
- "y": 10
- },
- "hiddenSeries": false,
- "id": 4,
- "legend": {
- "avg": true,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_BytesReadAlluxioThroughput{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "Bytes Read Remote (Per Minute)",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Read Remote (Throughput)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 6,
- "x": 6,
- "y": 10
- },
- "hiddenSeries": false,
- "id": 107,
- "legend": {
- "avg": true,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_BytesReadLocalThroughput{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "Bytes Read Local (Per Minute)",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Read Local (Throughput)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 6,
- "x": 12,
- "y": 10
- },
- "hiddenSeries": false,
- "id": 108,
- "legend": {
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_BytesReadUfsThroughput{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "Bytes Read Ufs (Per Minute)",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Read UFS (Throughput)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "datasource": "Prometheus",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 19
- },
- "id": 101,
- "panels": [],
- "title": "Storage",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "description": "Displays the percentage of free GooseFS storage with thresholds from 100-50%, 50-10%, and 10-0%",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": [],
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 20
- },
- "hiddenSeries": false,
- "id": 102,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "(Cluster_CapacityUsed{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}) / Cluster_CapacityTotal{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Used Percentage",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "% GooseFS Space Used",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": "1",
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {
- "Alter level": "dark-red",
- "Free GooseFS Space": "semi-dark-blue",
- "Value": "dark-red",
- "scale(divideSeries(Master.CapacityFree,Master.CapacityTotal),100)": "semi-dark-blue"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "description": "Displays the percentage of free GooseFS storage with thresholds from 100-50%, 50-10%, and 10-0%",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": [],
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 20
- },
- "hiddenSeries": false,
- "id": 42,
- "interval": "",
- "legend": {
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "Cluster_CapacityTotal{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Total Capacity",
- "refId": "A"
- },
- {
- "expr": "Cluster_CapacityUsed{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Used Capacity",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "GooseFS Space Used",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "decbytes",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "datasource": "Prometheus",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 27
- },
- "id": 58,
- "panels": [],
- "title": "Logical Operations",
- "type": "row"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays the current number of how many directories GooseFS has created since starting",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "align": null,
- "filterable": false
- },
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 13,
- "w": 4,
- "x": 0,
- "y": 28
- },
- "id": 60,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_DirectoriesCreated{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Directories Created",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Directories Created",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "displays the number of times the FileInfosGot operation was called",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 4,
- "x": 4,
- "y": 28
- },
- "id": 62,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_FileInfosGot{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} FileInfo Got",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "File Infos Got",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays the total paths located in the GooseFS file system",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 4,
- "x": 8,
- "y": 28
- },
- "id": 64,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_TotalPaths{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Total Paths",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total Paths",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays how many times the CompleteFile operation has been called",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 4,
- "x": 12,
- "y": 28
- },
- "id": 66,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_CompleteFileOps{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Complete FIle Ops",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Complete File Operations",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays the amount of directories that GooseFS has created (Not the current amount of directories)",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 4,
- "x": 16,
- "y": 28
- },
- "id": 68,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_CreateDirectoryOps{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Directory Ops",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Create Directory Oprtations",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays how many times the deletePath operation has been called (And how many total paths have been deleted)",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 13,
- "w": 4,
- "x": 20,
- "y": 28
- },
- "id": 78,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_DeletePathOps{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} DeletePath Ops",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Delete Path Operations",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays the current number of pinned files in GooseFS",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 4,
- "x": 4,
- "y": 34
- },
- "id": 70,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_FilesPinned{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Files Pinned",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Files Pinned",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays how many times the mount operation has been called (including failed invocations)",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 4,
- "x": 8,
- "y": 34
- },
- "id": 72,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_MountOps{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Mount",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Mount Operations",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays the current number of freed files in GooseFS",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 4,
- "x": 12,
- "y": 34
- },
- "id": 74,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_FilesFreed{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Files Freed",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Files Freed",
- "type": "stat"
- },
- {
- "cacheTimeout": null,
- "datasource": "$goosefs_datasource",
- "description": "Displays how many times the unmount operation has been called",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "mappings": [
- {
- "id": 0,
- "op": "=",
- "text": "N/A",
- "type": 1,
- "value": "null"
- }
- ],
- "nullValueMode": "connected",
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 4,
- "x": 16,
- "y": 34
- },
- "id": 76,
- "interval": null,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "7.3.7",
- "targets": [
- {
- "expr": "Master_UnmountOps{fluid_runtime=~\"$fluid_runtime\",namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Unmount Ops",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Unmount Operations",
- "type": "stat"
- },
- {
- "collapsed": false,
- "datasource": "Prometheus",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 41
- },
- "id": 46,
- "panels": [],
- "title": "GooseFS Metadata Operations",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "description": "",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": [],
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 8,
- "x": 0,
- "y": 42
- },
- "hiddenSeries": false,
- "id": 48,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"Master_GetStatus_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.5\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} GetStatus Total quantile=0.5",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"Master_GetStatus_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.95\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} GetStatus Total quantile=0.95",
- "refId": "B"
- },
- {
- "expr": "{__name__=\"Master_GetStatus_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.99\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} GetStatus Total quantile=0.99",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "GetStatus Operations",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "description": "",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 8,
- "x": 8,
- "y": 42
- },
- "hiddenSeries": false,
- "id": 50,
- "legend": {
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"Master_ListStatus_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.5\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} ListStatus quantile=0.5",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"Master_ListStatus_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.95\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} ListStatus quantile=0.95",
- "refId": "B"
- },
- {
- "expr": "{__name__=\"Master_ListStatus_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.99\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} ListStatus quantile=0.99",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "ListStatus Operation",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "description": "",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 8,
- "x": 16,
- "y": 42
- },
- "hiddenSeries": false,
- "id": 52,
- "legend": {
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"Master_getConfiguration\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.5\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} GetConfiguration quantile=0.5",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"Master_getConfiguration\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.95\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} GetConfiguration quantile=0.95",
- "refId": "B"
- },
- {
- "expr": "{__name__=\"Master_getConfiguration\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\",quantile=\"0.99\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} GetConfiguration quantile=0.99",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Get Configuration Operation",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {
- "p50": "#7eb26d",
- "p95": "#eab839",
- "p99": "#6ed0e0"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "description": "Displays the 50th, 95th, and 99th percentiles call time for the blockHeartbeat metadata operation.\n\nNote: This is not a graph for the actual Block Heartbeat just how long it took the operation to run",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 49
- },
- "hiddenSeries": false,
- "id": 54,
- "legend": {
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"Master_blockHeartbeat_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\",quantile=\"0.5\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} BlockHeartBeat quantile=0.5",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"Master_blockHeartbeat_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\",quantile=\"0.95\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} BlockHeartBeat quantile=0.95",
- "refId": "B"
- },
- {
- "expr": "{__name__=\"Master_blockHeartbeat_User:$goosefs_user\",fluid_runtime=~\"$fluid_runtime\",quantile=\"0.99\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} BlockHeartBeat quantile=0.99",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Block Heartbeat Operation",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "datasource": "Prometheus",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 56
- },
- "id": 92,
- "panels": [],
- "title": "Master JVM Memory",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 0,
- "y": 57
- },
- "hiddenSeries": false,
- "id": 98,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"pools_Compressed_Class_Space_used\", fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Memory Pools Compressed Class Space Used",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"pools_Metaspace_used\", fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Memory Pools Metdspace Used",
- "refId": "B"
- },
- {
- "expr": "{__name__=\"pools_PS_Eden_Space_used\", fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "",
- "refId": "C"
- },
- {
- "expr": "{__name__=\"pools_PS_Survivor_Space_used\", fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "",
- "refId": "D"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Master Memory Pools",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 8,
- "y": 57
- },
- "hiddenSeries": false,
- "id": 96,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"heap_used\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Heap Used",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"heap_max\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Heap Max",
- "refId": "B"
- },
- {
- "expr": "{__name__=\"non_heap_used\",fluid_runtime=~\"$fluid_runtime\", namespace=~\"$namespace\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Non Heap Used",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Master Heap Memory",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$goosefs_datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 8,
- "x": 16,
- "y": 57
- },
- "hiddenSeries": false,
- "id": 94,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "{__name__=\"total_max\",fluid_runtime=~\"$fluid_runtime\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Total Max Memory",
- "refId": "A"
- },
- {
- "expr": "{__name__=\"total_used\",fluid_runtime=~\"$fluid_runtime\"}",
- "interval": "",
- "legendFormat": "{{fluid_runtime}} Master Total Used Memory",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Master Total Memory",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "refresh": false,
- "schemaVersion": 26,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "current": {
- "selected": false,
- "text": "Prometheus",
- "value": "Prometheus"
- },
- "error": null,
- "hide": 0,
- "includeAll": false,
- "label": "The datasource of goosefs prometheus",
- "multi": false,
- "name": "goosefs_datasource",
- "options": [],
- "query": "prometheus",
- "queryValue": "",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "type": "datasource"
- },
- {
- "current": {
- "selected": false,
- "text": "goosefs master",
- "value": "goosefs master"
- },
- "error": null,
- "hide": 2,
- "label": "",
- "name": "masters",
- "options": [
- {
- "selected": false,
- "text": "master",
- "value": "master"
- }
- ],
- "query": "master",
- "skipUrlSync": false,
- "type": "constant"
- },
- {
- "current": {
- "selected": true,
- "text": "goosefs worker",
- "value": "goosefs worker"
- },
- "error": null,
- "hide": 2,
- "label": "The prometheus job_name including worker nodes",
- "name": "workers",
- "options": [
- {
- "selected": true,
- "text": "goosefs worker",
- "value": "goosefs worker"
- }
- ],
- "query": "goosefs worker",
- "skipUrlSync": false,
- "type": "constant"
- },
- {
- "allValue": null,
- "current": {
- "selected": true,
- "text": [
- "root"
- ],
- "value": [
- "root"
- ]
- },
- "datasource": "$goosefs_datasource",
- "definition": "metrics(Master_getConfiguration_User)",
- "error": null,
- "hide": 0,
- "includeAll": false,
- "label": "User of runtime",
- "multi": true,
- "name": "goosefs_user",
- "options": [],
- "query": "metrics(Master_getConfiguration_User)",
- "refresh": 2,
- "regex": "/:_?([^\\s_]+)/",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allValue": null,
- "current": {
- "selected": false,
- "text": "All",
- "value": "$__all"
- },
- "datasource": "$goosefs_datasource",
- "definition": "label_values(instance)",
- "error": null,
- "hide": 2,
- "includeAll": true,
- "label": null,
- "multi": false,
- "name": "instance",
- "options": [],
- "query": "label_values(instance)",
- "refresh": 2,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allValue": null,
- "current": {
- "selected": true,
- "text": [
- "All"
- ],
- "value": [
- "$__all"
- ]
- },
- "datasource": "$goosefs_datasource",
- "definition": "label_values(fluid_runtime)",
- "error": null,
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "fluid_runtime",
- "options": [],
- "query": "label_values(fluid_runtime)",
- "refresh": 2,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allValue": null,
- "current": {
- "selected": true,
- "text": [
- "All"
- ],
- "value": [
- "$__all"
- ]
- },
- "datasource": "$goosefs_datasource",
- "definition": "label_values(namespace)",
- "error": null,
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "namespace",
- "options": [],
- "query": "label_values(namespace)",
- "refresh": 2,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-3h",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ]
- },
- "timezone": "",
- "title": "Fluid-GooseFS-Runtime",
- "uid": "oJnC7i4Mk",
- "version": 3
-}
\ No newline at end of file
diff --git a/integration/prometheus/prometheus-goosefs.yaml b/integration/prometheus/prometheus-goosefs.yaml
deleted file mode 100644
index 897d7a2f1e8..00000000000
--- a/integration/prometheus/prometheus-goosefs.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: prometheus-configmap
- namespace: kube-system
-data:
- prometheus.yml: |-
- rule_files:
- - "/etc/prometheus-rules/*.rules"
- scrape_configs:
- - job_name: 'goosefs runtime'
- metrics_path: /metrics/prometheus
- kubernetes_sd_configs:
- - role: endpoints
- relabel_configs:
- - source_labels: [__meta_kubernetes_service_label_monitor]
- regex: goosefs_runtime_metrics
- action: keep
- - source_labels: [__meta_kubernetes_endpoint_port_name]
- regex: web
- action: keep
- - source_labels: [__meta_kubernetes_namespace]
- target_label: namespace
- replacement: $1
- action: replace
- - source_labels: [__meta_kubernetes_service_label_release]
- target_label: fluid_runtime
- replacement: $1
- action: replace
- - source_labels: [__meta_kubernetes_endpoint_address_target_name]
- target_label: pod
- replacement: $1
- action: replace
diff --git a/pkg/application/inject/fuse/mutator/mutator_default.go b/pkg/application/inject/fuse/mutator/mutator_default.go
index 64b8ecb1103..508bae3859f 100644
--- a/pkg/application/inject/fuse/mutator/mutator_default.go
+++ b/pkg/application/inject/fuse/mutator/mutator_default.go
@@ -40,15 +40,15 @@ import (
var (
// datavolume-, volume-localtime for JindoFS
- // mem, ssd, hdd for Alluxio and GooseFS
+ // mem, ssd, hdd for Alluxio
// cache-dir for JuiceFS
cacheDirNames = []string{"datavolume-", "volume-localtime", "cache-dir", "mem", "ssd", "hdd"}
- // hostpath fuse mount point for Alluxio, JindoFS, GooseFS and JuiceFS
- hostMountNames = []string{"alluxio-fuse-mount", "jindofs-fuse-mount", "goosefs-fuse-mount", "juicefs-fuse-mount", "thin-fuse-mount", "efc-fuse-mount", "efc-sock"}
+ // hostpath fuse mount point for Alluxio, JindoFS and JuiceFS
+ hostMountNames = []string{"alluxio-fuse-mount", "jindofs-fuse-mount", "juicefs-fuse-mount", "thin-fuse-mount", "efc-fuse-mount", "efc-sock"}
- // fuse devices for Alluxio, JindoFS, GooseFS
- hostFuseDeviceNames = []string{"alluxio-fuse-device", "jindofs-fuse-device", "goosefs-fuse-device", "thin-fuse-device"}
+ // fuse devices for Alluxio, JindoFS
+ hostFuseDeviceNames = []string{"alluxio-fuse-device", "jindofs-fuse-device", "thin-fuse-device"}
)
// TODO: DefaultMutator will be rewritten with polymorphism withe platform-specific mutation logic
diff --git a/pkg/application/inject/fuse/poststart/script_gen_helper_test.go b/pkg/application/inject/fuse/poststart/script_gen_helper_test.go
index 3d8c27b4b69..4e47f1782b9 100644
--- a/pkg/application/inject/fuse/poststart/script_gen_helper_test.go
+++ b/pkg/application/inject/fuse/poststart/script_gen_helper_test.go
@@ -167,23 +167,6 @@ var _ = Describe("ScriptGeneratorHelper", func() {
Expect(key.Namespace).To(Equal("ns"))
})
})
-
- Context("mixed case runtime type", func() {
- It("should return correct configmap key", func() {
- helper := &scriptGeneratorHelper{
- configMapName: "my-config",
- }
- datasetKey := types.NamespacedName{
- Name: "test",
- Namespace: "test-ns",
- }
-
- key := helper.GetNamespacedConfigMapKey(datasetKey, "GooseFS")
-
- Expect(key.Name).To(Equal("goosefs-my-config"))
- Expect(key.Namespace).To(Equal("test-ns"))
- })
- })
})
Describe("GetVolume", func() {
diff --git a/pkg/common/goosefs.go b/pkg/common/goosefs.go
deleted file mode 100644
index a9a1d5ed718..00000000000
--- a/pkg/common/goosefs.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-// Runtime for GooseFS
-const (
- GooseFSRuntime = "goosefs"
-
- GooseFSMountType = "fuse.goosefs-fuse"
-
- GooseFSNamespace = "goosefs-system"
-
- GooseFSChart = GooseFSRuntime
-
- GooseFSEngineImpl = GooseFSRuntime
-)
-
-// Constants for GooseFS images
-const (
- GooseFSRuntimeImageEnv = "GOOSEFS_RUNTIME_IMAGE_ENV"
-
- GooseFSFuseImageEnv = "GOOSEFS_FUSE_IMAGE_ENV"
-
- DefaultGooseFSRuntimeImage = "ccr.ccs.tencentyun.com/qcloud/goosefs:v1.2.0"
-
- DefaultGooseFSFuseImage = "ccr.ccs.tencentyun.com/qcloud/goosefs-fuse:v1.2.0"
-)
-
-var (
- // goosefs ufs root path
- GooseFSMountPathFormat = RootDirPath + "%s"
-
- GooseFSLocalStorageRootPath = "/underFSStorage"
- GooseFSLocalStoragePathFormat = GooseFSLocalStorageRootPath + "/%s"
-)
diff --git a/pkg/common/ufs_path.go b/pkg/common/ufs_path.go
index 7603dda3d0d..ae74c4bed8b 100644
--- a/pkg/common/ufs_path.go
+++ b/pkg/common/ufs_path.go
@@ -5,7 +5,7 @@ const (
RootDirPath = "/"
UFSMountPathFormat = RootDirPath + "%s"
- // same for Alluxio, GooseFS and JindoFS
+ // same for Alluxio and JindoFS
LocalStorageRootPath = "/underFSStorage"
LocalStoragePathFormat = LocalStorageRootPath + "/%s"
)
diff --git a/pkg/controllers/deploy/runtime_controllers.go b/pkg/controllers/deploy/runtime_controllers.go
index 1720e28eaa1..3832b9f5e27 100644
--- a/pkg/controllers/deploy/runtime_controllers.go
+++ b/pkg/controllers/deploy/runtime_controllers.go
@@ -27,7 +27,6 @@ import (
"github.com/fluid-cloudnative/fluid/pkg/ddc/alluxio"
"github.com/fluid-cloudnative/fluid/pkg/ddc/efc"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs"
"github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx"
"github.com/fluid-cloudnative/fluid/pkg/ddc/juicefs"
"github.com/fluid-cloudnative/fluid/pkg/ddc/thin"
@@ -53,7 +52,6 @@ var (
"alluxioruntime-controller": alluxio.Precheck,
"jindoruntime-controller": jindofsx.Precheck,
"juicefsruntime-controller": juicefs.Precheck,
- "goosefsruntime-controller": goosefs.Precheck,
"thinruntime-controller": thin.Precheck,
"efcruntime-controller": efc.Precheck,
"vineyardruntime-controller": vineyard.Precheck,
diff --git a/pkg/controllers/deploy/runtime_controllers_test.go b/pkg/controllers/deploy/runtime_controllers_test.go
index e7c338f2d34..17b1ff1f99c 100644
--- a/pkg/controllers/deploy/runtime_controllers_test.go
+++ b/pkg/controllers/deploy/runtime_controllers_test.go
@@ -27,7 +27,6 @@ import (
"github.com/fluid-cloudnative/fluid/pkg/common"
"github.com/fluid-cloudnative/fluid/pkg/ddc/alluxio"
"github.com/fluid-cloudnative/fluid/pkg/ddc/efc"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs"
"github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx"
"github.com/fluid-cloudnative/fluid/pkg/ddc/juicefs"
"github.com/fluid-cloudnative/fluid/pkg/ddc/thin"
@@ -94,8 +93,6 @@ var _ = Describe("runtime controller scaleout", func() {
},
Entry("defaults zero replicas to one when no annotation exists",
newDeployment("unknown-controller", 0, nil), true, int32(1)),
- Entry("uses the configured replica annotation when it is greater than one",
- newDeployment("goosefsruntime-controller", 0, map[string]string{common.RuntimeControllerReplicas: "3"}), true, int32(3)),
Entry("enforces a minimum of one replica when annotation is zero",
newDeployment("juicefsruntime-controller", 0, map[string]string{common.RuntimeControllerReplicas: "0"}), true, int32(1)),
Entry("leaves already running controllers unchanged",
@@ -242,7 +239,6 @@ func controllerDeployments() []runtime.Object {
newDeployment("alluxioruntime-controller", 0, nil),
newDeployment("jindoruntime-controller", 1, nil),
newDeployment("juicefsruntime-controller", 0, map[string]string{common.RuntimeControllerReplicas: "0"}),
- newDeployment("goosefsruntime-controller", 0, map[string]string{common.RuntimeControllerReplicas: "3"}),
newDeployment("unknown-controller", 0, nil),
}
}
@@ -250,7 +246,6 @@ func controllerDeployments() []runtime.Object {
func runtimeObjects() []runtime.Object {
return []runtime.Object{
&datav1alpha1.AlluxioRuntime{ObjectMeta: metav1.ObjectMeta{Name: "alluxio", Namespace: corev1.NamespaceDefault}},
- &datav1alpha1.GooseFSRuntime{ObjectMeta: metav1.ObjectMeta{Name: "goosefs", Namespace: corev1.NamespaceDefault}},
&datav1alpha1.JindoRuntime{ObjectMeta: metav1.ObjectMeta{Name: "jindo", Namespace: corev1.NamespaceDefault}},
&datav1alpha1.JuiceFSRuntime{ObjectMeta: metav1.ObjectMeta{Name: "juicefs", Namespace: corev1.NamespaceDefault}},
}
@@ -261,7 +256,6 @@ func runtimePrecheckFuncs() map[string]CheckFunc {
"alluxioruntime-controller": alluxio.Precheck,
"jindoruntime-controller": jindofsx.Precheck,
"juicefsruntime-controller": juicefs.Precheck,
- "goosefsruntime-controller": goosefs.Precheck,
"thinruntime-controller": thin.Precheck,
"efcruntime-controller": efc.Precheck,
"vineyardruntime-controller": vineyard.Precheck,
diff --git a/pkg/controllers/operation_controller.go b/pkg/controllers/operation_controller.go
index 0321f1d4a5b..0522db23036 100644
--- a/pkg/controllers/operation_controller.go
+++ b/pkg/controllers/operation_controller.go
@@ -288,8 +288,6 @@ func (o *OperationReconciler) getRuntimeObjectAndEngineImpl(runtimeType, name, n
runtime, err = utils.GetAlluxioRuntime(o.Client, name, namespace)
case common.JindoRuntime:
runtime, err = utils.GetJindoRuntime(o.Client, name, namespace)
- case common.GooseFSRuntime:
- runtime, err = utils.GetGooseFSRuntime(o.Client, name, namespace)
case common.JuiceFSRuntime:
runtime, err = utils.GetJuiceFSRuntime(o.Client, name, namespace)
case common.EFCRuntime:
@@ -316,8 +314,6 @@ func (o *OperationReconciler) getRuntimeObjectAndEngineImpl(runtimeType, name, n
return runtime, ddc.InferEngineImpl(*runtime.GetStatus(), common.AlluxioEngineImpl), nil
case common.JindoRuntime:
return runtime, ddc.InferEngineImpl(*runtime.GetStatus(), jindoutils.GetDefaultEngineImpl()), nil
- case common.GooseFSRuntime:
- return runtime, ddc.InferEngineImpl(*runtime.GetStatus(), common.GooseFSEngineImpl), nil
case common.JuiceFSRuntime:
return runtime, ddc.InferEngineImpl(*runtime.GetStatus(), common.JuiceFSEngineImpl), nil
case common.EFCRuntime:
diff --git a/pkg/controllers/operation_controller_test.go b/pkg/controllers/operation_controller_test.go
index 93c0c614862..6939770a880 100644
--- a/pkg/controllers/operation_controller_test.go
+++ b/pkg/controllers/operation_controller_test.go
@@ -346,7 +346,6 @@ var _ = Describe("OperationReconciler getRuntimeObjectAndEngineImpl", func() {
},
Entry("alluxio", common.AlluxioRuntime),
Entry("jindo", common.JindoRuntime),
- Entry("goosefs", common.GooseFSRuntime),
Entry("juicefs", common.JuiceFSRuntime),
Entry("efc", common.EFCRuntime),
Entry("thin", common.ThinRuntime),
@@ -372,9 +371,6 @@ var _ = Describe("OperationReconciler getRuntimeObjectAndEngineImpl", func() {
Expect(obj).NotTo(BeNil())
Expect(engineImpl).To(Equal(expectedEngineImpl))
},
- Entry("goosefs", common.GooseFSRuntime, &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{Name: testRuntimeName, Namespace: "default"},
- }, common.GooseFSEngineImpl),
Entry("jindo", common.JindoRuntime, &datav1alpha1.JindoRuntime{
ObjectMeta: metav1.ObjectMeta{Name: testRuntimeName, Namespace: "default"},
}, jindoutils.GetDefaultEngineImpl()),
diff --git a/pkg/controllers/v1alpha1/goosefs/goosefs_runtime_controller.go b/pkg/controllers/v1alpha1/goosefs/goosefs_runtime_controller.go
deleted file mode 100644
index 3adab2de5de..00000000000
--- a/pkg/controllers/v1alpha1/goosefs/goosefs_runtime_controller.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
-
- "sync"
-
- "github.com/pkg/errors"
-
- "github.com/go-logr/logr"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/controllers"
- "github.com/fluid-cloudnative/fluid/pkg/ddc"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-const controllerName string = "GooseFSRuntimeController"
-
-// Use compiler to check if the struct implements all the interface
-var _ controllers.RuntimeReconcilerInterface = (*RuntimeReconciler)(nil)
-
-// RuntimeReconciler reconciles a GooseFSRuntime object
-type RuntimeReconciler struct {
- Scheme *runtime.Scheme
- engines map[string]base.Engine
- mutex *sync.Mutex
- *controllers.RuntimeReconciler
-}
-
-// NewRuntimeReconciler create controller for watching runtime custom resources created
-func NewRuntimeReconciler(client client.Client,
- log logr.Logger,
- scheme *runtime.Scheme,
- recorder record.EventRecorder) *RuntimeReconciler {
- r := &RuntimeReconciler{
- Scheme: scheme,
- mutex: &sync.Mutex{},
- engines: map[string]base.Engine{},
- }
- r.RuntimeReconciler = controllers.NewRuntimeReconciler(r, client, log, recorder)
- return r
-}
-
-// Reconcile reconciles goosefs runtime
-// +kubebuilder:rbac:groups=data.fluid.io,resources=goosefsruntimes,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=data.fluid.io,resources=goosefsruntimes/status,verbs=get;update;patch
-
-func (r *RuntimeReconciler) Reconcile(context context.Context, req ctrl.Request) (ctrl.Result, error) {
- ctx := cruntime.ReconcileRequestContext{
- Context: context,
- Log: r.Log.WithValues("goosefsruntime", req.NamespacedName),
- NamespacedName: req.NamespacedName,
- Recorder: r.Recorder,
- Category: common.AccelerateCategory,
- RuntimeType: common.GooseFSRuntime,
- Client: r.Client,
- FinalizerName: runtimeResourceFinalizerName,
- }
-
- ctx.Log.V(1).Info("process the request", "request", req)
-
- // 1.Load the Runtime
- runtime, err := r.getRuntime(ctx)
- if err != nil {
- if utils.IgnoreNotFound(err) == nil {
- ctx.Log.V(1).Info("The runtime is not found", "runtime", ctx.NamespacedName)
- return ctrl.Result{}, nil
- } else {
- ctx.Log.Error(err, "Failed to get the ddc runtime")
- return utils.RequeueIfError(errors.Wrap(err, "Unable to get ddc runtime"))
- }
- }
- ctx.Runtime = runtime
- ctx.EngineImpl = ddc.InferEngineImpl(runtime.Status, common.GooseFSEngineImpl)
- ctx.Log.V(1).Info("process the runtime", "runtime", ctx.Runtime)
-
- // reconcile the implement
- return r.ReconcileInternal(ctx)
-}
-
-// SetupWithManager setups the manager with RuntimeReconciler
-func (r *RuntimeReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
- return ctrl.NewControllerManagedBy(mgr).
- WithOptions(options).
- For(&datav1alpha1.GooseFSRuntime{}).
- Complete(r)
-}
-
-func (r *RuntimeReconciler) ControllerName() string {
- return controllerName
-}
diff --git a/pkg/controllers/v1alpha1/goosefs/implement.go b/pkg/controllers/v1alpha1/goosefs/implement.go
deleted file mode 100644
index 903d8d5044b..00000000000
--- a/pkg/controllers/v1alpha1/goosefs/implement.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
-
- "github.com/fluid-cloudnative/fluid/pkg/ddc"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
-)
-
-const (
- runtimeResourceFinalizerName = "goosefs-runtime-controller-finalizer"
-)
-
-// getRuntime gets the runtime
-func (r *RuntimeReconciler) getRuntime(ctx cruntime.ReconcileRequestContext) (*datav1alpha1.GooseFSRuntime, error) {
- var runtime datav1alpha1.GooseFSRuntime
- if err := r.Get(ctx, ctx.NamespacedName, &runtime); err != nil {
- return nil, err
- }
- return &runtime, nil
-}
-
-// GetOrCreateEngine gets or creates engine
-func (r *RuntimeReconciler) GetOrCreateEngine(
- ctx cruntime.ReconcileRequestContext) (engine base.Engine, err error) {
- var found bool
- id := ddc.GenerateEngineID(ctx.NamespacedName)
- r.mutex.Lock()
- defer r.mutex.Unlock()
- if engine, found = r.engines[id]; !found {
- engine, err = ddc.CreateEngine(id,
- ctx)
- if err != nil {
- return nil, err
- }
- r.engines[id] = engine
- r.Log.V(1).Info("Put Engine to engine map")
- } else {
- r.Log.V(1).Info("Get Engine from engine map")
- }
-
- return engine, err
-}
-
-// RemoveEngine removes the engine
-func (r *RuntimeReconciler) RemoveEngine(ctx cruntime.ReconcileRequestContext) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- id := ddc.GenerateEngineID(ctx.NamespacedName)
- delete(r.engines, id)
-}
diff --git a/pkg/controllers/v1alpha1/goosefs/suite_test.go b/pkg/controllers/v1alpha1/goosefs/suite_test.go
deleted file mode 100644
index f23eb760cf1..00000000000
--- a/pkg/controllers/v1alpha1/goosefs/suite_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "os"
- "path/filepath"
- "testing"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/envtest"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
- "sigs.k8s.io/controller-runtime/pkg/log/zap"
- // +kubebuilder:scaffold:imports
-)
-
-// These tests use Ginkgo (BDD-style Go testing framework). Refer to
-// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
-
-var cfg *rest.Config
-var k8sClient client.Client
-var testEnv *envtest.Environment
-var useExistingCluster = false
-
-func TestAPIs(t *testing.T) {
- RegisterFailHandler(Fail)
-
- RunSpecs(t,
- "Controller Suite")
-}
-
-var _ = BeforeSuite(func(done Done) {
- logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
- if env := os.Getenv("USE_EXISTING_CLUSTER"); env == "true" {
- useExistingCluster = true
- }
-
- By("bootstrapping test environment")
- testEnv = &envtest.Environment{
- CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crd", "bases")},
- }
-
- var err error
- cfg, err = testEnv.Start()
- Expect(err).ToNot(HaveOccurred())
- Expect(cfg).ToNot(BeNil())
-
- err = datav1alpha1.AddToScheme(scheme.Scheme)
- Expect(err).NotTo(HaveOccurred())
-
- // +kubebuilder:scaffold:scheme
-
- k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
- Expect(err).ToNot(HaveOccurred())
- Expect(k8sClient).ToNot(BeNil())
-
- close(done)
-}, 60)
-
-var _ = AfterSuite(func() {
- By("tearing down the test environment")
- err := testEnv.Stop()
- Expect(err).ToNot(HaveOccurred())
-})
diff --git a/pkg/controllers/v1alpha1/thinruntime/thinruntime_controller.go b/pkg/controllers/v1alpha1/thinruntime/thinruntime_controller.go
index 97b84742eaa..ea5e74df106 100644
--- a/pkg/controllers/v1alpha1/thinruntime/thinruntime_controller.go
+++ b/pkg/controllers/v1alpha1/thinruntime/thinruntime_controller.go
@@ -146,7 +146,6 @@ func NewCache() cache.Options {
common.AlluxioRuntime,
common.JindoRuntime,
common.JuiceFSRuntime,
- common.GooseFSRuntime,
common.EFCRuntime,
})
if err != nil {
diff --git a/pkg/csi/updatedbconf/register_test.go b/pkg/csi/updatedbconf/register_test.go
index 8bf727dbe93..315c6a636fa 100644
--- a/pkg/csi/updatedbconf/register_test.go
+++ b/pkg/csi/updatedbconf/register_test.go
@@ -230,12 +230,11 @@ PRUNEPATHS = "/tmp"`
Context("with multiple fuse filesystems", func() {
It("should handle multiple fuse types", func() {
- content := `PRUNEFS = "9p afs fuse.alluxio fuse.jindofs fuse.juicefs fuse.goosefs"`
+ content := `PRUNEFS = "9p afs fuse.alluxio fuse.jindofs fuse.juicefs`
Expect(content).To(ContainSubstring("fuse.alluxio"))
Expect(content).To(ContainSubstring("fuse.jindofs"))
Expect(content).To(ContainSubstring("fuse.juicefs"))
- Expect(content).To(ContainSubstring("fuse.goosefs"))
})
})
diff --git a/pkg/csi/updatedbconf/updatedbconf_test.go b/pkg/csi/updatedbconf/updatedbconf_test.go
index bcbd40f2e63..ede64dacb42 100644
--- a/pkg/csi/updatedbconf/updatedbconf_test.go
+++ b/pkg/csi/updatedbconf/updatedbconf_test.go
@@ -111,11 +111,11 @@ var _ = Describe("updateConfig", func() {
content := `PRUNE_BIND_MOUNTS="yes"
PRUNEPATHS="/tmp /var/spool /media /var/lib/os-prober /var/lib/ceph /home/.ecryptfs /var/lib/schroot"
PRUNEFS="foo bar"`
- newFs := []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "JuiceFS", "fuse.goosefs-fuse"}
+ newFs := []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "JuiceFS"}
newPaths := []string{"/runtime-mnt"}
want := `PRUNE_BIND_MOUNTS="yes"
PRUNEPATHS="/tmp /var/spool /media /var/lib/os-prober /var/lib/ceph /home/.ecryptfs /var/lib/schroot /runtime-mnt"
-PRUNEFS="foo bar fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS fuse.goosefs-fuse"`
+PRUNEFS="foo bar fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS"`
got, err := updateConfig(content, newFs, newPaths)
Expect(err).ToNot(HaveOccurred())
@@ -128,11 +128,11 @@ PRUNEFS="foo bar fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS fuse.goosefs-fuse"`
content := `PRUNE_BIND_MOUNTS="yes"
PRUNEPATHS="/tmp /var/spool /media /var/lib/os-prober /var/lib/ceph /home/.ecryptfs /var/lib/schroot /runtime-mnt"
PRUNEFS="foo bar"`
- newFs := []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "JuiceFS", "fuse.goosefs-fuse"}
+ newFs := []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "JuiceFS"}
newPaths := []string{"/runtime-mnt"}
want := `PRUNE_BIND_MOUNTS="yes"
PRUNEPATHS="/tmp /var/spool /media /var/lib/os-prober /var/lib/ceph /home/.ecryptfs /var/lib/schroot /runtime-mnt"
-PRUNEFS="foo bar fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS fuse.goosefs-fuse"`
+PRUNEFS="foo bar fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS"`
got, err := updateConfig(content, newFs, newPaths)
Expect(err).ToNot(HaveOccurred())
@@ -143,10 +143,10 @@ PRUNEFS="foo bar fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS fuse.goosefs-fuse"`
Context("when path or fs config is empty", func() {
It("should add new configuration lines", func() {
content := `PRUNE_BIND_MOUNTS="yes"`
- newFs := []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "JuiceFS", "fuse.goosefs-fuse"}
+ newFs := []string{"fuse.alluxio-fuse", "fuse.jindofs-fuse", "JuiceFS"}
newPaths := []string{"/runtime-mnt"}
want := `PRUNE_BIND_MOUNTS="yes"
-PRUNEFS="fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS fuse.goosefs-fuse"
+PRUNEFS="fuse.alluxio-fuse fuse.jindofs-fuse JuiceFS"
PRUNEPATHS="/runtime-mnt"`
got, err := updateConfig(content, newFs, newPaths)
diff --git a/pkg/ctrl/fuse_test.go b/pkg/ctrl/fuse_test.go
index 4025d836405..f4c3d41ceea 100644
--- a/pkg/ctrl/fuse_test.go
+++ b/pkg/ctrl/fuse_test.go
@@ -286,80 +286,6 @@ var _ = Describe("Ctrl Helper Fuse Tests", Label("pkg.ctrl.fuse_test.go"), func(
})
})
- Context("when cleaning up GooseFS runtime with no matching labels", func() {
- BeforeEach(func() {
- name = "hbase"
- namespace = "fluid"
- runtimeType = "goosefs"
- wantedNodeLabels = map[string]map[string]string{
- "no-fuse": {},
- "multiple-fuse": {
- "fluid.io/f-fluid-spark": "true",
- "node-select": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/f-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- },
- "fuse": {
- "fluid.io/dataset-num": "1",
- "fluid.io/f-fluid-spark": "true",
- "node-select": "true",
- },
- }
- nodeInputs = []*corev1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "no-fuse",
- Labels: map[string]string{},
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "multiple-fuse",
- Labels: map[string]string{
- "fluid.io/f-fluid-spark": "true",
- "node-select": "true",
- "fluid.io/f-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "fuse",
- Labels: map[string]string{
- "fluid.io/dataset-num": "1",
- "fluid.io/f-fluid-spark": "true",
- "node-select": "true",
- },
- },
- },
- }
- })
-
- It("should not clean up any fuse labels", func() {
- count, err := h.CleanUpFuse()
- Expect(err).NotTo(HaveOccurred())
- Expect(count).To(Equal(0))
-
- nodeList := &corev1.NodeList{}
- err = fakeClient.List(context.TODO(), nodeList, &client.ListOptions{})
- Expect(err).NotTo(HaveOccurred())
-
- for _, node := range nodeList.Items {
- Expect(node.Labels).To(HaveLen(len(wantedNodeLabels[node.Name])))
- if len(node.Labels) != 0 {
- Expect(node.Labels).To(Equal(wantedNodeLabels[node.Name]))
- }
- }
- })
- })
-
Context("edge cases", func() {
When("there are no nodes", func() {
BeforeEach(func() {
diff --git a/pkg/databackup/constants.go b/pkg/databackup/constants.go
index 5daf741452c..b4a7270fd3d 100644
--- a/pkg/databackup/constants.go
+++ b/pkg/databackup/constants.go
@@ -19,7 +19,6 @@ package databackup
const (
Finalizer = "fluid-databackup-controller-finalizer"
AlluxioBackupPathPod = "/alluxio_backups"
- GooseFSBackupPathPod = "/goosefs_backups"
DatabackupChart = "fluid-databackup"
BackupLocationPath = "BackupLocationPath"
diff --git a/pkg/ddc/base/runtime.go b/pkg/ddc/base/runtime.go
index f80da6e8f16..cb661b19bfd 100644
--- a/pkg/ddc/base/runtime.go
+++ b/pkg/ddc/base/runtime.go
@@ -457,22 +457,6 @@ func GetRuntimeInfo(reader client.Reader, name, namespace string) (runtimeInfo R
}
runtimeInfo.SetFuseNodeSelector(jindoRuntime.Spec.Fuse.NodeSelector)
runtimeInfo.SetupFuseCleanPolicy(jindoRuntime.Spec.Fuse.CleanPolicy)
- case common.GooseFSRuntime:
- goosefsRuntime, err := utils.GetGooseFSRuntime(reader, name, namespace)
- if err != nil {
- return runtimeInfo, err
- }
- opts := []RuntimeInfoOption{
- WithTieredStore(datav1alpha1.TieredStore{}),
- WithMetadataList(GetMetadataListFromAnnotation(goosefsRuntime)),
- WithAnnotations(goosefsRuntime.Annotations),
- }
- runtimeInfo, err = BuildRuntimeInfo(name, namespace, common.GooseFSRuntime, opts...)
- if err != nil {
- return runtimeInfo, err
- }
- runtimeInfo.SetFuseNodeSelector(goosefsRuntime.Spec.Fuse.NodeSelector)
- runtimeInfo.SetupFuseCleanPolicy(goosefsRuntime.Spec.Fuse.CleanPolicy)
case common.JuiceFSRuntime:
juicefsRuntime, err := utils.GetJuiceFSRuntime(reader, name, namespace)
if err != nil {
@@ -563,12 +547,6 @@ func GetRuntimeStatus(client client.Client, runtimeType, name, namespace string)
return status, err
}
return &runtime.Status, nil
- case common.GooseFSRuntime:
- runtime, err := utils.GetGooseFSRuntime(client, name, namespace)
- if err != nil {
- return status, err
- }
- return &runtime.Status, nil
case common.JuiceFSRuntime:
runtime, err := utils.GetJuiceFSRuntime(client, name, namespace)
if err != nil {
diff --git a/pkg/ddc/base/runtime_test.go b/pkg/ddc/base/runtime_test.go
index a1a544eb7ef..cd0bbc7fcaf 100644
--- a/pkg/ddc/base/runtime_test.go
+++ b/pkg/ddc/base/runtime_test.go
@@ -217,7 +217,6 @@ var _ = Describe("CleanPolicy", func() {
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.AlluxioRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.JindoRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.JuiceFSRuntime{})
- s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.GooseFSRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.Dataset{})
// Test Alluxio Runtime
@@ -484,94 +483,6 @@ var _ = Describe("CleanPolicy", func() {
juiceRuntimeObjs = append(juiceRuntimeObjs, &juiceRuntimeOnDemandCleanPolicy, &dataJuiceOnDemandCleanPolicy)
juiceRuntimeObjs = append(juiceRuntimeObjs, &juiceRuntimeOnRuntimeDeletedCleanPolicy, &dataJuiceOnRuntimeDeletedCleanPolicy)
- // Test GooseFs Runtime
- goosefsRuntimeDefaultCleanPolicy := v1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "default_policy_goosefs",
- Namespace: "default",
- },
- Spec: v1alpha1.GooseFSRuntimeSpec{
- Fuse: v1alpha1.GooseFSFuseSpec{},
- },
- }
-
- dataGooseFSDefaultCleanPolicy := v1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "default_policy_goosefs",
- Namespace: "default",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Name: "default_policy_goosefs",
- Namespace: "default",
- Type: common.GooseFSRuntime,
- },
- },
- },
- }
-
- goosefsRuntimeOnDemandCleanPolicy := v1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "on_demand_policy_goosefs",
- Namespace: "default",
- },
- Spec: v1alpha1.GooseFSRuntimeSpec{
- Fuse: v1alpha1.GooseFSFuseSpec{
- CleanPolicy: v1alpha1.OnDemandCleanPolicy,
- },
- },
- }
-
- dataGooseFSOnDemandCleanPolicy := v1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "on_demand_policy_goosefs",
- Namespace: "default",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Name: "on_demand_policy_goosefs",
- Namespace: "default",
- Type: common.GooseFSRuntime,
- },
- },
- },
- }
-
- goosefsRuntimeOnRuntimeDeletedCleanPolicy := v1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "on_runtime_deleted_policy_goosefs",
- Namespace: "default",
- },
- Spec: v1alpha1.GooseFSRuntimeSpec{
- Fuse: v1alpha1.GooseFSFuseSpec{
- CleanPolicy: v1alpha1.OnRuntimeDeletedCleanPolicy,
- },
- },
- }
-
- dataGooseFSOnRuntimeDeletedCleanPolicy := v1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "on_runtime_deleted_policy_goosefs",
- Namespace: "default",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Name: "on_runtime_deleted_policy_goosefs",
- Namespace: "default",
- Type: common.GooseFSRuntime,
- },
- },
- },
- }
-
- goosefsRuntimeObjs := []runtime.Object{}
- goosefsRuntimeObjs = append(goosefsRuntimeObjs, &goosefsRuntimeDefaultCleanPolicy, &dataGooseFSDefaultCleanPolicy)
- goosefsRuntimeObjs = append(goosefsRuntimeObjs, &goosefsRuntimeOnDemandCleanPolicy, &dataGooseFSOnDemandCleanPolicy)
- goosefsRuntimeObjs = append(goosefsRuntimeObjs, &goosefsRuntimeOnRuntimeDeletedCleanPolicy, &dataGooseFSOnRuntimeDeletedCleanPolicy)
-
type args struct {
client client.Client
name string
@@ -736,57 +647,6 @@ var _ = Describe("CleanPolicy", func() {
},
wantErr: false,
},
- {
- name: "default_test_goosefs",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "default_policy_goosefs",
- namespace: "default",
- },
- want: &RuntimeInfo{
- name: "default_policy_goosefs",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- fuse: Fuse{
- CleanPolicy: v1alpha1.OnRuntimeDeletedCleanPolicy,
- },
- },
- wantErr: false,
- },
- {
- name: "on_demand_test_goosefs",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "on_demand_policy_goosefs",
- namespace: "default",
- },
- want: &RuntimeInfo{
- name: "on_demand_policy_goosefs",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- fuse: Fuse{
- CleanPolicy: v1alpha1.OnDemandCleanPolicy,
- },
- },
- wantErr: false,
- },
- {
- name: "on_runtime_deleted_test-goosefs",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "on_runtime_deleted_policy_goosefs",
- namespace: "default",
- },
- want: &RuntimeInfo{
- name: "on_runtime_deleted_policy_goosefs",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- fuse: Fuse{
- CleanPolicy: v1alpha1.OnRuntimeDeletedCleanPolicy,
- },
- },
- wantErr: false,
- },
}
for _, tt := range tests {
@@ -829,29 +689,6 @@ var _ = Describe("GetRuntimeInfo", func() {
},
}
- goosefsRuntime := v1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "goosefs",
- Namespace: "default",
- },
- }
-
- dataGooseFS := v1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "goosefs",
- Namespace: "default",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Name: "goosefs",
- Namespace: "default",
- Type: common.GooseFSRuntime,
- },
- },
- },
- }
-
jindoRuntime := v1alpha1.JindoRuntime{
ObjectMeta: metav1.ObjectMeta{
Name: "jindo",
@@ -949,20 +786,17 @@ var _ = Describe("GetRuntimeInfo", func() {
},
}
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.AlluxioRuntime{})
- s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.GooseFSRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.JindoRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.JuiceFSRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.EFCRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.Dataset{})
_ = v1.AddToScheme(s)
alluxioRuntimeObjs := []runtime.Object{}
- goosefsRuntimeObjs := []runtime.Object{}
jindoRuntimeObjs := []runtime.Object{}
juicefsRuntimeObjs := []runtime.Object{}
efcRuntimeObjs := []runtime.Object{}
alluxioRuntimeObjs = append(alluxioRuntimeObjs, &alluxioRuntime, &dataAlluxio)
- goosefsRuntimeObjs = append(goosefsRuntimeObjs, &goosefsRuntime, &dataGooseFS)
jindoRuntimeObjs = append(jindoRuntimeObjs, &jindoRuntime, &dataJindo)
for _, jfsRuntime := range juicefsRuntimes {
juicefsRuntimeObjs = append(juicefsRuntimeObjs, &jfsRuntime)
@@ -999,40 +833,6 @@ var _ = Describe("GetRuntimeInfo", func() {
},
wantErr: false,
},
- {
- name: "goosefs_test",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "goosefs",
- namespace: "default",
- },
- want: &RuntimeInfo{
- name: "goosefs",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- fuse: Fuse{
- CleanPolicy: v1alpha1.OnRuntimeDeletedCleanPolicy,
- },
- },
- wantErr: false,
- },
- {
- name: "goosefs_test_fake",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "goosefs-fake",
- namespace: "default",
- },
- want: &RuntimeInfo{
- name: "goosefs-fake",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- fuse: Fuse{
- CleanPolicy: v1alpha1.OnDemandCleanPolicy,
- },
- },
- wantErr: true,
- },
{
name: "jindo_test",
args: args{
@@ -1170,13 +970,6 @@ var _ = Describe("GetRuntimeStatus", func() {
},
}
- goosefsRuntime := v1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "goosefs",
- Namespace: "default",
- },
- }
-
jindoRuntime := v1alpha1.JindoRuntime{
ObjectMeta: metav1.ObjectMeta{
Name: "jindo",
@@ -1206,7 +999,6 @@ var _ = Describe("GetRuntimeStatus", func() {
}
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.AlluxioRuntime{})
- s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.GooseFSRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.JindoRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.JuiceFSRuntime{})
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.EFCRuntime{})
@@ -1215,14 +1007,12 @@ var _ = Describe("GetRuntimeStatus", func() {
_ = v1.AddToScheme(s)
alluxioRuntimeObjs := []runtime.Object{}
- goosefsRuntimeObjs := []runtime.Object{}
jindoRuntimeObjs := []runtime.Object{}
juicefsRuntimeObjs := []runtime.Object{}
efcRuntimeObjs := []runtime.Object{}
thinRuntimeObjs := []runtime.Object{}
alluxioRuntimeObjs = append(alluxioRuntimeObjs, &alluxioRuntime)
- goosefsRuntimeObjs = append(goosefsRuntimeObjs, &goosefsRuntime)
jindoRuntimeObjs = append(jindoRuntimeObjs, &jindoRuntime)
juicefsRuntimeObjs = append(juicefsRuntimeObjs, &juicefsRuntime)
efcRuntimeObjs = append(efcRuntimeObjs, &efcRuntime)
@@ -1258,26 +1048,6 @@ var _ = Describe("GetRuntimeStatus", func() {
},
wantErr: true,
},
- {
- name: "goosefs_test",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "goosefs",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- },
- wantErr: false,
- },
- {
- name: "goosefs_test_error",
- args: args{
- client: fakeutils.NewFakeClientWithScheme(s, goosefsRuntimeObjs...),
- name: "goosefs-error",
- namespace: "default",
- runtimeType: common.GooseFSRuntime,
- },
- wantErr: true,
- },
{
name: "jindo_test",
args: args{
diff --git a/pkg/ddc/factory.go b/pkg/ddc/factory.go
index e076d11476b..eb2f6c5dca1 100644
--- a/pkg/ddc/factory.go
+++ b/pkg/ddc/factory.go
@@ -20,7 +20,6 @@ import (
"github.com/fluid-cloudnative/fluid/pkg/ddc/alluxio"
"github.com/fluid-cloudnative/fluid/pkg/ddc/base"
"github.com/fluid-cloudnative/fluid/pkg/ddc/efc"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs"
"github.com/fluid-cloudnative/fluid/pkg/ddc/jindo"
"github.com/fluid-cloudnative/fluid/pkg/ddc/jindocache"
"github.com/fluid-cloudnative/fluid/pkg/ddc/jindofsx"
@@ -43,7 +42,6 @@ func init() {
common.JindoFSEngineImpl: jindo.Build,
common.JindoFSxEngineImpl: jindofsx.Build,
common.JindoCacheEngineImpl: jindocache.Build,
- common.GooseFSEngineImpl: goosefs.Build,
common.JuiceFSEngineImpl: juicefs.Build,
common.ThinEngineImpl: thin.Build,
common.EFCEngineImpl: efc.Build,
diff --git a/pkg/ddc/goosefs/api_gateway.go b/pkg/ddc/goosefs/api_gateway.go
deleted file mode 100644
index a19c2ce5f8f..00000000000
--- a/pkg/ddc/goosefs/api_gateway.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
-)
-
-// Query the API Gateway status
-// GetAPIGatewayStatus retrieves the API Gateway status for GooseFS engine
-// This method attempts to obtain the HCFS endpoint through queryAPIGatewayEndpoint(),
-// constructs and returns an APIGatewayStatus object containing the endpoint information.
-// Returns:
-// - *datav1alpha1.APIGatewayStatus: Contains the API Gateway endpoint if successfully retrieved
-// - error: Returns error details if failed to query the endpoint
-// Error Handling:
-// - Logs and returns error when endpoint query operation fails
-func (e *GooseFSEngine) GetAPIGatewayStatus() (status *datav1alpha1.APIGatewayStatus, err error) {
- endpoint, err := e.queryAPIGatewayEndpoint()
- if err != nil {
- e.Log.Error(err, "Failed to get HCFS Endpoint")
- return status, err
- }
-
- status = &datav1alpha1.APIGatewayStatus{
- Endpoint: endpoint,
- }
- return
-}
-
-// query the api endpoint
-func (e *GooseFSEngine) queryAPIGatewayEndpoint() (endpoint string, err error) {
-
- var (
- serviceName = fmt.Sprintf("%s-master-0", e.name)
- host = fmt.Sprintf("%s.%s", serviceName, e.namespace)
- )
-
- svc, err := kubeclient.GetServiceByName(e.Client, serviceName, e.namespace)
- if err != nil {
- e.Log.Error(err, "Failed to get Endpoint")
- return endpoint, err
- }
-
- if svc == nil {
- e.Log.Error(fmt.Errorf("failed to find the svc %s in %s", e.name, e.namespace), "failed to find the svc, it's nil")
- return
- }
-
- for _, port := range svc.Spec.Ports {
- if port.Name == "rest" {
- endpoint = fmt.Sprintf("%s:%d", host, port.Port)
- return
- }
- }
-
- return
-}
diff --git a/pkg/ddc/goosefs/api_gateway_test.go b/pkg/ddc/goosefs/api_gateway_test.go
deleted file mode 100644
index 89bd2d58b8b..00000000000
--- a/pkg/ddc/goosefs/api_gateway_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
-Copyright 2022 The Fluid Author.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func mockGooseFSEngineWithClient(name, ns string, port int32) *GooseFSEngine {
- var mockClient client.Client
-
- mockSvc := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-master-0", name),
- Namespace: ns,
- },
- Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: "rest",
- Port: port,
- },
- },
- },
- }
-
- mockClient = fake.NewFakeClient(mockSvc)
-
- e := &GooseFSEngine{
- Client: mockClient,
- name: name,
- namespace: ns,
- }
- return e
-}
-
-var _ = Describe("APIGateway", func() {
- endpointFormat := "%s-master-0.%s:%d"
-
- type testCase struct {
- engineName string
- engineNamespace string
- port int32
- wantEndpoint string
- }
-
- Describe("GetAPIGatewayStatus", func() {
- DescribeTable("should return correct API gateway status",
- func(tc testCase) {
- e := mockGooseFSEngineWithClient(tc.engineName, tc.engineNamespace, tc.port)
- got, err := e.GetAPIGatewayStatus()
-
- Expect(err).NotTo(HaveOccurred())
- expectedStatus := &datav1alpha1.APIGatewayStatus{
- Endpoint: fmt.Sprintf(endpointFormat, tc.engineName, tc.engineNamespace, tc.port),
- }
- Expect(got).To(Equal(expectedStatus))
- },
- Entry("fluid engine in default namespace",
- testCase{
- engineName: "fluid",
- engineNamespace: "default",
- port: 8080,
- wantEndpoint: fmt.Sprintf(endpointFormat, "fluid", "default", 8080),
- },
- ),
- Entry("demo engine in fluid-system namespace",
- testCase{
- engineName: "demo",
- engineNamespace: common.NamespaceFluidSystem,
- port: 80,
- wantEndpoint: fmt.Sprintf(endpointFormat, "demo", common.NamespaceFluidSystem, 80),
- },
- ),
- )
- })
-
- Describe("queryAPIGatewayEndpoint", func() {
- DescribeTable("should return correct endpoint",
- func(tc testCase) {
- e := mockGooseFSEngineWithClient(tc.engineName, tc.engineNamespace, tc.port)
- got, err := e.queryAPIGatewayEndpoint()
-
- Expect(err).NotTo(HaveOccurred())
- Expect(got).To(Equal(tc.wantEndpoint))
- },
- Entry("fluid engine in default namespace",
- testCase{
- engineName: "fluid",
- engineNamespace: "default",
- port: 8080,
- wantEndpoint: fmt.Sprintf(endpointFormat, "fluid", "default", 8080),
- },
- ),
- Entry("demo engine in fluid-system namespace",
- testCase{
- engineName: "demo",
- engineNamespace: common.NamespaceFluidSystem,
- port: 80,
- wantEndpoint: fmt.Sprintf(endpointFormat, "demo", common.NamespaceFluidSystem, 80),
- },
- ),
- )
- })
-})
diff --git a/pkg/ddc/goosefs/backup_data.go b/pkg/ddc/goosefs/backup_data.go
deleted file mode 100644
index 22053884bd6..00000000000
--- a/pkg/ddc/goosefs/backup_data.go
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- cdatabackup "github.com/fluid-cloudnative/fluid/pkg/databackup"
- "github.com/fluid-cloudnative/fluid/pkg/dataflow"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/docker"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "gopkg.in/yaml.v2"
- "os"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "strconv"
- "strings"
-)
-
-// generateDataBackupValueFile builds a DataBackupValueFile by extracted specifications from the given DataBackup, and
-// marshals the DataBackup to a temporary yaml file where stores values that'll be used by fluid dataBackup helm chart
-func (e *GooseFSEngine) generateDataBackupValueFile(ctx cruntime.ReconcileRequestContext, object client.Object) (valueFileName string, err error) {
- logger := ctx.Log.WithName("generateDataBackupValueFile")
-
- databackup, ok := object.(*datav1alpha1.DataBackup)
- if !ok {
- err = fmt.Errorf("object %v is not a DataLoad", object)
- return "", err
- }
-
- // get the runAs and initUsers imageInfo from runtime
- runtime, err := e.getRuntime()
- if err != nil {
- return "", err
- }
-
- masterPodName, containerName := e.getMasterPodInfo()
- if runtime.Spec.Replicas > 1 {
- fileUtils := operations.NewGooseFSFileUtils(masterPodName, containerName, runtime.GetNamespace(), ctx.Log)
- masterPodName, err = fileUtils.MasterPodName()
- if err != nil {
- return "", err
- }
- }
-
- masterPod, err := kubeclient.GetPodByName(e.Client, masterPodName, ctx.Namespace)
- if err != nil {
- return "", err
- }
-
- nodeName, ip, rpcPort := utils.GetAddressOfMaster(masterPod)
-
- var imageEnv, defaultImage string
-
- imageName, imageTag := docker.GetWorkerImage(e.Client, databackup.Spec.Dataset, common.GooseFSRuntime, databackup.Namespace)
- javaEnv := "-Dgoosefs.master.hostname=" + ip + " -Dgoosefs.master.rpc.port=" + strconv.Itoa(int(rpcPort))
-
- if len(imageName) == 0 {
- imageEnv = common.GooseFSRuntimeImageEnv
- defaultImage = common.DefaultGooseFSRuntimeImage
-
- imageName = docker.GetImageRepoFromEnv(imageEnv)
- if len(imageName) == 0 {
- defaultImageInfo := strings.Split(defaultImage, ":")
- if len(defaultImageInfo) < 1 {
- logger.Error(fmt.Errorf("ImageInfo"), "invalid default databackup image!")
- return
- } else {
- imageName = defaultImageInfo[0]
- }
- }
- }
-
- if len(imageTag) == 0 {
- imageEnv = common.GooseFSRuntimeImageEnv
- defaultImage = common.DefaultGooseFSRuntimeImage
-
- imageTag = docker.GetImageTagFromEnv(imageEnv)
- if len(imageTag) == 0 {
- defaultImageInfo := strings.Split(defaultImage, ":")
- if len(defaultImageInfo) < 1 {
- logger.Error(fmt.Errorf("ImageInfo"), "invalid default databackup image!")
- return
- } else {
- imageTag = defaultImageInfo[1]
- }
- }
- }
-
- image := fmt.Sprintf("%s:%s", imageName, imageTag)
-
- workdir := os.Getenv("FLUID_WORKDIR")
- if workdir == "" {
- workdir = "/tmp"
- }
-
- // image pull secrets
- // if the environment variable is not set, it is still an empty slice
- imagePullSecrets := docker.GetImagePullSecretsFromEnv(common.EnvImagePullSecretsKey)
-
- dataBackup := cdatabackup.DataBackup{
- Namespace: databackup.Namespace,
- Dataset: databackup.Spec.Dataset,
- Name: databackup.Name,
- NodeName: nodeName,
- Image: image,
- JavaEnv: javaEnv,
- Workdir: workdir,
- RuntimeType: common.AlluxioRuntime,
- ImagePullSecrets: imagePullSecrets,
- }
- pvcName, path, err := utils.ParseBackupRestorePath(databackup.Spec.BackupPath)
- if err != nil {
- return
- }
- dataBackup.PVCName = pvcName
- dataBackup.Path = path
-
- // inject the node affinity by previous operation pod.
- dataBackup.Affinity, err = dataflow.InjectAffinityByRunAfterOp(e.Client, databackup.Spec.RunAfter, databackup.Namespace, nil)
- if err != nil {
- return "", err
- }
-
- dataBackupValue := cdatabackup.DataBackupValue{DataBackup: dataBackup}
-
- dataBackupValue.InitUsers = common.InitUsers{
- Enabled: false,
- }
-
- var runAs = runtime.Spec.RunAs
- // databackup.Spec.RunAs > runtime.Spec.RunAs > root
- if databackup.Spec.RunAs != nil {
- runAs = databackup.Spec.RunAs
- }
-
- if runAs != nil {
- dataBackupValue.UserInfo.User = int(*runAs.UID)
- dataBackupValue.UserInfo.Group = int(*runAs.GID)
- dataBackupValue.UserInfo.FSGroup = 0
- dataBackupValue.InitUsers = common.InitUsers{
- Enabled: true,
- EnvUsers: utils.GetInitUserEnv(runAs),
- Dir: utils.GetBackupUserDir(dataBackup.Namespace, dataBackup.Name),
- }
- }
-
- image = runtime.Spec.InitUsers.Image
- imageTag = runtime.Spec.InitUsers.ImageTag
- imagePullPolicy := runtime.Spec.InitUsers.ImagePullPolicy
-
- dataBackupValue.InitUsers.Image, dataBackupValue.InitUsers.ImageTag, dataBackupValue.InitUsers.ImagePullPolicy = docker.ParseInitImage(image, imageTag, imagePullPolicy, common.DefaultInitImageEnv)
-
- data, err := yaml.Marshal(dataBackupValue)
- if err != nil {
- return
- }
-
- valueFile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("%s-%s-%s-backuper-values.yaml", databackup.Namespace, databackup.Name, dataBackup.RuntimeType))
- if err != nil {
- return
- }
-
- err = os.WriteFile(valueFile.Name(), data, 0400)
- if err != nil {
- return
- }
-
- return valueFile.Name(), nil
-}
diff --git a/pkg/ddc/goosefs/backup_data_test.go b/pkg/ddc/goosefs/backup_data_test.go
deleted file mode 100644
index f5a3319384d..00000000000
--- a/pkg/ddc/goosefs/backup_data_test.go
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
-Copyright 2026 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "os"
- "reflect"
-
- . "github.com/agiledragon/gomonkey/v2"
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
- "gopkg.in/yaml.v2"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- cdatabackup "github.com/fluid-cloudnative/fluid/pkg/databackup"
- "github.com/fluid-cloudnative/fluid/pkg/dataflow"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils/docker"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-const (
- testNamespace = "fluid"
- testDatasetName = "test-dataset"
- testBackupName = "test-backup"
- testMasterName = "test-dataset-master-0"
- testNodeName = "test-node"
- testHostIP = "192.168.1.100"
- testBackupPath = "pvc://backup-pvc/path"
- testMasterContainerName = "goosefs-master"
-)
-
-var _ = Describe("GooseFSEngine Data Backup", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{
- name: testDatasetName,
- namespace: testNamespace,
- Log: fake.NullLogger(),
- }
- })
-
- Describe("generateDataBackupValueFile", func() {
- Context("when object is not a DataBackup", func() {
- It("should return an error", func() {
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- }
-
- valueFileName, err := engine.generateDataBackupValueFile(ctx, &datav1alpha1.Dataset{})
- Expect(err).To(HaveOccurred())
- Expect(valueFileName).To(BeEmpty())
- })
- })
-
- Context("when runtime is not found", func() {
- It("should return an error", func() {
- testObjs := []runtime.Object{}
- fakeClient := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- },
- }
-
- valueFileName, err := engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).To(HaveOccurred())
- Expect(valueFileName).To(BeEmpty())
- })
- })
-
- Context("when HA runtime MasterPodName fails", func() {
- It("should return an error", func() {
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 3,
- },
- }
-
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy()}
- fakeClient := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- },
- }
-
- goosefsFileUtils := operations.GooseFSFileUtils{}
- patch := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "MasterPodName", func(_ operations.GooseFSFileUtils) (string, error) {
- return "", fmt.Errorf("mock error")
- })
- defer patch.Reset()
-
- valueFileName, err := engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).To(HaveOccurred())
- Expect(valueFileName).To(BeEmpty())
- })
- })
-
- Context("when backup path is invalid", func() {
- var (
- fakeClient client.Client
- patch *Patches
- )
-
- BeforeEach(func() {
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- patch = ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "goosefs", "v1.0"
- })
- })
-
- AfterEach(func() {
- patch.Reset()
- })
-
- It("should return an error", func() {
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: "invalid-path",
- },
- }
-
- valueFileName, err := engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).To(HaveOccurred())
- Expect(valueFileName).To(BeEmpty())
- })
- })
-
- Context("when affinity injection fails", func() {
- var (
- fakeClient client.Client
- patch *Patches
- patch2 *Patches
- )
-
- BeforeEach(func() {
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- patch = ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "goosefs", "v1.0"
- })
-
- patch2 = ApplyFunc(dataflow.InjectAffinityByRunAfterOp, func(_ client.Client, _ *datav1alpha1.OperationRef, _ string, _ *corev1.Affinity) (*corev1.Affinity, error) {
- return nil, fmt.Errorf("mock affinity error")
- })
- })
-
- AfterEach(func() {
- patch.Reset()
- patch2.Reset()
- })
-
- It("should return an error", func() {
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- RunAfter: &datav1alpha1.OperationRef{
- ObjectRef: datav1alpha1.ObjectRef{
- Kind: "DataLoad",
- Name: "nonexistent",
- },
- AffinityStrategy: datav1alpha1.AffinityStrategy{
- Policy: datav1alpha1.RequireAffinityStrategy,
- },
- },
- },
- }
-
- valueFileName, err := engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).To(HaveOccurred())
- Expect(valueFileName).To(BeEmpty())
- })
- })
- })
-})
-
-var _ = Describe("GooseFSEngine Data Backup Success Cases", func() {
- var (
- engine *GooseFSEngine
- fakeClient client.Client
- patch *Patches
- )
-
- BeforeEach(func() {
- engine = &GooseFSEngine{
- name: testDatasetName,
- namespace: testNamespace,
- Log: fake.NullLogger(),
- }
- })
-
- Describe("generateDataBackupValueFile with valid configuration", func() {
- Context("when runtime has RunAs and InitUsers configured", func() {
- var valueFileName string
-
- BeforeEach(func() {
- uid := int64(1000)
- gid := int64(1000)
-
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- RunAs: &datav1alpha1.User{
- UID: &uid,
- GID: &gid,
- },
- InitUsers: datav1alpha1.InitUsersSpec{
- Image: "init-users",
- ImageTag: "v1.0",
- ImagePullPolicy: "IfNotPresent",
- },
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- patch = ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "goosefs", "v1.0"
- })
- })
-
- AfterEach(func() {
- patch.Reset()
- if valueFileName != "" {
- _ = os.Remove(valueFileName)
- }
- })
-
- It("should generate a valid value file", func() {
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- },
- }
-
- var err error
- valueFileName, err = engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).NotTo(HaveOccurred())
- Expect(valueFileName).NotTo(BeEmpty())
- Expect(valueFileName).To(ContainSubstring("backuper-values.yaml"))
-
- content, readErr := os.ReadFile(valueFileName)
- Expect(readErr).NotTo(HaveOccurred())
- Expect(content).NotTo(BeEmpty())
- })
- })
-
- Context("when databackup has its own RunAs", func() {
- var valueFileName string
-
- BeforeEach(func() {
- runtimeUID := int64(1000)
- runtimeGID := int64(1000)
-
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- RunAs: &datav1alpha1.User{
- UID: &runtimeUID,
- GID: &runtimeGID,
- },
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- patch = ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "goosefs", "v1.0"
- })
- })
-
- AfterEach(func() {
- patch.Reset()
- if valueFileName != "" {
- _ = os.Remove(valueFileName)
- }
- })
-
- It("should use databackup RunAs over runtime RunAs", func() {
- backupUID := int64(2000)
- backupGID := int64(2000)
-
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: "local:///backup/path",
- RunAs: &datav1alpha1.User{
- UID: &backupUID,
- GID: &backupGID,
- },
- },
- }
-
- var err error
- valueFileName, err = engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).NotTo(HaveOccurred())
- Expect(valueFileName).NotTo(BeEmpty())
-
- content, readErr := os.ReadFile(valueFileName)
- Expect(readErr).NotTo(HaveOccurred())
-
- var dataBackupValue cdatabackup.DataBackupValue
- unmarshalErr := yaml.Unmarshal(content, &dataBackupValue)
- Expect(unmarshalErr).NotTo(HaveOccurred())
-
- Expect(dataBackupValue.UserInfo.User).To(Equal(int(backupUID)))
- Expect(dataBackupValue.UserInfo.Group).To(Equal(int(backupGID)))
- })
- })
-
- Context("when runtime has HA replicas", func() {
- var (
- valueFileName string
- masterPatch *Patches
- )
-
- BeforeEach(func() {
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 3,
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- goosefsFileUtils := operations.GooseFSFileUtils{}
- masterPatch = ApplyMethod(reflect.TypeOf(goosefsFileUtils), "MasterPodName", func(_ operations.GooseFSFileUtils) (string, error) {
- return testMasterName, nil
- })
-
- patch = ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "goosefs", "v1.0"
- })
- })
-
- AfterEach(func() {
- masterPatch.Reset()
- patch.Reset()
- if valueFileName != "" {
- _ = os.Remove(valueFileName)
- }
- })
-
- It("should successfully generate value file", func() {
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- },
- }
-
- var err error
- valueFileName, err = engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).NotTo(HaveOccurred())
- Expect(valueFileName).NotTo(BeEmpty())
- })
- })
-
- Context("when worker image is not configured", func() {
- var valueFileName string
-
- BeforeEach(func() {
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- patch = ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "", ""
- })
- })
-
- AfterEach(func() {
- patch.Reset()
- if valueFileName != "" {
- _ = os.Remove(valueFileName)
- }
- })
-
- It("should use default image", func() {
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- },
- }
-
- var err error
- valueFileName, err = engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).NotTo(HaveOccurred())
- Expect(valueFileName).NotTo(BeEmpty())
-
- content, readErr := os.ReadFile(valueFileName)
- Expect(readErr).NotTo(HaveOccurred())
-
- var dataBackupValue cdatabackup.DataBackupValue
- unmarshalErr := yaml.Unmarshal(content, &dataBackupValue)
- Expect(unmarshalErr).NotTo(HaveOccurred())
-
- Expect(dataBackupValue.DataBackup.Image).To(Equal(common.DefaultGooseFSRuntimeImage))
- })
- })
- })
-})
-
-func createMasterPod() *corev1.Pod {
- return &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: testMasterName,
- Namespace: testNamespace,
- },
- Spec: corev1.PodSpec{
- NodeName: testNodeName,
- Containers: []corev1.Container{
- {
- Name: testMasterContainerName,
- Ports: []corev1.ContainerPort{
- {
- Name: "rpc",
- ContainerPort: 19998,
- HostPort: 19998,
- },
- },
- },
- },
- },
- Status: corev1.PodStatus{
- HostIP: testHostIP,
- },
- }
-}
-
-var _ = Describe("GooseFSEngine Data Backup Value File Content", func() {
- It("should contain expected yaml structure", func() {
- uid := int64(1000)
- gid := int64(1000)
-
- goosefsRuntime := &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: testDatasetName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- RunAs: &datav1alpha1.User{
- UID: &uid,
- GID: &gid,
- },
- },
- }
-
- masterPod := createMasterPod()
- testObjs := []runtime.Object{goosefsRuntime.DeepCopy(), masterPod.DeepCopy()}
- fakeClient := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engine := &GooseFSEngine{
- name: testDatasetName,
- namespace: testNamespace,
- Log: fake.NullLogger(),
- Client: fakeClient,
- }
-
- patch := ApplyFunc(docker.GetWorkerImage, func(_ client.Client, _ string, _ string, _ string) (string, string) {
- return "goosefs", "v1.0"
- })
- defer patch.Reset()
-
- ctx := cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Client: fakeClient,
- NamespacedName: types.NamespacedName{
- Namespace: testNamespace,
- Name: testDatasetName,
- },
- }
-
- databackup := &datav1alpha1.DataBackup{
- ObjectMeta: metav1.ObjectMeta{
- Name: testBackupName,
- Namespace: testNamespace,
- },
- Spec: datav1alpha1.DataBackupSpec{
- Dataset: testDatasetName,
- BackupPath: testBackupPath,
- },
- }
-
- valueFileName, err := engine.generateDataBackupValueFile(ctx, databackup)
- Expect(err).NotTo(HaveOccurred())
- Expect(valueFileName).NotTo(BeEmpty())
- defer func() { _ = os.Remove(valueFileName) }()
-
- content, readErr := os.ReadFile(valueFileName)
- Expect(readErr).NotTo(HaveOccurred())
-
- var dataBackupValue cdatabackup.DataBackupValue
- unmarshalErr := yaml.Unmarshal(content, &dataBackupValue)
- Expect(unmarshalErr).NotTo(HaveOccurred())
-
- Expect(dataBackupValue.DataBackup.Name).To(Equal(testBackupName))
- Expect(dataBackupValue.DataBackup.Namespace).To(Equal(testNamespace))
- Expect(dataBackupValue.DataBackup.Dataset).To(Equal(testDatasetName))
- Expect(dataBackupValue.UserInfo.User).To(Equal(int(1000)))
- Expect(dataBackupValue.UserInfo.Group).To(Equal(int(1000)))
- })
-})
diff --git a/pkg/ddc/goosefs/cache.go b/pkg/ddc/goosefs/cache.go
deleted file mode 100644
index 7367cfb9c58..00000000000
--- a/pkg/ddc/goosefs/cache.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "errors"
- "fmt"
- "time"
-
- "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
-)
-
-var (
- cachedPercentageFormat = "%.1f%%"
-)
-
-// queryCacheStatus checks the cache status
-func (e *GooseFSEngine) queryCacheStatus() (states cacheStates, err error) {
- // get goosefs fsadmin report summary
- summary, err := e.GetReportSummary()
- if err != nil {
- e.Log.Error(err, "Failed to get GooseFS summary when query cache status")
- return states, err
- }
-
- if len(summary) == 0 {
- return states, errors.New("GooseFS summary is empty")
- }
-
- // parse goosefs fsadmin report summary
- states = e.ParseReportSummary(summary)
-
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- e.Log.Error(err, "Failed to get dataset when query cache status")
- return states, err
- }
-
- e.patchDatasetStatus(dataset, &states)
-
- states.cacheHitStates = e.GetCacheHitStates()
-
- return states, nil
-
-}
-
-func (e GooseFSEngine) patchDatasetStatus(dataset *v1alpha1.Dataset, states *cacheStates) {
- // skip when `dataset.Status.UfsTotal` is empty
- if dataset.Status.UfsTotal == "" {
- return
- }
- // skip when `dataset.Status.UfsTotal` is "[Calculating]"
- if dataset.Status.UfsTotal == MetadataSyncNotDoneMsg {
- return
- }
-
- usedInBytes, _ := utils.FromHumanSize(states.cached)
- ufsTotalInBytes, _ := utils.FromHumanSize(dataset.Status.UfsTotal)
-
- states.cachedPercentage = fmt.Sprintf(cachedPercentageFormat, float64(usedInBytes)/float64(ufsTotalInBytes)*100.0)
-
-}
-
-// GetCacheHitStates gets cache hit related info by parsing GooseFS metrics
-func (e *GooseFSEngine) GetCacheHitStates() (cacheHitStates cacheHitStates) {
- // get cache hit states every 1 minute(CACHE_HIT_QUERY_INTERVAL_MIN * 20s)
- cacheHitStates.timestamp = time.Now()
- if e.lastCacheHitStates != nil && cacheHitStates.timestamp.Sub(e.lastCacheHitStates.timestamp).Minutes() < CacheHitQueryIntervalMin {
- return *e.lastCacheHitStates
- }
-
- metrics, err := e.GetReportMetrics()
- if err != nil {
- e.Log.Error(err, "Failed to get GooseFS metrics when get cache hit states")
- if e.lastCacheHitStates != nil {
- return *e.lastCacheHitStates
- }
- return
- }
-
- // refresh last cache hit states
- e.ParseReportMetric(metrics, &cacheHitStates, e.lastCacheHitStates)
-
- e.lastCacheHitStates = &cacheHitStates
- return
-}
-
-// invokeCleanCache cleans the cache for a specified path in the GooseFS cluster.
-// This function ensures that the master pod is ready before initiating the clean action.
-// If the master pod is not available or not ready, the function logs the issue and exits gracefully.
-// Otherwise, it proceeds to clean the cache using the GooseFS file utilities.
-func (e *GooseFSEngine) invokeCleanCache(path string) (err error) {
- // 1. Check if the master pod is ready. If not, log the status and return without performing any action.
- masterName := e.getMasterName()
- master, err := kubeclient.GetStatefulSet(e.Client, masterName, e.namespace)
- if err != nil {
- // Ignore "not found" errors and exit gracefully.
- if utils.IgnoreNotFound(err) == nil {
- e.Log.Info("Failed to get master", "err", err.Error())
- return nil
- }
- // Return other unexpected errors.
- return err
- }
- if master.Status.ReadyReplicas == 0 {
- e.Log.Info("The master is not ready, just skip clean cache.", "master", masterName)
- return nil
- } else {
- e.Log.Info("The master is ready, so start cleaning cache", "master", masterName)
- }
-
- // 2. Run the clean action using the GooseFS file utilities.
- podName, containerName := e.getMasterPodInfo()
- fileUitls := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- return fileUitls.CleanCache(path)
-}
diff --git a/pkg/ddc/goosefs/cache_test.go b/pkg/ddc/goosefs/cache_test.go
deleted file mode 100644
index 572ff1fac45..00000000000
--- a/pkg/ddc/goosefs/cache_test.go
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
- "testing"
-
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-
- . "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- . "github.com/smartystreets/goconvey/convey"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func TestQueryCacheStatus(t *testing.T) {
- Convey("test queryCacheStatus ", t, func() {
- Convey("with dataset UFSTotal is not empty ", func() {
- var engine *GooseFSEngine
- patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
- defer patch1.Reset()
-
- patch2 := ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "52.18MiB",
- },
- }
- return d, nil
- })
- defer patch2.Reset()
-
- patch3 := ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- }
- })
- defer patch3.Reset()
-
- e := &GooseFSEngine{}
- got, err := e.queryCacheStatus()
- want := cacheStates{
- cacheCapacity: "19.07MiB",
- cached: "0.00B",
- cachedPercentage: "0.0%",
- cacheHitStates: cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- },
- }
-
- So(got, ShouldResemble, want)
- So(err, ShouldEqual, nil)
- })
-
- Convey("with dataset UFSTotal is: [Calculating]", func() {
- var engine *GooseFSEngine
- patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
- defer patch1.Reset()
-
- patch2 := ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "[Calculating]",
- },
- }
- return d, nil
- })
- defer patch2.Reset()
-
- patch3 := ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{}
- })
- defer patch3.Reset()
-
- e := &GooseFSEngine{}
- got, err := e.queryCacheStatus()
- want := cacheStates{
- cacheCapacity: "19.07MiB",
- cached: "0.00B",
- }
-
- So(got, ShouldResemble, want)
- So(err, ShouldEqual, nil)
- })
-
- Convey("with dataset UFSTotal is empty", func() {
- var engine *GooseFSEngine
- patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
- defer patch1.Reset()
-
- patch2 := ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "",
- },
- }
- return d, nil
- })
- defer patch2.Reset()
-
- patch3 := ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{}
- })
- defer patch3.Reset()
-
- e := &GooseFSEngine{}
- got, err := e.queryCacheStatus()
- want := cacheStates{
- cacheCapacity: "19.07MiB",
- cached: "0.00B",
- }
-
- So(got, ShouldResemble, want)
- So(err, ShouldEqual, nil)
- })
- })
-}
-
-func TestGetCacheHitStates(t *testing.T) {
- Convey("Test GetCacheHitStates ", t, func() {
- Convey("with data ", func() {
- var engine *GooseFSEngine
- patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportMetrics",
- func(_ *GooseFSEngine) (string, error) {
- r := mockGooseFSReportMetrics()
- return r, nil
- })
- defer patch1.Reset()
-
- e := &GooseFSEngine{}
-
- got := e.GetCacheHitStates()
- want := cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- }
- So(got.bytesReadLocal, ShouldEqual, want.bytesReadLocal)
- So(got.bytesReadUfsAll, ShouldEqual, want.bytesReadUfsAll)
- })
-
- })
-}
-
-func TestPatchDatasetStatus(t *testing.T) {
- engine := &GooseFSEngine{}
- testCases := []struct {
- total string
- cached string
- percentage string
- }{
- {
- total: "100",
- cached: "10",
- percentage: "10.0%",
- },
- {
- total: "100",
- cached: "50",
- percentage: "50.0%",
- },
- }
- for _, testCase := range testCases {
- dataset := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: testCase.total,
- },
- }
- states := &cacheStates{
- cached: testCase.cached,
- }
- engine.patchDatasetStatus(dataset, states)
- if states.cachedPercentage != testCase.percentage {
- t.Errorf(" want %s, got %s", testCase.percentage, states.cachedPercentage)
- }
- }
-}
-
-func TestInvokeCleanCache(t *testing.T) {
- masterInputs := []*appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop-master",
- Namespace: "fluid",
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 0,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-master",
- Namespace: "fluid",
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- }
- objs := []runtime.Object{}
- for _, masterInput := range masterInputs {
- objs = append(objs, masterInput.DeepCopy())
- }
- fakeClient := fake.NewFakeClientWithScheme(testScheme, objs...)
- testCases := []struct {
- name string
- namespace string
- isErr bool
- }{
- {
- name: "hadoop",
- namespace: "fluid",
- isErr: false,
- },
- {
- name: "hbase",
- namespace: "fluid",
- isErr: true,
- },
- {
- name: "none",
- namespace: "fluid",
- isErr: false,
- },
- }
- for _, testCase := range testCases {
- engine := &GooseFSEngine{
- Client: fakeClient,
- namespace: testCase.namespace,
- name: testCase.name,
- Log: fake.NullLogger(),
- }
- err := engine.invokeCleanCache("")
- isErr := err != nil
- if isErr != testCase.isErr {
- t.Errorf("test-name:%s want %t, got %t", testCase.name, testCase.isErr, isErr)
- }
- }
-}
-
-// $ goosefs fsadmin report summary
-func mockGooseFSReportSummary() string {
- s := `GooseFS cluster summary:
- Master Address: 172.18.0.2:20000
- Web Port: 20001
- Rpc Port: 20000
- Started: 06-29-2021 13:43:56:297
- Uptime: 0 day(s), 0 hour(s), 4 minute(s), and 13 second(s)
- Version: 2.3.1-SNAPSHOT
- Safe Mode: false
- Zookeeper Enabled: false
- Live Workers: 1
- Lost Workers: 0
- Total Capacity: 19.07MB
- Tier: MEM Size: 19.07MB
- Used Capacity: 0B
- Tier: MEM Size: 0B
- Free Capacity: 19.07MB
- `
- return s
-}
-
-func mockGooseFSReportMetrics() string {
- r := `Cluster.BytesReadGooseFS (Type: COUNTER, Value: 0B)
- Cluster.BytesReadGooseFSThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesReadDomain (Type: COUNTER, Value: 0B)
- Cluster.BytesReadDomainThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesReadLocal (Type: COUNTER, Value: 19.37MB)
- Cluster.BytesReadLocalThroughput (Type: GAUGE, Value: 495.97KB/MIN)
- Cluster.BytesReadPerUfs.UFS:s3:%2F%2Ffluid (Type: COUNTER, Value: 30.75MB)
- Cluster.BytesReadUfsAll (Type: COUNTER, Value: 30.75MB)
- Cluster.BytesReadUfsThroughput (Type: GAUGE, Value: 787.17KB/MIN)
- Cluster.BytesWrittenGooseFS (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenGooseFSThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesWrittenDomain (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenDomainThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesWrittenLocal (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenLocalThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesWrittenUfsAll (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenUfsThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.CapacityFree (Type: GAUGE, Value: 9,842,601)
- Cluster.CapacityFreeTierHDD (Type: GAUGE, Value: 0)
- Cluster.CapacityFreeTierMEM (Type: GAUGE, Value: 9,842,601)
- Cluster.CapacityFreeTierSSD (Type: GAUGE, Value: 0)
- Cluster.CapacityTotal (Type: GAUGE, Value: 20,000,000)
- Cluster.CapacityTotalTierHDD (Type: GAUGE, Value: 0)
- Cluster.CapacityTotalTierMEM (Type: GAUGE, Value: 20,000,000)
- Cluster.CapacityTotalTierSSD (Type: GAUGE, Value: 0)
- Cluster.CapacityUsed (Type: GAUGE, Value: 10,157,399)
- Cluster.CapacityUsedTierHDD (Type: GAUGE, Value: 0)
- Cluster.CapacityUsedTierMEM (Type: GAUGE, Value: 10,157,399)
- Cluster.CapacityUsedTierSSD (Type: GAUGE, Value: 0)
- Cluster.RootUfsCapacityFree (Type: GAUGE, Value: -1)
- Cluster.RootUfsCapacityTotal (Type: GAUGE, Value: -1)
- Cluster.RootUfsCapacityUsed (Type: GAUGE, Value: -1)
- Cluster.Workers (Type: GAUGE, Value: 1)
- Master.CompleteFileOps (Type: COUNTER, Value: 0)
- Master.ConnectFromMaster.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 0)
- Master.Create.UFS:%2Fjournal%2FBlockMaster.UFS_TYPE:local (Type: TIMER, Value: 1)
- Master.Create.UFS:%2Fjournal%2FFileSystemMaster.UFS_TYPE:local (Type: TIMER, Value: 1)
- Master.Create.UFS:%2Fjournal%2FMetaMaster.UFS_TYPE:local (Type: TIMER, Value: 1)
- Master.CreateDirectoryOps (Type: COUNTER, Value: 0)
- Master.CreateFileOps (Type: COUNTER, Value: 0)
- Master.DeletePathOps (Type: COUNTER, Value: 0)
- Master.DirectoriesCreated (Type: COUNTER, Value: 0)
- Master.EdgeCacheSize (Type: GAUGE, Value: 7)
- Master.Exists.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 2)
- Master.FileBlockInfosGot (Type: COUNTER, Value: 0)
- Master.FileInfosGot (Type: COUNTER, Value: 25)
- Master.FilesCompleted (Type: COUNTER, Value: 7)
- Master.FilesCreated (Type: COUNTER, Value: 7)
- Master.FilesFreed (Type: COUNTER, Value: 0)
- Master.FilesPersisted (Type: COUNTER, Value: 0)
- Master.FilesPinned (Type: GAUGE, Value: 0)
- Master.FreeFileOps (Type: COUNTER, Value: 0)
- Master.GetAcl.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 7)
- Master.GetBlockInfo.User:root (Type: TIMER, Value: 3)
- Master.GetBlockMasterInfo.User:root (Type: TIMER, Value: 173)
- Master.GetConfigHash.User:root (Type: TIMER, Value: 40)
- Master.GetFileBlockInfoOps (Type: COUNTER, Value: 0)
- Master.GetFileInfoOps (Type: COUNTER, Value: 9)
- Master.GetFileLocations.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 24)
- Master.GetFingerprint.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 1)
- Master.GetMountTable.User:root (Type: TIMER, Value: 2)
- Master.GetNewBlockOps (Type: COUNTER, Value: 0)
- Master.GetSpace.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 18)
- Master.GetSpace.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 103)
- Master.GetStatus.User:root (Type: TIMER, Value: 6)
- Master.GetStatus.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 3)
- Master.GetStatusFailures.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: COUNTER, Value: 2)
- Master.GetWorkerInfoList.User:root (Type: TIMER, Value: 2)
- Master.InodeCacheSize (Type: GAUGE, Value: 8)
- Master.JournalFlushTimer (Type: TIMER, Value: 22)
- Master.LastBackupEntriesCount (Type: GAUGE, Value: -1)
- Master.LastBackupRestoreCount (Type: GAUGE, Value: -1)
- Master.LastBackupRestoreTimeMs (Type: GAUGE, Value: -1)
- Master.LastBackupTimeMs (Type: GAUGE, Value: -1)
- Master.ListStatus.UFS:%2Fjournal%2FBlockMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FFileSystemMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FMetaMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FMetricsMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FTableMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.User:root (Type: TIMER, Value: 3)
- Master.ListStatus.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 1)
- Master.ListingCacheSize (Type: GAUGE, Value: 8)
- Master.MountOps (Type: COUNTER, Value: 0)
- Master.NewBlocksGot (Type: COUNTER, Value: 0)
- Master.PathsDeleted (Type: COUNTER, Value: 0)
- Master.PathsMounted (Type: COUNTER, Value: 0)
- Master.PathsRenamed (Type: COUNTER, Value: 0)
- Master.PathsUnmounted (Type: COUNTER, Value: 0)
- Master.PerUfsOpConnectFromMaster.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 0)
- Master.PerUfsOpCreate.UFS:%2Fjournal%2FBlockMaster (Type: GAUGE, Value: 1)
- Master.PerUfsOpCreate.UFS:%2Fjournal%2FFileSystemMaster (Type: GAUGE, Value: 1)
- Master.PerUfsOpCreate.UFS:%2Fjournal%2FMetaMaster (Type: GAUGE, Value: 1)
- Master.PerUfsOpExists.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 2)
- Master.PerUfsOpGetFileLocations.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 24)
- Master.PerUfsOpGetFingerprint.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 1)
- Master.PerUfsOpGetSpace.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 116)
- Master.PerUfsOpGetStatus.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 3)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FBlockMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FFileSystemMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FMetaMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FMetricsMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FTableMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 1)
- Master.RenamePathOps (Type: COUNTER, Value: 0)
- Master.SetAclOps (Type: COUNTER, Value: 0)
- Master.SetAttributeOps (Type: COUNTER, Value: 0)
- Master.TotalPaths (Type: GAUGE, Value: 8)
- Master.UfsSessionCount-Ufs:s3:%2F%2Ffluid (Type: COUNTER, Value: 0)
- Master.UnmountOps (Type: COUNTER, Value: 0)
- Master.blockHeartbeat.User:root (Type: TIMER, Value: 2,410)
- Master.commitBlock.User:root (Type: TIMER, Value: 1)
- Master.getConfigHash (Type: TIMER, Value: 4)
- Master.getConfigHash.User:root (Type: TIMER, Value: 239)
- Master.getConfiguration (Type: TIMER, Value: 20)
- Master.getConfiguration.User:root (Type: TIMER, Value: 428)
- Master.getMasterInfo.User:root (Type: TIMER, Value: 173)
- Master.getMetrics.User:root (Type: TIMER, Value: 33)
- Master.getPinnedFileIds.User:root (Type: TIMER, Value: 2,410)
- Master.getUfsInfo.User:root (Type: TIMER, Value: 1)
- Master.getWorkerId.User:root (Type: TIMER, Value: 1)
- Master.metricsHeartbeat.User:root (Type: TIMER, Value: 4)
- Master.registerWorker.User:root (Type: TIMER, Value: 1)
- `
- return r
-}
diff --git a/pkg/ddc/goosefs/const.go b/pkg/ddc/goosefs/const.go
deleted file mode 100644
index 51b0e914c69..00000000000
--- a/pkg/ddc/goosefs/const.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-const (
-
- // goosefsHome string = "/opt/goosefs"
-
- // goosefsUser string = "fluid"
- MetricsPrefixBytesReadLocal = "Cluster.BytesReadLocal "
-
- MetricsPrefixBytesReadRemote = "Cluster.BytesReadRemote "
-
- MetricsPrefixBytesReadUfsAll = "Cluster.BytesReadUfsAll "
-
- MetricsPrefixBytesReadLocalThroughput = "Cluster.BytesReadLocalThroughput "
-
- MetricsPrefixBytesReadRemoteThroughput = "Cluster.BytesReadRemoteThroughput "
-
- MetricsPrefixBytesReadUfsThroughput = "Cluster.BytesReadUfsThroughput "
-
- MetadataSyncNotDoneMsg = "[Calculating]"
-
- GooseFSRuntimeMetricsLabel = "goosefs_runtime_metrics"
-
- CheckMetadataSyncDoneTimeoutMillisec = 500
-
- AUTO_SELECT_PORT_MIN = 20000
- AUTO_SELECT_PORT_MAX = 30000
-
- PortNum = 9
-
- CacheHitQueryIntervalMin = 1
-
- HadoopConfHdfsSiteFilename = "hdfs-site.xml"
-
- HadoopConfCoreSiteFilename = "core-site.xml"
-
- HadoopConfMountPath = "/hdfs-config"
-
- WokrerPodRole = "goosefs-worker"
-)
diff --git a/pkg/ddc/goosefs/create_volume.go b/pkg/ddc/goosefs/create_volume.go
deleted file mode 100644
index 90509b14746..00000000000
--- a/pkg/ddc/goosefs/create_volume.go
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "github.com/fluid-cloudnative/fluid/pkg/common"
- volumeHelper "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/volume"
-)
-
-// CreateVolume creates volume
-func (e *GooseFSEngine) CreateVolume() (err error) {
- if e.runtime == nil {
- e.runtime, err = e.getRuntime()
- if err != nil {
- return
- }
- }
-
- err = e.createFusePersistentVolume()
- if err != nil {
- return err
- }
-
- err = e.createFusePersistentVolumeClaim()
- if err != nil {
- return err
- }
-
- err = e.createHCFSPersistentVolume()
- if err != nil {
- return err
- }
-
- return nil
-
-}
-
-// createFusePersistentVolume
-func (e *GooseFSEngine) createFusePersistentVolume() (err error) {
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- return volumeHelper.CreatePersistentVolumeForRuntime(e.Client,
- runtimeInfo,
- e.getMountPoint(),
- common.GooseFSMountType,
- e.Log)
-
-}
-
-// createFusePersistentVolume
-func (e *GooseFSEngine) createFusePersistentVolumeClaim() (err error) {
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- return volumeHelper.CreatePersistentVolumeClaimForRuntime(e.Client, runtimeInfo, e.Log)
-
-}
-
-// createHCFSVolume (TODO: chrisydxie)
-func (e *GooseFSEngine) createHCFSPersistentVolume() (err error) {
- return nil
-}
-
-// createHCFSVolume (TODO: chrisydxie)
-// func (e *GooseFSEngine) createHCFSPersistentVolumeClaim() (err error) {
-// return nil
-// }
diff --git a/pkg/ddc/goosefs/create_volume_test.go b/pkg/ddc/goosefs/create_volume_test.go
deleted file mode 100644
index b79b037c962..00000000000
--- a/pkg/ddc/goosefs/create_volume_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "strconv"
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func TestCreateVolume(t *testing.T) {
- runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", common.GooseFSRuntime)
- runtimeInfo.SetOwnerDatasetUID("dummy-dataset-uid")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
-
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- namespace: "fluid",
- name: "hbase",
- runtimeInfo: runtimeInfo,
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- },
- }
-
- engine.runtimeInfo.SetFuseName(engine.getFuseName())
-
- testDsInputs := &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: engine.getFuseName(),
- Namespace: engine.namespace,
- },
- Spec: appsv1.DaemonSetSpec{
- Template: v1.PodTemplateSpec{
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {
- Image: "fuse-image:v1",
- },
- },
- },
- },
- },
- }
-
- testDatasetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- },
- }
-
- testObjs := []runtime.Object{}
- for _, datasetInput := range testDatasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
- testObjs = append(testObjs, testDsInputs)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = client
-
- err = engine.CreateVolume()
- if err != nil {
- t.Errorf("fail to exec CreateVolume with error %v", err)
- }
-
- var pvs v1.PersistentVolumeList
- err = client.List(context.TODO(), &pvs)
- if err != nil {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
- if len(pvs.Items) != 1 {
- t.Errorf("fail to create the pv")
- }
-
- var pvcs v1.PersistentVolumeClaimList
- err = client.List(context.TODO(), &pvcs)
- if err != nil {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
- if len(pvcs.Items) != 1 {
- t.Errorf("fail to create the pvc")
- }
-
- if pvcs.Items[0].Labels[common.LabelRuntimeFuseGeneration] != strconv.Itoa(int(testDsInputs.Generation)) {
- t.Errorf("fail to check fuse generation on pvc")
- }
-}
-
-func TestCreateFusePersistentVolume(t *testing.T) {
- runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", common.GooseFSRuntime)
- runtimeInfo.SetOwnerDatasetUID("dummy-dataset-uid")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
-
- testDatasetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- },
- }
-
- testObjs := []runtime.Object{}
- for _, datasetInput := range testDatasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engine := &GooseFSEngine{
- Client: client,
- Log: fake.NullLogger(),
- namespace: "fluid",
- name: "hbase",
- runtimeInfo: runtimeInfo,
- }
-
- err = engine.createFusePersistentVolume()
- if err != nil {
- t.Errorf("fail to exec createFusePersistentVolume with error %v", err)
- }
-
- var pvs v1.PersistentVolumeList
- err = client.List(context.TODO(), &pvs)
- if err != nil {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
- if len(pvs.Items) != 1 {
- t.Errorf("fail to create the pv")
- }
-}
-
-func TestCreateFusePersistentVolumeClaim(t *testing.T) {
- runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", common.GooseFSRuntime)
- runtimeInfo.SetOwnerDatasetUID("dummy-dataset-uid")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
-
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- namespace: "fluid",
- name: "hbase",
- runtimeInfo: runtimeInfo,
- }
-
- engine.runtimeInfo.SetFuseName(engine.getFuseName())
-
- testDsInputs := &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: engine.getFuseName(),
- Namespace: engine.namespace,
- },
- Spec: appsv1.DaemonSetSpec{
- Template: v1.PodTemplateSpec{
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {
- Image: "fuse-image:v1",
- },
- },
- },
- },
- },
- }
-
- testDatasetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- },
- }
-
- testObjs := []runtime.Object{}
- for _, datasetInput := range testDatasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
- testObjs = append(testObjs, testDsInputs)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = client
-
- err = engine.createFusePersistentVolumeClaim()
- if err != nil {
- t.Errorf("fail to exec createFusePersistentVolumeClaim with error %v", err)
- }
-
- var pvcs v1.PersistentVolumeClaimList
- err = client.List(context.TODO(), &pvcs)
- if err != nil {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
- if len(pvcs.Items) != 1 {
- t.Errorf("fail to create the pvc")
- }
-
- if pvcs.Items[0].Labels[common.LabelRuntimeFuseGeneration] != strconv.Itoa(int(testDsInputs.Generation)) {
- t.Errorf("fail to check fuse generation on pvc")
- }
-}
diff --git a/pkg/ddc/goosefs/dataset.go b/pkg/ddc/goosefs/dataset.go
deleted file mode 100644
index 44f70da7b17..00000000000
--- a/pkg/ddc/goosefs/dataset.go
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "reflect"
-
- "k8s.io/apimachinery/pkg/types"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-
- corev1 "k8s.io/api/core/v1"
- "k8s.io/client-go/util/retry"
-)
-
-// UpdateCacheOfDataset updates the CacheStates and Runtimes of the dataset.
-func (e *GooseFSEngine) UpdateCacheOfDataset() (err error) {
- // 1. update the runtime status
- runtime, err := e.getRuntime()
- if err != nil {
- return err
- }
-
- // 2.update the dataset status
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return err
- }
- datasetToUpdate := dataset.DeepCopy()
-
- datasetToUpdate.Status.CacheStates = runtime.Status.CacheStates
- // datasetToUpdate.Status.CacheStates =
-
- if len(datasetToUpdate.Status.Runtimes) == 0 {
- datasetToUpdate.Status.Runtimes = []datav1alpha1.Runtime{}
- }
-
- datasetToUpdate.Status.Runtimes = utils.AddRuntimesIfNotExist(datasetToUpdate.Status.Runtimes, utils.NewRuntime(e.name,
- e.namespace,
- common.AccelerateCategory,
- common.GooseFSRuntime,
- e.runtime.Spec.Master.Replicas))
-
- e.Log.Info("the dataset status", "status", datasetToUpdate.Status)
-
- if !reflect.DeepEqual(dataset.Status, datasetToUpdate.Status) {
- err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
- if err != nil {
- e.Log.Error(err, "Update dataset")
- return err
- }
- } else {
- e.Log.Info("No need to update the cache of the data")
- }
-
- return nil
- })
-
- if err != nil {
- e.Log.Error(err, "Update dataset")
- return err
- }
-
- return
-
-}
-
-// UpdateDatasetStatus updates the status of the dataset
-func (e *GooseFSEngine) UpdateDatasetStatus(phase datav1alpha1.DatasetPhase) (err error) {
- // 1. update the runtime status
- runtime, err := e.getRuntime()
- if err != nil {
- return err
- }
-
- // 2.update the dataset status
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return err
- }
- datasetToUpdate := dataset.DeepCopy()
- var cond datav1alpha1.DatasetCondition
-
- if phase != dataset.Status.Phase {
- switch phase {
- case datav1alpha1.BoundDatasetPhase:
- if len(datasetToUpdate.Status.Mounts) == 0 {
- datasetToUpdate.Status.Mounts = datasetToUpdate.Spec.Mounts
- }
- cond = utils.NewDatasetCondition(datav1alpha1.DatasetReady, datav1alpha1.DatasetReadyReason,
- "The ddc runtime is ready.",
- corev1.ConditionTrue)
- case datav1alpha1.FailedDatasetPhase:
- cond = utils.NewDatasetCondition(datav1alpha1.DatasetReady, datav1alpha1.DatasetReadyReason,
- "The ddc runtime is not ready.",
- corev1.ConditionFalse)
- default:
- cond = utils.NewDatasetCondition(datav1alpha1.DatasetReady, datav1alpha1.DatasetReadyReason,
- "The ddc runtime is unknown.",
- corev1.ConditionFalse)
- }
-
- datasetToUpdate.Status.Phase = phase
- datasetToUpdate.Status.Conditions = utils.UpdateDatasetCondition(datasetToUpdate.Status.Conditions,
- cond)
- }
-
- datasetToUpdate.Status.CacheStates = runtime.Status.CacheStates
- // datasetToUpdate.Status.CacheStates =
-
- if datasetToUpdate.Status.HCFSStatus == nil {
- datasetToUpdate.Status.HCFSStatus, err = e.GetHCFSStatus()
- if err != nil {
- return err
- }
- } else {
- e.Log.Info("No need to update HCFS status")
- }
-
- if !reflect.DeepEqual(dataset.Status, datasetToUpdate.Status) {
- err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
- }
-
- return err
- })
-
- if err != nil {
- _ = utils.LoggingErrorExceptConflict(e.Log, err, "Failed to update dataset status", types.NamespacedName{Namespace: e.namespace, Name: e.name})
- return err
- }
-
- return
-}
-
-func (e *GooseFSEngine) BindToDataset() (err error) {
- return e.UpdateDatasetStatus(datav1alpha1.BoundDatasetPhase)
-}
diff --git a/pkg/ddc/goosefs/dataset_test.go b/pkg/ddc/goosefs/dataset_test.go
deleted file mode 100644
index f92fe2eaadf..00000000000
--- a/pkg/ddc/goosefs/dataset_test.go
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
-Copyright 2023 The Fluid Author.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "reflect"
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func TestUpdateCacheOfDataset(t *testing.T) {
- testDatasetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range testDatasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
-
- testRuntimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- },
- Status: datav1alpha1.RuntimeStatus{
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- },
- },
- }
- for _, runtimeInput := range testRuntimeInputs {
- testObjs = append(testObjs, runtimeInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engine := &GooseFSEngine{
- Client: client,
- Log: fake.NullLogger(),
- name: "hbase",
- namespace: "fluid",
- runtime: testRuntimeInputs[0],
- }
-
- err := engine.UpdateCacheOfDataset()
- if err != nil {
- t.Errorf("fail to exec UpdateCacheOfDataset with error %v", err)
- return
- }
-
- expectedDataset := datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- Runtimes: []datav1alpha1.Runtime{
- {
- Name: "hbase",
- Namespace: "fluid",
- Category: common.AccelerateCategory,
- Type: common.GooseFSRuntime,
- MasterReplicas: 1,
- },
- },
- },
- }
-
- var datasets datav1alpha1.DatasetList
- err = client.List(context.TODO(), &datasets)
- if err != nil {
- t.Errorf("fail to list the datasets with error %v", err)
- return
- }
- if !reflect.DeepEqual(datasets.Items[0].Status, expectedDataset.Status) {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
-}
-
-func TestUpdateDatasetStatus(t *testing.T) {
- testDatasetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- Status: datav1alpha1.DatasetStatus{
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range testDatasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
-
- testRuntimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- },
- Status: datav1alpha1.RuntimeStatus{
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- },
- },
- }
- for _, runtimeInput := range testRuntimeInputs {
- testObjs = append(testObjs, runtimeInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engine := &GooseFSEngine{
- Client: client,
- Log: fake.NullLogger(),
- name: "hbase",
- namespace: "fluid",
- runtime: testRuntimeInputs[0],
- }
-
- var testCase = []struct {
- phase datav1alpha1.DatasetPhase
- expectedResult datav1alpha1.Dataset
- }{
- {
- phase: datav1alpha1.BoundDatasetPhase,
- expectedResult: datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- Phase: datav1alpha1.BoundDatasetPhase,
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- },
- {
- phase: datav1alpha1.FailedDatasetPhase,
- expectedResult: datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- Phase: datav1alpha1.FailedDatasetPhase,
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- },
- {
- phase: datav1alpha1.NoneDatasetPhase,
- expectedResult: datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- Phase: datav1alpha1.NoneDatasetPhase,
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- },
- }
-
- for _, test := range testCase {
- err := engine.UpdateDatasetStatus(test.phase)
- if err != nil {
- t.Errorf("fail to exec UpdateCacheOfDataset with error %v", err)
- return
- }
-
- var datasets datav1alpha1.DatasetList
- err = client.List(context.TODO(), &datasets)
- if err != nil {
- t.Errorf("fail to list the datasets with error %v", err)
- return
- }
- if !reflect.DeepEqual(datasets.Items[0].Status.Phase, test.expectedResult.Status.Phase) ||
- !reflect.DeepEqual(datasets.Items[0].Status.CacheStates, test.expectedResult.Status.CacheStates) ||
- !reflect.DeepEqual(datasets.Items[0].Status.HCFSStatus, test.expectedResult.Status.HCFSStatus) {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
- }
-}
-
-func TestBindToDataset(t *testing.T) {
- testDatasetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- Status: datav1alpha1.DatasetStatus{
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range testDatasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
-
- testRuntimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- Status: datav1alpha1.RuntimeStatus{
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- },
- },
- }
- for _, runtimeInput := range testRuntimeInputs {
- testObjs = append(testObjs, runtimeInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engine := &GooseFSEngine{
- Client: client,
- Log: fake.NullLogger(),
- name: "hbase",
- namespace: "fluid",
- runtime: testRuntimeInputs[0],
- }
-
- var expectedResult = datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- Phase: datav1alpha1.BoundDatasetPhase,
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- }
- err := engine.BindToDataset()
- if err != nil {
- t.Errorf("fail to exec UpdateCacheOfDataset with error %v", err)
- return
- }
-
- var datasets datav1alpha1.DatasetList
- err = client.List(context.TODO(), &datasets)
- if err != nil {
- t.Errorf("fail to list the datasets with error %v", err)
- return
- }
- if !reflect.DeepEqual(datasets.Items[0].Status.Phase, expectedResult.Status.Phase) ||
- !reflect.DeepEqual(datasets.Items[0].Status.CacheStates, expectedResult.Status.CacheStates) ||
- !reflect.DeepEqual(datasets.Items[0].Status.HCFSStatus, expectedResult.Status.HCFSStatus) {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
-}
diff --git a/pkg/ddc/goosefs/delete_volume.go b/pkg/ddc/goosefs/delete_volume.go
deleted file mode 100644
index 8de43523c90..00000000000
--- a/pkg/ddc/goosefs/delete_volume.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- volumeHelper "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/volume"
-)
-
-// DeleteVolume creates volume
-// DeleteVolume deletes the GooseFS volume by performing the following steps:
-// 1. Initializes the runtime if it is not already initialized.
-// 2. Deletes the Fuse Persistent Volume Claim (PVC) associated with the volume.
-// 3. Deletes the Fuse Persistent Volume (PV) associated with the volume.
-// Returns an error if any of the steps fail.
-func (e *GooseFSEngine) DeleteVolume() (err error) {
-
- if e.runtime == nil {
- e.runtime, err = e.getRuntime()
- if err != nil {
- return
- }
- }
-
- err = e.deleteFusePersistentVolumeClaim()
- if err != nil {
- return
- }
-
- err = e.deleteFusePersistentVolume()
- if err != nil {
- return
- }
-
- return
-
-}
-
-// deleteFusePersistentVolume
-func (e *GooseFSEngine) deleteFusePersistentVolume() (err error) {
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- return volumeHelper.DeleteFusePersistentVolume(e.Client, runtimeInfo, e.Log)
-}
-
-// deleteFusePersistentVolume
-func (e *GooseFSEngine) deleteFusePersistentVolumeClaim() (err error) {
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- return volumeHelper.DeleteFusePersistentVolumeClaim(e.Client, runtimeInfo, e.Log)
-}
diff --git a/pkg/ddc/goosefs/delete_volume_test.go b/pkg/ddc/goosefs/delete_volume_test.go
deleted file mode 100644
index c399b7deacb..00000000000
--- a/pkg/ddc/goosefs/delete_volume_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "reflect"
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-type TestCase struct {
- engine *GooseFSEngine
- isDeleted bool
- isErr bool
-}
-
-func newTestGooseFSEngine(client client.Client, name string, namespace string, withRunTime bool) *GooseFSEngine {
- runTime := &datav1alpha1.GooseFSRuntime{}
- runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, common.GooseFSRuntime)
- runTimeInfo.SetOwnerDatasetUID("dummy-dataset-uid")
- if !withRunTime {
- runTimeInfo = nil
- runTime = nil
- }
- engine := &GooseFSEngine{
- runtime: runTime,
- name: name,
- namespace: namespace,
- Client: client,
- runtimeInfo: runTimeInfo,
- Log: fake.NullLogger(),
- }
- return engine
-}
-
-func doTestCases(testCases []TestCase, t *testing.T) {
- for _, test := range testCases {
- err := test.engine.DeleteVolume()
- pv := &v1.PersistentVolume{}
- nullPV := v1.PersistentVolume{}
- key := types.NamespacedName{
- Namespace: test.engine.namespace,
- Name: test.engine.name,
- }
- _ = test.engine.Client.Get(context.TODO(), key, pv)
- if test.isDeleted != reflect.DeepEqual(nullPV, *pv) {
- t.Errorf("PV/PVC still exist after delete.")
- }
- isErr := err != nil
- if isErr != test.isErr {
- t.Errorf("expected %t, got %t.", test.isErr, isErr)
- }
- }
-}
-
-func TestGooseFSEngine_DeleteVolume(t *testing.T) {
- testPVInputs := []*v1.PersistentVolume{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "fluid-hbase",
- Annotations: map[string]string{
- "CreatedBy": "fluid",
- },
- },
- Spec: v1.PersistentVolumeSpec{},
- },
- }
-
- tests := []runtime.Object{}
-
- for _, pvInput := range testPVInputs {
- tests = append(tests, pvInput.DeepCopy())
- }
-
- testPVCInputs := []*v1.PersistentVolumeClaim{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- Finalizers: []string{"kubernetes.io/pvc-protection"}, // no err
- },
- Spec: v1.PersistentVolumeClaimSpec{},
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "error",
- Namespace: "fluid",
- Finalizers: []string{"kubernetes.io/pvc-protection"},
- Annotations: map[string]string{
- "CreatedBy": "fluid", // have err
- },
- },
- Spec: v1.PersistentVolumeClaimSpec{},
- },
- }
-
- for _, pvcInput := range testPVCInputs {
- tests = append(tests, pvcInput.DeepCopy())
- }
-
- fakeClient := fake.NewFakeClientWithScheme(testScheme, tests...)
- goosefsEngineCommon := newTestGooseFSEngine(fakeClient, "hbase", "fluid", true)
- goosefsEngineErr := newTestGooseFSEngine(fakeClient, "error", "fluid", true)
- goosefsEngineNoRunTime := newTestGooseFSEngine(fakeClient, "hbase", "fluid", false)
- var testCases = []TestCase{
- {
- engine: goosefsEngineCommon,
- isDeleted: true,
- isErr: false,
- },
- {
- engine: goosefsEngineErr,
- isDeleted: true,
- isErr: true,
- },
- {
- engine: goosefsEngineNoRunTime,
- isDeleted: true,
- isErr: true,
- },
- }
- doTestCases(testCases, t)
-}
-
-func TestGooseFSEngine_DeleteFusePersistentVolume(t *testing.T) {
- testPVInputs := []*v1.PersistentVolume{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "fluid-hbase",
- Annotations: map[string]string{
- "CreatedBy": "fluid",
- },
- },
- Spec: v1.PersistentVolumeSpec{},
- },
- }
-
- tests := []runtime.Object{}
-
- for _, pvInput := range testPVInputs {
- tests = append(tests, pvInput.DeepCopy())
- }
-
- fakeClient := fake.NewFakeClientWithScheme(testScheme, tests...)
- goosefsEngine := newTestGooseFSEngine(fakeClient, "hbase", "fluid", true)
- goosefsEngineNoRuntime := newTestGooseFSEngine(fakeClient, "hbase", "fluid", false)
- testCases := []TestCase{
- {
- engine: goosefsEngine,
- isDeleted: true,
- isErr: false,
- },
- {
- engine: goosefsEngineNoRuntime,
- isDeleted: true,
- isErr: true,
- },
- }
- doTestCases(testCases, t)
-}
-
-func TestGooseFSEngine_DeleteFusePersistentVolumeClaim(t *testing.T) {
- testPVCInputs := []*v1.PersistentVolumeClaim{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- Finalizers: []string{"kubernetes.io/pvc-protection"}, // no err
- },
- Spec: v1.PersistentVolumeClaimSpec{},
- },
- }
-
- tests := []runtime.Object{}
-
- for _, pvcInput := range testPVCInputs {
- tests = append(tests, pvcInput.DeepCopy())
- }
-
- fakeClient := fake.NewFakeClientWithScheme(testScheme, tests...)
- goosefsEngine := newTestGooseFSEngine(fakeClient, "hbase", "fluid", true)
- goosefsEngineNoRuntime := newTestGooseFSEngine(fakeClient, "hbase", "fluid", false)
- testCases := []TestCase{
- {
- engine: goosefsEngine,
- isDeleted: true,
- isErr: false,
- },
- {
- engine: goosefsEngineNoRuntime,
- isDeleted: true,
- isErr: true,
- },
- }
- doTestCases(testCases, t)
-}
diff --git a/pkg/ddc/goosefs/engine.go b/pkg/ddc/goosefs/engine.go
deleted file mode 100644
index c4fbbb0b0e5..00000000000
--- a/pkg/ddc/goosefs/engine.go
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
-
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/tools/record"
-
- "github.com/go-logr/logr"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-// GooseFSEngine implements the Engine interface.
-type GooseFSEngine struct {
- // *base.TemplateEngine
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- runtimeType string
- engineImpl string
- Log logr.Logger
- client.Client
- // gracefulShutdownLimits is the limit for the system to forcibly clean up.
- gracefulShutdownLimits int32
- retryShutdown int32
- initImage string
- MetadataSyncDoneCh chan base.MetadataSyncResult
- runtimeInfo base.RuntimeInfoInterface
- UnitTest bool
- lastCacheHitStates *cacheHitStates
- *ctrl.Helper
- Recorder record.EventRecorder
-}
-
-// Build function builds the GooseFS Engine
-func Build(id string, ctx cruntime.ReconcileRequestContext) (base.Engine, error) {
- engine := &GooseFSEngine{
- name: ctx.Name,
- namespace: ctx.Namespace,
- Client: ctx.Client,
- Recorder: ctx.Recorder,
- Log: ctx.Log,
- runtimeType: ctx.RuntimeType,
- engineImpl: ctx.EngineImpl,
- gracefulShutdownLimits: 5,
- retryShutdown: 0,
- MetadataSyncDoneCh: nil,
- lastCacheHitStates: nil,
- }
- // var implement base.Implement = engine
- // engine.TemplateEngine = template
- if ctx.Runtime != nil {
- runtime, ok := ctx.Runtime.(*datav1alpha1.GooseFSRuntime)
- if !ok {
- return nil, fmt.Errorf("engine %s is failed to parse", ctx.Name)
- }
- engine.runtime = runtime
- } else {
- return nil, fmt.Errorf("engine %s is failed to parse", ctx.Name)
- }
-
- // Build and setup runtime info
- runtimeInfo, err := engine.getRuntimeInfo()
- if err != nil {
- return nil, fmt.Errorf("engine %s failed to get runtime info", ctx.Name)
- }
- engine.runtimeInfo = runtimeInfo
-
- // Build the helper
- engine.Helper = ctrl.BuildHelper(runtimeInfo, ctx.Client, engine.Log)
-
- template := base.NewTemplateEngine(engine, id, ctx)
-
- return template, nil
-}
-
-// Precheck checks if the given key can be found in the current runtime types
-func Precheck(client client.Client, key types.NamespacedName) (found bool, err error) {
- var obj datav1alpha1.GooseFSRuntime
- return utils.CheckObject(client, key, &obj)
-}
diff --git a/pkg/ddc/goosefs/engine_test.go b/pkg/ddc/goosefs/engine_test.go
deleted file mode 100644
index d7910be7c7d..00000000000
--- a/pkg/ddc/goosefs/engine_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
-)
-
-var _ = Describe("Build", func() {
- It("should build engine successfully", func() {
- var namespace = v1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: "fluid",
- },
- }
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, namespace.DeepCopy())
-
- var dataset = datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- }
- testObjs = append(testObjs, dataset.DeepCopy())
-
- var goosefsRuntime = datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- Status: datav1alpha1.RuntimeStatus{
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- },
- }
- testObjs = append(testObjs, goosefsRuntime.DeepCopy())
-
- var daemonset = appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-worker",
- Namespace: "fluid",
- },
- }
- testObjs = append(testObjs, daemonset.DeepCopy())
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- var ctx = cruntime.ReconcileRequestContext{
- NamespacedName: types.NamespacedName{
- Name: "hbase",
- Namespace: "fluid",
- },
- Client: client,
- Log: fake.NullLogger(),
- RuntimeType: "goosefs",
- Runtime: &goosefsRuntime,
- }
-
- engine, err := Build("testId", ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(engine).NotTo(BeNil())
- })
-})
diff --git a/pkg/ddc/goosefs/goosefs_suite_test.go b/pkg/ddc/goosefs/goosefs_suite_test.go
deleted file mode 100644
index bceb74d9b17..00000000000
--- a/pkg/ddc/goosefs/goosefs_suite_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Copyright 2024 The Fluid Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "testing"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-)
-
-func TestGooseFS(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "GooseFS Suite")
-}
diff --git a/pkg/ddc/goosefs/hcfs.go b/pkg/ddc/goosefs/hcfs.go
deleted file mode 100644
index f32b559d77e..00000000000
--- a/pkg/ddc/goosefs/hcfs.go
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
-)
-
-// GetHCFSStatus retrieves the status information of HCFS (Hadoop Compatible File System)
-// This function queries the HCFS endpoint and the underlying filesystem compatible version
-//
-// Process flow:
-// 1. First calls queryHCFSEndpoint() to get the HCFS access endpoint
-// 2. If endpoint retrieval fails, logs the error and returns the error
-// 3. Calls queryCompatibleUFSVersion() to get the underlying filesystem compatible version
-// 4. If version retrieval fails, logs the error and returns the error
-// 5. Constructs the HCFSStatus struct and populates it with the retrieved information
-// 6. Returns the status information and nil error
-//
-// Returns:
-// - *datav1alpha1.HCFSStatus: Pointer to a struct containing HCFS status information,
-// including endpoint and version details
-// - error: Returns an error if any issues occur during the query process
-//
-// Query the hcfs status
-func (e *GooseFSEngine) GetHCFSStatus() (status *datav1alpha1.HCFSStatus, err error) {
- endpoint, err := e.queryHCFSEndpoint()
- if err != nil {
- e.Log.Error(err, "Failed to get HCFS Endpoint")
- return status, err
- }
-
- version, err := e.queryCompatibleUFSVersion()
- if err != nil {
- e.Log.Error(err, "Failed to get Compatible Endpoint")
- return status, err
- }
-
- status = &datav1alpha1.HCFSStatus{
- Endpoint: endpoint,
- UnderlayerFileSystemVersion: version,
- }
- return
-}
-
-// query the hcfs endpoint
-func (e *GooseFSEngine) queryHCFSEndpoint() (endpoint string, err error) {
-
- var (
- serviceName = fmt.Sprintf("%s-master-0", e.name)
- host = fmt.Sprintf("%s.%s", serviceName, e.namespace)
- )
-
- svc, err := kubeclient.GetServiceByName(e.Client, serviceName, e.namespace)
- if err != nil {
- e.Log.Error(err, "Failed to get Endpoint")
- return endpoint, err
- }
-
- if svc == nil {
- e.Log.Error(fmt.Errorf("failed to find the svc %s in %s", e.name, e.namespace), "failed to find the svc, it's nil")
- return
- }
-
- for _, port := range svc.Spec.Ports {
- if port.Name == "rpc" {
- endpoint = fmt.Sprintf("goosefs://%s:%d", host, port.Port)
- return
- }
- }
-
- return
-}
-
-// query the compatible version of UFS
-func (e *GooseFSEngine) queryCompatibleUFSVersion() (version string, err error) {
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- version, err = fileUtils.GetConf("goosefs.underfs.version")
- if err != nil {
- e.Log.Error(err, "Failed to getConf")
- return
- }
- return
-}
diff --git a/pkg/ddc/goosefs/hcfs_test.go b/pkg/ddc/goosefs/hcfs_test.go
deleted file mode 100644
index 94c98956715..00000000000
--- a/pkg/ddc/goosefs/hcfs_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "errors"
- "reflect"
- "testing"
-
- "github.com/agiledragon/gomonkey/v2"
- "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- v1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func newGooseFSEngineHCFS(client client.Client, name string, namespace string) *GooseFSEngine {
- runTime := &v1alpha1.GooseFSRuntime{}
- runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, "goosefs")
- engine := &GooseFSEngine{
- runtime: runTime,
- name: name,
- namespace: namespace,
- Client: client,
- runtimeInfo: runTimeInfo,
- Log: fake.NullLogger(),
- }
- return engine
-}
-
-func TestGetHCFSStatus(t *testing.T) {
- mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "conf", "", nil
- }
- mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "err", "", errors.New("other error")
- }
- service := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-master-0",
- Namespace: "fluid",
- Annotations: common.GetExpectedFluidAnnotations(),
- },
- Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: "rpc",
- Port: 2333,
- },
- },
- },
- }
- serviceWithErr := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: "not-register-master-0",
- Namespace: "fluid",
- Annotations: common.GetExpectedFluidAnnotations(),
- },
- }
- runtimeObjs := []runtime.Object{}
- runtimeObjs = append(runtimeObjs, service.DeepCopy())
- runtimeObjs = append(runtimeObjs, serviceWithErr.DeepCopy())
- fakeClient := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)
- scheme := runtime.NewScheme()
- scheme.AddKnownTypes(v1.SchemeGroupVersion, service)
- fakeClientWithErr := fake.NewFakeClientWithScheme(scheme, runtimeObjs...)
-
- // test common case
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecCommon)
-
- engine := newGooseFSEngineHCFS(fakeClient, "hbase", "fluid")
- out, err := engine.GetHCFSStatus()
- if err != nil {
- t.Fatal(err.Error())
- }
- patches.Reset()
- status := &v1alpha1.HCFSStatus{
- Endpoint: "goosefs://hbase-master-0.fluid:2333",
- UnderlayerFileSystemVersion: "conf",
- }
- if !reflect.DeepEqual(*out, *status) {
- t.Errorf("status message wrong!")
- }
-
- // test when not register case
- engine = newGooseFSEngineHCFS(fakeClientWithErr, "hbase", "fluid")
- _, err = engine.GetHCFSStatus()
- if err == nil {
- t.Errorf("expect No Register Err, but not got.")
- }
-
- // test when getConf with err
- patches.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecErr)
- defer patches.Reset()
-
- engine = newGooseFSEngineHCFS(fakeClient, "hbase", "fluid")
- _, err = engine.GetHCFSStatus()
- if err == nil {
- t.Errorf("expect get Conf Err, but not got.")
- }
-
-}
-
-func TestQueryHCFSEndpoint(t *testing.T) {
- service := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-master-0",
- Namespace: "fluid",
- Annotations: common.GetExpectedFluidAnnotations(),
- },
- Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: "rpc",
- Port: 2333,
- },
- },
- },
- }
- serviceWithErr := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: "not-register-master-0",
- Namespace: "fluid",
- Annotations: common.GetExpectedFluidAnnotations(),
- },
- }
- runtimeObjs := []runtime.Object{}
- runtimeObjs = append(runtimeObjs, service.DeepCopy())
- runtimeObjs = append(runtimeObjs, serviceWithErr.DeepCopy())
- fakeClient := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)
- scheme := runtime.NewScheme()
- scheme.AddKnownTypes(v1.SchemeGroupVersion, service)
- fakeClientWithErr := fake.NewFakeClientWithScheme(scheme, runtimeObjs...)
- testCases := []struct {
- name string
- namespace string
- out string
- isErr bool
- }{
- {
- name: "not-found",
- namespace: "fluid",
- out: "",
- isErr: false,
- },
- {
- name: "not-register",
- namespace: "fluid",
- out: "",
- isErr: false,
- },
- {
- name: "hbase",
- namespace: "fluid",
- out: "goosefs://hbase-master-0.fluid:2333",
- isErr: false,
- },
- }
- for _, testCase := range testCases {
- engine := newGooseFSEngineHCFS(fakeClient, testCase.name, testCase.namespace)
- if testCase.name == "not-register" {
- engine = newGooseFSEngineHCFS(fakeClientWithErr, testCase.name, testCase.namespace)
- }
- out, err := engine.queryHCFSEndpoint()
- if out != testCase.out {
- t.Errorf("input parameter is %s,expected %s, got %s", testCase.name, testCase.out, out)
- }
- isErr := err != nil
- if isErr != testCase.isErr {
- t.Errorf("input parameter is %s,expected %t, got %t", testCase.name, testCase.isErr, isErr)
- }
- }
-}
-
-func TestCompatibleUFSVersion(t *testing.T) {
- mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "conf", "", nil
- }
- mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "err", "", errors.New("other error")
- }
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecCommon)
- defer patches.Reset()
-
- engine := newGooseFSEngineHCFS(nil, "hbase", "fluid")
- out, _ := engine.queryCompatibleUFSVersion()
- if out != "conf" {
- t.Errorf("expected %s, got %s", "conf", out)
- }
-
- patches.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecErr)
- engine = newGooseFSEngineHCFS(nil, "hbase", "fluid")
- out, _ = engine.queryCompatibleUFSVersion()
- if out != "err" {
- t.Errorf("expected %s, got %s", "err", out)
- }
-}
diff --git a/pkg/ddc/goosefs/health_check.go b/pkg/ddc/goosefs/health_check.go
deleted file mode 100644
index c84eda49887..00000000000
--- a/pkg/ddc/goosefs/health_check.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
-
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- data "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-// CheckRuntimeHealthy checks the healthy of the runtime
-func (e *GooseFSEngine) CheckRuntimeHealthy() (err error) {
- // 1. Check the healthy of the master
- masterReady, err := e.CheckMasterReady()
- if err != nil {
- e.Log.Error(err, "failed to check if master is ready")
- updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase)
- if updateErr != nil {
- e.Log.Error(updateErr, "failed to update dataset status to \"Failed\"")
- }
- return
- }
-
- if !masterReady {
- return fmt.Errorf("the master \"%s\" is not healthy, expect at least one replica is ready", e.getMasterName())
- }
-
- // 2. Check the healthy of the workers
- workerReady, err := e.CheckWorkersReady()
- if err != nil {
- e.Log.Error(err, "failed to check if workers are ready")
- updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase)
- if updateErr != nil {
- e.Log.Error(updateErr, "failed to update dataset status to \"Failed\"")
- }
- return
- }
-
- if !workerReady {
- return fmt.Errorf("the worker \"%s\" is not healthy, expect at least one replica is ready", e.getWorkerName())
- }
-
- // 3. Check the healthy of the fuse
- fuseReady, err := e.checkFuseHealthy()
- if err != nil {
- e.Log.Error(err, "The fuse is not healthy")
- updateErr := e.UpdateDatasetStatus(data.FailedDatasetPhase)
- if updateErr != nil {
- e.Log.Error(updateErr, "failed to update dataset status to \"Failed\"")
- }
- return
- }
-
- if !fuseReady {
- // fluid assumes fuse is always ready, so it's a protective branch.
- return fmt.Errorf("the fuse \"%s\" is not healthy", e.getFuseName())
- }
-
- err = e.UpdateDatasetStatus(data.BoundDatasetPhase)
- if err != nil {
- e.Log.Error(err, "failed to update dataset status to \"Bound\"")
- return
- }
-
- return
-}
-
-// checkFuseHealthy check fuses number changed
-func (e *GooseFSEngine) checkFuseHealthy() (ready bool, err error) {
- getRuntimeFn := func(client client.Client) (base.RuntimeInterface, error) {
- return utils.GetGooseFSRuntime(client, e.name, e.namespace)
- }
-
- ready, err = e.Helper.CheckAndSyncFuseStatus(getRuntimeFn, types.NamespacedName{Namespace: e.namespace, Name: e.getFuseName()})
- if err != nil {
- e.Log.Error(err, "failed to check and update fuse status")
- return
- }
-
- if !ready {
- e.Log.Info("fuses are not ready")
- }
-
- return
-}
diff --git a/pkg/ddc/goosefs/health_check_test.go b/pkg/ddc/goosefs/health_check_test.go
deleted file mode 100644
index 12d483c3870..00000000000
--- a/pkg/ddc/goosefs/health_check_test.go
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "reflect"
- "testing"
-
- appsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/record"
- "k8s.io/utils/ptr"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
-)
-
-func TestCheckRuntimeHealthy(t *testing.T) {
- var statefulsetInputs = []appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-master",
- Namespace: "fluid",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- Replicas: 3,
- ReadyReplicas: 3,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-worker",
- Namespace: "fluid",
- },
- Status: appsv1.StatefulSetStatus{
- Replicas: 1,
- ReadyReplicas: 1,
- AvailableReplicas: 1,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- },
- }
-
- testObjs := []runtime.Object{}
- for _, statefulset := range statefulsetInputs {
- testObjs = append(testObjs, statefulset.DeepCopy())
- }
-
- var daemonSetInputs = []appsv1.DaemonSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-fuse",
- Namespace: "fluid",
- },
- Status: appsv1.DaemonSetStatus{
- NumberUnavailable: 0,
- NumberReady: 1,
- NumberAvailable: 1,
- },
- },
- }
- for _, daemonSet := range daemonSetInputs {
- testObjs = append(testObjs, daemonSet.DeepCopy())
- }
-
- var goosefsRuntimeInputs = []datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- },
- Status: datav1alpha1.RuntimeStatus{
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- },
- },
- }
- for _, goosefsRuntime := range goosefsRuntimeInputs {
- testObjs = append(testObjs, goosefsRuntime.DeepCopy())
- }
-
- var datasetInputs = []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{},
- Status: datav1alpha1.DatasetStatus{
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- }
- for _, dataset := range datasetInputs {
- testObjs = append(testObjs, dataset.DeepCopy())
- }
-
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- Client: client,
- Log: fake.NullLogger(),
- namespace: "fluid",
- name: "hbase",
- runtime: &goosefsRuntimeInputs[0],
- Recorder: record.NewFakeRecorder(1),
- },
- }
-
- var testCase = []struct {
- engine GooseFSEngine
- expectedErrorNil bool
- expectedMasterPhase datav1alpha1.RuntimePhase
- expectedWorkerPhase datav1alpha1.RuntimePhase
- expectedRuntimeWorkerNumberReady int32
- expectedRuntimeWorkerAvailable int32
- expectedRuntimeFuseNumberReady int32
- expectedRuntimeFuseNumberAvailable int32
- expectedDataset datav1alpha1.Dataset
- }{
- {
- engine: engines[0],
- expectedErrorNil: true,
- expectedMasterPhase: datav1alpha1.RuntimePhaseReady,
- expectedWorkerPhase: "",
- expectedRuntimeWorkerNumberReady: 1,
- expectedRuntimeWorkerAvailable: 1,
- expectedRuntimeFuseNumberReady: 1,
- expectedRuntimeFuseNumberAvailable: 1,
- expectedDataset: datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- Phase: datav1alpha1.BoundDatasetPhase,
- CacheStates: map[common.CacheStateName]string{
- common.Cached: "true",
- },
- HCFSStatus: &datav1alpha1.HCFSStatus{
- Endpoint: "test Endpoint",
- UnderlayerFileSystemVersion: "Underlayer HCFS Compatible Version",
- },
- },
- },
- },
- }
- for _, test := range testCase {
- runtimeInfo, _ := base.BuildRuntimeInfo(test.engine.name, test.engine.namespace, common.GooseFSRuntime)
- test.engine.Helper = ctrl.BuildHelper(runtimeInfo, client, test.engine.Log)
- err := test.engine.CheckRuntimeHealthy()
- if err != nil && test.expectedErrorNil == true ||
- err == nil && test.expectedErrorNil == false {
- t.Errorf("fail to exec the checkMasterHealthy function with err %v", err)
- return
- }
- if test.expectedErrorNil == false {
- continue
- }
-
- goosefsRuntime, err := test.engine.getRuntime()
- if err != nil {
- t.Errorf("fail to get the runtime with the error %v", err)
- return
- }
- if goosefsRuntime.Status.MasterPhase != test.expectedMasterPhase {
- t.Errorf("fail to update the runtime status, get %s, expect %s", goosefsRuntime.Status.MasterPhase, test.expectedMasterPhase)
- return
- }
- if goosefsRuntime.Status.WorkerNumberReady != test.expectedRuntimeWorkerNumberReady ||
- goosefsRuntime.Status.WorkerNumberAvailable != test.expectedRuntimeWorkerAvailable {
- t.Errorf("fail to update the runtime")
- return
- }
- if goosefsRuntime.Status.FuseNumberReady != test.expectedRuntimeFuseNumberReady ||
- goosefsRuntime.Status.FuseNumberAvailable != test.expectedRuntimeFuseNumberAvailable {
- t.Errorf("fail to update the runtime")
- return
- }
-
- _, cond := utils.GetRuntimeCondition(goosefsRuntime.Status.Conditions, datav1alpha1.RuntimeMasterReady)
- if cond == nil {
- t.Errorf("fail to update the condition")
- return
- }
- _, cond = utils.GetRuntimeCondition(goosefsRuntime.Status.Conditions, datav1alpha1.RuntimeWorkersReady)
- if cond == nil {
- t.Errorf("fail to update the condition")
- return
- }
- _, cond = utils.GetRuntimeCondition(goosefsRuntime.Status.Conditions, datav1alpha1.RuntimeFusesReady)
- if cond == nil {
- t.Errorf("fail to update the condition")
- return
- }
-
- var datasets datav1alpha1.DatasetList
- err = client.List(context.TODO(), &datasets)
- if err != nil {
- t.Errorf("fail to list the datasets with error %v", err)
- return
- }
- if !reflect.DeepEqual(datasets.Items[0].Status.Phase, test.expectedDataset.Status.Phase) ||
- !reflect.DeepEqual(datasets.Items[0].Status.CacheStates, test.expectedDataset.Status.CacheStates) ||
- !reflect.DeepEqual(datasets.Items[0].Status.HCFSStatus, test.expectedDataset.Status.HCFSStatus) {
- t.Errorf("fail to exec the function with error %v", err)
- return
- }
- }
-}
-
-func TestCheckFuseHealthy(t *testing.T) {
- var daemonSetInputs = []appsv1.DaemonSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-fuse",
- Namespace: "fluid",
- },
- Status: appsv1.DaemonSetStatus{
- NumberUnavailable: 1,
- NumberReady: 1,
- NumberAvailable: 1,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-fuse",
- Namespace: "fluid",
- },
- Status: appsv1.DaemonSetStatus{
- NumberUnavailable: 0,
- NumberReady: 1,
- NumberAvailable: 1,
- },
- },
- }
-
- testObjs := []runtime.Object{}
- for _, daemonSet := range daemonSetInputs {
- testObjs = append(testObjs, daemonSet.DeepCopy())
- }
-
- var goosefsruntimeInputs = []datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- },
- }
- for _, goosefsruntimeInput := range goosefsruntimeInputs {
- testObjs = append(testObjs, goosefsruntimeInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- Client: client,
- Log: fake.NullLogger(),
- namespace: "fluid",
- name: "hbase",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- },
- Recorder: record.NewFakeRecorder(1),
- },
- {
- Client: client,
- Log: fake.NullLogger(),
- namespace: "fluid",
- name: "spark",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- },
- Recorder: record.NewFakeRecorder(1),
- },
- }
-
- var testCase = []struct {
- engine GooseFSEngine
- expectedWorkerPhase datav1alpha1.RuntimePhase
- expectedErrorNil bool
- expectedRuntimeFuseNumberReady int32
- expectedRuntimeFuseNumberAvailable int32
- expectedRuntimeFuseNumberUnavailable int32
- }{
- {
- engine: engines[0],
- expectedWorkerPhase: datav1alpha1.RuntimePhaseNotReady,
- expectedErrorNil: true,
- expectedRuntimeFuseNumberReady: 1,
- expectedRuntimeFuseNumberAvailable: 1,
- expectedRuntimeFuseNumberUnavailable: 1,
- },
- {
- engine: engines[1],
- expectedWorkerPhase: "",
- expectedErrorNil: true,
- expectedRuntimeFuseNumberReady: 1,
- expectedRuntimeFuseNumberAvailable: 1,
- expectedRuntimeFuseNumberUnavailable: 0,
- },
- }
-
- for _, test := range testCase {
- runtimeInfo, _ := base.BuildRuntimeInfo(test.engine.name, test.engine.namespace, common.GooseFSRuntime)
- test.engine.Helper = ctrl.BuildHelper(runtimeInfo, client, test.engine.Log)
- _, err := test.engine.checkFuseHealthy()
- if err != nil && test.expectedErrorNil == true ||
- err == nil && test.expectedErrorNil == false {
- t.Errorf("fail to exec the checkMasterHealthy function with err %v", err)
- return
- }
-
- goosefsruntime, err := test.engine.getRuntime()
- if err != nil {
- t.Errorf("fail to get the runtime with the error %v", err)
- return
- }
-
- if goosefsruntime.Status.FuseNumberReady != test.expectedRuntimeFuseNumberReady ||
- goosefsruntime.Status.FuseNumberAvailable != test.expectedRuntimeFuseNumberAvailable ||
- goosefsruntime.Status.FuseNumberUnavailable != test.expectedRuntimeFuseNumberUnavailable {
- t.Errorf("fail to update the runtime")
- return
- }
-
- _, cond := utils.GetRuntimeCondition(goosefsruntime.Status.Conditions, datav1alpha1.RuntimeFusesReady)
- if cond == nil {
- t.Errorf("fail to update the condition")
- return
- }
- }
-}
diff --git a/pkg/ddc/goosefs/load_data.go b/pkg/ddc/goosefs/load_data.go
deleted file mode 100644
index 3e43a001b9c..00000000000
--- a/pkg/ddc/goosefs/load_data.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/fluid-cloudnative/fluid/pkg/dataflow"
- "github.com/fluid-cloudnative/fluid/pkg/utils/transformer"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- v1 "k8s.io/api/core/v1"
- "sigs.k8s.io/yaml"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- cdataload "github.com/fluid-cloudnative/fluid/pkg/dataload"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/docker"
-)
-
-// generateDataLoadValueFile builds a DataLoadValue by extracted specifications from the given DataLoad, and
-// marshals the DataLoadValue to a temporary yaml file where stores values that'll be used by fluid dataloader helm chart
-func (e *GooseFSEngine) generateDataLoadValueFile(r cruntime.ReconcileRequestContext, object client.Object) (valueFileName string, err error) {
- dataload, ok := object.(*datav1alpha1.DataLoad)
- if !ok {
- err = fmt.Errorf("object %v is not a DataLoad", object)
- return "", err
- }
-
- targetDataset, err := utils.GetDataset(r.Client, dataload.Spec.Dataset.Name, dataload.Spec.Dataset.Namespace)
- if err != nil {
- return "", err
- }
-
- imageName, imageTag := docker.GetWorkerImage(r.Client, dataload.Spec.Dataset.Name, "goosefs", dataload.Spec.Dataset.Namespace)
-
- if len(imageName) == 0 {
- imageName = docker.GetImageRepoFromEnv(common.GooseFSRuntimeImageEnv)
- if len(imageName) == 0 {
- defaultImageInfo := strings.Split(common.DefaultGooseFSRuntimeImage, ":")
- if len(defaultImageInfo) < 1 {
- panic("invalid default dataload image!")
- } else {
- imageName = defaultImageInfo[0]
- }
- }
- }
-
- if len(imageTag) == 0 {
- imageTag = docker.GetImageTagFromEnv(common.GooseFSRuntimeImageEnv)
- if len(imageTag) == 0 {
- defaultImageInfo := strings.Split(common.DefaultGooseFSRuntimeImage, ":")
- if len(defaultImageInfo) < 2 {
- panic("invalid default dataload image!")
- } else {
- imageTag = defaultImageInfo[1]
- }
- }
- }
- image := fmt.Sprintf("%s:%s", imageName, imageTag)
-
- dataLoadValue, err := e.genDataLoadValue(image, targetDataset, dataload)
- if err != nil {
- return
- }
-
- data, err := yaml.Marshal(dataLoadValue)
- if err != nil {
- return
- }
-
- valueFile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("%s-%s-loader-values.yaml", dataload.Namespace, dataload.Name))
- if err != nil {
- return
- }
- err = os.WriteFile(valueFile.Name(), data, 0o400)
- if err != nil {
- return
- }
- return valueFile.Name(), nil
-}
-
-func (e *GooseFSEngine) genDataLoadValue(image string, targetDataset *datav1alpha1.Dataset, dataload *datav1alpha1.DataLoad) (*cdataload.DataLoadValue, error) {
- imagePullSecrets := docker.GetImagePullSecretsFromEnv(common.EnvImagePullSecretsKey)
-
- dataloadInfo := cdataload.DataLoadInfo{
- BackoffLimit: 3,
- TargetDataset: dataload.Spec.Dataset.Name,
- LoadMetadata: dataload.Spec.LoadMetadata,
- Image: image,
- Options: dataload.Spec.Options,
- ImagePullSecrets: imagePullSecrets,
- Labels: dataload.Spec.PodMetadata.Labels,
- Annotations: dataflow.InjectAffinityAnnotation(dataload.Annotations, dataload.Spec.PodMetadata.Annotations),
- Policy: string(dataload.Spec.Policy),
- Schedule: dataload.Spec.Schedule,
- }
-
- // pod affinity
- if dataload.Spec.Affinity != nil {
- dataloadInfo.Affinity = dataload.Spec.Affinity
- }
-
- // inject the node affinity by previous operation pod.
- var err error
- dataloadInfo.Affinity, err = dataflow.InjectAffinityByRunAfterOp(e.Client, dataload.Spec.RunAfter, dataload.Namespace, dataloadInfo.Affinity)
- if err != nil {
- return nil, err
- }
-
- // node selector
- if dataload.Spec.NodeSelector != nil {
- if dataloadInfo.NodeSelector == nil {
- dataloadInfo.NodeSelector = make(map[string]string)
- }
- dataloadInfo.NodeSelector = dataload.Spec.NodeSelector
- }
-
- // pod tolerations
- if len(dataload.Spec.Tolerations) > 0 {
- if dataloadInfo.Tolerations == nil {
- dataloadInfo.Tolerations = make([]v1.Toleration, 0)
- }
- dataloadInfo.Tolerations = dataload.Spec.Tolerations
- }
-
- // scheduler name
- if len(dataload.Spec.SchedulerName) > 0 {
- dataloadInfo.SchedulerName = dataload.Spec.SchedulerName
- }
-
- targetPaths := []cdataload.TargetPath{}
- for _, target := range dataload.Spec.Target {
- fluidNative := utils.IsTargetPathUnderFluidNativeMounts(target.Path, *targetDataset)
- targetPaths = append(targetPaths, cdataload.TargetPath{
- Path: target.Path,
- Replicas: target.Replicas,
- FluidNative: fluidNative,
- })
- }
- dataloadInfo.TargetPaths = targetPaths
- dataLoadValue := &cdataload.DataLoadValue{
- Name: dataload.Name,
- OwnerDatasetId: utils.GetDatasetId(targetDataset.Namespace, targetDataset.Name, string(targetDataset.UID)),
- DataLoadInfo: dataloadInfo,
- Owner: transformer.GenerateOwnerReferenceFromObject(dataload),
- }
-
- return dataLoadValue, nil
-}
-
-func (e *GooseFSEngine) CheckRuntimeReady() (ready bool) {
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- ready = fileUtils.Ready()
- if !ready {
- e.Log.Info("runtime not ready", "runtime", ready)
- return false
- }
- return true
-}
diff --git a/pkg/ddc/goosefs/load_data_test.go b/pkg/ddc/goosefs/load_data_test.go
deleted file mode 100644
index 2749002c1f2..00000000000
--- a/pkg/ddc/goosefs/load_data_test.go
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "testing"
-
- "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- cdataload "github.com/fluid-cloudnative/fluid/pkg/dataload"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func TestGenerateDataLoadValueFile(t *testing.T) {
- datasetInputs := []datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range datasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- context := cruntime.ReconcileRequestContext{
- Client: client,
- }
- dataLoadNoTarget := datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- },
- }
- dataLoadWithTarget := datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Target: []datav1alpha1.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- },
- }
-
- dataLoadWithOptions := datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Target: []datav1alpha1.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- Options: map[string]string{
- "atomicCache": "true",
- "expireTime": "43200000",
- },
- },
- }
-
- testCases := []struct {
- dataLoad datav1alpha1.DataLoad
- expectFileName string
- }{
- {
- dataLoad: dataLoadNoTarget,
- expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"),
- },
- {
- dataLoad: dataLoadWithTarget,
- expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"),
- },
- {
- dataLoad: dataLoadWithOptions,
- expectFileName: filepath.Join(os.TempDir(), "fluid-test-dataload-loader-values.yaml"),
- },
- }
- for _, test := range testCases {
- engine := GooseFSEngine{}
- if fileName, err := engine.generateDataLoadValueFile(context, &test.dataLoad); !strings.Contains(fileName, test.expectFileName) {
- t.Errorf("fail to generate the dataload value file %v", err)
- }
- }
-}
-
-func Test_genDataLoadValue(t *testing.T) {
- testCases := map[string]struct {
- image string
- targetDataset *datav1alpha1.Dataset
- dataload *datav1alpha1.DataLoad
- want *cdataload.DataLoadValue
- }{
- "test case with scheduler name": {
- image: "fluid:v0.0.1",
- targetDataset: &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "spark",
- MountPoint: "local://mnt/data0",
- Path: "/mnt",
- },
- },
- },
- },
- dataload: &datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Target: []datav1alpha1.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- SchedulerName: "scheduler-test",
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- },
- },
- want: &cdataload.DataLoadValue{
- Name: "test-dataload",
- OwnerDatasetId: "fluid-test-dataset",
- Owner: &common.OwnerReference{
- APIVersion: "/",
- Enabled: true,
- Name: "test-dataload",
- BlockOwnerDeletion: false,
- Controller: true,
- },
- DataLoadInfo: cdataload.DataLoadInfo{
- BackoffLimit: 3,
- Image: "fluid:v0.0.1",
- TargetDataset: "test-dataset",
- SchedulerName: "scheduler-test",
- TargetPaths: []cdataload.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- ImagePullSecrets: []corev1.LocalObjectReference{},
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- },
- },
- },
- "test case with affinity": {
- image: "fluid:v0.0.1",
- targetDataset: &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "spark",
- MountPoint: "local://mnt/data0",
- Path: "/mnt",
- },
- },
- },
- },
- dataload: &datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Target: []datav1alpha1.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- SchedulerName: "scheduler-test",
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- Affinity: &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "topology.kubernetes.io/zone",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "antarctica-east1",
- "antarctica-west1",
- },
- },
- },
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
- {
- Weight: 1,
- Preference: corev1.NodeSelectorTerm{
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "another-node-label-key",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "another-node-label-value",
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
- want: &cdataload.DataLoadValue{
- Name: "test-dataload",
- OwnerDatasetId: "fluid-test-dataset",
- Owner: &common.OwnerReference{
- APIVersion: "/",
- Enabled: true,
- Name: "test-dataload",
- BlockOwnerDeletion: false,
- Controller: true,
- },
- DataLoadInfo: cdataload.DataLoadInfo{
- BackoffLimit: 3,
- Image: "fluid:v0.0.1",
- TargetDataset: "test-dataset",
- SchedulerName: "scheduler-test",
- TargetPaths: []cdataload.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- ImagePullSecrets: []corev1.LocalObjectReference{},
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- Affinity: &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "topology.kubernetes.io/zone",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "antarctica-east1",
- "antarctica-west1",
- },
- },
- },
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
- {
- Weight: 1,
- Preference: corev1.NodeSelectorTerm{
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "another-node-label-key",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "another-node-label-value",
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
- "test case with node selector": {
- image: "fluid:v0.0.1",
- targetDataset: &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "spark",
- MountPoint: "local://mnt/data0",
- Path: "/mnt",
- },
- },
- },
- },
- dataload: &datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Target: []datav1alpha1.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- SchedulerName: "scheduler-test",
- NodeSelector: map[string]string{
- "diskType": "ssd",
- },
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- },
- },
- want: &cdataload.DataLoadValue{
- Name: "test-dataload",
- OwnerDatasetId: "fluid-test-dataset",
- Owner: &common.OwnerReference{
- APIVersion: "/",
- Enabled: true,
- Name: "test-dataload",
- BlockOwnerDeletion: false,
- Controller: true,
- },
- DataLoadInfo: cdataload.DataLoadInfo{
- BackoffLimit: 3,
- Image: "fluid:v0.0.1",
- TargetDataset: "test-dataset",
- SchedulerName: "scheduler-test",
- TargetPaths: []cdataload.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- ImagePullSecrets: []corev1.LocalObjectReference{},
- NodeSelector: map[string]string{
- "diskType": "ssd",
- },
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- },
- },
- },
- "test case with tolerations": {
- image: "fluid:v0.0.1",
- targetDataset: &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "spark",
- MountPoint: "local://mnt/data0",
- Path: "/mnt",
- },
- },
- },
- },
- dataload: &datav1alpha1.DataLoad{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-dataload",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DataLoadSpec{
- Dataset: datav1alpha1.TargetDataset{
- Name: "test-dataset",
- Namespace: "fluid",
- },
- Target: []datav1alpha1.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- SchedulerName: "scheduler-test",
- Tolerations: []corev1.Toleration{
- {
- Key: "example-key",
- Operator: corev1.TolerationOpExists,
- Effect: corev1.TaintEffectNoSchedule,
- },
- },
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- },
- },
- want: &cdataload.DataLoadValue{
- Name: "test-dataload",
- OwnerDatasetId: "fluid-test-dataset",
- Owner: &common.OwnerReference{
- APIVersion: "/",
- Enabled: true,
- Name: "test-dataload",
- BlockOwnerDeletion: false,
- Controller: true,
- },
- DataLoadInfo: cdataload.DataLoadInfo{
- BackoffLimit: 3,
- Image: "fluid:v0.0.1",
- TargetDataset: "test-dataset",
- SchedulerName: "scheduler-test",
- TargetPaths: []cdataload.TargetPath{
- {
- Path: "/test",
- Replicas: 1,
- },
- },
- ImagePullSecrets: []corev1.LocalObjectReference{},
- Tolerations: []corev1.Toleration{
- {
- Key: "example-key",
- Operator: corev1.TolerationOpExists,
- Effect: corev1.TaintEffectNoSchedule,
- },
- },
- Options: map[string]string{
- "mock-options-k1": "mock-options-v1",
- },
- },
- },
- },
- }
- engine := GooseFSEngine{
- namespace: "fluid",
- name: "test",
- Log: fake.NullLogger(),
- }
- for k, item := range testCases {
- got, _ := engine.genDataLoadValue(item.image, item.targetDataset, item.dataload)
- if !reflect.DeepEqual(got, item.want) {
- t.Errorf("case %s, got %v,want:%v", k, got, item.want)
- }
- }
-}
-
-func TestCheckRuntimeReady(t *testing.T) {
- mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "", "", nil
- }
- mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "err", "", errors.New("error")
- }
-
- engine := GooseFSEngine{
- namespace: "fluid",
- name: "hbase",
- Log: fake.NullLogger(),
- }
-
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecCommon)
- defer patches.Reset()
-
- if ready := engine.CheckRuntimeReady(); ready != true {
- fmt.Println(ready)
- t.Errorf("fail to exec the function CheckRuntimeReady")
- }
-
- patches.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecErr)
-
- if ready := engine.CheckRuntimeReady(); ready != false {
- fmt.Println(ready)
- t.Errorf("fail to exec the function CheckRuntimeReady")
- }
-}
diff --git a/pkg/ddc/goosefs/master.go b/pkg/ddc/goosefs/master.go
deleted file mode 100644
index 6d6939de247..00000000000
--- a/pkg/ddc/goosefs/master.go
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "reflect"
-
- corev1 "k8s.io/api/core/v1"
- apierrs "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/util/retry"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
-)
-
-// CheckMasterReady checks if the master is ready
-func (e *GooseFSEngine) CheckMasterReady() (ready bool, err error) {
- getRuntimeFn := func(client client.Client) (base.RuntimeInterface, error) {
- return utils.GetGooseFSRuntime(client, e.name, e.namespace)
- }
-
- ready, err = e.Helper.CheckAndSyncMasterStatus(getRuntimeFn, types.NamespacedName{Namespace: e.namespace, Name: e.getMasterName()})
- if err != nil {
- e.Log.Error(err, "fail to check and update master status")
- return
- }
-
- if !ready {
- e.Log.Info("master is not ready")
- }
-
- return
-}
-
-// ShouldSetupMaster checks if we need setup the master
-func (e *GooseFSEngine) ShouldSetupMaster() (should bool, err error) {
-
- runtime, err := e.getRuntime()
- if err != nil {
- return
- }
-
- switch runtime.Status.MasterPhase {
- case datav1alpha1.RuntimePhaseNone:
- should = true
- default:
- should = false
- }
-
- return
-}
-
-// SetupMaster setups the master and updates the status of the runtime.
-//
-// This function performs the following steps:
-// 1. Checks if the master StatefulSet exists:
-// - If not found, initializes the master via `setupMasterInternal()`.
-// - If found, logs the current ready replicas.
-//
-// 2. Updates the runtime status:
-// - Sets the master phase to `RuntimePhaseNotReady`.
-// - Records desired master replicas (defaulting to 1 if unspecified).
-// - Initializes worker selectors and sets the value file configmap.
-// - Adds a condition indicating the master is initialized.
-//
-// 3. Uses retry logic to handle concurrent updates to the runtime status.
-//
-// Parameters:
-// - e: *GooseFSEngine
-// The engine instance containing client, logger, namespace, and configuration for the GooseFS runtime.
-//
-// Returns:
-// - error
-// Returns an error if the master setup fails or the runtime status update encounters an issue.
-func (e *GooseFSEngine) SetupMaster() (err error) {
- masterName := e.getMasterName()
-
- // 1. Setup the master
- master, err := kubeclient.GetStatefulSet(e.Client, masterName, e.namespace)
- if err != nil && apierrs.IsNotFound(err) {
- //1. Is not found error
- e.Log.V(1).Info("SetupMaster", "master", masterName)
- return e.setupMasterInternal()
- } else if err != nil {
- //2. Other errors
- return
- } else {
- //3.The master has been set up
- e.Log.V(1).Info("The master has been set.", "replicas", master.Status.ReadyReplicas)
- }
-
- // 2. Update the status of the runtime
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- runtime, err := e.getRuntime()
- if err != nil {
- return err
- }
- runtimeToUpdate := runtime.DeepCopy()
-
- runtimeToUpdate.Status.MasterPhase = datav1alpha1.RuntimePhaseNotReady
-
- // Init selector for worker
- runtimeToUpdate.Status.Selector = e.getWorkerSelectors()
-
- runtimeToUpdate.Status.ValueFileConfigmap = e.getHelmValuesConfigMapName()
-
- if len(runtimeToUpdate.Status.Conditions) == 0 {
- runtimeToUpdate.Status.Conditions = []datav1alpha1.RuntimeCondition{}
- }
- cond := utils.NewRuntimeCondition(datav1alpha1.RuntimeMasterInitialized, datav1alpha1.RuntimeMasterInitializedReason,
- "The master is initialized.", corev1.ConditionTrue)
- runtimeToUpdate.Status.Conditions =
- utils.UpdateRuntimeCondition(runtimeToUpdate.Status.Conditions,
- cond)
-
- if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) {
- return e.Client.Status().Update(context.TODO(), runtimeToUpdate)
- }
-
- return nil
- })
-
- if err != nil {
- e.Log.Error(err, "Update runtime status")
- return err
- }
-
- return
-}
diff --git a/pkg/ddc/goosefs/master_internal.go b/pkg/ddc/goosefs/master_internal.go
deleted file mode 100644
index af54bba6e0e..00000000000
--- a/pkg/ddc/goosefs/master_internal.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "os"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/helm"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- yaml "gopkg.in/yaml.v2"
-)
-
-// setup the cache master
-func (e *GooseFSEngine) setupMasterInternal() (err error) {
- var (
- chartName = utils.GetChartsDirectory() + "/" + common.GooseFSChart
- )
-
- runtime, err := e.getRuntime()
- if err != nil {
- return
- }
-
- valuefileName, err := e.generateGooseFSValueFile(runtime)
- if err != nil {
- return
- }
-
- found, err := helm.CheckRelease(e.name, e.namespace)
- if err != nil {
- return
- }
-
- if found {
- e.Log.Info("The release is already installed", "name", e.name, "namespace", e.namespace)
- return
- }
-
- return helm.InstallRelease(e.name, e.namespace, valuefileName, chartName)
-}
-
-// generate goosefs struct
-// generateGooseFSValueFile generates the GooseFS values file for the Helm chart.
-// It first deletes any existing ConfigMap for Helm values, then transforms the runtime
-// configuration into a values structure, marshals it to YAML, and writes it to a
-// temporary file. Finally, it saves the YAML data into a new ConfigMap.
-func (e *GooseFSEngine) generateGooseFSValueFile(runtime *datav1alpha1.GooseFSRuntime) (valueFileName string, err error) {
-
- //0. Check if the configmap exists
- err = kubeclient.DeleteConfigMap(e.Client, e.getHelmValuesConfigMapName(), e.namespace)
-
- if err != nil {
- e.Log.Error(err, "Failed to clean value files")
- return
- }
-
- // labelName := common.LabelAnnotationStorageCapacityPrefix + e.runtimeType + "-" + e.name
- // configmapName := e.name + "-" + e.runtimeType + "-values"
- //1. Transform the runtime to value
- value, err := e.transform(runtime)
- if err != nil {
- return
- }
-
- e.Log.Info("Generate values", "value", value)
-
- data, err := yaml.Marshal(value)
- if err != nil {
- return
- }
-
- //2. Get the template value file
- valueFile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("%s-%s-values.yaml", e.name, e.engineImpl))
- if err != nil {
- e.Log.Error(err, "failed to create value file", "valueFile", valueFile.Name())
- return valueFileName, err
- }
-
- valueFileName = valueFile.Name()
- e.Log.V(1).Info("Save the values file", "valueFile", valueFileName)
-
- err = os.WriteFile(valueFileName, data, 0400)
- if err != nil {
- return
- }
-
- //3. Save the configfile into configmap
- runtimeInfo := e.runtimeInfo
- ownerDatasetId := utils.GetDatasetId(runtimeInfo.GetNamespace(), runtimeInfo.GetName(), runtimeInfo.GetOwnerDatasetUID())
- err = kubeclient.CreateConfigMap(e.Client, e.getHelmValuesConfigMapName(), e.namespace, "data", data, ownerDatasetId)
- if err != nil {
- return
- }
-
- return valueFileName, err
-}
-
-func (e *GooseFSEngine) getHelmValuesConfigMapName() string {
- return e.name + "-" + e.engineImpl + "-values"
-}
diff --git a/pkg/ddc/goosefs/master_internal_test.go b/pkg/ddc/goosefs/master_internal_test.go
deleted file mode 100644
index fbfbc75b7d4..00000000000
--- a/pkg/ddc/goosefs/master_internal_test.go
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "testing"
-
- "github.com/agiledragon/gomonkey/v2"
- "github.com/pkg/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/net"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/helm"
-)
-
-func TestSetupMasterInternal(t *testing.T) {
-
- mockExecCheckReleaseCommonFound := func(name string, namespace string) (exist bool, err error) {
-
- return true, nil
-
- }
-
- mockExecCheckReleaseCommonNotFound := func(name string, namespace string) (exist bool, err error) {
-
- return false, nil
-
- }
-
- mockExecCheckReleaseErr := func(name string, namespace string) (exist bool, err error) {
-
- return false, errors.New("fail to check release")
-
- }
-
- mockExecInstallReleaseCommon := func(name string, namespace string, valueFile string, chartName string) error {
-
- return nil
-
- }
-
- mockExecInstallReleaseErr := func(name string, namespace string, valueFile string, chartName string) error {
-
- return errors.New("fail to install dataload chart")
-
- }
-
- allixioruntime := &datav1alpha1.GooseFSRuntime{
-
- ObjectMeta: metav1.ObjectMeta{
-
- Name: "hbase",
-
- Namespace: "fluid",
- },
- }
-
- testObjs := []runtime.Object{}
-
- testObjs = append(testObjs, (*allixioruntime).DeepCopy())
-
- datasetInputs := []datav1alpha1.Dataset{
-
- {
-
- ObjectMeta: metav1.ObjectMeta{
-
- Name: "hbase",
-
- Namespace: "fluid",
- },
- },
- }
-
- for _, datasetInput := range datasetInputs {
-
- testObjs = append(testObjs, datasetInput.DeepCopy())
-
- }
-
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", "goosefs")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
-
- engine := GooseFSEngine{
-
- name: "hbase",
-
- namespace: "fluid",
-
- Client: client,
-
- Log: fake.NullLogger(),
-
- runtime: &datav1alpha1.GooseFSRuntime{
-
- Spec: datav1alpha1.GooseFSRuntimeSpec{
-
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
-
- Enabled: false,
- },
-
- Master: datav1alpha1.GooseFSCompTemplateSpec{
-
- Replicas: 2,
- },
- },
- },
- runtimeInfo: runtimeInfo,
- }
-
- err = portallocator.SetupRuntimePortAllocator(client, &net.PortRange{Base: 10, Size: 100}, "bitmap", GetReservedPorts)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- // check release found
- patches := gomonkey.ApplyFunc(helm.CheckRelease, mockExecCheckReleaseCommonFound)
- defer patches.Reset()
-
- err = engine.setupMasterInternal()
-
- if err != nil {
-
- t.Errorf("fail to exec check helm release")
-
- }
-
- // check release error
- patches.ApplyFunc(helm.CheckRelease, mockExecCheckReleaseErr)
-
- err = engine.setupMasterInternal()
-
- if err == nil {
-
- t.Errorf("fail to catch the error")
-
- }
-
- // check release not found
- patches.ApplyFunc(helm.CheckRelease, mockExecCheckReleaseCommonNotFound)
-
- // install release with error
- patches.ApplyFunc(helm.InstallRelease, mockExecInstallReleaseErr)
-
- err = engine.setupMasterInternal()
-
- if err == nil {
-
- t.Errorf("fail to catch the error")
-
- }
-
- // install release successfully
- patches.ApplyFunc(helm.InstallRelease, mockExecInstallReleaseCommon)
-
- err = engine.setupMasterInternal()
-
- fmt.Println(err)
-
- if err != nil {
-
- t.Errorf("fail to install release")
-
- }
-
-}
-
-func TestGenerateGooseFSValueFile(t *testing.T) {
- allixioruntime := &datav1alpha1.GooseFSRuntime{
-
- ObjectMeta: metav1.ObjectMeta{
-
- Name: "hbase",
-
- Namespace: "fluid",
- },
- }
-
- testObjs := []runtime.Object{}
-
- testObjs = append(testObjs, (*allixioruntime).DeepCopy())
-
- datasetInputs := []datav1alpha1.Dataset{
-
- {
-
- ObjectMeta: metav1.ObjectMeta{
-
- Name: "hbase",
-
- Namespace: "fluid",
- },
- },
- }
-
- for _, datasetInput := range datasetInputs {
-
- testObjs = append(testObjs, datasetInput.DeepCopy())
-
- }
-
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- runtimeInfo, err := base.BuildRuntimeInfo("hbase", "fluid", "goosefs")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
-
- engine := GooseFSEngine{
-
- name: "hbase",
-
- namespace: "fluid",
-
- Client: client,
-
- Log: fake.NullLogger(),
-
- runtime: &datav1alpha1.GooseFSRuntime{
-
- Spec: datav1alpha1.GooseFSRuntimeSpec{
-
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
-
- Enabled: false,
- },
-
- Master: datav1alpha1.GooseFSCompTemplateSpec{
-
- Replicas: 2,
- },
- },
- },
- runtimeInfo: runtimeInfo,
- }
-
- err = portallocator.SetupRuntimePortAllocator(client, &net.PortRange{Base: 10, Size: 50}, "bitmap", GetReservedPorts)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- _, err = engine.generateGooseFSValueFile(allixioruntime)
-
- if err != nil {
-
- t.Errorf("fail to exec the function")
-
- }
-
-}
-
-func TestGetConfigmapName(t *testing.T) {
-
- engine := GooseFSEngine{
-
- name: "hbase",
-
- engineImpl: "goosefs",
- }
-
- expectedResult := "hbase-goosefs-values"
-
- if engine.getHelmValuesConfigMapName() != expectedResult {
-
- t.Errorf("fail to get the configmap name")
-
- }
-
-}
diff --git a/pkg/ddc/goosefs/master_test.go b/pkg/ddc/goosefs/master_test.go
deleted file mode 100644
index 7ef56016d0e..00000000000
--- a/pkg/ddc/goosefs/master_test.go
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- v1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/ptr"
-)
-
-func TestCheckMasterReady(t *testing.T) {
- statefulsetInputs := []v1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-master",
- Namespace: "fluid",
- },
- Spec: v1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: v1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-master",
- Namespace: "fluid",
- },
- Spec: v1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: v1.StatefulSetStatus{
- ReadyReplicas: 0,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop-master",
- Namespace: "fluid",
- },
- Spec: v1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: v1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, statefulset := range statefulsetInputs {
- testObjs = append(testObjs, statefulset.DeepCopy())
- }
-
- goosefsRuntimeInputs := []datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Replicas: 1,
- },
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
- Enabled: true,
- },
- },
- Status: datav1alpha1.RuntimeStatus{
- APIGatewayStatus: &datav1alpha1.APIGatewayStatus{
- Endpoint: "test-endpoint",
- },
- },
- },
- }
- for _, goosefsRuntime := range goosefsRuntimeInputs {
- testObjs = append(testObjs, goosefsRuntime.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "spark",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- Helper: ctrl.BuildHelper(&base.RuntimeInfo{}, client, fake.NullLogger()),
- },
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- Helper: ctrl.BuildHelper(&base.RuntimeInfo{}, client, fake.NullLogger()),
- },
- {
- name: "hadoop",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- Helper: ctrl.BuildHelper(&base.RuntimeInfo{}, client, fake.NullLogger()),
- },
- }
-
- var testCases = []struct {
- engine GooseFSEngine
- expectedResult bool
- }{
- {
- engine: engines[0],
- expectedResult: true,
- },
- {
- engine: engines[1],
- expectedResult: false,
- },
- }
-
- for _, test := range testCases {
- if ready, _ := test.engine.CheckMasterReady(); ready != test.expectedResult {
- t.Errorf("fail to exec the function")
- return
- }
- if !test.expectedResult {
- continue
- }
- goosefsRuntime, err := test.engine.getRuntime()
- if err != nil {
- t.Errorf("fail to get runtime %v", err)
- return
- }
- if len(goosefsRuntime.Status.Conditions) == 0 {
- t.Errorf("fail to update the runtime conditions")
- return
- }
- }
-}
-
-func TestShouldSetupMaster(t *testing.T) {
- goosefsRuntimeInputs := []datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- Status: datav1alpha1.RuntimeStatus{
- MasterPhase: datav1alpha1.RuntimePhaseNotReady,
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.RuntimeStatus{
- MasterPhase: datav1alpha1.RuntimePhaseNone,
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, goosefsRuntime := range goosefsRuntimeInputs {
- testObjs = append(testObjs, goosefsRuntime.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "spark",
- namespace: "fluid",
- Client: client,
- },
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- },
- }
-
- var testCases = []struct {
- engine GooseFSEngine
- expectedResult bool
- }{
- {
- engine: engines[0],
- expectedResult: false,
- },
- {
- engine: engines[1],
- expectedResult: true,
- },
- }
-
- for _, test := range testCases {
- if should, _ := test.engine.ShouldSetupMaster(); should != test.expectedResult {
- t.Errorf("fail to exec the function")
- return
- }
- }
-}
-
-func TestSetupMaster(t *testing.T) {
- statefulSetInputs := []v1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-master",
- Namespace: "fluid",
- },
- Status: v1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- }
-
- testObjs := []runtime.Object{}
- for _, statefulSet := range statefulSetInputs {
- testObjs = append(testObjs, statefulSet.DeepCopy())
- }
-
- goosefsRuntimeInputs := []datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- },
- }
- for _, goosefsRuntime := range goosefsRuntimeInputs {
- testObjs = append(testObjs, goosefsRuntime.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "spark",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- }
-
- var testCases = []struct {
- engine GooseFSEngine
- expectedSelector string
- expectedConfigMapName string
- }{
- {
- engine: engines[0],
- expectedConfigMapName: "spark--values",
- expectedSelector: "app=goosefs,release=spark,role=goosefs-worker",
- },
- }
-
- for _, test := range testCases {
- if err := test.engine.SetupMaster(); err != nil {
- t.Errorf("fail to exec the func with error %v", err)
- return
- }
- goosefsRuntime, err := test.engine.getRuntime()
- if err != nil {
- t.Errorf("fail to get the runtime")
- return
- }
- if goosefsRuntime.Status.Selector != test.expectedSelector || goosefsRuntime.Status.ValueFileConfigmap != test.expectedConfigMapName ||
- len(goosefsRuntime.Status.Conditions) == 0 {
- t.Errorf("fail to update the runtime")
- return
- }
- }
-}
diff --git a/pkg/ddc/goosefs/metadata.go b/pkg/ddc/goosefs/metadata.go
deleted file mode 100644
index e7924fb7cc0..00000000000
--- a/pkg/ddc/goosefs/metadata.go
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "time"
-
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "k8s.io/client-go/util/retry"
-)
-
-// SyncMetadata syncs metadata if necessary
-// For GooseFS Engine, metadata sync is an asynchronous operation, which means
-// you should call this function periodically to make sure the function actually takes effect.
-func (e *GooseFSEngine) SyncMetadata() (err error) {
- should, err := e.shouldSyncMetadata()
- if err != nil {
- e.Log.Error(err, "Failed to check if should sync metadata")
- return
- }
- // should sync metadata
- if should {
- should, err = e.shouldRestoreMetadata()
- if err != nil {
- e.Log.Error(err, "Failed to check if should restore metadata, will not restore!")
- should = false
- }
- // should restore metadata from backup
- if should {
- err = e.RestoreMetadataInternal()
- if err == nil {
- return
- }
- }
- // load metadata again
- return e.syncMetadataInternal()
- }
- return
-}
-
-// shouldSyncMetadata checks dataset's UfsTotal to decide whether should sync metadata
-func (e *GooseFSEngine) shouldSyncMetadata() (should bool, err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- should = false
- return should, err
- }
-
- //todo(xuzhihao): option to enable/disable automatic metadata sync
- //todo: periodical metadata sync
- if dataset.Status.UfsTotal != "" && dataset.Status.UfsTotal != MetadataSyncNotDoneMsg {
- e.Log.V(1).Info("dataset ufs is ready",
- "dataset name", dataset.Name,
- "dataset namespace", dataset.Namespace,
- "ufstotal", dataset.Status.UfsTotal)
- should = false
- return should, nil
- }
- should = true
- return should, nil
-}
-
-// shouldRestoreMetadata checks whether should restore metadata from backup
-func (e *GooseFSEngine) shouldRestoreMetadata() (should bool, err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return
- }
- if dataset.Spec.DataRestoreLocation != nil {
- e.Log.V(1).Info("restore metadata of dataset from backup",
- "dataset name", dataset.Name,
- "dataset namespace", dataset.Namespace,
- "DataRestoreLocation", dataset.Spec.DataRestoreLocation)
- should = true
- return
- }
- return
-}
-
-// RestoreMetadataInternal restore metadata from backup
-// there are three kinds of data to be restored
-// 1. metadata of dataset
-// 2. ufsTotal info of dataset
-// 3. fileNum info of dataset
-// if 1 fails, the goosefs master will fail directly, if 2 or 3 fails, fluid will get the info from goosefs again
-func (e *GooseFSEngine) RestoreMetadataInternal() (err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return
- }
- metadataInfoRestoreFile := ""
- pvcName, path, err := utils.ParseBackupRestorePath(dataset.Spec.DataRestoreLocation.Path)
- if err != nil {
- e.Log.Error(err, "restore path cannot analyse", "Path", dataset.Spec.DataRestoreLocation.Path)
- return
- } else {
- if pvcName != "" {
- metadataInfoRestoreFile = "/pvc" + path + e.GetMetadataInfoFileName()
- } else {
- metadataInfoRestoreFile = "/host/" + e.GetMetadataInfoFileName()
- }
- }
-
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
-
- ufsTotal, err := fileUtils.QueryMetaDataInfoIntoFile(operations.UfsTotal, metadataInfoRestoreFile)
- if err != nil {
- e.Log.Error(err, "Failed to get UfsTotal from restore file", "name", e.name, "namespace", e.namespace)
- return
- }
- ufsTotalFloat, err := strconv.ParseFloat(ufsTotal, 64)
- if err != nil {
- e.Log.Error(err, "Failed to change UfsTotal to float", "name", e.name, "namespace", e.namespace)
- return
- }
- ufsTotal = utils.BytesSize(ufsTotalFloat)
-
- fileNum, err := fileUtils.QueryMetaDataInfoIntoFile(operations.FileNum, metadataInfoRestoreFile)
- if err != nil {
- e.Log.Error(err, "Failed to get fileNum from restore file", "name", e.name, "namespace", e.namespace)
- return
- }
-
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
- datasetToUpdate := dataset.DeepCopy()
- datasetToUpdate.Status.UfsTotal = ufsTotal
- datasetToUpdate.Status.FileNum = fileNum
- if !reflect.DeepEqual(datasetToUpdate, dataset) {
- err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
- if err != nil {
- return
- }
- }
- return
- })
- if err != nil {
- e.Log.Error(err, "Failed to update UfsTotal and FileNum of the dataset")
- return
- }
- return
-}
-
-// syncMetadataInternal do the actual work of metadata sync
-// At any time, there is at most one goroutine working on metadata sync. First call to
-// this function will start a goroutine including the following two steps:
-// 1. load metadata
-// 2. get total size of UFSs
-//
-// Any following calls to this function will try to get result of the working goroutine with a timeout, which
-// ensures the function won't block the following Sync operations(e.g. CheckAndUpdateRuntimeStatus) for a long time.
-func (e *GooseFSEngine) syncMetadataInternal() (err error) {
- if e.MetadataSyncDoneCh != nil {
- // Either get result from channel or timeout
- select {
- case result := <-e.MetadataSyncDoneCh:
- defer func() {
- e.MetadataSyncDoneCh = nil
- }()
- e.Log.Info("Get result from MetadataSyncDoneCh", "result", result)
- if result.Done {
- e.Log.Info("Metadata sync succeeded", "period", time.Since(result.StartTime))
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return
- }
- datasetToUpdate := dataset.DeepCopy()
- datasetToUpdate.Status.UfsTotal = result.UfsTotal
- datasetToUpdate.Status.FileNum = result.FileNum
- if !reflect.DeepEqual(datasetToUpdate, dataset) {
- err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
- if err != nil {
- return
- }
- }
- return
- })
- if err != nil {
- e.Log.Error(err, "Failed to update UfsTotal and FileNum of the dataset")
- return err
- }
- } else {
- e.Log.Error(result.Err, "Metadata sync failed")
- return result.Err
- }
- case <-time.After(CheckMetadataSyncDoneTimeoutMillisec * time.Millisecond):
- e.Log.V(1).Info("Metadata sync still in progress")
- }
- } else {
- // Metadata sync haven't started
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return
- }
- datasetToUpdate := dataset.DeepCopy()
- datasetToUpdate.Status.UfsTotal = MetadataSyncNotDoneMsg
- datasetToUpdate.Status.FileNum = MetadataSyncNotDoneMsg
- if !reflect.DeepEqual(dataset, datasetToUpdate) {
- err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
- if err != nil {
- return
- }
- }
- return
- })
- if err != nil {
- e.Log.Error(err, "Failed to set UfsTotal to METADATA_SYNC_NOT_DONE_MSG")
- }
- e.MetadataSyncDoneCh = make(chan base.MetadataSyncResult)
- go func(resultChan chan base.MetadataSyncResult) {
- defer close(resultChan)
- result := base.MetadataSyncResult{
- StartTime: time.Now(),
- UfsTotal: "",
- }
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- e.Log.Error(err, "Can't get dataset when syncing metadata", "name", e.name, "namespace", e.namespace)
- result.Err = err
- result.Done = false
- resultChan <- result
- return
- }
-
- e.Log.Info("Metadata Sync starts", "dataset namespace", e.namespace, "dataset name", e.name)
-
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
-
- // sync local dir if necessary
- for _, mount := range dataset.Spec.Mounts {
- if common.IsFluidNativeScheme(mount.MountPoint) {
- localDirPath := utils.UFSPathBuilder{}.GenLocalStoragePath(mount)
- e.Log.Info(fmt.Sprintf("Syncing local dir, path: %s", localDirPath))
- err = fileUtils.SyncLocalDir(localDirPath)
- if err != nil {
- e.Log.Error(err, fmt.Sprintf("Sync local dir failed when syncing metadata, path: %s", localDirPath), "name", e.name, "namespace", e.namespace)
- result.Err = err
- result.Done = false
- resultChan <- result
- return
- }
- }
- }
- // load metadata
- err = fileUtils.LoadMetadataWithoutTimeout("/")
- if err != nil {
- e.Log.Error(err, "LoadMetadata failed when syncing metadata", "name", e.name, "namespace", e.namespace)
- result.Err = err
- result.Done = false
- resultChan <- result
- return
- }
- result.Done = true
-
- datasetUFSTotalBytes, err := e.TotalStorageBytes()
- if err != nil {
- e.Log.Error(err, "Get Ufs Total size failed when syncing metadata", "name", e.name, "namespace", e.namespace)
- result.Done = false
- } else {
- result.UfsTotal = utils.BytesSize(float64(datasetUFSTotalBytes))
- }
- fileNum, err := e.getDataSetFileNum()
- if err != nil {
- e.Log.Error(err, "Get File Num failed when syncing metadata", "name", e.name, "namespace", e.namespace)
- result.Done = false
- } else {
- result.FileNum = fileNum
- }
-
- if !result.Done {
- result.Err = errors.New("GetMetadataInfoFailed")
- } else {
- result.Err = nil
- }
- resultChan <- result
- }(e.MetadataSyncDoneCh)
- }
- return
-}
diff --git a/pkg/ddc/goosefs/metadata_test.go b/pkg/ddc/goosefs/metadata_test.go
deleted file mode 100644
index 0d9c839dd86..00000000000
--- a/pkg/ddc/goosefs/metadata_test.go
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "errors"
- "testing"
-
- "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func TestSyncMetadata(t *testing.T) {
- QueryMetaDataInfoIntoFileCommon := func(a operations.GooseFSFileUtils, key operations.KeyOfMetaDataFile, filename string) (value string, err error) {
- return "1024", nil
- }
-
- datasetInputs := []datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "2Gi",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- DataRestoreLocation: &datav1alpha1.DataRestoreLocation{
- Path: "local:///host1/erf",
- NodeName: "test-node",
- },
- },
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "",
- },
- },
- }
-
- testObjs := []runtime.Object{}
- for _, datasetInput := range datasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- {
- name: "spark",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- }
-
- for _, engine := range engines {
- err := engine.SyncMetadata()
- if err != nil {
- t.Errorf("fail to exec the function")
- }
- }
-
- engine := GooseFSEngine{
- name: "hadoop",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- }
-
- patches := gomonkey.ApplyPrivateMethod(operations.GooseFSFileUtils{}, "QueryMetaDataInfoIntoFile", QueryMetaDataInfoIntoFileCommon)
- defer patches.Reset()
-
- err := engine.SyncMetadata()
- if err != nil {
- t.Errorf("fail to exec function RestoreMetadataInternal")
- }
-}
-
-func TestShouldSyncMetadata(t *testing.T) {
- datasetInputs := []datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "2Gi",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "",
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range datasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- {
- name: "spark",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- }
-
- var testCases = []struct {
- engine GooseFSEngine
- expectedShould bool
- }{
- {
- engine: engines[0],
- expectedShould: false,
- },
- {
- engine: engines[1],
- expectedShould: true,
- },
- }
-
- for _, test := range testCases {
- should, err := test.engine.shouldSyncMetadata()
- if err != nil || should != test.expectedShould {
- t.Errorf("fail to exec the function")
- }
- }
-}
-
-func TestShouldRestoreMetadata(t *testing.T) {
- datasetInputs := []datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- DataRestoreLocation: &datav1alpha1.DataRestoreLocation{
- Path: "local:///host1/erf",
- NodeName: "test-node",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range datasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
-
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- {
- name: "spark",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- }
-
- var testCases = []struct {
- engine GooseFSEngine
- expectedShould bool
- }{
- {
- engine: engines[0],
- expectedShould: true,
- },
- {
- engine: engines[1],
- expectedShould: false,
- },
- }
- for _, test := range testCases {
- should, err := test.engine.shouldRestoreMetadata()
- if err != nil || should != test.expectedShould {
- t.Errorf("fail to exec the function")
- }
- }
-}
-
-func TestRestoreMetadataInternal(t *testing.T) {
- QueryMetaDataInfoIntoFileCommon := func(a operations.GooseFSFileUtils, key operations.KeyOfMetaDataFile, filename string) (value string, err error) {
- return "1024", nil
- }
- QueryMetaDataInfoIntoFileErr := func(a operations.GooseFSFileUtils, key operations.KeyOfMetaDataFile, filename string) (value string, err error) {
- return "", errors.New("fail to query MetaDataInfo")
- }
-
- datasetInputs := []datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- DataRestoreLocation: &datav1alpha1.DataRestoreLocation{
- Path: "local:///host1/erf",
- NodeName: "test-node",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.DatasetSpec{
- DataRestoreLocation: &datav1alpha1.DataRestoreLocation{
- Path: "pvc://pvc1/erf",
- NodeName: "test-node",
- },
- },
- },
- }
- testObjs := []runtime.Object{}
- for _, datasetInput := range datasetInputs {
- testObjs = append(testObjs, datasetInput.DeepCopy())
- }
-
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- engines := []GooseFSEngine{
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- {
- name: "hbase",
- namespace: "fluid",
- Client: client,
- Log: fake.NullLogger(),
- },
- }
-
- patches := gomonkey.ApplyPrivateMethod(operations.GooseFSFileUtils{}, "QueryMetaDataInfoIntoFile", QueryMetaDataInfoIntoFileErr)
- defer patches.Reset()
-
- err := engines[0].RestoreMetadataInternal()
- if err == nil {
- t.Errorf("fail to catch the error")
- }
-
- patches.ApplyPrivateMethod(operations.GooseFSFileUtils{}, "QueryMetaDataInfoIntoFile", QueryMetaDataInfoIntoFileCommon)
-
- var testCases = []struct {
- engine GooseFSEngine
- expectedDatasetUfsTotal string
- expectedDatasetFileNum string
- }{
- {
- engine: engines[0],
- expectedDatasetUfsTotal: "1.00KiB",
- expectedDatasetFileNum: "1024",
- },
- {
- engine: engines[1],
- expectedDatasetUfsTotal: "1.00KiB",
- expectedDatasetFileNum: "1024",
- },
- }
-
- for _, test := range testCases {
- err = test.engine.RestoreMetadataInternal()
- if err != nil {
- t.Errorf("fail to exec function RestoreMetadataInternal")
- }
- }
-}
diff --git a/pkg/ddc/goosefs/node.go b/pkg/ddc/goosefs/node.go
deleted file mode 100644
index 46624a3e642..00000000000
--- a/pkg/ddc/goosefs/node.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/lifecycle"
-)
-
-// SyncScheduleInfoToCacheNodes syncs schedule info to nodes
-func (e *GooseFSEngine) SyncScheduleInfoToCacheNodes() (err error) {
- return lifecycle.SyncScheduleInfoToCacheNodes(e.runtimeInfo, e.Client)
-}
diff --git a/pkg/ddc/goosefs/node_test.go b/pkg/ddc/goosefs/node_test.go
deleted file mode 100644
index 65c445025a8..00000000000
--- a/pkg/ddc/goosefs/node_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "fmt"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/utils/ptr"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-const (
- testNodeNamespace = "big-data"
- testNodeLabelApp = "goosefs"
- testNodeLabelRole = "goosefs-worker"
- testNodeAPIVersion = "apps/v1"
- testNodeKindSts = "StatefulSet"
- testNodeLabelDataset = "fluid.io/dataset"
- testNodeLabelSelector = "%s=true"
-)
-
-func getTestGooseFSEngineNode(c client.Client, name string, namespace string, withRunTime bool) *GooseFSEngine {
- engine := &GooseFSEngine{
- runtime: nil,
- name: name,
- namespace: namespace,
- Client: c,
- runtimeInfo: nil,
- Log: fake.NullLogger(),
- }
- if withRunTime {
- engine.runtime = &v1alpha1.GooseFSRuntime{}
- engine.runtimeInfo, _ = base.BuildRuntimeInfo(name, namespace, common.GooseFSRuntime)
- }
- return engine
-}
-
-var _ = Describe("GooseFSEngine", func() {
- Describe("SyncScheduleInfoToCacheNodes", func() {
- type fields struct {
- worker *appsv1.StatefulSet
- pods []*v1.Pod
- nodes []*v1.Node
- name string
- namespace string
- }
-
- testcaseCnt := 0
- makeDatasetResources := func(dsName string, dsNamespace string, stsPodNodeNames []string) fields {
- testcaseCnt++
- ret := fields{
- name: dsName,
- namespace: dsNamespace,
- worker: &appsv1.StatefulSet{
- TypeMeta: metav1.TypeMeta{
- Kind: testNodeKindSts,
- APIVersion: testNodeAPIVersion,
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: dsName + "-worker",
- Namespace: dsNamespace,
- UID: types.UID(fmt.Sprintf("uid%d", testcaseCnt)),
- },
- Spec: appsv1.StatefulSetSpec{
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "app": testNodeLabelApp,
- "role": testNodeLabelRole,
- "release": dsName,
- },
- },
- },
- },
- pods: []*v1.Pod{},
- }
-
- for idx, nodeName := range stsPodNodeNames {
- ret.pods = append(ret.pods, &v1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-worker-%d", dsName, idx),
- Namespace: dsNamespace,
- OwnerReferences: []metav1.OwnerReference{{
- Kind: testNodeKindSts,
- APIVersion: testNodeAPIVersion,
- Name: dsName + "-worker",
- UID: types.UID(fmt.Sprintf("uid%d", testcaseCnt)),
- Controller: ptr.To(true),
- }},
- Labels: map[string]string{
- "app": testNodeLabelApp,
- "role": testNodeLabelRole,
- "release": dsName,
- testNodeLabelDataset: fmt.Sprintf("%s-%s", dsNamespace, dsName),
- },
- },
- Spec: v1.PodSpec{
- NodeName: nodeName,
- },
- })
- }
-
- return ret
- }
-
- fields1 := makeDatasetResources("spark", testNodeNamespace, []string{"node1"})
- fields1.nodes = append(fields1.nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}})
-
- fields2 := makeDatasetResources("hbase", testNodeNamespace, []string{"node2", "node3"})
- fields2.nodes = append(fields2.nodes,
- &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node3"}},
- &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"fluid.io/s-big-data-hbase": "true"}}},
- )
-
- fields3 := makeDatasetResources("hbase-a", testNodeNamespace, []string{"node4", "node5"})
- fields3.pods[1].OwnerReferences = []metav1.OwnerReference{}
- fields3.nodes = append(fields3.nodes,
- &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node5"}},
- &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: map[string]string{"fluid.io/s-big-data-hbase-a": "true"}}},
- )
-
- fields4 := makeDatasetResources("hbase-b", testNodeNamespace, []string{})
- fields4.nodes = append(fields4.nodes,
- &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node6", Labels: map[string]string{
- "fluid.io/s-big-data-hbase-b": "true",
- "fluid.io/s-goosefs-big-data-hbase-b": "true",
- }}},
- )
-
- DescribeTable("should sync schedule info to cache nodes correctly",
- func(f fields, expectedNodeNames []string) {
- runtimeObjs := []runtime.Object{}
- runtimeObjs = append(runtimeObjs, f.worker)
-
- for _, pod := range f.pods {
- runtimeObjs = append(runtimeObjs, pod)
- }
- for _, node := range f.nodes {
- runtimeObjs = append(runtimeObjs, node)
- }
-
- c := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)
- engine := getTestGooseFSEngineNode(c, f.name, f.namespace, true)
-
- err := engine.SyncScheduleInfoToCacheNodes()
- Expect(err).NotTo(HaveOccurred())
-
- nodeList := &v1.NodeList{}
- datasetLabels, err := labels.Parse(fmt.Sprintf(testNodeLabelSelector, engine.runtimeInfo.GetCommonLabelName()))
- Expect(err).NotTo(HaveOccurred())
-
- err = c.List(context.TODO(), nodeList, &client.ListOptions{
- LabelSelector: datasetLabels,
- })
- Expect(err).NotTo(HaveOccurred())
-
- nodeNames := []string{}
- for _, node := range nodeList.Items {
- nodeNames = append(nodeNames, node.Name)
- }
-
- if len(expectedNodeNames) == 0 && len(nodeNames) == 0 {
- return
- }
-
- Expect(nodeNames).To(Equal(expectedNodeNames),
- fmt.Sprintf("wanted %v, got %v", expectedNodeNames, nodeNames))
- },
- Entry("create", fields1, []string{"node1"}),
- Entry("add", fields2, []string{"node2", "node3"}),
- Entry("noController", fields3, []string{"node4"}),
- Entry("remove", fields4, []string{}),
- )
- })
-})
diff --git a/pkg/ddc/goosefs/operate.go b/pkg/ddc/goosefs/operate.go
deleted file mode 100644
index 11ad890fe4f..00000000000
--- a/pkg/ddc/goosefs/operate.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "github.com/fluid-cloudnative/fluid/pkg/dataoperation"
- "github.com/fluid-cloudnative/fluid/pkg/errors"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-func (e *GooseFSEngine) GetDataOperationValueFile(ctx cruntime.ReconcileRequestContext, operation dataoperation.OperationInterface) (valueFileName string, err error) {
- operateType := operation.GetOperationType()
- object := operation.GetOperationObject()
-
- if operateType == dataoperation.DataBackupType {
- valueFileName, err = e.generateDataBackupValueFile(ctx, object)
- return valueFileName, err
- }
-
- if operateType == dataoperation.DataLoadType {
- valueFileName, err = e.generateDataLoadValueFile(ctx, object)
- return valueFileName, err
- }
-
- return "", errors.NewNotSupported(
- schema.GroupResource{
- Group: object.GetObjectKind().GroupVersionKind().Group,
- Resource: object.GetObjectKind().GroupVersionKind().Kind,
- }, "GooseFSRuntime")
-}
diff --git a/pkg/ddc/goosefs/operations/base.go b/pkg/ddc/goosefs/operations/base.go
deleted file mode 100644
index 5ce4878793c..00000000000
--- a/pkg/ddc/goosefs/operations/base.go
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import (
- "context"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/fluid-cloudnative/fluid/pkg/utils/cmdguard"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "github.com/go-logr/logr"
-)
-
-type GooseFSFileUtils struct {
- podName string
- namespace string
- container string
- log logr.Logger
-}
-
-func NewGooseFSFileUtils(podName string, containerName string, namespace string, log logr.Logger) GooseFSFileUtils {
-
- return GooseFSFileUtils{
- podName: podName,
- namespace: namespace,
- container: containerName,
- log: log,
- }
-}
-
-// IsExist checks if the goosefsPath exists
-func (a GooseFSFileUtils) IsExist(goosefsPath string) (found bool, err error) {
- var (
- command = []string{"goosefs", "fs", "ls", goosefsPath}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(command, true)
- if err != nil {
- if strings.Contains(stdout, "does not exist") {
- err = nil
- } else {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
- } else {
- found = true
- }
-
- return
-}
-
-// Get summary info of the GooseFS Engine
-func (a GooseFSFileUtils) ReportSummary() (summary string, err error) {
- var (
- command = []string{"goosefs", "fsadmin", "report", "summary"}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return stdout, err
- }
- return stdout, err
-}
-
-// Load the metadata without timeout
-func (a GooseFSFileUtils) LoadMetadataWithoutTimeout(goosefsPath string) (err error) {
- var (
- command = []string{"goosefs", "fs", "loadMetadata", "-R", goosefsPath}
- stdout string
- stderr string
- )
-
- start := time.Now()
- stdout, stderr, err = a.execWithoutTimeout(command, false)
- duration := time.Since(start)
- a.log.Info("Async Load Metadata took times to run", "period", duration)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- } else {
- a.log.Info("Async Load Metadata finished", "stdout", stdout)
- }
- return
-}
-
-// LoadMetaData loads the metadata.
-func (a GooseFSFileUtils) LoadMetaData(goosefsPath string, sync bool) (err error) {
- var (
- // command = []string{"goosefs", "fs", "-Dgoosefs.user.file.metadata.sync.interval=0", "ls", "-R", goosefsPath}
- // command = []string{"goosefs", "fs", "-Dgoosefs.user.file.metadata.sync.interval=0", "count", goosefsPath}
- command []string
- stdout string
- stderr string
- )
-
- if sync {
- command = []string{"goosefs", "fs", "-Dgoosefs.user.file.metadata.sync.interval=0", "ls", "-R", goosefsPath}
- } else {
- command = []string{"goosefs", "fs", "ls", "-R", goosefsPath}
- }
-
- start := time.Now()
- stdout, stderr, err = a.exec(command, false)
- duration := time.Since(start)
- a.log.Info("Load MetaData took times to run", "period", duration)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- return
-}
-
-/*
-MetadataInfoFile is a yaml file to save the metadata info of dataset, such as ufs total and fileNum
-it is in the form of:
- dataset:
- namespace:
- ufstotal:
- filenum:
-*/
-
-type KeyOfMetaDataFile string
-
-var (
- DatasetName KeyOfMetaDataFile = "dataset"
- Namespace KeyOfMetaDataFile = "namespace"
- UfsTotal KeyOfMetaDataFile = "ufstotal"
- FileNum KeyOfMetaDataFile = "filenum"
-)
-
-// QueryMetadataInfoFile query the metadata info file.
-func (a GooseFSFileUtils) QueryMetaDataInfoIntoFile(key KeyOfMetaDataFile, filename string) (value string, err error) {
- line := ""
- switch key {
- case DatasetName:
- line = "1p"
- case Namespace:
- line = "2p"
- case UfsTotal:
- line = "3p"
- case FileNum:
- line = "4p"
- default:
- a.log.Error(errors.New("the key not in metadatafile"), "key", key)
- }
- var (
- str = "sed -n '" + line + "' " + filename
- command = []string{"bash", "-c", str}
- stdout string
- stderr string
- )
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- } else {
- value = strings.TrimPrefix(stdout, string(key)+": ")
- }
- return
-}
-
-func (a GooseFSFileUtils) Mkdir(goosefsPath string) (err error) {
- var (
- command = []string{"goosefs", "fs", "mkdir", goosefsPath}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- return
-}
-
-func (a GooseFSFileUtils) Mount(goosefsPath string,
- ufsPath string,
- options map[string]string,
- readOnly bool,
- shared bool) (err error) {
-
- // exist, expectedErr := a.IsExist(goosefsPath)
- // if expectedErr != nil {
- // return expectedErr
- // }
-
- // if !exist {
- // expectedErr = a.Mkdir(goosefsPath)
- // if expectedErr != nil {
- // return expectedErr
- // }
- // }
-
- var (
- command = []string{"goosefs", "fs", "mount"}
- stderr string
- stdout string
- )
-
- if readOnly {
- command = append(command, "--readonly")
- }
-
- if shared {
- command = append(command, "--shared")
- }
-
- for key, value := range options {
- command = append(command, "--option", fmt.Sprintf("%s=%s", key, value))
- }
-
- command = append(command, goosefsPath, ufsPath)
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- return
-}
-
-// UnMount execute command `goosefs fs umount $path` to unmount mountpoint
-func (a GooseFSFileUtils) UnMount(goosefsPath string) (err error) {
- var (
- command = []string{"goosefs", "fs", "unmount"}
- stderr string
- stdout string
- )
-
- command = append(command, goosefsPath)
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- return
-}
-
-func (a GooseFSFileUtils) IsMounted(goosefsPath string) (mounted bool, err error) {
- var (
- command = []string{"goosefs", "fs", "mount"}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(command, true)
- if err != nil {
- return mounted, fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- }
-
- results := strings.Split(stdout, "\n")
-
- for _, line := range results {
- fields := strings.Fields(line)
- a.log.Info("parse output of isMounted", "goosefsPath", goosefsPath, "fields", fields)
- if fields[2] == goosefsPath {
- mounted = true
- return mounted, nil
- }
- }
-
- // pattern := fmt.Sprintf(" on %s ", goosefsPath)
- // if strings.Contains(stdout, pattern) {
- // mounted = true
- // }
-
- return mounted, err
-}
-
-// Check if the GooseFS is ready by running `goosefs fsadmin report` command
-func (a GooseFSFileUtils) Ready() (ready bool) {
- var (
- command = []string{"goosefs", "fsadmin", "report"}
- )
-
- _, _, err := a.exec(command, true)
- if err == nil {
- ready = true
- }
-
- return ready
-}
-
-func (a GooseFSFileUtils) Du(goosefsPath string) (ufs int64, cached int64, cachedPercentage string, err error) {
- var (
- command = []string{"goosefs", "fs", "du", "-s", goosefsPath}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
- str := strings.Split(stdout, "\n")
-
- if len(str) != 2 {
- err = fmt.Errorf("failed to parse %s in Du method", str)
- return
- }
-
- data := strings.Fields(str[1])
- if len(data) != 4 {
- err = fmt.Errorf("failed to parse %s in Du method", data)
- return
- }
-
- ufs, err = strconv.ParseInt(data[0], 10, 64)
- if err != nil {
- return
- }
-
- cached, err = strconv.ParseInt(data[1], 10, 64)
- if err != nil {
- return
- }
-
- cachedPercentage = strings.TrimLeft(data[2], "(")
- cachedPercentage = strings.TrimRight(cachedPercentage, ")")
-
- return
-}
-
-// The count of the GooseFS Filesystem
-func (a GooseFSFileUtils) Count(goosefsPath string) (fileCount int64, folderCount int64, total int64, err error) {
- var (
- command = []string{"goosefs", "fs", "count", goosefsPath}
- stdout string
- stderr string
- ufileCount, ufolderCount, utotal int64
- )
-
- stdout, stderr, err = a.execWithoutTimeout(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- // [File Count Folder Count Total Bytes 1152 4 154262709011]
- str := strings.Split(stdout, "\n")
-
- if len(str) != 2 {
- err = fmt.Errorf("failed to parse %s in Count method", str)
- return
- }
-
- data := strings.Fields(str[1])
- if len(data) != 3 {
- err = fmt.Errorf("failed to parse %s in Count method", data)
- return
- }
-
- ufileCount, err = strconv.ParseInt(data[0], 10, 64)
- if err != nil {
- return
- }
-
- ufolderCount, err = strconv.ParseInt(data[1], 10, 64)
- if err != nil {
- return
- }
-
- utotal, err = strconv.ParseInt(data[2], 10, 64)
- if err != nil {
- return
- }
-
- if ufileCount < 0 || ufolderCount < 0 || utotal < 0 {
- err = fmt.Errorf("the return value of Count method is negative")
- return
- }
-
- return ufileCount, ufolderCount, utotal, err
-}
-
-// file count of the GooseFS Filesystem (except folder)
-// use "goosefs fsadmin report metrics" for better performance
-func (a GooseFSFileUtils) GetFileCount() (fileCount int64, err error) {
- args := []string{"goosefs", "fsadmin", "report", "metrics", "|", "grep", "Master.FilesCompleted"}
- var (
- command = []string{"bash", "-c", strings.Join(args, " ")}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.execWithoutTimeout(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- // eg: Master.FilesCompleted (Type: COUNTER, Value: 6,367,897)
- outStrWithoutComma := strings.Replace(stdout, ",", "", -1)
- matchExp := regexp.MustCompile(`\d+`)
- fileCountStr := matchExp.FindString(outStrWithoutComma)
- fileCount, err = strconv.ParseInt(fileCountStr, 10, 64)
- if err != nil {
- return
- }
- return fileCount, nil
-}
-
-// ReportMetrics get goosefs metrics by running `goosefs fsadmin report metrics` command
-func (a GooseFSFileUtils) ReportMetrics() (metrics string, err error) {
- var (
- command = []string{"goosefs", "fsadmin", "report", "metrics"}
- stdout string
- stderr string
- )
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return stdout, err
- }
- return stdout, err
-}
-
-// ReportCapacity get goosefs capacity info by running `goosefs fsadmin report capacity` command
-func (a GooseFSFileUtils) ReportCapacity() (report string, err error) {
- var (
- command = []string{"goosefs", "fsadmin", "report", "capacity"}
- stdout string
- stderr string
- )
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return stdout, err
- }
- return stdout, err
-}
-
-func (a GooseFSFileUtils) MasterPodName() (masterPodName string, err error) {
- var (
- command = []string{"goosefs", "fsadmin", "report"}
- stdout string
- stderr string
- )
- stdout, stderr, err = a.exec(command, true)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return stdout, err
- }
-
- str := strings.Split(stdout, "\n")
- data := strings.Fields(str[1])
- address := strings.Split(data[2], ":")[0]
-
- return address, nil
-}
-
-// exec with timeout
-func (a GooseFSFileUtils) exec(command []string, verbose bool) (stdout string, stderr string, err error) {
- ctx, cancel := context.WithTimeout(context.TODO(), time.Second*1500)
- ch := make(chan string, 1)
- defer cancel()
-
- go func() {
- stdout, stderr, err = a.execWithoutTimeout(command, verbose)
- ch <- "done"
- }()
-
- select {
- case <-ch:
- a.log.Info("execute in time", "command", command)
- case <-ctx.Done():
- err = fmt.Errorf("timeout when executing %v", command)
- }
-
- return
-}
-
-// execWithoutTimeout
-func (a GooseFSFileUtils) execWithoutTimeout(command []string, verbose bool) (stdout string, stderr string, err error) {
- err = cmdguard.ValidateCommandSlice(command)
- if err != nil {
- return
- }
-
- stdout, stderr, err = kubeclient.ExecCommandInContainer(a.podName, a.container, a.namespace, command)
- if err != nil {
- a.log.Info("Stdout", "Command", command, "Stdout", stdout)
- a.log.Error(err, "Failed", "Command", command, "FailedReason", stderr)
- return
- }
- if verbose {
- a.log.Info("Stdout", "Command", command, "Stdout", stdout)
- }
-
- return
-}
diff --git a/pkg/ddc/goosefs/operations/base_test.go b/pkg/ddc/goosefs/operations/base_test.go
deleted file mode 100644
index c979e1906c9..00000000000
--- a/pkg/ddc/goosefs/operations/base_test.go
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
- "testing"
-
- "github.com/agiledragon/gomonkey/v2"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
-)
-
-const (
- NOT_EXIST = "not-exist"
- OTHER_ERR = "other-err"
- FINE = "fine"
- EXEC_ERR = "exec-err"
- NEGATIVE_RES = "negative-res"
- TOO_MANY_LINES = "too many lines"
- DATA_NUM = "data nums not match"
- PARSE_ERR = "parse err"
-)
-
-func TestNewGooseFSFileUtils(t *testing.T) {
- var expectedResult = GooseFSFileUtils{
- podName: "hbase",
- namespace: "default",
- container: "hbase-container",
- log: fake.NullLogger(),
- }
- result := NewGooseFSFileUtils("hbase", "hbase-container", "default", fake.NullLogger())
- if !reflect.DeepEqual(expectedResult, result) {
- t.Errorf("fail to create the GooseFSFileUtils, want: %v, got: %v", expectedResult, result)
- }
-}
-
-func TestGooseFSFileUtils_IsExist(t *testing.T) {
-
- mockExec := func(p1, p2, p3 string, p4 []string) (stdout string, stderr string, e error) {
-
- if strings.Contains(p4[3], NOT_EXIST) {
- return "does not exist", "", errors.New("does not exist")
-
- } else if strings.Contains(p4[3], OTHER_ERR) {
- return "", "", errors.New("other error")
- } else {
- return "", "", nil
- }
- }
-
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExec)
- defer patches.Reset()
-
- var tests = []struct {
- in string
- out bool
- noErr bool
- }{
- {NOT_EXIST, false, true},
- {OTHER_ERR, false, false},
- {FINE, true, true},
- }
- for _, test := range tests {
- found, err := GooseFSFileUtils{log: fake.NullLogger()}.IsExist(test.in)
- if found != test.out {
- t.Errorf("input parameter is %s,expected %t, got %t", test.in, test.out, found)
- }
- var noErr bool = (err == nil)
- if test.noErr != noErr {
- t.Errorf("input parameter is %s,expected noerr is %t", test.in, test.noErr)
- }
- }
-}
-
-func TestGooseFSFileUtils_Du(t *testing.T) {
- out1, out2, out3 := 111, 222, "%233"
- mockExec := func(p1, p2, p3 string, p4 []string) (stdout string, stderr string, e error) {
-
- if strings.Contains(p4[4], EXEC_ERR) {
- return "does not exist", "", errors.New("exec-error")
- } else if strings.Contains(p4[4], TOO_MANY_LINES) {
- return "1\n2\n3\n4\n", "1\n2\n3\n4\n", nil
- } else if strings.Contains(p4[4], DATA_NUM) {
- return "1\n2\t3", "1\n2\t3", nil
- } else if strings.Contains(p4[4], PARSE_ERR) {
- return "1\n1\tdududu\tbbb\t", "1\n1\t2\tbbb\t", nil
- } else {
- return fmt.Sprintf("first line!\n%d\t%d\t(%s)\t2333", out1, out2, out3), "", nil
- }
- }
-
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExec)
- defer patches.Reset()
-
- var tests = []struct {
- in string
- out1, out2 int64
- out3 string
- noErr bool
- }{
- {EXEC_ERR, 0, 0, "", false},
- {TOO_MANY_LINES, 0, 0, "", false},
- {DATA_NUM, 0, 0, "", false},
- {PARSE_ERR, 0, 0, "", false},
- {FINE, int64(out1), int64(out2), out3, true},
- }
- for _, test := range tests {
- o1, o2, o3, err := GooseFSFileUtils{log: fake.NullLogger()}.Du(test.in)
- var noErr bool = (err == nil)
- if test.noErr != noErr {
- t.Errorf("input parameter is %s,expected noerr is %t", test.in, test.noErr)
- }
- if test.noErr {
- if o1 != test.out1 || o2 != test.out2 || o3 != test.out3 {
- t.Fatalf("input parameter is %s,output is %d,%d, %s", test.in, o1, o2, o3)
- }
- }
- }
-}
-
-func TestGooseFSFileUtils_ReportSummary(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := GooseFSFileUtils{}
- _, err := a.ReportSummary()
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- _, err = a.ReportSummary()
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestLoadMetadataWithoutTimeout(t *testing.T) {
- ExecWithoutTimeoutCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary", "", nil
- }
- ExecWithoutTimeoutErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "execWithoutTimeout", ExecWithoutTimeoutErr)
- defer patches.Reset()
-
- a := GooseFSFileUtils{log: fake.NullLogger()}
- err := a.LoadMetadataWithoutTimeout("/")
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "execWithoutTimeout", ExecWithoutTimeoutCommon)
- err = a.LoadMetadataWithoutTimeout("/")
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestLoadMetaData(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := GooseFSFileUtils{log: fake.NullLogger()}
- err := a.LoadMetaData("/", true)
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- err = a.LoadMetaData("/", false)
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestQueryMetaDataInfoIntoFile(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := GooseFSFileUtils{log: fake.NullLogger()}
-
- keySets := []KeyOfMetaDataFile{DatasetName, Namespace, UfsTotal, FileNum, ""}
- for index, keySet := range keySets {
- _, err := a.QueryMetaDataInfoIntoFile(keySet, "/tmp/file")
- if err == nil {
- t.Errorf("%d check failure, want err, got nil", index)
- return
- }
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- for index, keySet := range keySets {
- _, err := a.QueryMetaDataInfoIntoFile(keySet, "/tmp/file")
- if err != nil {
- t.Errorf("%d check failure, want nil, got err: %v", index, err)
- return
- }
- }
-}
-
-func TestMKdir(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS mkdir success", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := GooseFSFileUtils{}
- err := a.Mkdir("/")
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- err = a.Mkdir("/")
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestMount(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS mkdir success", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- a := GooseFSFileUtils{}
- var testCases = []struct {
- readOnly bool
- shared bool
- options map[string]string
- }{
- {
- readOnly: true,
- shared: true,
- options: map[string]string{
- "testKey": "testValue",
- },
- },
- {
- readOnly: true,
- shared: false,
- },
- {
- readOnly: false,
- shared: true,
- },
- {
- readOnly: false,
- shared: false,
- },
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- for index, test := range testCases {
- err := a.Mount("/", "/", nil, test.readOnly, test.shared)
- if err == nil {
- t.Errorf("%d check failure, want err, got nil", index)
- return
- }
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- for index, test := range testCases {
- err := a.Mount("/", "/", nil, test.readOnly, test.shared)
- if err != nil {
- t.Errorf("%d check failure, want nil, got err: %v", index, err)
- return
- }
- }
-}
-
-func TestIsMounted(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "https://mirrors.bit.edu.cn/apache/hbase/stable on /hbase (web, capacity=-1B, used=-1B, read-only, not shared, properties={}) \n /underFSStorage on / (local, capacity=0B, used=0B, not read-only, not shared, properties={})", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, err := a.IsMounted("/hbase")
- if err == nil {
- t.Error("check failure, want err, got nil")
- return
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- var testCases = []struct {
- goosefsPath string
- expectedResult bool
- }{
- {
- goosefsPath: "/spark",
- expectedResult: false,
- },
- {
- goosefsPath: "/hbase",
- expectedResult: true,
- },
- }
- for index, test := range testCases {
- mounted, err := a.IsMounted(test.goosefsPath)
- if err != nil {
- t.Errorf("%d check failure, want nil, got err: %v", index, err)
- return
- }
-
- if mounted != test.expectedResult {
- t.Errorf("%d check failure, want: %t, got: %t ", index, mounted, test.expectedResult)
- return
- }
- }
-}
-
-func TestReady(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary: ", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- ready := a.Ready()
- if ready != false {
- t.Errorf("check failure, want false, got %t", ready)
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- ready = a.Ready()
- if ready != true {
- t.Errorf("check failure, want true, got %t", ready)
- }
-}
-
-func TestDu(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "File Size In GooseFS Path\n577575561 0 (0%) /hbase", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, _, _, err := a.Du("/hbase")
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- ufs, cached, cachedPercentage, err := a.Du("/hbase")
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
- if ufs != 577575561 {
- t.Errorf("check failure, want 577575561, got %d", ufs)
- }
- if cached != 0 {
- t.Errorf("check failure, want 0, got %d", cached)
- }
- if cachedPercentage != "0%" {
- t.Errorf("check failure, want 0, got %s", cachedPercentage)
- }
-}
-
-func TestCount(t *testing.T) {
- out1, out2, out3 := 111, 222, 333
- mockExec := func(p1, p2, p3 string, p4 []string) (stdout string, stderr string, e error) {
-
- if strings.Contains(p4[3], EXEC_ERR) {
- return "does not exist", "", errors.New("exec-error")
- } else if strings.Contains(p4[3], NEGATIVE_RES) {
- return "12324\t45463\t-9223372036854775808", "", nil
- } else if strings.Contains(p4[3], TOO_MANY_LINES) {
- return "1\n2\n3\n4\n", "1\n2\n3\n4\n", nil
- } else if strings.Contains(p4[3], DATA_NUM) {
- return "1\n2\t3", "1\n2\t3", nil
- } else if strings.Contains(p4[3], PARSE_ERR) {
- return "1\n1\tdududu\tbbb\t", "1\n1\t2\tbbb\t", nil
- } else {
- return fmt.Sprintf("first line!\n%d\t%d\t%d", out1, out2, out3), "", nil
- }
- }
-
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExec)
- defer patches.Reset()
-
- var tests = []struct {
- in string
- out1, out2, out3 int64
- noErr bool
- }{
- {EXEC_ERR, 0, 0, 0, false},
- {NEGATIVE_RES, 0, 0, 0, false},
- {TOO_MANY_LINES, 0, 0, 0, false},
- {DATA_NUM, 0, 0, 0, false},
- {PARSE_ERR, 0, 0, 0, false},
- {FINE, int64(out1), int64(out2), int64(out3), true},
- }
- for _, test := range tests {
- o1, o2, o3, err := GooseFSFileUtils{log: fake.NullLogger()}.Count(test.in)
- var noErr bool = (err == nil)
- if test.noErr != noErr {
- t.Errorf("input parameter is %s,expected noerr is %t", test.in, test.noErr)
- }
- if test.noErr {
- if o1 != test.out1 || o2 != test.out2 || o3 != test.out3 {
- t.Fatalf("input parameter is %s,output is %d,%d, %d", test.in, o1, o2, o3)
- }
- }
- }
-}
-
-func TestGetFileCount(t *testing.T) {
- ExecWithoutTimeoutCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "Type: COUNTER, Value: 6,367,897", "", nil
- }
- ExecWithoutTimeoutErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "execWithoutTimeout", ExecWithoutTimeoutErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, err := a.GetFileCount()
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "execWithoutTimeout", ExecWithoutTimeoutCommon)
- fileCount, err := a.GetFileCount()
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
- if fileCount != 6367897 {
- t.Errorf("check failure, want 6367897, got %d", fileCount)
- }
-}
-
-func TestReportMetrics(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "report [category] [category args]\nReport GooseFS running cluster information.\n", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
-
- _, err := a.ReportMetrics()
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- _, err = a.ReportMetrics()
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestReportCapacity(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "report [category] [category args]\nReport GooseFS running cluster information.\n", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, err := a.ReportCapacity()
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- _, err = a.ReportCapacity()
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestExec(t *testing.T) {
- ExecWithoutTimeoutCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "Type: COUNTER, Value: 6,367,897", "", nil
- }
- ExecWithoutTimeoutErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "execWithoutTimeout", ExecWithoutTimeoutErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, _, err := a.exec([]string{"goosefs", "fsadmin", "report", "capacity"}, false)
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "execWithoutTimeout", ExecWithoutTimeoutCommon)
- _, _, err = a.exec([]string{"goosefs", "fsadmin", "report", "capacity"}, true)
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestExecWithoutTimeout(t *testing.T) {
- mockExecCommon := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "conf", "", nil
- }
- mockExecErr := func(podName string, containerName string, namespace string, cmd []string) (stdout string, stderr string, e error) {
- return "err", "", errors.New("other error")
- }
-
- patches := gomonkey.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, _, err := a.execWithoutTimeout([]string{"goosefs", "fsadmin", "report", "capacity"}, false)
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyFunc(kubeclient.ExecCommandInContainer, mockExecCommon)
- _, _, err = a.execWithoutTimeout([]string{"goosefs", "fsadmin", "report", "capacity"}, true)
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-}
-
-func TestMasterPodName(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary: \n Master Address: 192.168.0.193:20009\n Web Port: 20010", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, err := a.MasterPodName()
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- address, err := a.MasterPodName()
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
- if address != "192.168.0.193" {
- t.Errorf("check failure, want: %s, got: %s", "192.168.0.193", address)
- }
-}
-
-func TestUnMount(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "Unmounted /hbase \n", "", nil
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- err := a.UnMount("/hbase")
- if err != nil {
- t.Error("check failure, want err, got nil")
- }
-}
diff --git a/pkg/ddc/goosefs/operations/cached.go b/pkg/ddc/goosefs/operations/cached.go
deleted file mode 100644
index 4d3c459327a..00000000000
--- a/pkg/ddc/goosefs/operations/cached.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import (
- "fmt"
- "strings"
-
- units "github.com/docker/go-units"
-)
-
-func (a GooseFSFileUtils) CachedState() (cached int64, err error) {
- var (
- command = []string{"goosefs", "fsadmin", "report"}
- stdout string
- stderr string
- )
-
- found := false
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
- str := strings.Split(stdout, "\n")
-
- for _, s := range str {
- if strings.Contains(s, "Used Capacity:") {
- values := strings.Fields(s)
- if len(values) == 0 {
- return cached, fmt.Errorf("failed to parse %s", s)
- }
- cached, err = units.RAMInBytes(values[len(values)-1])
- if err != nil {
- return
- }
- found = true
- }
- }
-
- if !found {
- err = fmt.Errorf("failed to find the cache in output %v", stdout)
- }
-
- return
-}
-
-// clean cache with a preset timeout of 60s
-func (a GooseFSFileUtils) CleanCache(path string) (err error) {
- var (
- releaseVersion = []string{"cat", "/etc/issue"}
- command = []string{"60", "goosefs", "fs", "free", "-f", path}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(releaseVersion, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", releaseVersion, err, stdout, stderr)
- return
- }
-
- if strings.Contains(stdout, "Ubuntu") {
- command = append([]string{"timeout", "-k"}, command...)
- } else if strings.Contains(stdout, "Alpine") {
- command = append([]string{"timeout"}, command...)
- } else {
- err = fmt.Errorf("unknow release version for linux")
- return
- }
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- return
-}
diff --git a/pkg/ddc/goosefs/operations/cached_test.go b/pkg/ddc/goosefs/operations/cached_test.go
deleted file mode 100644
index f9f7d6810d0..00000000000
--- a/pkg/ddc/goosefs/operations/cached_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import (
- "errors"
- "fmt"
- "testing"
-
- "github.com/agiledragon/gomonkey/v2"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
-)
-
-func TestGooseFSFileUtils_CachedState(t *testing.T) {
- ExecCommon := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "GooseFS cluster summary: \n Master Address: 192.168.0.193:20009 \n Used Capacity: 0B\n", "", nil
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- _, err := a.CachedState()
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommon)
-
- cached, err := a.CachedState()
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
- if cached != 0 {
- t.Errorf("check failure, want 0, got: %d", cached)
- }
-}
-
-func TestGooseFSFIlUtils_CleanCache(t *testing.T) {
- ExecCommonUbuntu := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "Ubuntu", "", nil
- }
- ExecCommonAlpine := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "Alpine", "", nil
- }
- ExecCommonCentos := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", fmt.Errorf("unknow release version for linux")
- }
- ExecErr := func(a GooseFSFileUtils, command []string, verbose bool) (stdout string, stderr string, err error) {
- return "", "", errors.New("fail to run the command")
- }
-
- patches := gomonkey.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecErr)
- defer patches.Reset()
-
- a := &GooseFSFileUtils{log: fake.NullLogger()}
- err := a.CleanCache("/")
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommonUbuntu)
- err = a.CleanCache("/")
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommonAlpine)
- err = a.CleanCache("/")
- if err != nil {
- t.Errorf("check failure, want nil, got err: %v", err)
- }
-
- patches.ApplyPrivateMethod(GooseFSFileUtils{}, "exec", ExecCommonCentos)
- err = a.CleanCache("/")
- if err == nil {
- t.Error("check failure, want err, got nil")
- }
-}
diff --git a/pkg/ddc/goosefs/operations/conf.go b/pkg/ddc/goosefs/operations/conf.go
deleted file mode 100644
index 4b55714a25d..00000000000
--- a/pkg/ddc/goosefs/operations/conf.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import "fmt"
-
-// Get Conf info of the GooseFS Engine
-func (a GooseFSFileUtils) GetConf(key string) (value string, err error) {
- var (
- command = []string{"goosefs", "getConf", key}
- stdout string
- stderr string
- )
-
- stdout, stderr, err = a.exec(command, false)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return stdout, err
- }
- return stdout, err
-}
diff --git a/pkg/ddc/goosefs/operations/local.go b/pkg/ddc/goosefs/operations/local.go
deleted file mode 100644
index 3b837ed21cd..00000000000
--- a/pkg/ddc/goosefs/operations/local.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import (
- "fmt"
- "time"
-)
-
-// SyncLocalDir syncs local path by running command `du -sh `.
-// Under the circumstance where some NAS(e.g. NFS) is mounted on the ``, the function will sync metadata of all files in the NAS.
-// This is necessary for GooseFS to get consistent file metadata with UFS(i.e. NAS in this case).
-func (a GooseFSFileUtils) SyncLocalDir(path string) (err error) {
- var (
- // command = []string{"goosefs", "fs", "-Dgoosefs.user.file.metadata.sync.interval=0", "ls", "-R", goosefsPath}
- // command = []string{"goosefs", "fs", "-Dgoosefs.user.file.metadata.sync.interval=0", "count", goosefsPath}
- command = []string{"du", "-sh", path}
- stdout string
- stderr string
- )
-
- start := time.Now()
- stdout, stderr, err = a.execWithoutTimeout(command, false)
- duration := time.Since(start)
- a.log.Info("du -sh", "path", path, "period", duration)
- if err != nil {
- err = fmt.Errorf("execute command %v with expectedErr: %v stdout %s and stderr %s", command, err, stdout, stderr)
- return
- }
-
- return
-}
diff --git a/pkg/ddc/goosefs/operations/local_test.go b/pkg/ddc/goosefs/operations/local_test.go
deleted file mode 100644
index f4ad03b109d..00000000000
--- a/pkg/ddc/goosefs/operations/local_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package operations
-
-import (
- "testing"
-
- . "github.com/agiledragon/gomonkey/v2"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/log/zap"
-)
-
-func mockExecCommandInContainerForSyncLocalDir() (stdout string, stderr string, err error) {
- r := `File Size In GooseFS Path
- 592.06MB 0B (0%) /`
- return r, "", nil
-}
-
-func TestSyncLocalDir(t *testing.T) {
- ctrl.SetLogger(zap.New(func(o *zap.Options) {
- o.Development = true
- }))
- var tests = []struct {
- path string
- err error
- }{
- {"/underFSStorage/test", nil},
- }
-
- for _, test := range tests {
- tools := NewGooseFSFileUtils("", "", "", ctrl.Log)
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- stdout, stderr, err := mockExecCommandInContainerForSyncLocalDir()
- return stdout, stderr, err
- })
- defer patch1.Reset()
- err := tools.SyncLocalDir(test.path)
- // fmt.Println(expectedErr)
- if err != nil {
- t.Errorf("expected %v, got %v %s", test.path, tools, err)
- }
- }
-}
diff --git a/pkg/ddc/goosefs/port_parser.go b/pkg/ddc/goosefs/port_parser.go
deleted file mode 100644
index daa73e720d8..00000000000
--- a/pkg/ddc/goosefs/port_parser.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "fmt"
- "strconv"
-
- "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "github.com/pkg/errors"
- "gopkg.in/yaml.v2"
- v1 "k8s.io/api/core/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-var propertiesToCheck = []string{
- "goosefs.master.rpc.port",
- "goosefs.master.web.port",
- "goosefs.worker.rpc.port",
- "goosefs.worker.web.port",
- "goosefs.job.master.rpc.port",
- "goosefs.job.master.web.port",
- "goosefs.job.worker.rpc.port",
- "goosefs.job.worker.web.port",
- "goosefs.job.worker.data.port",
- "goosefs.proxy.web.port",
- "goosefs.master.embedded.journal.port",
- "goosefs.job.master.embedded.journal.port",
-}
-
-// GetReservedPorts defines restoration logic for goosefsRuntime
-func GetReservedPorts(client client.Client) (ports []int, err error) {
- var datasets v1alpha1.DatasetList
- err = client.List(context.TODO(), &datasets)
- if err != nil {
- return nil, errors.Wrap(err, "can't list datasets when GetReservedPorts")
- }
-
- for _, dataset := range datasets.Items {
- if len(dataset.Status.Runtimes) != 0 {
- // Assume there is only one runtime with category "Accelerate"
- accelerateRuntime := dataset.Status.Runtimes[0]
- if accelerateRuntime.Type != "goosefs" {
- continue
- }
- configMapName := fmt.Sprintf("%s-%s-values", accelerateRuntime.Name, accelerateRuntime.Type)
- configMap, err := kubeclient.GetConfigmapByName(client, configMapName, accelerateRuntime.Namespace)
- if err != nil {
- return nil, errors.Wrap(err, "GetConfigMapByName when GetReservedPorts")
- }
-
- if configMap == nil {
- continue
- }
-
- reservedPorts, err := parsePortsFromConfigMap(configMap)
- if err != nil {
- return nil, errors.Wrap(err, "parsePortsFromConfigMap when GetReservedPorts")
- }
- ports = append(ports, reservedPorts...)
- }
- }
- return ports, nil
-}
-
-// parsePortsFromConfigMap extracts port usage information given a configMap
-func parsePortsFromConfigMap(configMap *v1.ConfigMap) (ports []int, err error) {
- var value GooseFS
- if v, ok := configMap.Data["data"]; ok {
- if err := yaml.Unmarshal([]byte(v), &value); err != nil {
- return nil, err
- }
- for _, property := range propertiesToCheck {
- if portStr, ok := value.Properties[property]; ok {
- portInt, err := strconv.Atoi(portStr)
- if err != nil {
- return nil, err
- }
- ports = append(ports, portInt)
- }
- }
- }
- return ports, nil
-}
diff --git a/pkg/ddc/goosefs/port_parser_test.go b/pkg/ddc/goosefs/port_parser_test.go
deleted file mode 100644
index 0df89be786c..00000000000
--- a/pkg/ddc/goosefs/port_parser_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
-Copyright 2023 The Fluid Author.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
- "testing"
-
- "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-var valuesConfigMapData = `
-fullnameOverride: hbase
-image: ccr.ccs.tencentyun.com/goosefs/goosefs
-imageTag: v1.0.1
-imagePullPolicy: IfNotPresent
-user: 0
-group: 0
-fsGroup: 0
-properties:
- goosefs.fuse.cached.paths.max: "1000000"
- goosefs.fuse.debug.enabled: "false"
- goosefs.fuse.jnifuse.enabled: "true"
- goosefs.fuse.logging.threshold: 1000ms
- goosefs.fuse.user.group.translation.enabled: "true"
- goosefs.job.master.finished.job.retention.time: 30sec
- goosefs.job.master.rpc.port: "20004"
- goosefs.job.master.web.port: "20005"
- goosefs.job.worker.data.port: "20008"
- goosefs.job.worker.rpc.port: "20006"
- goosefs.job.worker.threadpool.size: "164"
- goosefs.job.worker.web.port: "20007"
- goosefs.master.journal.folder: /journal
- goosefs.master.journal.log.size.bytes.max: 500MB
- goosefs.master.journal.type: UFS
- goosefs.master.metadata.sync.concurrency.level: "128"
- goosefs.master.metadata.sync.executor.pool.size: "128"
- goosefs.master.metadata.sync.ufs.prefetch.pool.size: "128"
- goosefs.master.metastore: ROCKS
- goosefs.master.metastore.inode.cache.max.size: "10000000"
- goosefs.master.mount.table.root.ufs: /underFSStorage
- goosefs.master.rpc.executor.core.pool.size: "128"
- goosefs.master.rpc.executor.max.pool.size: "1024"
- goosefs.master.rpc.port: "20000"
- goosefs.master.security.impersonation.root.groups: '*'
- goosefs.master.security.impersonation.root.users: '*'
- goosefs.master.web.port: "20001"
- goosefs.security.authorization.permission.enabled: "false"
- goosefs.security.stale.channel.purge.interval: 365d
- goosefs.underfs.object.store.breadcrumbs.enabled: "false"
- goosefs.user.block.avoid.eviction.policy.reserved.size.bytes: 2GB
- goosefs.user.block.master.client.pool.gc.threshold: 2day
- goosefs.user.block.master.client.threads: "1024"
- goosefs.user.block.size.bytes.default: 256MB
- goosefs.user.block.worker.client.pool.min: "512"
- goosefs.user.block.write.location.policy.class: com.qcloud.cos.goosefs.client.block.policy.LocalFirstAvoidEvictionPolicy
- goosefs.user.client.cache.enabled: "false"
- goosefs.user.direct.memory.io.enabled: "true"
- goosefs.user.file.create.ttl.action: FREE
- goosefs.user.file.master.client.threads: "1024"
- goosefs.user.file.passive.cache.enabled: "false"
- goosefs.user.file.readtype.default: CACHE
- goosefs.user.file.replication.max: "1"
- goosefs.user.file.writetype.default: MUST_CACHE
- goosefs.user.local.reader.chunk.size.bytes: 256MB
- goosefs.user.logging.threshold: 1000ms
- goosefs.user.metadata.cache.enabled: "true"
- goosefs.user.metadata.cache.expiration.time: 2day
- goosefs.user.metadata.cache.max.size: "6000000"
- goosefs.user.metrics.collection.enabled: "true"
- goosefs.user.streaming.data.timeout: 300sec
- goosefs.user.streaming.reader.chunk.size.bytes: 256MB
- goosefs.user.ufs.block.read.location.policy: com.qcloud.cos.goosefs.client.block.policy.LocalFirstPolicy
- goosefs.user.update.file.accesstime.disabled: "true"
- goosefs.user.worker.list.refresh.interval: 2min
- goosefs.web.ui.enabled: "false"
- goosefs.worker.allocator.class: goosefs.worker.block.allocator.MaxFreeAllocator
- goosefs.worker.block.master.client.pool.size: "1024"
- goosefs.worker.network.reader.buffer.size: 256MB
- goosefs.worker.rpc.port: "20002"
- goosefs.worker.web.port: "20003"
-fuse:
- image: ccr.ccs.tencentyun.com/goosefs/goosefs-fuse
- nodeSelector:
- fluid.io/s-default-hbase: "true"
- imageTag: v1.0.1
- imagePullPolicy: IfNotPresent
- env:
- MOUNT_POINT: /runtime-mnt/goosefs/default/hbase/goosefs-fuse
- jvmOptions:
- - -Xmx16G
- - -Xms16G
- - -XX:+UseG1GC
- - -XX:MaxDirectMemorySize=32g
- - -XX:+UnlockExperimentalVMOptions
- mountPath: /runtime-mnt/goosefs/default/hbase/goosefs-fuse
- args:
- - fuse
- - --fuse-opts=kernel_cache,ro,max_read=131072,attr_timeout=7200,entry_timeout=7200,nonempty,max_readahead=0,allow_other
- hostNetwork: true
- enabled: true
-tieredstore:
- levels:
- - alias: MEM
- level: 0
- mediumtype: MEM
- type: hostPath
- path: /dev/shm/default/hbase
- quota: 2GB
- high: "0.95"
- low: "0.7"
-journal:
- volumeType: emptyDir
- size: 30Gi
-shortCircuit:
- enable: true
- policy: local
- volumeType: emptyDir
-monitoring: goosefs_runtime_metrics
-`
-
-func Test_parsePortsFromConfigMap(t *testing.T) {
- type args struct {
- configMap *v1.ConfigMap
- }
- tests := []struct {
- name string
- args args
- wantPorts []int
- wantErr bool
- }{
- {
- name: "parsePortsFromConfigMap",
- args: args{configMap: &v1.ConfigMap{
- Data: map[string]string{
- "data": valuesConfigMapData,
- },
- }},
- wantPorts: []int{20000, 20001, 20002, 20003, 20004, 20005, 20006, 20007, 20008},
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- gotPorts, err := parsePortsFromConfigMap(tt.args.configMap)
- if (err != nil) != tt.wantErr {
- t.Errorf("parsePortsFromConfigMap() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(gotPorts, tt.wantPorts) {
- t.Errorf("parsePortsFromConfigMap() gotPorts = %v, want %v", gotPorts, tt.wantPorts)
- }
- })
- }
-}
-
-func TestGetReservedPorts(t *testing.T) {
- configMap := &v1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-goosefs-values",
- Namespace: "fluid",
- },
- Data: map[string]string{
- "data": valuesConfigMapData,
- },
- }
- dataSets := []*v1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Name: "hbase",
- Namespace: "fluid",
- Type: "goosefs",
- },
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "no-runtime",
- Namespace: "fluid",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "runtime-type",
- Namespace: "fluid",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Type: "not-goosefs",
- },
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "no-map",
- Namespace: "fluid",
- },
- Status: v1alpha1.DatasetStatus{
- Runtimes: []v1alpha1.Runtime{
- {
- Type: "goosefs",
- },
- },
- },
- },
- }
- runtimeObjs := []runtime.Object{}
- runtimeObjs = append(runtimeObjs, configMap)
- for _, dataSet := range dataSets {
- runtimeObjs = append(runtimeObjs, dataSet.DeepCopy())
- }
- fakeClient := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)
- wantPorts := []int{20000, 20001, 20002, 20003, 20004, 20005, 20006, 20007, 20008}
- ports, err := GetReservedPorts(fakeClient)
- if err != nil {
- t.Errorf("GetReservedPorts failed.")
- }
- if !reflect.DeepEqual(ports, wantPorts) {
- t.Errorf("gotPorts = %v, want %v", ports, wantPorts)
- }
-
-}
diff --git a/pkg/ddc/goosefs/replicas.go b/pkg/ddc/goosefs/replicas.go
deleted file mode 100644
index 9ef9f55ddaa..00000000000
--- a/pkg/ddc/goosefs/replicas.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/util/retry"
-)
-
-// SyncReplicas syncs the replicas
-func (e *GooseFSEngine) SyncReplicas(ctx cruntime.ReconcileRequestContext) (err error) {
-
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- workers, err := ctrl.GetWorkersAsStatefulset(e.Client,
- types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()})
- if err != nil {
- return err
- }
- runtime, err := e.getRuntime()
- if err != nil {
- return err
- }
-
- runtimeToUpdate := runtime.DeepCopy()
- err = e.Helper.SyncReplicas(ctx, runtimeToUpdate, runtimeToUpdate.Status, workers)
- return err
- })
-
- if err != nil {
- _ = utils.LoggingErrorExceptConflict(e.Log, err, "Failed to sync replicas", types.NamespacedName{Namespace: e.namespace, Name: e.name})
- }
-
- return
-
-}
diff --git a/pkg/ddc/goosefs/replicas_test.go b/pkg/ddc/goosefs/replicas_test.go
deleted file mode 100644
index 39d7477a665..00000000000
--- a/pkg/ddc/goosefs/replicas_test.go
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "testing"
-
- v1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/record"
- "k8s.io/utils/ptr"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func newGooseFSEngineREP(client client.Client, name string, namespace string) *GooseFSEngine {
-
- runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, common.GooseFSRuntime)
- engine := &GooseFSEngine{
- runtime: &v1alpha1.GooseFSRuntime{},
- name: name,
- namespace: namespace,
- Client: client,
- runtimeInfo: runTimeInfo,
- Log: fake.NullLogger(),
- }
- engine.Helper = ctrl.BuildHelper(runTimeInfo, client, engine.Log)
- return engine
-}
-
-func TestSyncReplicas(t *testing.T) {
- nodeInputs := []*corev1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-spark",
- Labels: map[string]string{
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-fluid-spark": "true",
- "fluid.io/s-fluid-spark": "true",
- "fluid.io/s-h-goosefs-d-fluid-spark": "5B",
- "fluid.io/s-h-goosefs-m-fluid-spark": "1B",
- "fluid.io/s-h-goosefs-t-fluid-spark": "6B",
- "fluid_exclusive": "fluid_spark",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-share",
- Labels: map[string]string{
- "fluid.io/dataset-num": "2",
- "fluid.io/s-goosefs-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- "fluid.io/s-goosefs-fluid-hbase": "true",
- "fluid.io/s-fluid-hbase": "true",
- "fluid.io/s-h-goosefs-d-fluid-hbase": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hbase": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hbase": "6B",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-hadoop",
- Labels: map[string]string{
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- "node-select": "true",
- },
- },
- },
- }
- runtimeInputs := []*v1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: v1alpha1.GooseFSRuntimeSpec{
- Replicas: 3, // 2
- },
- Status: v1alpha1.RuntimeStatus{
- DesiredWorkerNumberScheduled: 2,
- Conditions: []v1alpha1.RuntimeCondition{
- utils.NewRuntimeCondition(v1alpha1.RuntimeWorkersInitialized, v1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", corev1.ConditionTrue),
- utils.NewRuntimeCondition(v1alpha1.RuntimeFusesInitialized, v1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", corev1.ConditionTrue),
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "fluid",
- },
- Spec: v1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- },
- Status: v1alpha1.RuntimeStatus{
- DesiredWorkerNumberScheduled: 2,
- Conditions: []v1alpha1.RuntimeCondition{
- utils.NewRuntimeCondition(v1alpha1.RuntimeWorkersInitialized, v1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", corev1.ConditionTrue),
- utils.NewRuntimeCondition(v1alpha1.RuntimeFusesInitialized, v1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", corev1.ConditionTrue),
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "obj",
- Namespace: "fluid",
- },
- Spec: v1alpha1.GooseFSRuntimeSpec{
- Replicas: 2,
- },
- Status: v1alpha1.RuntimeStatus{
- DesiredWorkerNumberScheduled: 2,
- },
- },
- }
- workersInputs := []*appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-worker",
- Namespace: "fluid",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](2),
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop-worker",
- Namespace: "fluid",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](2),
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "obj-worker",
- Namespace: "fluid",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](2),
- },
- },
- }
- dataSetInputs := []*v1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "fluid",
- },
- },
- }
-
- fuseInputs := []*appsv1.DaemonSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-fuse",
- Namespace: "fluid",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop-fuse",
- Namespace: "fluid",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "obj-fuse",
- Namespace: "fluid",
- },
- },
- }
-
- objs := []runtime.Object{}
- for _, nodeInput := range nodeInputs {
- objs = append(objs, nodeInput.DeepCopy())
- }
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- for _, workerInput := range workersInputs {
- objs = append(objs, workerInput.DeepCopy())
- }
- for _, fuseInput := range fuseInputs {
- objs = append(objs, fuseInput.DeepCopy())
- }
- for _, dataSetInput := range dataSetInputs {
- objs = append(objs, dataSetInput.DeepCopy())
- }
-
- fakeClient := fake.NewFakeClientWithScheme(testScheme, objs...)
- testCases := []struct {
- testName string
- name string
- namespace string
- Type v1alpha1.RuntimeConditionType
- isErr bool
- condtionLength int
- deprecated bool
- }{
- {
- testName: "scaleout",
- name: "hbase",
- namespace: "fluid",
- Type: v1alpha1.RuntimeWorkerScaledOut,
- isErr: false,
- condtionLength: 3,
- },
- {
- testName: "scalein",
- name: "hadoop",
- namespace: "fluid",
- Type: v1alpha1.RuntimeWorkerScaledIn,
- isErr: false,
- condtionLength: 3,
- },
- {
- testName: "noscale",
- name: "obj",
- namespace: "fluid",
- Type: "",
- isErr: false,
- condtionLength: 0,
- },
- }
- for _, testCase := range testCases {
- engine := newGooseFSEngineREP(fakeClient, testCase.name, testCase.namespace)
- err := engine.SyncReplicas(cruntime.ReconcileRequestContext{
- Log: fake.NullLogger(),
- Recorder: record.NewFakeRecorder(300),
- })
- if err != nil {
- t.Errorf("sync replicas failed,err:%s", err.Error())
- }
- rt, _ := engine.getRuntime()
- found := false
- for _, cond := range rt.Status.Conditions {
-
- if cond.Type == testCase.Type {
- found = true
- break
- }
- }
- if !found && testCase.condtionLength > 0 {
- t.Errorf("testCase: %s runtime condition want conditionType %v, got conditions %v", testCase.testName, testCase.Type, rt.Status.Conditions)
- }
- }
-}
diff --git a/pkg/ddc/goosefs/report.go b/pkg/ddc/goosefs/report.go
deleted file mode 100644
index c63026b4342..00000000000
--- a/pkg/ddc/goosefs/report.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "regexp"
- "strings"
-
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- ddctypes "github.com/fluid-cloudnative/fluid/pkg/ddc/types"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-// reportSummary reports goosefs summary
-func (e *GooseFSEngine) GetReportSummary() (summary string, err error) {
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- return fileUtils.ReportSummary()
-}
-
-// parse goosefs report summary to cacheStates
-func (e GooseFSEngine) ParseReportSummary(s string) cacheStates {
-
- var states cacheStates
-
- strs := strings.Split(s, "\n")
- for _, str := range strs {
- str = strings.TrimSpace(str)
- if strings.HasPrefix(str, ddctypes.SummaryPrefixTotalCapacity) {
- totalCacheCapacityGooseFS, _ := utils.FromHumanSize(strings.TrimPrefix(str, ddctypes.SummaryPrefixTotalCapacity))
- // Convert GooseFS's binary byte units to Fluid's binary byte units
- // e.g. 10KB -> 10KiB, 2GB -> 2GiB
- states.cacheCapacity = utils.BytesSize(float64(totalCacheCapacityGooseFS))
- }
- if strings.HasPrefix(str, ddctypes.SummaryPrefixUsedCapacity) {
- usedCacheCapacityGooseFS, _ := utils.FromHumanSize(strings.TrimPrefix(str, ddctypes.SummaryPrefixUsedCapacity))
- // Convert GooseFS's binary byte units to Fluid's binary byte units
- // e.g. 10KB -> 10KiB, 2GB -> 2GiB
- states.cached = utils.BytesSize(float64(usedCacheCapacityGooseFS))
- }
- }
-
- return states
-}
-
-// reportMetrics reports goosefs metrics
-func (e *GooseFSEngine) GetReportMetrics() (summary string, err error) {
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- return fileUtils.ReportMetrics()
-}
-
-// parse goosefs report metric to cacheHitStates
-func (e GooseFSEngine) ParseReportMetric(metrics string, cacheHitStates, lastCacheHitStates *cacheHitStates) {
- var localThroughput, remoteThroughput, ufsThroughput int64
-
- strs := strings.Split(metrics, "\n")
- for _, str := range strs {
- str = strings.TrimSpace(str)
- counterPattern := regexp.MustCompile(`\(Type:\sCOUNTER,\sValue:\s(.*)\)`)
- gaugePattern := regexp.MustCompile(`\(Type:\sGAUGE,\sValue:\s(.*)/MIN\)`)
- if strings.HasPrefix(str, MetricsPrefixBytesReadLocal) {
- cacheHitStates.bytesReadLocal, _ = utils.FromHumanSize(counterPattern.FindStringSubmatch(str)[1])
- continue
- }
-
- if strings.HasPrefix(str, MetricsPrefixBytesReadRemote) {
- cacheHitStates.bytesReadRemote, _ = utils.FromHumanSize(counterPattern.FindStringSubmatch(str)[1])
- continue
- }
-
- if strings.HasPrefix(str, MetricsPrefixBytesReadUfsAll) {
- cacheHitStates.bytesReadUfsAll, _ = utils.FromHumanSize(counterPattern.FindStringSubmatch(str)[1])
- continue
- }
-
- if strings.HasPrefix(str, MetricsPrefixBytesReadLocalThroughput) {
- localThroughput, _ = utils.FromHumanSize(gaugePattern.FindStringSubmatch(str)[1])
- continue
- }
-
- if strings.HasPrefix(str, MetricsPrefixBytesReadRemoteThroughput) {
- remoteThroughput, _ = utils.FromHumanSize(gaugePattern.FindStringSubmatch(str)[1])
- continue
- }
-
- if strings.HasPrefix(str, MetricsPrefixBytesReadUfsThroughput) {
- ufsThroughput, _ = utils.FromHumanSize(gaugePattern.FindStringSubmatch(str)[1])
- }
- }
-
- if lastCacheHitStates == nil {
- return
- }
-
- // Summarize local/remote cache hit ratio
- deltaReadLocal := cacheHitStates.bytesReadLocal - lastCacheHitStates.bytesReadLocal
- deltaReadRemote := cacheHitStates.bytesReadRemote - lastCacheHitStates.bytesReadRemote
- deltaReadUfsAll := cacheHitStates.bytesReadUfsAll - lastCacheHitStates.bytesReadUfsAll
- deltaReadTotal := deltaReadLocal + deltaReadRemote + deltaReadUfsAll
-
- if deltaReadTotal != 0 {
- cacheHitStates.localHitRatio = fmt.Sprintf("%.1f%%", float64(deltaReadLocal)*100.0/float64(deltaReadTotal))
- cacheHitStates.remoteHitRatio = fmt.Sprintf("%.1f%%", float64(deltaReadRemote)*100.0/float64(deltaReadTotal))
- cacheHitStates.cacheHitRatio = fmt.Sprintf("%.1f%%", float64(deltaReadLocal+deltaReadRemote)*100.0/float64(deltaReadTotal))
- } else {
- // No data is requested in last minute
- cacheHitStates.localHitRatio = "0.0%"
- cacheHitStates.remoteHitRatio = "0.0%"
- cacheHitStates.cacheHitRatio = "0.0%"
- }
-
- // Summarize local/remote throughput ratio
- totalThroughput := localThroughput + remoteThroughput + ufsThroughput
- if totalThroughput != 0 {
- cacheHitStates.localThroughputRatio = fmt.Sprintf("%.1f%%", float64(localThroughput)*100.0/float64(totalThroughput))
- cacheHitStates.remoteThroughputRatio = fmt.Sprintf("%.1f%%", float64(remoteThroughput)*100.0/float64(totalThroughput))
- cacheHitStates.cacheThroughputRatio = fmt.Sprintf("%.1f%%", float64(localThroughput+remoteThroughput)*100.0/float64(totalThroughput))
- } else {
- cacheHitStates.localThroughputRatio = "0.0%"
- cacheHitStates.remoteThroughputRatio = "0.0%"
- cacheHitStates.cacheThroughputRatio = "0.0%"
- }
-
-}
-
-// reportCapacity reports goosefs capacity
-func (e *GooseFSEngine) reportCapacity() (summary string, err error) {
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- return fileUtils.ReportCapacity()
-}
diff --git a/pkg/ddc/goosefs/report_test.go b/pkg/ddc/goosefs/report_test.go
deleted file mode 100644
index af0482f111b..00000000000
--- a/pkg/ddc/goosefs/report_test.go
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
- "testing"
-)
-
-func TestParseReportSummary(t *testing.T) {
- testCases := map[string]struct {
- summary string
- want cacheStates
- }{
- "test parseReportSummary case 1": {
- summary: mockGooseFSReportSummaryForParseReport(),
- want: cacheStates{
- cacheCapacity: "19.07MiB",
- cached: "9.69MiB",
- },
- },
- }
-
- for k, item := range testCases {
- got := GooseFSEngine{}.ParseReportSummary(item.summary)
- if !reflect.DeepEqual(item.want, got) {
- t.Errorf("%s check failure,want:%+v,got:%+v", k, item.want, got)
- }
- }
-}
-
-func TestParseReportMetric(t *testing.T) {
- testCases := map[string]struct {
- metrics string
- want cacheHitStates
- lastCacheHitStates *cacheHitStates
- }{
- "test ParseReportMetric case 1": {
- metrics: mockGooseFSReportMetricsForParseMetric(),
- lastCacheHitStates: nil,
- want: cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- },
- },
- "test ParseReportMetric case 2": {
- metrics: mockGooseFSReportMetricsForParseMetric(),
- lastCacheHitStates: &cacheHitStates{
- bytesReadLocal: 10000,
- bytesReadUfsAll: 40000,
- },
- want: cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- cacheHitRatio: "38.7%",
- localHitRatio: "38.7%",
- remoteHitRatio: "0.0%",
- localThroughputRatio: "38.7%",
- remoteThroughputRatio: "0.0%",
- cacheThroughputRatio: "38.7%",
- },
- },
- }
-
- for k, item := range testCases {
- e := &GooseFSEngine{}
- if item.lastCacheHitStates != nil {
- e.lastCacheHitStates = item.lastCacheHitStates
- got := cacheHitStates{}
- e.ParseReportMetric(item.metrics, &got, item.lastCacheHitStates)
-
- // skip timestamp check
- item.want.timestamp = got.timestamp
-
- if !reflect.DeepEqual(item.want, got) {
- t.Errorf("%s check failure,\n want:%+v \n,got:%+v", k, item.want, got)
- }
- } else {
- got := cacheHitStates{}
- e.ParseReportMetric(item.metrics, &got, item.lastCacheHitStates)
- if item.want.bytesReadLocal != got.bytesReadLocal {
- t.Errorf("%s bytesReadLocal check failure,want:%+v,got:%+v", k, item.want.bytesReadLocal, got.bytesReadLocal)
- }
- if item.want.bytesReadUfsAll != got.bytesReadUfsAll {
- t.Errorf("%s bytesReadUfsAll check failure,want:%+v,got:%+v", k, item.want.bytesReadUfsAll, got.bytesReadUfsAll)
- }
- }
-
- }
-}
-
-func mockGooseFSReportMetricsForParseMetric() string {
- return `Cluster.BytesReadGooseFS (Type: COUNTER, Value: 0B)
- Cluster.BytesReadGooseFSThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesReadDomain (Type: COUNTER, Value: 0B)
- Cluster.BytesReadDomainThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesReadLocal (Type: COUNTER, Value: 19.37MB)
- Cluster.BytesReadLocalThroughput (Type: GAUGE, Value: 495.97KB/MIN)
- Cluster.BytesReadPerUfs.UFS:s3:%2F%2Ffluid (Type: COUNTER, Value: 30.75MB)
- Cluster.BytesReadUfsAll (Type: COUNTER, Value: 30.75MB)
- Cluster.BytesReadUfsThroughput (Type: GAUGE, Value: 787.17KB/MIN)
- Cluster.BytesWrittenGooseFS (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenGooseFSThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesWrittenDomain (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenDomainThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesWrittenLocal (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenLocalThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.BytesWrittenUfsAll (Type: COUNTER, Value: 0B)
- Cluster.BytesWrittenUfsThroughput (Type: GAUGE, Value: 0B/MIN)
- Cluster.CapacityFree (Type: GAUGE, Value: 9,842,601)
- Cluster.CapacityFreeTierHDD (Type: GAUGE, Value: 0)
- Cluster.CapacityFreeTierMEM (Type: GAUGE, Value: 9,842,601)
- Cluster.CapacityFreeTierSSD (Type: GAUGE, Value: 0)
- Cluster.CapacityTotal (Type: GAUGE, Value: 20,000,000)
- Cluster.CapacityTotalTierHDD (Type: GAUGE, Value: 0)
- Cluster.CapacityTotalTierMEM (Type: GAUGE, Value: 20,000,000)
- Cluster.CapacityTotalTierSSD (Type: GAUGE, Value: 0)
- Cluster.CapacityUsed (Type: GAUGE, Value: 10,157,399)
- Cluster.CapacityUsedTierHDD (Type: GAUGE, Value: 0)
- Cluster.CapacityUsedTierMEM (Type: GAUGE, Value: 10,157,399)
- Cluster.CapacityUsedTierSSD (Type: GAUGE, Value: 0)
- Cluster.RootUfsCapacityFree (Type: GAUGE, Value: -1)
- Cluster.RootUfsCapacityTotal (Type: GAUGE, Value: -1)
- Cluster.RootUfsCapacityUsed (Type: GAUGE, Value: -1)
- Cluster.Workers (Type: GAUGE, Value: 1)
- Master.CompleteFileOps (Type: COUNTER, Value: 0)
- Master.ConnectFromMaster.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 0)
- Master.Create.UFS:%2Fjournal%2FBlockMaster.UFS_TYPE:local (Type: TIMER, Value: 1)
- Master.Create.UFS:%2Fjournal%2FFileSystemMaster.UFS_TYPE:local (Type: TIMER, Value: 1)
- Master.Create.UFS:%2Fjournal%2FMetaMaster.UFS_TYPE:local (Type: TIMER, Value: 1)
- Master.CreateDirectoryOps (Type: COUNTER, Value: 0)
- Master.CreateFileOps (Type: COUNTER, Value: 0)
- Master.DeletePathOps (Type: COUNTER, Value: 0)
- Master.DirectoriesCreated (Type: COUNTER, Value: 0)
- Master.EdgeCacheSize (Type: GAUGE, Value: 7)
- Master.Exists.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 2)
- Master.FileBlockInfosGot (Type: COUNTER, Value: 0)
- Master.FileInfosGot (Type: COUNTER, Value: 25)
- Master.FilesCompleted (Type: COUNTER, Value: 7)
- Master.FilesCreated (Type: COUNTER, Value: 7)
- Master.FilesFreed (Type: COUNTER, Value: 0)
- Master.FilesPersisted (Type: COUNTER, Value: 0)
- Master.FilesPinned (Type: GAUGE, Value: 0)
- Master.FreeFileOps (Type: COUNTER, Value: 0)
- Master.GetAcl.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 7)
- Master.GetBlockInfo.User:root (Type: TIMER, Value: 3)
- Master.GetBlockMasterInfo.User:root (Type: TIMER, Value: 173)
- Master.GetConfigHash.User:root (Type: TIMER, Value: 40)
- Master.GetFileBlockInfoOps (Type: COUNTER, Value: 0)
- Master.GetFileInfoOps (Type: COUNTER, Value: 9)
- Master.GetFileLocations.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 24)
- Master.GetFingerprint.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 1)
- Master.GetMountTable.User:root (Type: TIMER, Value: 2)
- Master.GetNewBlockOps (Type: COUNTER, Value: 0)
- Master.GetSpace.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 18)
- Master.GetSpace.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 103)
- Master.GetStatus.User:root (Type: TIMER, Value: 6)
- Master.GetStatus.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 3)
- Master.GetStatusFailures.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: COUNTER, Value: 2)
- Master.GetWorkerInfoList.User:root (Type: TIMER, Value: 2)
- Master.InodeCacheSize (Type: GAUGE, Value: 8)
- Master.JournalFlushTimer (Type: TIMER, Value: 22)
- Master.LastBackupEntriesCount (Type: GAUGE, Value: -1)
- Master.LastBackupRestoreCount (Type: GAUGE, Value: -1)
- Master.LastBackupRestoreTimeMs (Type: GAUGE, Value: -1)
- Master.LastBackupTimeMs (Type: GAUGE, Value: -1)
- Master.ListStatus.UFS:%2Fjournal%2FBlockMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FFileSystemMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FMetaMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FMetricsMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.UFS:%2Fjournal%2FTableMaster.UFS_TYPE:local (Type: TIMER, Value: 63)
- Master.ListStatus.User:root (Type: TIMER, Value: 3)
- Master.ListStatus.User:root.UFS:s3:%2F%2Ffluid.UFS_TYPE:s3 (Type: TIMER, Value: 1)
- Master.ListingCacheSize (Type: GAUGE, Value: 8)
- Master.MountOps (Type: COUNTER, Value: 0)
- Master.NewBlocksGot (Type: COUNTER, Value: 0)
- Master.PathsDeleted (Type: COUNTER, Value: 0)
- Master.PathsMounted (Type: COUNTER, Value: 0)
- Master.PathsRenamed (Type: COUNTER, Value: 0)
- Master.PathsUnmounted (Type: COUNTER, Value: 0)
- Master.PerUfsOpConnectFromMaster.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 0)
- Master.PerUfsOpCreate.UFS:%2Fjournal%2FBlockMaster (Type: GAUGE, Value: 1)
- Master.PerUfsOpCreate.UFS:%2Fjournal%2FFileSystemMaster (Type: GAUGE, Value: 1)
- Master.PerUfsOpCreate.UFS:%2Fjournal%2FMetaMaster (Type: GAUGE, Value: 1)
- Master.PerUfsOpExists.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 2)
- Master.PerUfsOpGetFileLocations.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 24)
- Master.PerUfsOpGetFingerprint.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 1)
- Master.PerUfsOpGetSpace.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 116)
- Master.PerUfsOpGetStatus.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 3)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FBlockMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FFileSystemMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FMetaMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FMetricsMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:%2Fjournal%2FTableMaster (Type: GAUGE, Value: 60)
- Master.PerUfsOpListStatus.UFS:s3:%2F%2Ffluid (Type: GAUGE, Value: 1)
- Master.RenamePathOps (Type: COUNTER, Value: 0)
- Master.SetAclOps (Type: COUNTER, Value: 0)
- Master.SetAttributeOps (Type: COUNTER, Value: 0)
- Master.TotalPaths (Type: GAUGE, Value: 8)
- Master.UfsSessionCount-Ufs:s3:%2F%2Ffluid (Type: COUNTER, Value: 0)
- Master.UnmountOps (Type: COUNTER, Value: 0)
- Master.blockHeartbeat.User:root (Type: TIMER, Value: 2,410)
- Master.commitBlock.User:root (Type: TIMER, Value: 1)
- Master.getConfigHash (Type: TIMER, Value: 4)
- Master.getConfigHash.User:root (Type: TIMER, Value: 239)
- Master.getConfiguration (Type: TIMER, Value: 20)
- Master.getConfiguration.User:root (Type: TIMER, Value: 428)
- Master.getMasterInfo.User:root (Type: TIMER, Value: 173)
- Master.getMetrics.User:root (Type: TIMER, Value: 33)
- Master.getPinnedFileIds.User:root (Type: TIMER, Value: 2,410)
- Master.getUfsInfo.User:root (Type: TIMER, Value: 1)
- Master.getWorkerId.User:root (Type: TIMER, Value: 1)
- Master.metricsHeartbeat.User:root (Type: TIMER, Value: 4)
- Master.registerWorker.User:root (Type: TIMER, Value: 1)`
-}
-
-func mockGooseFSReportSummaryForParseReport() string {
- summary := `GooseFS cluster summary:
- Master Address: 172.18.0.2:20000
- Web Port: 20001
- Rpc Port: 20000
- Started: 07-02-2021 11:15:25:107
- Uptime: 0 day(s), 1 hour(s), 3 minute(s), and 35 second(s)
- Version: 2.3.1-SNAPSHOT
- Safe Mode: false
- Zookeeper Enabled: false
- Live Workers: 1
- Lost Workers: 0
- Total Capacity: 19.07MB
- Tier: MEM Size: 19.07MB
- Used Capacity: 9.69MB
- Tier: MEM Size: 9.69MB
- Free Capacity: 9.39MB
- `
-
- return summary
-}
diff --git a/pkg/ddc/goosefs/runtime_info.go b/pkg/ddc/goosefs/runtime_info.go
deleted file mode 100644
index 03ee4dbe605..00000000000
--- a/pkg/ddc/goosefs/runtime_info.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/testutil"
-)
-
-// getRuntimeInfo gets runtime info
-func (e *GooseFSEngine) getRuntimeInfo() (base.RuntimeInfoInterface, error) {
- if e.runtimeInfo == nil {
- runtime, err := e.getRuntime()
- if err != nil {
- return e.runtimeInfo, err
- }
- opts := []base.RuntimeInfoOption{
- base.WithTieredStore(runtime.Spec.TieredStore),
- base.WithMetadataList(base.GetMetadataListFromAnnotation(runtime)),
- base.WithAnnotations(runtime.Annotations),
- }
-
- e.runtimeInfo, err = base.BuildRuntimeInfo(e.name, e.namespace, e.runtimeType, opts...)
- if err != nil {
- return e.runtimeInfo, err
- }
-
- // Setup Fuse Deploy Mode
- e.runtimeInfo.SetFuseNodeSelector(runtime.Spec.Fuse.NodeSelector)
- }
-
- if testutil.IsUnitTest() {
- return e.runtimeInfo, nil
- }
-
- // Handling information of bound dataset. XXXEngine.getRuntimeInfo() might be called before the runtime is bound to a dataset,
- // so here we must lazily set dataset-related information once we found there's one bound dataset.
- if len(e.runtimeInfo.GetOwnerDatasetUID()) == 0 {
- runtime, err := e.getRuntime()
- if err != nil {
- return e.runtimeInfo, err
- }
-
- uid, err := base.GetOwnerDatasetUIDFromRuntimeMeta(runtime.ObjectMeta)
- if err != nil {
- return nil, err
- }
-
- if len(uid) > 0 {
- e.runtimeInfo.SetOwnerDatasetUID(uid)
- }
- }
-
- if !e.runtimeInfo.IsPlacementModeSet() {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if utils.IgnoreNotFound(err) != nil {
- return nil, err
- }
-
- if dataset != nil {
- e.runtimeInfo.SetupWithDataset(dataset)
- }
- }
-
- return e.runtimeInfo, nil
-}
diff --git a/pkg/ddc/goosefs/runtime_info_test.go b/pkg/ddc/goosefs/runtime_info_test.go
deleted file mode 100644
index 643e505f2e5..00000000000
--- a/pkg/ddc/goosefs/runtime_info_test.go
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- v1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func newGooseEngineRT(c client.Client, name string, namespace string, withRuntimeInfo bool, unittest bool) *GooseFSEngine {
- runTimeInfo, _ := base.BuildRuntimeInfo(name, namespace, common.GooseFSRuntime)
- engine := &GooseFSEngine{
- runtime: &datav1alpha1.GooseFSRuntime{},
- name: name,
- namespace: namespace,
- Client: c,
- runtimeInfo: nil,
- UnitTest: unittest,
- Log: fake.NullLogger(),
- }
-
- if withRuntimeInfo {
- engine.runtimeInfo = runTimeInfo
- }
- return engine
-}
-
-var _ = Describe("GetRuntimeInfo", func() {
- var fakeClient client.Client
-
- BeforeEach(func() {
- runtimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "fluid",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- },
- }
- daemonSetInputs := []*v1.DaemonSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-worker",
- Namespace: "fluid",
- },
- Spec: v1.DaemonSetSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{NodeSelector: map[string]string{"data.fluid.io/storage-fluid-hbase": "selector"}},
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop-worker",
- Namespace: "fluid",
- },
- Spec: v1.DaemonSetSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{NodeSelector: map[string]string{"data.fluid.io/storage-fluid-hadoop": "selector"}},
- },
- },
- },
- }
- dataSetInputs := []*datav1alpha1.Dataset{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "fluid",
- },
- },
- }
- objs := []runtime.Object{}
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- for _, daemonSetInput := range daemonSetInputs {
- objs = append(objs, daemonSetInput.DeepCopy())
- }
- for _, dataSetInput := range dataSetInputs {
- objs = append(objs, dataSetInput.DeepCopy())
- }
- fakeClient = fake.NewFakeClientWithScheme(testScheme, objs...)
- })
-
- type testCase struct {
- name string
- namespace string
- withRuntimeInfo bool
- unittest bool
- expectErr bool
- expectNil bool
- }
-
- DescribeTable("should get runtime info correctly",
- func(tc testCase) {
- engine := newGooseEngineRT(fakeClient, tc.name, tc.namespace, tc.withRuntimeInfo, tc.unittest)
- runtimeInfo, err := engine.getRuntimeInfo()
-
- if tc.expectErr {
- Expect(err).To(HaveOccurred())
- } else {
- Expect(err).NotTo(HaveOccurred())
- }
-
- if tc.expectNil {
- Expect(runtimeInfo).To(BeNil())
- } else {
- Expect(runtimeInfo).NotTo(BeNil())
- }
- },
- Entry("hbase without runtimeInfo, not unittest",
- testCase{
- name: "hbase",
- namespace: "fluid",
- withRuntimeInfo: false,
- unittest: false,
- expectErr: false,
- expectNil: false,
- },
- ),
- Entry("hbase without runtimeInfo, unittest",
- testCase{
- name: "hbase",
- namespace: "fluid",
- withRuntimeInfo: false,
- unittest: true,
- expectErr: false,
- expectNil: false,
- },
- ),
- Entry("hbase with runtimeInfo",
- testCase{
- name: "hbase",
- namespace: "fluid",
- withRuntimeInfo: true,
- unittest: false,
- expectErr: false,
- expectNil: false,
- },
- ),
- Entry("hadoop without runtimeInfo",
- testCase{
- name: "hadoop",
- namespace: "fluid",
- withRuntimeInfo: false,
- unittest: false,
- expectErr: false,
- expectNil: false,
- },
- ),
- )
-})
diff --git a/pkg/ddc/goosefs/shutdown.go b/pkg/ddc/goosefs/shutdown.go
deleted file mode 100644
index 0ae96b864d4..00000000000
--- a/pkg/ddc/goosefs/shutdown.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/pkg/errors"
-
- "github.com/fluid-cloudnative/fluid/pkg/utils/dataset/lifecycle"
- "github.com/fluid-cloudnative/fluid/pkg/utils/helm"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
-)
-
-// shut down the GooseFS engine
-func (e *GooseFSEngine) Shutdown() (err error) {
- if e.retryShutdown < e.gracefulShutdownLimits {
- err = e.cleanupCache()
- if err != nil {
- e.retryShutdown = e.retryShutdown + 1
- e.Log.Info("clean cache failed",
- // "engine", e,
- "retry times", e.retryShutdown)
- return
- }
- }
-
- if e.MetadataSyncDoneCh != nil {
- close(e.MetadataSyncDoneCh)
- }
-
- err = e.destroyWorkers()
- if err != nil {
- return
- }
-
- err = e.destroyMaster()
- if err != nil {
- return
- }
-
- err = e.releasePorts()
- if err != nil {
- return
- }
-
- err = e.cleanAll()
- return err
-}
-
-// destroyMaster Destroies the master
-func (e *GooseFSEngine) destroyMaster() (err error) {
- var found bool
- found, err = helm.CheckRelease(e.name, e.namespace)
- if err != nil {
- return err
- }
-
- if found {
- err = helm.DeleteRelease(e.name, e.namespace)
- if err != nil {
- return
- }
- }
- return
-}
-
-// // Destroy the workers
-// func (e *GooseFSEngine) destroyWorkers() error {
-// return nil
-// }
-
-// cleanupCache cleans up the cache
-func (e *GooseFSEngine) cleanupCache() (err error) {
- // TODO(chrisydxie): clean up the cache
- cacheStates, err := e.queryCacheStatus()
- if cacheStates.cached == "" {
- return
- }
-
- e.Log.Info("The cache before cleanup",
- "cached", cacheStates.cached,
- "cachedPercentage", cacheStates.cachedPercentage)
-
- cached, err := utils.FromHumanSize(cacheStates.cached)
- if err != nil {
- return err
- }
-
- if cached == 0 {
- e.Log.Info("No need to clean cache",
- "cached", cacheStates.cached,
- "cachedPercentage", cacheStates.cachedPercentage)
- return nil
- }
-
- err = e.invokeCleanCache("/")
- if err != nil {
- if strings.Contains(err.Error(), "not found") {
- return nil
- } else if strings.Contains(err.Error(), "does not have a host assigned") {
- return nil
- }
- return err
- } else {
- e.Log.Info("Clean up the cache successfully")
- }
-
- time.Sleep(time.Duration(10 * time.Second))
-
- // ufs, cached, cachedPercentage, err = e.du()
- // if err != nil {
- // return
- // }
-
- // e.Log.Info("The cache after cleanup", "ufs", ufs,
- // "cached", cached,
- // "cachedPercentage", cachedPercentage)
-
- // if cached > 0 {
- // return fmt.Errorf("The remaining cached is not cleaned up, it still has %d", cached)
- // }
-
- return fmt.Errorf("the remaining cached is not cleaned up, check again")
-}
-
-func (e *GooseFSEngine) releasePorts() (err error) {
- var valueConfigMapName = e.getHelmValuesConfigMapName()
-
- allocator, err := portallocator.GetRuntimePortAllocator()
- if err != nil {
- return errors.Wrap(err, "GetRuntimePortAllocator when releasePorts")
- }
-
- cm, err := kubeclient.GetConfigmapByName(e.Client, valueConfigMapName, e.namespace)
- if err != nil {
- return errors.Wrap(err, "GetConfigmapByName when releasePorts")
- }
-
- // The value configMap is not found
- if cm == nil {
- e.Log.Info("value configMap not found, there might be some unreleased ports", "valueConfigMapName", valueConfigMapName)
- return nil
- }
-
- portsToRelease, err := parsePortsFromConfigMap(cm)
- if err != nil {
- return errors.Wrap(err, "parsePortsFromConfigMap when releasePorts")
- }
-
- allocator.ReleaseReservedPorts(portsToRelease)
- return nil
-}
-
-// cleanAll cleans up the all
-func (e *GooseFSEngine) cleanAll() (err error) {
- count, err := e.Helper.CleanUpFuse()
- if err != nil {
- e.Log.Error(err, "Err in cleaning fuse")
- return err
- }
- e.Log.Info("clean up fuse count", "n", count)
-
- var (
- valueConfigmapName = e.getHelmValuesConfigMapName()
- configmapName = e.name + "-config"
- namespace = e.namespace
- )
-
- cms := []string{valueConfigmapName, configmapName}
-
- for _, cm := range cms {
- err = kubeclient.DeleteConfigMap(e.Client, cm, namespace)
- if err != nil {
- return
- }
- }
-
- return nil
-}
-
-// destroyWorkers tears down the GooseFS worker pods associated with this engine,
-// delegating the actual teardown logic to Helper.TearDownWorkers.
-func (e *GooseFSEngine) destroyWorkers() (err error) {
- // SchedulerMutex only for patch mode
- lifecycle.SchedulerMutex.Lock()
- defer lifecycle.SchedulerMutex.Unlock()
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- return e.Helper.TearDownWorkers(runtimeInfo)
-}
diff --git a/pkg/ddc/goosefs/shutdown_test.go b/pkg/ddc/goosefs/shutdown_test.go
deleted file mode 100644
index c7faff83f27..00000000000
--- a/pkg/ddc/goosefs/shutdown_test.go
+++ /dev/null
@@ -1,593 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
- "testing"
-
- . "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/helm"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "github.com/go-logr/logr"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/net"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-var (
- testScheme *runtime.Scheme
- mockConfigMapData = `----
-fullnameOverride: mnist
-image: ccr.ccs.tencentyun.com/qcloud/goosefs
-imageTag: v1.2.0
-imagePullPolicy: IfNotPresent
-user: 0
-group: 0
-fsGroup: 0
-properties:
- goosefs.fuse.cached.paths.max: "1000000"
- goosefs.fuse.debug.enabled: "true"
- goosefs.fuse.jnifuse.enabled: "true"
- goosefs.fuse.logging.threshold: 1000ms
- goosefs.fuse.user.group.translation.enabled: "true"
- goosefs.job.master.finished.job.retention.time: 30sec
- goosefs.job.master.rpc.port: "28362"
- goosefs.job.master.web.port: "31380"
- goosefs.job.worker.data.port: "30918"
- goosefs.job.worker.rpc.port: "29476"
- goosefs.job.worker.threadpool.size: "164"
- goosefs.job.worker.web.port: "27403"
- goosefs.master.journal.folder: /journal
- goosefs.master.journal.log.size.bytes.max: 500MB
- goosefs.master.journal.type: UFS
- goosefs.master.metadata.sync.concurrency.level: "128"
- goosefs.master.metadata.sync.executor.pool.size: "128"
- goosefs.master.metadata.sync.ufs.prefetch.pool.size: "128"
- goosefs.master.metastore: ROCKS
- goosefs.master.metastore.inode.cache.max.size: "10000000"
- goosefs.master.mount.table.root.ufs: /underFSStorage
- goosefs.master.rpc.executor.core.pool.size: "128"
- goosefs.master.rpc.executor.max.pool.size: "1024"
- goosefs.master.rpc.port: "30399"
- goosefs.master.security.impersonation.root.groups: '*'
- goosefs.master.security.impersonation.root.users: '*'
- goosefs.master.web.port: "31203"
- goosefs.security.authorization.permission.enabled: "false"
- goosefs.security.stale.channel.purge.interval: 365d
- goosefs.underfs.object.store.breadcrumbs.enabled: "false"
- goosefs.user.block.avoid.eviction.policy.reserved.size.bytes: 2GB
- goosefs.user.block.master.client.pool.gc.threshold: 2day
- goosefs.user.block.master.client.threads: "1024"
- goosefs.user.block.size.bytes.default: 16MB
- goosefs.user.block.worker.client.pool.min: "512"
- goosefs.user.block.write.location.policy.class: com.qcloud.cos.goosefs.client.block.policy.LocalFirstAvoidEvictionPolicy
- goosefs.user.client.cache.enabled: "false"
- goosefs.user.file.create.ttl.action: FREE
- goosefs.user.file.master.client.threads: "1024"
- goosefs.user.file.passive.cache.enabled: "false"
- goosefs.user.file.readtype.default: CACHE
- goosefs.user.file.replication.max: "1"
- goosefs.user.file.writetype.default: CACHE_THROUGH
- goosefs.user.local.reader.chunk.size.bytes: 32MB
- goosefs.user.logging.threshold: 1000ms
- goosefs.user.metadata.cache.enabled: "true"
- goosefs.user.metadata.cache.expiration.time: 2day
- goosefs.user.metadata.cache.max.size: "6000000"
- goosefs.user.metrics.collection.enabled: "true"
- goosefs.user.streaming.reader.chunk.size.bytes: 32MB
- goosefs.user.ufs.block.read.location.policy: com.qcloud.cos.goosefs.client.block.policy.LocalFirstAvoidEvictionPolicy
- goosefs.user.update.file.accesstime.disabled: "true"
- goosefs.user.worker.list.refresh.interval: 2min
- goosefs.web.ui.enabled: "false"
- goosefs.worker.allocator.class: com.qcloud.cos.goosefs.worker.block.allocator.MaxFreeAllocator
- goosefs.worker.block.master.client.pool.size: "1024"
- goosefs.worker.file.buffer.size: 1MB
- goosefs.worker.network.reader.buffer.size: 32MB
- goosefs.worker.rpc.port: "31285"
- goosefs.worker.web.port: "31674"
- log4j.logger.alluxio.fuse: DEBUG
- log4j.logger.com.qcloud.cos.goosefs.fuse: DEBUG
-master:
- jvmOptions:
- - -Xmx16G
- - -XX:+UnlockExperimentalVMOptions
- env:
- GOOSEFS_WORKER_TIEREDSTORE_LEVEL0_DIRS_PATH: /dev/shm/yijiupi/mnist
- affinity:
- nodeAffinity: null
- replicaCount: 1
- hostNetwork: true
- ports:
- rpc: 30399
- web: 31203
- backupPath: /tmp/goosefs-backup/yijiupi/mnist
-jobMaster:
- ports:
- rpc: 28362
- web: 31380
-worker:
- jvmOptions:
- - -Xmx12G
- - -XX:+UnlockExperimentalVMOptions
- - -XX:MaxDirectMemorySize=32g
- env:
- GOOSEFS_WORKER_TIEREDSTORE_LEVEL0_DIRS_PATH: /dev/shm/yijiupi/mnist
- hostNetwork: true
- ports:
- rpc: 31285
- web: 31674
-jobWorker:
- ports:
- rpc: 29476
- web: 27403
- data: 30918
-fuse:
- image: ccr.ccs.tencentyun.com/qcloud/goosefs-fuse
- nodeSelector:
- fluid.io/f-yijiupi-mnist: "true"
- imageTag: v1.2.0
- env:
- MOUNT_POINT: /runtime-mnt/goosefs/yijiupi/mnist/goosefs-fuse
- jvmOptions:
- - -Xmx16G
- - -Xms16G
- - -XX:+UseG1GC
- - -XX:MaxDirectMemorySize=32g
- - -XX:+UnlockExperimentalVMOptions
- mountPath: /runtime-mnt/goosefs/yijiupi/mnist/goosefs-fuse
- args:
- - fuse
- - --fuse-opts=rw,allow_other
- hostNetwork: true
- enabled: true
- criticalPod: true
-tieredstore:
- levels:
- - alias: MEM
- level: 0
- mediumtype: MEM
- type: hostPath
- path: /dev/shm/yijiupi/mnist
- quota: 1953125KB
- high: "0.8"
- low: "0.7"
-journal:
- volumeType: emptyDir
- size: 30Gi
-shortCircuit:
- enable: true
- policy: local
- volumeType: emptyDir
-initUsers:
- image: fluidcloudnative/init-users
- imageTag: v0.7.0-1cf2443
- imagePullPolicy: IfNotPresent
- envUsers: ""
- dir: ""
- envTieredPaths: ""
-monitoring: goosefs_runtime_metrics
-placement: Exclusive`
-)
-
-func init() {
- testScheme = runtime.NewScheme()
- _ = corev1.AddToScheme(testScheme)
- _ = datav1alpha1.AddToScheme(testScheme)
- _ = appsv1.AddToScheme(testScheme)
-}
-
-func TestDestroyWorker(t *testing.T) {
- // runtimeInfoSpark tests destroy Worker in exclusive mode.
- runtimeInfoSpark, err := base.BuildRuntimeInfo("spark", "fluid", common.GooseFSRuntime)
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
- runtimeInfoSpark.SetupWithDataset(&datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ExclusiveMode},
- })
-
- // runtimeInfoSpark tests destroy Worker in shareMode mode.
- runtimeInfoHadoop, err := base.BuildRuntimeInfo("hadoop", "fluid", common.GooseFSRuntime)
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
- runtimeInfoHadoop.SetupWithDataset(&datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ShareMode},
- })
- nodeSelector := map[string]string{
- "node-select": "true",
- }
- runtimeInfoHadoop.SetFuseNodeSelector(nodeSelector)
-
- var nodeInputs = []*corev1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-spark",
- Labels: map[string]string{
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-fluid-spark": "true",
- "fluid.io/s-fluid-spark": "true",
- "fluid.io/s-h-goosefs-d-fluid-spark": "5B",
- "fluid.io/s-h-goosefs-m-fluid-spark": "1B",
- "fluid.io/s-h-goosefs-t-fluid-spark": "6B",
- "fluid_exclusive": "fluid_spark",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-share",
- Labels: map[string]string{
- "fluid.io/dataset-num": "2",
- "fluid.io/s-goosefs-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- "fluid.io/s-goosefs-fluid-hbase": "true",
- "fluid.io/s-fluid-hbase": "true",
- "fluid.io/s-h-goosefs-d-fluid-hbase": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hbase": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hbase": "6B",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-hadoop",
- Labels: map[string]string{
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- "node-select": "true",
- },
- },
- },
- }
-
- testNodes := []runtime.Object{}
- for _, nodeInput := range nodeInputs {
- testNodes = append(testNodes, nodeInput.DeepCopy())
- }
-
- client := fake.NewFakeClientWithScheme(testScheme, testNodes...)
-
- var testCase = []struct {
- expectedWorkers int32
- runtimeInfo base.RuntimeInfoInterface
- wantedNodeNumber int32
- wantedNodeLabels map[string]map[string]string
- }{
- {
- expectedWorkers: -1,
- runtimeInfo: runtimeInfoSpark,
- wantedNodeNumber: 0,
- wantedNodeLabels: map[string]map[string]string{
- "test-node-spark": {},
- "test-node-share": {
- "fluid.io/dataset-num": "2",
- "fluid.io/s-goosefs-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- "fluid.io/s-goosefs-fluid-hbase": "true",
- "fluid.io/s-fluid-hbase": "true",
- "fluid.io/s-h-goosefs-d-fluid-hbase": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hbase": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hbase": "6B",
- },
- "test-node-hadoop": {
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-fluid-hadoop": "true",
- "fluid.io/s-fluid-hadoop": "true",
- "fluid.io/s-h-goosefs-d-fluid-hadoop": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hadoop": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hadoop": "6B",
- "node-select": "true",
- },
- },
- },
- {
- expectedWorkers: -1,
- runtimeInfo: runtimeInfoHadoop,
- wantedNodeNumber: 0,
- wantedNodeLabels: map[string]map[string]string{
- "test-node-spark": {},
- "test-node-share": {
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-fluid-hbase": "true",
- "fluid.io/s-fluid-hbase": "true",
- "fluid.io/s-h-goosefs-d-fluid-hbase": "5B",
- "fluid.io/s-h-goosefs-m-fluid-hbase": "1B",
- "fluid.io/s-h-goosefs-t-fluid-hbase": "6B",
- },
- "test-node-hadoop": {
- "node-select": "true",
- },
- },
- },
- }
- for _, test := range testCase {
- engine := &GooseFSEngine{Log: fake.NullLogger(), runtimeInfo: test.runtimeInfo}
- engine.Client = client
- engine.Helper = ctrl.BuildHelper(test.runtimeInfo, client, engine.Log)
- engine.name = test.runtimeInfo.GetName()
- engine.namespace = test.runtimeInfo.GetNamespace()
- if err != nil {
- t.Errorf("fail to exec the function with the error %v", err)
- }
- err := engine.destroyWorkers()
- if err != nil {
- t.Errorf("fail to exec the function with the error %v", err)
- }
- for _, node := range nodeInputs {
- newNode, err := kubeclient.GetNode(client, node.Name)
- if err != nil {
- t.Errorf("fail to get the node with the error %v", err)
- }
-
- if len(newNode.Labels) != len(test.wantedNodeLabels[node.Name]) {
- t.Errorf("fail to decrease the labels")
- }
- if len(newNode.Labels) != 0 && !reflect.DeepEqual(newNode.Labels, test.wantedNodeLabels[node.Name]) {
- t.Errorf("fail to decrease the labels")
- }
- }
-
- }
-}
-
-func TestGooseFSEngineCleanAll(t *testing.T) {
- type fields struct {
- name string
- namespace string
- cm *corev1.ConfigMap
- runtimeType string
- log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "spark",
- fields: fields{
- name: "spark",
- namespace: "fluid",
- runtimeType: "goosefs",
- cm: &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-goosefs-values",
- Namespace: "fluid",
- },
- Data: map[string]string{"data": mockConfigMapData},
- },
- log: fake.NullLogger(),
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, tt.fields.cm.DeepCopy())
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- helper := &ctrl.Helper{}
- patch1 := ApplyMethod(reflect.TypeOf(helper), "CleanUpFuse", func(_ *ctrl.Helper) (int, error) {
- return 0, nil
- })
- defer patch1.Reset()
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Client: client,
- Log: tt.fields.log,
- }
- if err := e.cleanAll(); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.cleanAll() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGooseFSEngineReleasePorts(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- runtimeType string
- cm *corev1.ConfigMap
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "spark",
- fields: fields{
- name: "spark",
- namespace: "fluid",
- runtimeType: "goosefs",
- cm: &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-goosefs-values",
- Namespace: "fluid",
- },
- Data: map[string]string{"data": mockConfigMapData},
- },
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- portRange := "26000-32000"
- pr, _ := net.ParsePortRange(portRange)
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, tt.fields.cm.DeepCopy())
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
-
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Client: client,
- Log: fake.NullLogger(),
- }
-
- err := portallocator.SetupRuntimePortAllocator(client, pr, "bitmap", GetReservedPorts)
- if err != nil {
- t.Fatalf("Failed to set up runtime port allocator due to %v", err)
- }
- allocator, _ := portallocator.GetRuntimePortAllocator()
- patch1 := ApplyMethod(reflect.TypeOf(allocator), "ReleaseReservedPorts",
- func(_ *portallocator.RuntimePortAllocator, ports []int) {
- })
- defer patch1.Reset()
-
- if err := e.releasePorts(); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.releasePorts() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGooseFSEngineCleanupCache(t *testing.T) {
- type fields struct {
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "spark",
- fields: fields{
- name: "spark",
- namespace: "field",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- engine := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- }
-
- patch1 := ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
- defer patch1.Reset()
-
- patch2 := ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "19.07MiB",
- },
- }
- return d, nil
- })
- defer patch2.Reset()
-
- patch3 := ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- }
- })
- defer patch3.Reset()
-
- if err := engine.cleanupCache(); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.cleanupCache() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGooseFSEngineDestroyMaster(t *testing.T) {
- type fields struct {
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "spark",
- fields: fields{
- name: "spark",
- namespace: "fluid",
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- }
-
- patch1 := ApplyFunc(helm.CheckRelease,
- func(_ string, _ string) (bool, error) {
- d := true
- return d, nil
- })
- defer patch1.Reset()
-
- patch2 := ApplyFunc(helm.DeleteRelease,
- func(_ string, _ string) error {
- return nil
- })
- defer patch2.Reset()
-
- if err := e.destroyMaster(); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.destroyMaster() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
diff --git a/pkg/ddc/goosefs/status.go b/pkg/ddc/goosefs/status.go
deleted file mode 100644
index a41f16368af..00000000000
--- a/pkg/ddc/goosefs/status.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "reflect"
- "time"
-
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/util/retry"
-)
-
-// CheckAndUpdateRuntimeStatus checks the related runtime status and updates it.
-func (e *GooseFSEngine) CheckAndUpdateRuntimeStatus() (ready bool, err error) {
-
- var (
- masterReady, workerReady bool
- masterName string = e.getMasterName()
- workerName string = e.getWorkerName()
- namespace string = e.namespace
- )
-
- // 1. Master should be ready
- master, err := kubeclient.GetStatefulSet(e.Client, masterName, namespace)
- if err != nil {
- return ready, err
- }
-
- // 2. Worker should be ready
- workers, err := ctrl.GetWorkersAsStatefulset(e.Client,
- types.NamespacedName{Namespace: e.namespace, Name: workerName})
- if err != nil {
- return ready, err
- }
- var workerNodeAffinity = kubeclient.MergeNodeSelectorAndNodeAffinity(workers.Spec.Template.Spec.NodeSelector, workers.Spec.Template.Spec.Affinity)
-
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- runtime, err := e.getRuntime()
- if err != nil {
- return err
- }
-
- runtimeToUpdate := runtime.DeepCopy()
-
- states, err := e.queryCacheStatus()
- if err != nil {
- return err
- }
-
- // 0. Update the cache status
- if len(runtime.Status.CacheStates) == 0 {
- runtimeToUpdate.Status.CacheStates = map[common.CacheStateName]string{}
- }
-
- // set node affinity
- runtimeToUpdate.Status.CacheAffinity = workerNodeAffinity
-
- runtimeToUpdate.Status.CacheStates[common.CacheCapacity] = states.cacheCapacity
- runtimeToUpdate.Status.CacheStates[common.CachedPercentage] = states.cachedPercentage
- runtimeToUpdate.Status.CacheStates[common.Cached] = states.cached
- // update cache hit ratio
- runtimeToUpdate.Status.CacheStates[common.CacheHitRatio] = states.cacheHitStates.cacheHitRatio
- runtimeToUpdate.Status.CacheStates[common.LocalHitRatio] = states.cacheHitStates.localHitRatio
- runtimeToUpdate.Status.CacheStates[common.RemoteHitRatio] = states.cacheHitStates.remoteHitRatio
- // update cache throughput ratio
- runtimeToUpdate.Status.CacheStates[common.LocalThroughputRatio] = states.cacheHitStates.localThroughputRatio
- runtimeToUpdate.Status.CacheStates[common.RemoteThroughputRatio] = states.cacheHitStates.remoteThroughputRatio
- runtimeToUpdate.Status.CacheStates[common.CacheThroughputRatio] = states.cacheHitStates.cacheThroughputRatio
-
- if *master.Spec.Replicas == master.Status.ReadyReplicas {
- masterReady = true
- }
-
- if runtime.Replicas() == 0 || workers.Status.ReadyReplicas > 0 {
- workerReady = true
- }
-
- if masterReady && workerReady {
- ready = true
- }
-
- // Update the setup time of GooseFS runtime
- if ready && runtimeToUpdate.Status.SetupDuration == "" {
- runtimeToUpdate.Status.SetupDuration = utils.CalculateDuration(runtimeToUpdate.CreationTimestamp.Time, time.Now())
- }
-
- // Update API Gateway address if it is enabled
- if runtimeToUpdate.Spec.APIGateway.Enabled {
- if runtimeToUpdate.Status.APIGatewayStatus == nil {
- runtimeToUpdate.Status.APIGatewayStatus, err = e.GetAPIGatewayStatus()
- if err != nil {
- return err
- }
- } else {
- e.Log.V(1).Info("No need to update APIGateway status")
- }
- } else {
- e.Log.V(1).Info("No need to update APIGateway status")
- }
-
- if !reflect.DeepEqual(runtime.Status, runtimeToUpdate.Status) {
- err = e.Client.Status().Update(context.TODO(), runtimeToUpdate)
- } else {
- e.Log.Info("Do nothing because the runtime status is not changed.")
- }
-
- return err
- })
-
- if err != nil {
- _ = utils.LoggingErrorExceptConflict(e.Log, err, "Failed to update runtime status", types.NamespacedName{Namespace: e.namespace, Name: e.name})
- }
- return
-}
diff --git a/pkg/ddc/goosefs/status_test.go b/pkg/ddc/goosefs/status_test.go
deleted file mode 100644
index 11781c3ee46..00000000000
--- a/pkg/ddc/goosefs/status_test.go
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
-Copyright 2021 The Fluid Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
-
- "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/ptr"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-)
-
-const (
- testStatusNamespace = "fluid"
- testStatusRuntimeHadoop = "hadoop"
- testStatusRuntimeHbase = "hbase"
- testStatusRuntimeNoWorker = "no-worker"
- testStatusRuntimeNoMaster = "no-master"
- testStatusRuntimeZeroReplicas = "zero-replicas"
- testStatusPhaseNotReady = "NotReady"
- testStatusUfsTotal = "19.07MiB"
-)
-
-func newGooseFSEngineForStatus(c client.Client, name string, namespace string) *GooseFSEngine {
- runTimeInfo, err := base.BuildRuntimeInfo(name, namespace, common.GooseFSRuntime)
- Expect(err).NotTo(HaveOccurred())
- engine := &GooseFSEngine{
- runtime: &datav1alpha1.GooseFSRuntime{},
- name: name,
- namespace: namespace,
- Client: c,
- runtimeInfo: runTimeInfo,
- Log: fake.NullLogger(),
- }
- engine.Helper = ctrl.BuildHelper(runTimeInfo, c, engine.Log)
- return engine
-}
-
-var _ = Describe("GooseFSEngine Runtime Status Tests", Label("pkg.ddc.goosefs.status_test.go"), func() {
- var patches *gomonkey.Patches
-
- AfterEach(func() {
- if patches != nil {
- patches.Reset()
- }
- })
-
- Describe("CheckAndUpdateRuntimeStatus", func() {
- Context("when master and worker are all ready", func() {
- var (
- engine *GooseFSEngine
- fakeClient client.Client
- )
-
- BeforeEach(func() {
- masterInputs := []*appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeHadoop + "-master",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- }
-
- workerInputs := []appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeHadoop + "-worker",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](3),
- },
- Status: appsv1.StatefulSetStatus{
- Replicas: 3,
- ReadyReplicas: 2,
- },
- },
- }
-
- runtimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeHadoop,
- Namespace: testStatusNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 2,
- },
- Status: datav1alpha1.RuntimeStatus{
- CurrentWorkerNumberScheduled: 3,
- CurrentMasterNumberScheduled: 3,
- CurrentFuseNumberScheduled: 3,
- DesiredMasterNumberScheduled: 2,
- DesiredWorkerNumberScheduled: 3,
- DesiredFuseNumberScheduled: 2,
- Conditions: []datav1alpha1.RuntimeCondition{
- utils.NewRuntimeCondition(datav1alpha1.RuntimeWorkersInitialized, datav1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", corev1.ConditionTrue),
- utils.NewRuntimeCondition(datav1alpha1.RuntimeFusesInitialized, datav1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", corev1.ConditionTrue),
- },
- WorkerPhase: testStatusPhaseNotReady,
- FusePhase: testStatusPhaseNotReady,
- },
- },
- }
-
- objs := []runtime.Object{}
- for _, masterInput := range masterInputs {
- objs = append(objs, masterInput.DeepCopy())
- }
-
- for _, workerInput := range workerInputs {
- objs = append(objs, workerInput.DeepCopy())
- }
-
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- fakeClient = fake.NewFakeClientWithScheme(testScheme, objs...)
- engine = newGooseFSEngineForStatus(fakeClient, testStatusRuntimeHadoop, testStatusNamespace)
- })
-
- It("should return ready with no error", func() {
- patches = gomonkey.ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
-
- patches.ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: testStatusUfsTotal,
- },
- }
- return d, nil
- })
-
- patches.ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- }
- })
-
- ready, err := engine.CheckAndUpdateRuntimeStatus()
- Expect(err).NotTo(HaveOccurred())
- Expect(ready).To(BeTrue())
- })
- })
-
- Context("when master is not ready", func() {
- var (
- engine *GooseFSEngine
- fakeClient client.Client
- )
-
- BeforeEach(func() {
- masterInputs := []*appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeHbase + "-master",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 0,
- },
- },
- }
-
- workerInputs := []appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeHbase + "-worker",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](2),
- },
- Status: appsv1.StatefulSetStatus{
- Replicas: 2,
- ReadyReplicas: 2,
- },
- },
- }
-
- runtimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeHbase,
- Namespace: testStatusNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 2,
- },
- Status: datav1alpha1.RuntimeStatus{
- CurrentWorkerNumberScheduled: 2,
- CurrentMasterNumberScheduled: 2,
- CurrentFuseNumberScheduled: 2,
- DesiredMasterNumberScheduled: 2,
- DesiredWorkerNumberScheduled: 2,
- DesiredFuseNumberScheduled: 2,
- Conditions: []datav1alpha1.RuntimeCondition{
- utils.NewRuntimeCondition(datav1alpha1.RuntimeWorkersInitialized, datav1alpha1.RuntimeWorkersInitializedReason, "The workers are initialized.", corev1.ConditionTrue),
- utils.NewRuntimeCondition(datav1alpha1.RuntimeFusesInitialized, datav1alpha1.RuntimeFusesInitializedReason, "The fuses are initialized.", corev1.ConditionTrue),
- },
- WorkerPhase: testStatusPhaseNotReady,
- FusePhase: testStatusPhaseNotReady,
- },
- },
- }
-
- objs := []runtime.Object{}
- for _, masterInput := range masterInputs {
- objs = append(objs, masterInput.DeepCopy())
- }
-
- for _, workerInput := range workerInputs {
- objs = append(objs, workerInput.DeepCopy())
- }
-
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- fakeClient = fake.NewFakeClientWithScheme(testScheme, objs...)
- engine = newGooseFSEngineForStatus(fakeClient, testStatusRuntimeHbase, testStatusNamespace)
- })
-
- It("should return not ready", func() {
- patches = gomonkey.ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
-
- patches.ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: testStatusUfsTotal,
- },
- }
- return d, nil
- })
-
- patches.ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{
- bytesReadLocal: 20310917,
- bytesReadUfsAll: 32243712,
- }
- })
-
- ready, err := engine.CheckAndUpdateRuntimeStatus()
- Expect(err).NotTo(HaveOccurred())
- Expect(ready).To(BeFalse())
- })
- })
-
- Context("when worker statefulset is not found", func() {
- var (
- engine *GooseFSEngine
- fakeClient client.Client
- )
-
- BeforeEach(func() {
- masterInputs := []*appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeNoWorker + "-master",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- }
-
- runtimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeNoWorker,
- Namespace: testStatusNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 2,
- },
- Status: datav1alpha1.RuntimeStatus{
- CurrentWorkerNumberScheduled: 2,
- CurrentMasterNumberScheduled: 2,
- CurrentFuseNumberScheduled: 2,
- DesiredMasterNumberScheduled: 2,
- DesiredWorkerNumberScheduled: 2,
- DesiredFuseNumberScheduled: 2,
- WorkerPhase: testStatusPhaseNotReady,
- FusePhase: testStatusPhaseNotReady,
- },
- },
- }
-
- objs := []runtime.Object{}
- for _, masterInput := range masterInputs {
- objs = append(objs, masterInput.DeepCopy())
- }
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- fakeClient = fake.NewFakeClientWithScheme(testScheme, objs...)
- engine = newGooseFSEngineForStatus(fakeClient, testStatusRuntimeNoWorker, testStatusNamespace)
- })
-
- It("should return error", func() {
- ready, err := engine.CheckAndUpdateRuntimeStatus()
- Expect(err).To(HaveOccurred())
- Expect(ready).To(BeFalse())
- })
- })
-
- Context("when master statefulset is not found", func() {
- var (
- engine *GooseFSEngine
- fakeClient client.Client
- )
-
- BeforeEach(func() {
- workerInputs := []appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeNoMaster + "-worker",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](2),
- },
- Status: appsv1.StatefulSetStatus{
- Replicas: 2,
- ReadyReplicas: 2,
- },
- },
- }
-
- runtimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeNoMaster,
- Namespace: testStatusNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 2,
- },
- Status: datav1alpha1.RuntimeStatus{
- CurrentWorkerNumberScheduled: 2,
- CurrentMasterNumberScheduled: 2,
- CurrentFuseNumberScheduled: 2,
- DesiredMasterNumberScheduled: 2,
- DesiredWorkerNumberScheduled: 2,
- DesiredFuseNumberScheduled: 2,
- WorkerPhase: testStatusPhaseNotReady,
- FusePhase: testStatusPhaseNotReady,
- },
- },
- }
-
- objs := []runtime.Object{}
- for _, workerInput := range workerInputs {
- objs = append(objs, workerInput.DeepCopy())
- }
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- fakeClient = fake.NewFakeClientWithScheme(testScheme, objs...)
- engine = newGooseFSEngineForStatus(fakeClient, testStatusRuntimeNoMaster, testStatusNamespace)
- })
-
- It("should return error", func() {
- ready, err := engine.CheckAndUpdateRuntimeStatus()
- Expect(err).To(HaveOccurred())
- Expect(ready).To(BeFalse())
- })
- })
-
- Context("when runtime has zero worker replicas", func() {
- var (
- engine *GooseFSEngine
- fakeClient client.Client
- )
-
- BeforeEach(func() {
- masterInputs := []*appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeZeroReplicas + "-master",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- }
-
- workerInputs := []appsv1.StatefulSet{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeZeroReplicas + "-worker",
- Namespace: testStatusNamespace,
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](0),
- },
- Status: appsv1.StatefulSetStatus{
- Replicas: 0,
- ReadyReplicas: 0,
- },
- },
- }
-
- runtimeInputs := []*datav1alpha1.GooseFSRuntime{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: testStatusRuntimeZeroReplicas,
- Namespace: testStatusNamespace,
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 0,
- },
- Status: datav1alpha1.RuntimeStatus{
- CurrentWorkerNumberScheduled: 0,
- CurrentMasterNumberScheduled: 1,
- CurrentFuseNumberScheduled: 0,
- DesiredMasterNumberScheduled: 1,
- DesiredWorkerNumberScheduled: 0,
- DesiredFuseNumberScheduled: 0,
- WorkerPhase: testStatusPhaseNotReady,
- FusePhase: testStatusPhaseNotReady,
- },
- },
- }
-
- objs := []runtime.Object{}
- for _, masterInput := range masterInputs {
- objs = append(objs, masterInput.DeepCopy())
- }
- for _, workerInput := range workerInputs {
- objs = append(objs, workerInput.DeepCopy())
- }
- for _, runtimeInput := range runtimeInputs {
- objs = append(objs, runtimeInput.DeepCopy())
- }
- fakeClient = fake.NewFakeClientWithScheme(testScheme, objs...)
- engine = newGooseFSEngineForStatus(fakeClient, testStatusRuntimeZeroReplicas, testStatusNamespace)
- })
-
- It("should return ready when master is ready and zero workers are expected", func() {
- patches = gomonkey.ApplyMethod(reflect.TypeOf(engine), "GetReportSummary",
- func(_ *GooseFSEngine) (string, error) {
- summary := mockGooseFSReportSummary()
- return summary, nil
- })
-
- patches.ApplyFunc(utils.GetDataset,
- func(_ client.Reader, _ string, _ string) (*datav1alpha1.Dataset, error) {
- d := &datav1alpha1.Dataset{
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: testStatusUfsTotal,
- },
- }
- return d, nil
- })
-
- patches.ApplyMethod(reflect.TypeOf(engine), "GetCacheHitStates",
- func(_ *GooseFSEngine) cacheHitStates {
- return cacheHitStates{
- bytesReadLocal: 0,
- bytesReadUfsAll: 0,
- }
- })
-
- ready, err := engine.CheckAndUpdateRuntimeStatus()
- Expect(err).NotTo(HaveOccurred())
- Expect(ready).To(BeTrue())
- })
- })
- })
-})
diff --git a/pkg/ddc/goosefs/sync_runtime.go b/pkg/ddc/goosefs/sync_runtime.go
deleted file mode 100644
index c3710814a97..00000000000
--- a/pkg/ddc/goosefs/sync_runtime.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
-
-// SyncRuntime syncs the runtime spec
-func (e *GooseFSEngine) SyncRuntime(ctx cruntime.ReconcileRequestContext) (changed bool, err error) {
- return
-}
diff --git a/pkg/ddc/goosefs/sync_runtime_test.go b/pkg/ddc/goosefs/sync_runtime_test.go
deleted file mode 100644
index 4148ab4d26d..00000000000
--- a/pkg/ddc/goosefs/sync_runtime_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
-)
-
-var _ = Describe("GooseFSEngine_SyncRuntime", func() {
- type testCase struct {
- ctx cruntime.ReconcileRequestContext
- wantChanged bool
- wantErr bool
- }
-
- DescribeTable("should sync runtime correctly",
- func(tc testCase) {
- e := &GooseFSEngine{}
- gotChanged, err := e.SyncRuntime(tc.ctx)
-
- if tc.wantErr {
- Expect(err).To(HaveOccurred())
- } else {
- Expect(err).NotTo(HaveOccurred())
- }
- Expect(gotChanged).To(Equal(tc.wantChanged))
- },
- Entry("default case",
- testCase{
- wantChanged: false,
- wantErr: false,
- },
- ),
- )
-})
diff --git a/pkg/ddc/goosefs/transform.go b/pkg/ddc/goosefs/transform.go
deleted file mode 100644
index 854e4d25270..00000000000
--- a/pkg/ddc/goosefs/transform.go
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "errors"
- "fmt"
- "os"
- "strings"
- "time"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base/portallocator"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/tieredstore"
-)
-
-func (e *GooseFSEngine) transform(runtime *datav1alpha1.GooseFSRuntime) (value *GooseFS, err error) {
- if runtime == nil {
- err = fmt.Errorf("the goosefsRuntime is null")
- return
- }
- defer utils.TimeTrack(time.Now(), "GooseFSRuntime.Transform", "name", runtime.Name)
-
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return value, err
- }
-
- value = &GooseFS{}
-
- value.FullnameOverride = e.name
- value.OwnerDatasetId = utils.GetDatasetId(e.namespace, e.name, e.runtimeInfo.GetOwnerDatasetUID())
-
- // 1.transform the common part
- err = e.transformCommonPart(runtime, dataset, value)
- if err != nil {
- return
- }
-
- // 2.transform the masters
- err = e.transformMasters(runtime, dataset, value)
- if err != nil {
- return
- }
-
- // 3.transform the workers
- err = e.transformWorkers(runtime, value)
- if err != nil {
- return
- }
-
- // 4.transform the fuse
- err = e.transformFuse(runtime, dataset, value)
- if err != nil {
- return
- }
-
- // 5.transform the hadoop non-default configurations
- err = e.transformHadoopConfig(runtime, value)
- if err != nil {
- return
- }
-
- // 6.transform the dataset if it has local path or volume
- e.transformDatasetToVolume(runtime, dataset, value)
-
- // 7.transform the permission
- e.transformPermission(runtime, value)
-
- // 8.set optimization parameters
- e.optimizeDefaultProperties(runtime, value)
-
- // 9. set optimization parameters if all the mounts are HTTP
- e.optimizeDefaultPropertiesAndFuseForHTTP(runtime, dataset, value)
-
- // 10.allocate port for fluid engine
- err = e.allocatePorts(value)
- if err != nil {
- return
- }
-
- // 11.set engine properties
- e.setPortProperties(runtime, value)
-
- // 12.set API Gateway
- err = e.transformAPIGateway(runtime, value)
-
- // 13.set the placementMode
- e.transformPlacementMode(dataset, value)
- return
-}
-
-// 2. Transform the common part
-func (e *GooseFSEngine) transformCommonPart(runtime *datav1alpha1.GooseFSRuntime,
- dataset *datav1alpha1.Dataset,
- value *GooseFS) (err error) {
-
- image := runtime.Spec.GooseFSVersion.Image
- imageTag := runtime.Spec.GooseFSVersion.ImageTag
- imagePullPolicy := runtime.Spec.GooseFSVersion.ImagePullPolicy
-
- value.Image, value.ImageTag, value.ImagePullPolicy = e.parseRuntimeImage(image, imageTag, imagePullPolicy)
-
- value.UserInfo = common.UserInfo{
- User: 0,
- FSGroup: 0,
- Group: 0,
- }
-
- // transform init users
- e.transformInitUsers(runtime, value)
-
- // TODO: support nodeAffinity
-
- if len(runtime.Spec.Properties) > 0 {
- value.Properties = runtime.Spec.Properties
- } else {
- value.Properties = map[string]string{}
- }
-
- // generate goosefs root ufs by dataset spec mounts
- e.Log.Info("input", "mounts", dataset.Spec.Mounts, "common.RootDirPath", common.RootDirPath)
- uRootPath, m := utils.UFSPathBuilder{}.GenAlluxioUFSRootPath(dataset.Spec.Mounts)
- // attach mount options when direct mount ufs endpoint
- if m != nil {
- if mOptions, err := e.genUFSMountOptions(*m, dataset.Spec.SharedOptions, dataset.Spec.SharedEncryptOptions); err != nil {
- return err
- } else {
- for k, v := range mOptions {
- value.Properties[k] = v
- }
- }
- }
- e.Log.Info("output", "uRootPath", uRootPath, "m", m)
- // set goosefs root ufs
- value.Properties["goosefs.master.mount.table.root.ufs"] = uRootPath
-
- // Set the max replication
- dataReplicas := runtime.Spec.Data.Replicas
- if dataReplicas <= 0 {
- dataReplicas = 1
- }
-
- value.Properties["goosefs.user.file.replication.max"] = fmt.Sprintf("%d", dataReplicas)
-
- if len(runtime.Spec.JvmOptions) > 0 {
- value.JvmOptions = runtime.Spec.JvmOptions
- }
-
- value.Fuse.ShortCircuitPolicy = "local"
-
- var levels []Level
-
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- for _, level := range runtimeInfo.GetTieredStoreInfo().Levels {
-
- l := tieredstore.GetTieredLevel(runtimeInfo, level.MediumType)
-
- var paths []string
- var quotas []string
- for _, cachePath := range level.CachePaths {
- paths = append(paths, fmt.Sprintf("%s/%s/%s", cachePath.Path, runtime.Namespace, runtime.Name))
- quotas = append(quotas, utils.TransformQuantityToGooseFSUnit(cachePath.Quota))
- }
-
- pathConfigStr := strings.Join(paths, ",")
- quotaConfigStr := strings.Join(quotas, ",")
- mediumTypeConfigStr := strings.Join(*utils.FillSliceWithString(string(level.MediumType), len(paths)), ",")
-
- levels = append(levels, Level{
- Alias: string(level.MediumType),
- Level: l,
- Type: "hostPath",
- Path: pathConfigStr,
- Mediumtype: mediumTypeConfigStr,
- Low: level.Low,
- High: level.High,
- Quota: quotaConfigStr,
- })
- }
-
- value.Tieredstore.Levels = levels
-
- // value.Metastore = Metastore{
- // VolumeType: "emptyDir",
- // }
-
- // quantity, err := resource.ParseQuantity("10Gi")
- // if err != nil {
- // return err
- // }
- value.Journal = Journal{
- VolumeType: "emptyDir",
- Size: "30Gi",
- }
-
- value.ShortCircuit = ShortCircuit{
- VolumeType: "emptyDir",
- Policy: "local",
- Enable: true,
- }
-
- if !runtime.Spec.DisablePrometheus {
- value.Monitoring = GooseFSRuntimeMetricsLabel
- }
-
- // transform Tolerations
- e.transformTolerations(dataset, value)
-
- return
-}
-
-// 2. Transform the masters
-func (e *GooseFSEngine) transformMasters(runtime *datav1alpha1.GooseFSRuntime,
- dataset *datav1alpha1.Dataset,
- value *GooseFS) (err error) {
-
- value.Master = Master{}
-
- backupRoot := os.Getenv("FLUID_WORKDIR")
- if backupRoot == "" {
- backupRoot = "/tmp"
- }
- value.Master.BackupPath = backupRoot + "/goosefs-backup/" + e.namespace + "/" + e.name
-
- if runtime.Spec.Master.Replicas == 0 {
- value.Master.Replicas = 1
- } else {
- value.Master.Replicas = runtime.Spec.Master.Replicas
- }
-
- // if len(runtime.Spec.Master.JvmOptions) > 0 {
- // value.Master.JvmOptions = strings.Join(runtime.Spec.Master.JvmOptions, " ")
- // }
-
- e.optimizeDefaultForMaster(runtime, value)
-
- if len(runtime.Spec.Master.Env) > 0 {
- value.Master.Env = runtime.Spec.Master.Env
- } else {
- value.Master.Env = map[string]string{}
- }
-
- value.Master.Env["GOOSEFS_WORKER_TIEREDSTORE_LEVEL0_DIRS_PATH"] = value.getTiredStoreLevel0Path(e.name, e.namespace)
-
- if len(runtime.Spec.Master.Properties) > 0 {
- value.Master.Properties = runtime.Spec.Master.Properties
- }
-
- value.Master.HostNetwork = true
-
- nodeSelector := e.transformMasterSelector(runtime)
- if len(nodeSelector) != 0 {
- value.Master.NodeSelector = nodeSelector
- }
-
- // // check the run as
- // if runtime.Spec.RunAs != nil {
- // value.Master.Env["GOOSEFS_USERNAME"] = goosefsUser
- // value.Master.Env["GOOSEFS_GROUP"] = goosefsUser
- // value.Master.Env["GOOSEFS_UID"] = strconv.FormatInt(*runtime.Spec.RunAs.UID, 10)
- // value.Master.Env["GOOSEFS_GID"] = strconv.FormatInt(*runtime.Spec.RunAs.GID, 10)
- // }
- // if the dataset indicates a restore path, need to load the backup file in it
-
- if dataset.Spec.DataRestoreLocation != nil {
- if dataset.Spec.DataRestoreLocation.Path != "" {
- pvcName, path, err := utils.ParseBackupRestorePath(dataset.Spec.DataRestoreLocation.Path)
- if err != nil {
- e.Log.Error(err, "restore path cannot analyse", "Path", dataset.Spec.DataRestoreLocation.Path)
- }
- if pvcName != "" {
- // RestorePath is in the form of pvc:///subpath
- value.Master.Restore.Enabled = true
- value.Master.Restore.PVCName = pvcName
- value.Master.Restore.Path = path
- value.Master.Env["JOURNAL_BACKUP"] = "/pvc" + path + e.GetMetadataFileName()
- } else if dataset.Spec.DataRestoreLocation.NodeName != "" {
- // RestorePath is in the form of local://subpath
- value.Master.Restore.Enabled = true
- if len(value.Master.NodeSelector) == 0 {
- value.Master.NodeSelector = map[string]string{}
- }
- value.Master.NodeSelector["kubernetes.io/hostname"] = dataset.Spec.DataRestoreLocation.NodeName
- value.Master.Env["JOURNAL_BACKUP"] = "/host/" + e.GetMetadataFileName()
- value.Master.Restore.Path = path
- } else {
- // RestorePath in Dataset cannot analyse
- err := errors.New("DataRestoreLocation in Dataset cannot analyse, will not restore")
- e.Log.Error(err, "restore path cannot analyse", "Location", dataset.Spec.DataRestoreLocation)
- }
- }
- }
-
- e.transformResourcesForMaster(runtime, value)
-
- // transform the annotation for goosefs master.
- value.Master.Annotations = runtime.Spec.Master.Annotations
- return
-}
-
-// 3. Transform the workers
-func (e *GooseFSEngine) transformWorkers(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) (err error) {
- value.Worker = Worker{}
- e.optimizeDefaultForWorker(runtime, value)
-
- if len(value.Worker.NodeSelector) == 0 {
- value.Worker.NodeSelector = map[string]string{}
- }
-
- if len(runtime.Spec.Worker.Properties) > 0 {
- value.Worker.Properties = runtime.Spec.Worker.Properties
- }
-
- if len(runtime.Spec.Worker.Env) > 0 {
- value.Worker.Env = runtime.Spec.Worker.Env
- } else {
- value.Worker.Env = map[string]string{}
- }
-
- // check the run as
- // if runtime.Spec.RunAs != nil {
- // value.Worker.Env["GOOSEFS_USERNAME"] = goosefsUser
- // value.Worker.Env["GOOSEFS_GROUP"] = goosefsUser
- // value.Worker.Env["GOOSEFS_UID"] = strconv.FormatInt(*runtime.Spec.RunAs.UID, 10)
- // value.Worker.Env["GOOSEFS_GID"] = strconv.FormatInt(*runtime.Spec.RunAs.GID, 10)
- // }
-
- value.Worker.Env["GOOSEFS_WORKER_TIEREDSTORE_LEVEL0_DIRS_PATH"] = value.getTiredStoreLevel0Path(e.name, e.namespace)
-
- value.Worker.HostNetwork = true
-
- e.transformResourcesForWorker(runtime, value)
-
- // transform the annotation for goosefs worker.
- value.Worker.Annotations = runtime.Spec.Worker.Annotations
-
- return
-}
-
-// 8.allocate port for fluid engine
-func (e *GooseFSEngine) allocatePorts(value *GooseFS) error {
- expectedPortNum := PortNum
-
- if e.runtime.Spec.APIGateway.Enabled {
- expectedPortNum += 1
- }
-
- if e.runtime.Spec.Master.Replicas > 1 {
- expectedPortNum += 2
- }
-
- allocator, err := portallocator.GetRuntimePortAllocator()
- if err != nil {
- e.Log.Error(err, "can't get runtime port allocator")
- return err
- }
-
- allocatedPorts, err := allocator.GetAvailablePorts(expectedPortNum)
- if err != nil {
- e.Log.Error(err, "can't get available ports", "expected port num", expectedPortNum)
- return err
- }
-
- index := 0
- value.Master.Ports.Rpc = allocatedPorts[index]
- index++
- value.Master.Ports.Web = allocatedPorts[index]
- index++
- value.Worker.Ports.Rpc = allocatedPorts[index]
- index++
- value.Worker.Ports.Web = allocatedPorts[index]
- index++
- value.JobMaster.Ports.Rpc = allocatedPorts[index]
- index++
- value.JobMaster.Ports.Web = allocatedPorts[index]
- index++
- value.JobWorker.Ports.Rpc = allocatedPorts[index]
- index++
- value.JobWorker.Ports.Web = allocatedPorts[index]
- index++
- value.JobWorker.Ports.Data = allocatedPorts[index]
- index++
-
- if e.runtime.Spec.APIGateway.Enabled {
- value.APIGateway.Ports.Rest = allocatedPorts[index]
- index++
- }
-
- if e.runtime.Spec.Master.Replicas > 1 {
- value.Master.Ports.Embedded = allocatedPorts[index]
- index++
- value.JobMaster.Ports.Embedded = allocatedPorts[index]
- }
-
- return nil
-}
-
-func (e *GooseFSEngine) transformMasterSelector(runtime *datav1alpha1.GooseFSRuntime) map[string]string {
- properties := map[string]string{}
- if runtime.Spec.Master.NodeSelector != nil {
- properties = runtime.Spec.Master.NodeSelector
- }
- return properties
-}
-
-func (e *GooseFSEngine) transformPlacementMode(dataset *datav1alpha1.Dataset, value *GooseFS) {
- value.PlacementMode = string(dataset.Spec.PlacementMode)
- if len(value.PlacementMode) == 0 {
- value.PlacementMode = string(datav1alpha1.ExclusiveMode)
- }
-}
diff --git a/pkg/ddc/goosefs/transform_api_gateway.go b/pkg/ddc/goosefs/transform_api_gateway.go
deleted file mode 100644
index 5277672c43b..00000000000
--- a/pkg/ddc/goosefs/transform_api_gateway.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
-)
-
-// transformAPIGateway decide whether to enable APIGateway in value according to GooseFSRuntime
-func (e *GooseFSEngine) transformAPIGateway(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) (err error) {
- if runtime == nil || value == nil {
- err = fmt.Errorf("cannot transform because runtime or value will lead to nil pointer")
- return
- }
- value.APIGateway.Enabled = runtime.Spec.APIGateway.Enabled
- return
-}
diff --git a/pkg/ddc/goosefs/transform_api_gateway_test.go b/pkg/ddc/goosefs/transform_api_gateway_test.go
deleted file mode 100644
index c58569b3ae1..00000000000
--- a/pkg/ddc/goosefs/transform_api_gateway_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
-)
-
-var _ = Describe("GooseFS", func() {
- DescribeTable("transformAPIGateway",
- func(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS, expectError bool, shouldMatch bool) {
- engine := &GooseFSEngine{}
- err := engine.transformAPIGateway(runtime, value)
-
- if expectError {
- Expect(err).To(HaveOccurred())
- } else {
- Expect(err).NotTo(HaveOccurred())
- if shouldMatch {
- Expect(runtime.Spec.APIGateway.Enabled).To(Equal(value.APIGateway.Enabled))
- }
- }
- },
- Entry("should sync when runtime enabled and value disabled",
- &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
- Enabled: true,
- },
- },
- },
- &GooseFS{
- APIGateway: APIGateway{
- Enabled: false,
- },
- },
- false,
- true,
- ),
- Entry("should sync when runtime disabled and value enabled",
- &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
- Enabled: false,
- },
- },
- },
- &GooseFS{
- APIGateway: APIGateway{
- Enabled: true,
- },
- },
- false,
- true,
- ),
- Entry("should return error when runtime is nil",
- nil,
- &GooseFS{
- APIGateway: APIGateway{
- Enabled: false,
- },
- },
- true,
- false,
- ),
- Entry("should return error when value is nil",
- &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
- Enabled: true,
- },
- },
- },
- nil,
- true,
- false,
- ),
- )
-})
diff --git a/pkg/ddc/goosefs/transform_fuse.go b/pkg/ddc/goosefs/transform_fuse.go
deleted file mode 100644
index 4994848bd73..00000000000
--- a/pkg/ddc/goosefs/transform_fuse.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "strings"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-// 4. Transform the fuse
-func (e *GooseFSEngine) transformFuse(runtime *datav1alpha1.GooseFSRuntime, dataset *datav1alpha1.Dataset, value *GooseFS) (err error) {
- value.Fuse = Fuse{}
-
- image := runtime.Spec.Fuse.Image
- tag := runtime.Spec.Fuse.ImageTag
- imagePullPolicy := runtime.Spec.Fuse.ImagePullPolicy
-
- value.Fuse.Image, value.Fuse.ImageTag, value.Fuse.ImagePullPolicy = e.parseFuseImage(image, tag, imagePullPolicy)
-
- if len(runtime.Spec.Fuse.Properties) > 0 {
- value.Fuse.Properties = runtime.Spec.Fuse.Properties
- }
-
- if len(runtime.Spec.Fuse.Env) > 0 {
- value.Fuse.Env = runtime.Spec.Fuse.Env
- } else {
- value.Fuse.Env = map[string]string{}
- }
-
- // if runtime.Spec.Fuse.MountPath != "" {
- // value.Fuse.MountPath = runtime.Spec.Fuse.MountPath
- // } else {
- // value.Fuse.MountPath = fmt.Sprintf("format", a)
- // }
-
- value.Fuse.MountPath = e.getMountPoint()
- value.Fuse.Env["MOUNT_POINT"] = value.Fuse.MountPath
-
- // if len(runtime.Spec.Fuse.Args) > 0 {
- // value.Fuse.Args = runtime.Spec.Fuse.Args
- // } else {
- // value.Fuse.Args = []string{"fuse", "--fuse-opts=kernel_cache"}
- // }
- e.optimizeDefaultFuse(runtime, value)
-
- if dataset.Spec.Owner != nil {
- value.Fuse.Args[len(value.Fuse.Args)-1] = strings.Join([]string{value.Fuse.Args[len(value.Fuse.Args)-1], fmt.Sprintf("uid=%d,gid=%d", *dataset.Spec.Owner.UID, *dataset.Spec.Owner.GID)}, ",")
- } else {
- if len(value.Properties) == 0 {
- value.Properties = map[string]string{}
- }
- value.Properties["goosefs.fuse.user.group.translation.enabled"] = "true"
- }
- // value.Fuse.Args[-1]
-
- // Allow root: only the RunAs user and root can access fuse
- //if !strings.Contains(value.Fuse.Args[len(value.Fuse.Args)-1], "allow_") {
- // value.Fuse.Args[len(value.Fuse.Args)-1] = strings.Join([]string{value.Fuse.Args[len(value.Fuse.Args)-1], "allow_root"}, ",")
- //}
-
- // Allow others: all users(including root) can access fuse
- if !strings.Contains(value.Fuse.Args[len(value.Fuse.Args)-1], "allow_") {
- value.Fuse.Args[len(value.Fuse.Args)-1] = strings.Join([]string{value.Fuse.Args[len(value.Fuse.Args)-1], "allow_other"}, ",")
- }
-
- if len(runtime.Spec.Fuse.NodeSelector) > 0 {
- value.Fuse.NodeSelector = runtime.Spec.Fuse.NodeSelector
- } else {
- value.Fuse.NodeSelector = map[string]string{}
- }
-
- value.Fuse.NodeSelector[utils.GetFuseLabelName(runtime.Namespace, runtime.Name, e.runtimeInfo.GetOwnerDatasetUID())] = "true"
- value.Fuse.HostNetwork = true
- value.Fuse.HostPID = common.HostPIDEnabled(runtime.Annotations)
- value.Fuse.Enabled = true
-
- e.transformResourcesForFuse(runtime, value)
-
- // set critical fuse pod to avoid eviction
- value.Fuse.CriticalPod = common.CriticalFusePodEnabled()
-
- // transform the annotation for goosefs fuse.
- value.Fuse.Annotations = runtime.Spec.Fuse.Annotations
-
- return
-
-}
diff --git a/pkg/ddc/goosefs/transform_fuse_test.go b/pkg/ddc/goosefs/transform_fuse_test.go
deleted file mode 100644
index 66cfc5a5585..00000000000
--- a/pkg/ddc/goosefs/transform_fuse_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
-)
-
-func TestTransformFuseWithNoArgs(t *testing.T) {
- var tests = []struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- goosefsValue *GooseFS
- expect string
- }{
- {&datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }, &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "local:///mnt/test",
- Name: "test",
- }},
- }}, &GooseFS{}, "--fuse-opts=rw,direct_io,allow_other"},
- }
- for _, test := range tests {
- runtimeInfo, err := base.BuildRuntimeInfo("test", "fluid", "goosefs")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- runtimeInfo: runtimeInfo,
- Client: fake.NewFakeClientWithScheme(testScheme),
- }
- err = engine.transformFuse(test.runtime, test.dataset, test.goosefsValue)
- if err != nil {
- t.Errorf("Got err %v", err)
- }
- if test.goosefsValue.Fuse.Args[1] != test.expect {
- t.Errorf("expected value %v, but got %v", test.expect, test.goosefsValue.Fuse.Args[1])
- }
- }
-}
-
-func TestTransformFuseWithArgs(t *testing.T) {
- var tests = []struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- goosefsValue *GooseFS
- expect string
- }{
- {&datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{
- Args: []string{
- "fuse",
- "--fuse-opts=kernel_cache",
- },
- },
- },
- }, &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "local:///mnt/test",
- Name: "test",
- }},
- }}, &GooseFS{}, "--fuse-opts=kernel_cache,allow_other"},
- }
- for _, test := range tests {
- runtimeInfo, err := base.BuildRuntimeInfo("test", "fluid", "goosefs")
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- runtimeInfo: runtimeInfo,
- Client: fake.NewFakeClientWithScheme(testScheme),
- }
- err = engine.transformFuse(test.runtime, test.dataset, test.goosefsValue)
- if err != nil {
- t.Errorf("Got err %v", err)
- }
- if test.goosefsValue.Fuse.Args[1] != test.expect {
- t.Errorf("expected fuse %v, but got %v", test.expect, test.goosefsValue.Fuse.Args[1])
- }
- }
-}
diff --git a/pkg/ddc/goosefs/transform_hadoop_config.go b/pkg/ddc/goosefs/transform_hadoop_config.go
deleted file mode 100644
index 8702a9d712a..00000000000
--- a/pkg/ddc/goosefs/transform_hadoop_config.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "fmt"
- "strings"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- v1 "k8s.io/api/core/v1"
- apierrs "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/types"
-)
-
-// transformHadoopConfig transforms the given value by checking existence of user-specific hadoop configurations
-func (e *GooseFSEngine) transformHadoopConfig(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) (err error) {
- if len(runtime.Spec.HadoopConfig) == 0 {
- return nil
- }
-
- key := types.NamespacedName{
- Namespace: runtime.Namespace,
- Name: runtime.Spec.HadoopConfig,
- }
-
- hadoopConfigMap := &v1.ConfigMap{}
-
- if err = e.Client.Get(context.TODO(), key, hadoopConfigMap); err != nil {
- if apierrs.IsNotFound(err) {
- err = fmt.Errorf("specified hadoopConfig \"%v\" is not found", runtime.Spec.HadoopConfig)
- }
- return err
- }
-
- var confFiles []string
- for k := range hadoopConfigMap.Data {
- switch k {
- case HadoopConfHdfsSiteFilename:
- value.HadoopConfig.IncludeHdfsSite = true
- confFiles = append(confFiles, HadoopConfMountPath+"/"+HadoopConfHdfsSiteFilename)
- case HadoopConfCoreSiteFilename:
- value.HadoopConfig.IncludeCoreSite = true
- confFiles = append(confFiles, HadoopConfMountPath+"/"+HadoopConfCoreSiteFilename)
- }
- }
-
- // Neither hdfs-site.xml nor core-site.xml is found in the configMap
- if !value.HadoopConfig.IncludeCoreSite && !value.HadoopConfig.IncludeHdfsSite {
- err = fmt.Errorf("neither \"%v\" nor \"%v\" is found in the specified configMap \"%v\" ", HadoopConfHdfsSiteFilename, HadoopConfCoreSiteFilename, runtime.Spec.HadoopConfig)
- return err
- }
-
- value.HadoopConfig.ConfigMap = runtime.Spec.HadoopConfig
- value.Properties["goosefs.underfs.hdfs.configuration"] = strings.Join(confFiles, ":")
-
- return nil
-}
diff --git a/pkg/ddc/goosefs/transform_init_users.go b/pkg/ddc/goosefs/transform_init_users.go
deleted file mode 100644
index 1bb7cee788c..00000000000
--- a/pkg/ddc/goosefs/transform_init_users.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/docker"
-)
-
-// transform dataset which has ufsPaths and ufsVolumes
-func (e *GooseFSEngine) transformInitUsers(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
-
- value.InitUsers = common.InitUsers{
- Enabled: false,
- }
-
- if runtime.Spec.RunAs != nil {
- value.UserInfo.User = int(*runtime.Spec.RunAs.UID)
- value.UserInfo.Group = int(*runtime.Spec.RunAs.GID)
- value.InitUsers = common.InitUsers{
- Enabled: true,
- Dir: e.getInitUserDir(),
- EnvUsers: utils.GetInitUserEnv(runtime.Spec.RunAs),
- EnvTieredPaths: e.getInitTierPathsEnv(runtime),
- }
- }
-
- image := runtime.Spec.InitUsers.Image
- tag := runtime.Spec.InitUsers.ImageTag
- imagePullPolicy := runtime.Spec.InitUsers.ImagePullPolicy
-
- value.InitUsers.Image, value.InitUsers.ImageTag, value.InitUsers.ImagePullPolicy = docker.ParseInitImage(image, tag, imagePullPolicy, common.DefaultInitImageEnv)
-
- e.Log.Info("Check InitUsers", "InitUsers", value.InitUsers)
-
-}
diff --git a/pkg/ddc/goosefs/transform_init_users_test.go b/pkg/ddc/goosefs/transform_init_users_test.go
deleted file mode 100644
index 33606aab14e..00000000000
--- a/pkg/ddc/goosefs/transform_init_users_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "strings"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
-)
-
-var _ = Describe("TransformInitUsers", func() {
- Describe("without RunAs", func() {
- It("should disable init users when RunAs is not specified", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }
- goosefsValue := &GooseFS{}
-
- engine := &GooseFSEngine{Log: fake.NullLogger()}
- engine.transformInitUsers(runtime, goosefsValue)
-
- Expect(goosefsValue.InitUsers.Enabled).To(BeFalse())
- })
- })
-
- Describe("with RunAs", func() {
- It("should enable init users and set default image", func() {
- value := int64(1000)
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- RunAs: &datav1alpha1.User{
- UID: &value,
- GID: &value,
- UserName: "user1",
- GroupName: "group1",
- },
- },
- }
- goosefsValue := &GooseFS{}
-
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- initImage: common.DefaultInitImage,
- }
- engine.transformInitUsers(runtime, goosefsValue)
-
- Expect(goosefsValue.InitUsers.Enabled).To(BeTrue())
-
- imageInfo := strings.Split(common.DefaultInitImage, ":")
- Expect(goosefsValue.InitUsers.Image).To(Equal(imageInfo[0]))
- Expect(goosefsValue.InitUsers.ImageTag).To(Equal(imageInfo[1]))
- })
- })
-
- Describe("with image overwrite", func() {
- It("should use custom image when specified in runtime", func() {
- value := int64(1000)
- image := "some-registry.some-repository"
- imageTag := "v1.0.0-abcdefg"
-
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- RunAs: &datav1alpha1.User{
- UID: &value,
- GID: &value,
- UserName: "user1",
- GroupName: "group1",
- },
- InitUsers: datav1alpha1.InitUsersSpec{
- Image: image,
- ImageTag: imageTag,
- },
- },
- }
- goosefsValue := &GooseFS{}
-
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- initImage: common.DefaultInitImage,
- }
- engine.transformInitUsers(runtime, goosefsValue)
-
- Expect(goosefsValue.InitUsers.Enabled).To(BeTrue())
- Expect(goosefsValue.InitUsers.Image).To(Equal(image))
- Expect(goosefsValue.InitUsers.ImageTag).To(Equal(imageTag))
- })
- })
-})
diff --git a/pkg/ddc/goosefs/transform_optimization.go b/pkg/ddc/goosefs/transform_optimization.go
deleted file mode 100644
index 9bb62c33e28..00000000000
--- a/pkg/ddc/goosefs/transform_optimization.go
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "strconv"
- "strings"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-
- v1 "k8s.io/api/core/v1"
-)
-
-// transform dataset which has ufsPaths and ufsVolumes
-func (e *GooseFSEngine) optimizeDefaultProperties(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
- if len(value.Properties) == 0 {
- if len(runtime.Spec.Properties) > 0 {
- value.Properties = runtime.Spec.Properties
- } else {
- value.Properties = map[string]string{}
- }
- }
- setDefaultProperties(runtime, value, "goosefs.fuse.jnifuse.enabled", "true")
- setDefaultProperties(runtime, value, "goosefs.master.metastore", "ROCKS")
- setDefaultProperties(runtime, value, "goosefs.web.ui.enabled", "false")
- setDefaultProperties(runtime, value, "goosefs.user.update.file.accesstime.disabled", "true")
- setDefaultProperties(runtime, value, "goosefs.user.client.cache.enabled", "false")
- setDefaultProperties(runtime, value, "goosefs.master.metastore.inode.cache.max.size", "10000000")
- setDefaultProperties(runtime, value, "goosefs.master.journal.log.size.bytes.max", "500MB")
- setDefaultProperties(runtime, value, "goosefs.master.metadata.sync.concurrency.level", "128")
- setDefaultProperties(runtime, value, "goosefs.master.metadata.sync.executor.pool.size", "128")
- setDefaultProperties(runtime, value, "goosefs.master.metadata.sync.ufs.prefetch.pool.size", "128")
- setDefaultProperties(runtime, value, "goosefs.user.block.worker.client.pool.min", "512")
- setDefaultProperties(runtime, value, "goosefs.fuse.debug.enabled", "false")
- setDefaultProperties(runtime, value, "goosefs.web.ui.enabled", "false")
- setDefaultProperties(runtime, value, "goosefs.user.file.writetype.default", "MUST_CACHE")
- setDefaultProperties(runtime, value, "goosefs.user.ufs.block.read.location.policy", "com.qcloud.cos.goosefs.client.block.policy.LocalFirstAvoidEvictionPolicy")
- setDefaultProperties(runtime, value, "goosefs.user.block.write.location.policy.class", "com.qcloud.cos.goosefs.client.block.policy.LocalFirstAvoidEvictionPolicy")
- setDefaultProperties(runtime, value, "goosefs.worker.allocator.class", "com.qcloud.cos.goosefs.worker.block.allocator.MaxFreeAllocator")
- setDefaultProperties(runtime, value, "goosefs.user.block.size.bytes.default", "16MB")
- setDefaultProperties(runtime, value, "goosefs.user.streaming.reader.chunk.size.bytes", "32MB")
- setDefaultProperties(runtime, value, "goosefs.user.local.reader.chunk.size.bytes", "32MB")
- setDefaultProperties(runtime, value, "goosefs.worker.network.reader.buffer.size", "32MB")
- setDefaultProperties(runtime, value, "goosefs.worker.file.buffer.size", "1MB")
- // Enable metrics as default for better monitoring result, if you have performance concern, feel free to turn it off
- setDefaultProperties(runtime, value, "goosefs.user.metrics.collection.enabled", "true")
- setDefaultProperties(runtime, value, "goosefs.master.rpc.executor.max.pool.size", "1024")
- setDefaultProperties(runtime, value, "goosefs.master.rpc.executor.core.pool.size", "128")
- // setDefaultProperties(runtime, value, "goosefs.master.mount.table.root.readonly", "true")
- setDefaultProperties(runtime, value, "goosefs.user.update.file.accesstime.disabled", "true")
- setDefaultProperties(runtime, value, "goosefs.user.file.passive.cache.enabled", "false")
- setDefaultProperties(runtime, value, "goosefs.user.block.avoid.eviction.policy.reserved.size.bytes", "2GB")
- setDefaultProperties(runtime, value, "goosefs.master.journal.folder", "/journal")
- setDefaultProperties(runtime, value, "goosefs.user.block.master.client.pool.gc.threshold", "2day")
- setDefaultProperties(runtime, value, "goosefs.user.file.master.client.threads", "1024")
- setDefaultProperties(runtime, value, "goosefs.user.block.master.client.threads", "1024")
- setDefaultProperties(runtime, value, "goosefs.user.file.create.ttl.action", "FREE")
- setDefaultProperties(runtime, value, "goosefs.user.file.readtype.default", "CACHE")
- setDefaultProperties(runtime, value, "goosefs.security.stale.channel.purge.interval", "365d")
- setDefaultProperties(runtime, value, "goosefs.user.metadata.cache.enabled", "true")
- setDefaultProperties(runtime, value, "goosefs.user.metadata.cache.expiration.time", "2day")
- // set the default max size of metadata cache
- setDefaultProperties(runtime, value, "goosefs.user.metadata.cache.max.size", "6000000")
- setDefaultProperties(runtime, value, "goosefs.fuse.cached.paths.max", "1000000")
- setDefaultProperties(runtime, value, "goosefs.job.worker.threadpool.size", "164")
- setDefaultProperties(runtime, value, "goosefs.user.worker.list.refresh.interval", "2min")
- setDefaultProperties(runtime, value, "goosefs.user.logging.threshold", "1000ms")
- setDefaultProperties(runtime, value, "goosefs.fuse.logging.threshold", "1000ms")
- setDefaultProperties(runtime, value, "goosefs.worker.block.master.client.pool.size", "1024")
- // Disable this optimization since it will cause availbilty issue. see https://github.com/Alluxio/alluxio/issues/14909
- // setDefaultProperties(runtime, value, "goosefs.fuse.shared.caching.reader.enabled", "true")
- setDefaultProperties(runtime, value, "goosefs.job.master.finished.job.retention.time", "30sec")
- setDefaultProperties(runtime, value, "goosefs.underfs.object.store.breadcrumbs.enabled", "false")
-
- if value.Master.Replicas > 1 {
- setDefaultProperties(runtime, value, "goosefs.master.journal.type", "EMBEDDED")
- } else {
- setDefaultProperties(runtime, value, "goosefs.master.journal.type", "UFS")
- }
-
- // "goosefs.user.direct.memory.io.enabled" is only safe when the workload is read only and the
- // worker has only one tier and one storage directory in this tier.
- readOnly := false
- runtimeInfo := e.runtimeInfo
- if runtimeInfo != nil {
- accessModes, err := utils.GetAccessModesOfDataset(e.Client, runtimeInfo.GetName(), runtimeInfo.GetNamespace())
- if err != nil {
- e.Log.Info("Error:", "err", err)
- }
-
- if len(accessModes) > 0 {
- for _, mode := range accessModes {
- if mode == v1.ReadOnlyMany {
- readOnly = true
- }
- }
- }
- tieredstoreInfo := runtimeInfo.GetTieredStoreInfo()
- if readOnly && len(tieredstoreInfo.Levels) == 1 && len(tieredstoreInfo.Levels[0].CachePaths) == 1 {
- setDefaultProperties(runtime, value, "goosefs.user.direct.memory.io.enabled", "true")
- }
- }
-}
-
-// optimizeDefaultPropertiesAndFuseForHTTP sets the default value for properties and fuse when the mounts are all HTTP.
-func (e *GooseFSEngine) optimizeDefaultPropertiesAndFuseForHTTP(runtime *datav1alpha1.GooseFSRuntime, dataset *datav1alpha1.Dataset, value *GooseFS) {
- var isHTTP = true
- for _, mount := range dataset.Spec.Mounts {
- // the mount is not http
- if !(strings.HasPrefix(mount.MountPoint, common.HttpScheme.String()) || strings.HasPrefix(mount.MountPoint, common.HttpsScheme.String())) {
- isHTTP = false
- break
- }
- }
-
- if isHTTP {
- setDefaultProperties(runtime, value, "goosefs.user.block.size.bytes.default", "256MB")
- setDefaultProperties(runtime, value, "goosefs.user.streaming.reader.chunk.size.bytes", "256MB")
- setDefaultProperties(runtime, value, "goosefs.user.local.reader.chunk.size.bytes", "256MB")
- setDefaultProperties(runtime, value, "goosefs.worker.network.reader.buffer.size", "256MB")
- setDefaultProperties(runtime, value, "goosefs.user.streaming.data.timeout", "300sec")
- if len(runtime.Spec.Fuse.Args) == 0 {
- value.Fuse.Args[1] = strings.Join([]string{value.Fuse.Args[1], "max_readahead=0"}, ",")
- }
- }
-}
-
-func setDefaultProperties(runtime *datav1alpha1.GooseFSRuntime, goosefsValue *GooseFS, key string, value string) {
- if _, found := runtime.Spec.Properties[key]; !found {
- goosefsValue.Properties[key] = value
- }
-}
-
-func (e *GooseFSEngine) setPortProperties(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
- setDefaultProperties(runtime, value, "goosefs.master.rpc.port", strconv.Itoa(value.Master.Ports.Rpc))
- setDefaultProperties(runtime, value, "goosefs.master.web.port", strconv.Itoa(value.Master.Ports.Web))
- setDefaultProperties(runtime, value, "goosefs.worker.rpc.port", strconv.Itoa(value.Worker.Ports.Rpc))
- setDefaultProperties(runtime, value, "goosefs.worker.web.port", strconv.Itoa(value.Worker.Ports.Web))
- setDefaultProperties(runtime, value, "goosefs.job.master.rpc.port", strconv.Itoa(value.JobMaster.Ports.Rpc))
- setDefaultProperties(runtime, value, "goosefs.job.master.web.port", strconv.Itoa(value.JobMaster.Ports.Web))
- setDefaultProperties(runtime, value, "goosefs.job.worker.rpc.port", strconv.Itoa(value.JobWorker.Ports.Rpc))
- setDefaultProperties(runtime, value, "goosefs.job.worker.web.port", strconv.Itoa(value.JobWorker.Ports.Web))
- setDefaultProperties(runtime, value, "goosefs.job.worker.data.port", strconv.Itoa(value.JobWorker.Ports.Data))
- if runtime.Spec.APIGateway.Enabled {
- setDefaultProperties(runtime, value, "goosefs.proxy.web.port", strconv.Itoa(value.APIGateway.Ports.Rest))
- }
-
- if value.Master.Ports.Embedded != 0 && value.JobMaster.Ports.Embedded != 0 {
- setDefaultProperties(runtime, value, "goosefs.master.embedded.journal.port", strconv.Itoa(value.Master.Ports.Embedded))
- setDefaultProperties(runtime, value, "goosefs.job.master.embedded.journal.port", strconv.Itoa(value.JobMaster.Ports.Embedded))
- }
-
- // If use EMBEDDED HA Mode, need set goosefs.master.embedded.journal.addresses
- if value.Master.Replicas > 1 {
- var journalAddresses string
- var journalAddress string
- var i int
- for i = 0; i < int(value.Master.Replicas); i++ {
- if i == int(value.Master.Replicas-1) {
- journalAddress = value.FullnameOverride + "-" + "master-" + strconv.Itoa(i) + ":" + strconv.Itoa(value.Master.Ports.Embedded)
- } else {
- journalAddress = value.FullnameOverride + "-" + "master-" + strconv.Itoa(i) + ":" + strconv.Itoa(value.Master.Ports.Embedded) + ","
- }
-
- journalAddresses += journalAddress
- }
- setDefaultProperties(runtime, value, "goosefs.master.embedded.journal.addresses", journalAddresses)
- }
-}
-
-func (e *GooseFSEngine) optimizeDefaultForMaster(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
- if len(runtime.Spec.Master.JvmOptions) > 0 {
- value.Master.JvmOptions = runtime.Spec.Master.JvmOptions
- }
-
- if len(value.Master.JvmOptions) == 0 {
- value.Master.JvmOptions = []string{
- "-Xmx16G",
- "-XX:+UnlockExperimentalVMOptions",
- }
- }
-}
-
-func (e *GooseFSEngine) optimizeDefaultForWorker(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
- if len(runtime.Spec.Worker.JvmOptions) > 0 {
- value.Worker.JvmOptions = runtime.Spec.Worker.JvmOptions
- }
- if len(value.Worker.JvmOptions) == 0 {
- value.Worker.JvmOptions = []string{
- "-Xmx12G",
- "-XX:+UnlockExperimentalVMOptions",
- "-XX:MaxDirectMemorySize=32g",
- }
- }
-}
-
-func (e *GooseFSEngine) optimizeDefaultFuse(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
-
- if len(runtime.Spec.Fuse.JvmOptions) > 0 {
- value.Fuse.JvmOptions = runtime.Spec.Fuse.JvmOptions
- }
-
- if len(value.Fuse.JvmOptions) == 0 {
- value.Fuse.JvmOptions = []string{
- "-Xmx16G",
- "-Xms16G",
- "-XX:+UseG1GC",
- "-XX:MaxDirectMemorySize=32g",
- "-XX:+UnlockExperimentalVMOptions",
- }
- }
-
- readOnly := false
- runtimeInfo := e.runtimeInfo
- if runtimeInfo != nil {
- accessModes, err := utils.GetAccessModesOfDataset(e.Client, runtimeInfo.GetName(), runtimeInfo.GetNamespace())
- if err != nil {
- e.Log.Info("Error:", "err", err)
- }
-
- if len(accessModes) > 0 {
- for _, mode := range accessModes {
- if mode == v1.ReadOnlyMany {
- readOnly = true
- }
- }
- }
- }
-
- if len(runtime.Spec.Fuse.Args) > 0 {
- value.Fuse.Args = runtime.Spec.Fuse.Args
- } else {
- if readOnly {
- // value.Fuse.Args = []string{"fuse", "--fuse-opts=kernel_cache,ro,max_read=131072,attr_timeout=7200,entry_timeout=7200,nonempty"}
- value.Fuse.Args = []string{"fuse", "--fuse-opts=ro,direct_io"}
- } else {
- // value.Fuse.Args = []string{"fuse", "--fuse-opts=kernel_cache,rw,max_read=131072,attr_timeout=7200,entry_timeout=7200,nonempty"}
- value.Fuse.Args = []string{"fuse", "--fuse-opts=rw,direct_io"}
- }
-
- }
-
-}
diff --git a/pkg/ddc/goosefs/transform_optimization_test.go b/pkg/ddc/goosefs/transform_optimization_test.go
deleted file mode 100644
index 08757360144..00000000000
--- a/pkg/ddc/goosefs/transform_optimization_test.go
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "strconv"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- corev1 "k8s.io/api/core/v1"
-)
-
-var _ = Describe("GooseFSEngine Property Optimization", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- Context("single master replica", func() {
- It("should set journal type to UFS", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Properties: map[string]string{},
- },
- }
- value := &GooseFS{}
-
- engine.optimizeDefaultProperties(runtime, value)
-
- Expect(value.Properties["goosefs.master.journal.type"]).To(Equal("UFS"))
- })
- })
-
- Context("multiple master replicas", func() {
- It("should set journal type to EMBEDDED", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Properties: map[string]string{},
- },
- }
- value := &GooseFS{
- Master: Master{Replicas: 3},
- }
-
- engine.optimizeDefaultProperties(runtime, value)
-
- Expect(value.Properties["goosefs.master.journal.type"]).To(Equal("EMBEDDED"))
- })
- })
-
- Context("property already set in runtime", func() {
- It("should preserve existing property value", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Properties: map[string]string{
- "goosefs.fuse.jnifuse.enabled": "false",
- },
- },
- }
- value := &GooseFS{}
-
- engine.optimizeDefaultProperties(runtime, value)
-
- Expect(value.Properties["goosefs.fuse.jnifuse.enabled"]).To(Equal("false"))
- })
- })
-})
-
-var _ = Describe("GooseFSEngine HTTP Mount Optimization", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- It("should set block size for HTTP mount", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Properties: map[string]string{},
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- Fuse: Fuse{
- Args: []string{"fuse", "--fuse-opts=kernel_cache,rw,max_read=131072,attr_timeout=7200,entry_timeout=7200,nonempty"},
- },
- }
- dataset := &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {MountPoint: "https://mirrors.bit.edu.cn/apache/zookeeper/zookeeper-3.6.2/"},
- },
- },
- }
-
- engine.optimizeDefaultProperties(runtime, value)
- engine.optimizeDefaultPropertiesAndFuseForHTTP(runtime, dataset, value)
-
- Expect(value.Properties["goosefs.user.block.size.bytes.default"]).To(Equal("256MB"))
- })
-})
-
-var _ = Describe("GooseFSEngine Default Property Setting", func() {
- Context("property not set in runtime", func() {
- It("should use default value", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Properties: map[string]string{},
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- setDefaultProperties(runtime, value, "goosefs.fuse.jnifuse.enabled", "true")
-
- Expect(value.Properties["goosefs.fuse.jnifuse.enabled"]).To(Equal("true"))
- })
- })
-
- Context("property already set in runtime", func() {
- It("should not set default when runtime has property", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Properties: map[string]string{
- "goosefs.fuse.jnifuse.enabled": "false",
- },
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- setDefaultProperties(runtime, value, "goosefs.fuse.jnifuse.enabled", "true")
-
- Expect(value.Properties).NotTo(HaveKey("goosefs.fuse.jnifuse.enabled"))
- })
- })
-})
-
-var _ = Describe("GooseFSEngine Master JVM Optimization", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- Context("no JVM options set", func() {
- It("should set default JVM options", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultForMaster(runtime, value)
-
- Expect(value.Master.JvmOptions).To(ContainElement("-Xmx16G"))
- Expect(value.Master.JvmOptions).To(ContainElement("-XX:+UnlockExperimentalVMOptions"))
- })
- })
-
- Context("JVM options already set", func() {
- It("should use runtime JVM options", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- JvmOptions: []string{"-Xmx4G"},
- },
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- Master: Master{},
- }
-
- engine.optimizeDefaultForMaster(runtime, value)
-
- Expect(value.Master.JvmOptions).To(HaveLen(1))
- Expect(value.Master.JvmOptions[0]).To(Equal("-Xmx4G"))
- })
- })
-})
-
-var _ = Describe("GooseFSEngine Worker JVM Optimization", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- Context("no JVM options set", func() {
- It("should set default JVM options", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultForWorker(runtime, value)
-
- Expect(value.Worker.JvmOptions).To(ContainElement("-Xmx12G"))
- Expect(value.Worker.JvmOptions).To(ContainElement("-XX:+UnlockExperimentalVMOptions"))
- Expect(value.Worker.JvmOptions).To(ContainElement("-XX:MaxDirectMemorySize=32g"))
- })
- })
-
- Context("JVM options already set", func() {
- It("should use runtime JVM options", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Worker: datav1alpha1.GooseFSCompTemplateSpec{
- JvmOptions: []string{"-Xmx4G"},
- },
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultForWorker(runtime, value)
-
- Expect(value.Worker.JvmOptions).To(HaveLen(1))
- Expect(value.Worker.JvmOptions[0]).To(Equal("-Xmx4G"))
- })
- })
-})
-
-var _ = Describe("GooseFSEngine Fuse JVM Optimization", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- Context("no JVM options set", func() {
- It("should set default JVM options", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultFuse(runtime, value)
-
- Expect(value.Fuse.JvmOptions).To(ContainElement("-Xmx16G"))
- Expect(value.Fuse.JvmOptions).To(ContainElement("-Xms16G"))
- Expect(value.Fuse.JvmOptions).To(ContainElement("-XX:+UseG1GC"))
- Expect(value.Fuse.JvmOptions).To(ContainElement("-XX:MaxDirectMemorySize=32g"))
- Expect(value.Fuse.JvmOptions).To(ContainElement("-XX:+UnlockExperimentalVMOptions"))
- })
- })
-
- Context("JVM options already set", func() {
- It("should use runtime JVM options", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{
- JvmOptions: []string{"-Xmx4G"},
- },
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultFuse(runtime, value)
-
- Expect(value.Fuse.JvmOptions).To(HaveLen(1))
- Expect(value.Fuse.JvmOptions[0]).To(Equal("-Xmx4G"))
- })
- })
-})
-
-var _ = Describe("GooseFSEngine Port Configuration", func() {
- const testPort = 20000
-
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- It("should set port properties correctly", func() {
- runtime := &datav1alpha1.GooseFSRuntime{}
- value := &GooseFS{
- Master: Master{
- Ports: Ports{
- Rpc: testPort,
- Web: testPort,
- Embedded: 0,
- },
- },
- Worker: Worker{
- Ports: Ports{
- Rpc: testPort,
- Web: testPort,
- },
- Resources: common.Resources{
- Requests: common.ResourceList{
- corev1.ResourceCPU: "100m",
- corev1.ResourceMemory: "100Mi",
- },
- },
- },
- JobMaster: JobMaster{
- Ports: Ports{
- Rpc: testPort,
- Web: testPort,
- Embedded: 0,
- },
- },
- JobWorker: JobWorker{
- Ports: Ports{
- Rpc: testPort,
- Web: testPort,
- Data: testPort,
- },
- Resources: common.Resources{
- Requests: common.ResourceList{
- corev1.ResourceCPU: "100m",
- corev1.ResourceMemory: "100Mi",
- },
- },
- },
- Properties: map[string]string{},
- }
-
- engine.setPortProperties(runtime, value)
-
- Expect(value.Properties["goosefs.master.rpc.port"]).To(Equal(strconv.Itoa(testPort)))
- })
-
- It("should set embedded journal ports when configured", func() {
- runtime := &datav1alpha1.GooseFSRuntime{}
- value := &GooseFS{
- Master: Master{
- Ports: Ports{
- Rpc: testPort,
- Web: testPort,
- Embedded: 19200,
- },
- },
- Worker: Worker{
- Ports: Ports{Rpc: testPort, Web: testPort},
- },
- JobMaster: JobMaster{
- Ports: Ports{
- Rpc: testPort,
- Web: testPort,
- Embedded: 19201,
- },
- },
- JobWorker: JobWorker{
- Ports: Ports{Rpc: testPort, Web: testPort, Data: testPort},
- },
- Properties: map[string]string{},
- }
-
- engine.setPortProperties(runtime, value)
-
- Expect(value.Properties["goosefs.master.embedded.journal.port"]).To(Equal("19200"))
- Expect(value.Properties["goosefs.job.master.embedded.journal.port"]).To(Equal("19201"))
- })
-
- It("should set API gateway port when enabled", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- APIGateway: datav1alpha1.GooseFSCompTemplateSpec{
- Enabled: true,
- },
- },
- }
- value := &GooseFS{
- Master: Master{Ports: Ports{Rpc: testPort, Web: testPort}},
- Worker: Worker{Ports: Ports{Rpc: testPort, Web: testPort}},
- JobMaster: JobMaster{Ports: Ports{Rpc: testPort, Web: testPort}},
- JobWorker: JobWorker{Ports: Ports{Rpc: testPort, Web: testPort, Data: testPort}},
- APIGateway: APIGateway{Ports: Ports{Rest: 39999}},
- Properties: map[string]string{},
- }
-
- engine.setPortProperties(runtime, value)
-
- Expect(value.Properties["goosefs.proxy.web.port"]).To(Equal("39999"))
- })
-
- It("should set journal addresses for HA mode", func() {
- runtime := &datav1alpha1.GooseFSRuntime{}
- value := &GooseFS{
- FullnameOverride: "test-goosefs",
- Master: Master{
- Replicas: 3,
- Ports: Ports{Rpc: testPort, Web: testPort, Embedded: 19200},
- },
- Worker: Worker{Ports: Ports{Rpc: testPort, Web: testPort}},
- JobMaster: JobMaster{Ports: Ports{Rpc: testPort, Web: testPort, Embedded: 19201}},
- JobWorker: JobWorker{Ports: Ports{Rpc: testPort, Web: testPort, Data: testPort}},
- Properties: map[string]string{},
- }
-
- engine.setPortProperties(runtime, value)
-
- Expect(value.Properties["goosefs.master.embedded.journal.addresses"]).To(ContainSubstring("test-goosefs-master-0:19200"))
- Expect(value.Properties["goosefs.master.embedded.journal.addresses"]).To(ContainSubstring("test-goosefs-master-1:19200"))
- Expect(value.Properties["goosefs.master.embedded.journal.addresses"]).To(ContainSubstring("test-goosefs-master-2:19200"))
- })
-})
-
-var _ = Describe("GooseFSEngine Fuse Args Configuration", func() {
- var engine *GooseFSEngine
-
- BeforeEach(func() {
- engine = &GooseFSEngine{}
- })
-
- It("should use runtime fuse args when set", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{
- Args: []string{"fuse", "--custom-args"},
- },
- },
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultFuse(runtime, value)
-
- Expect(value.Fuse.Args).To(Equal([]string{"fuse", "--custom-args"}))
- })
-
- It("should set default rw fuse args when not set", func() {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }
- value := &GooseFS{
- Properties: map[string]string{},
- }
-
- engine.optimizeDefaultFuse(runtime, value)
-
- Expect(value.Fuse.Args).To(ContainElement("fuse"))
- Expect(len(value.Fuse.Args)).To(BeNumerically(">=", 2))
- Expect(value.Fuse.Args[1]).To(ContainSubstring("rw"))
- })
-})
diff --git a/pkg/ddc/goosefs/transform_permission.go b/pkg/ddc/goosefs/transform_permission.go
deleted file mode 100644
index 93f9bad13cf..00000000000
--- a/pkg/ddc/goosefs/transform_permission.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
-)
-
-// transformSecurity transforms security configuration
-func (e *GooseFSEngine) transformPermission(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
-
- if len(value.Properties) == 0 {
- if len(runtime.Spec.Properties) > 0 {
- value.Properties = runtime.Spec.Properties
- } else {
- value.Properties = map[string]string{}
- }
- }
- value.Properties["goosefs.master.security.impersonation.root.users"] = "*"
- value.Properties["goosefs.master.security.impersonation.root.groups"] = "*"
- setDefaultProperties(runtime, value, "goosefs.security.authorization.permission.enabled", "false")
- // if runtime.Spec.RunAs != nil {
- // value.Properties[fmt.Sprintf("goosefs.master.security.impersonation.%d.users", runtime.Spec.RunAs.UID)]
- // value.Properties[fmt.Sprintf("goosefs.master.security.impersonation.%d.groups", runtime.Spec.RunAs.GID)]
- // }
-}
diff --git a/pkg/ddc/goosefs/transform_permission_test.go b/pkg/ddc/goosefs/transform_permission_test.go
deleted file mode 100644
index d666dc9b628..00000000000
--- a/pkg/ddc/goosefs/transform_permission_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2023 The Fluid Author.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
-)
-
-var _ = Describe("TransformPermission", func() {
- type testCase struct {
- runtime *datav1alpha1.GooseFSRuntime
- value *GooseFS
- expect map[string]string
- }
-
- DescribeTable("should transform permission properties correctly",
- func(tc testCase) {
- engine := &GooseFSEngine{}
- engine.transformPermission(tc.runtime, tc.value)
-
- Expect(tc.value.Properties).To(Equal(tc.expect))
- },
- Entry("default fuse spec",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- },
- value: &GooseFS{},
- expect: map[string]string{
- "goosefs.master.security.impersonation.root.users": "*",
- "goosefs.master.security.impersonation.root.groups": "*",
- "goosefs.security.authorization.permission.enabled": "false",
- },
- },
- ),
- )
-})
diff --git a/pkg/ddc/goosefs/transform_resources.go b/pkg/ddc/goosefs/transform_resources.go
deleted file mode 100644
index 5d362607cf8..00000000000
--- a/pkg/ddc/goosefs/transform_resources.go
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/tieredstore"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
-)
-
-func (e *GooseFSEngine) transformResourcesForMaster(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
-
- if runtime == nil {
- return
- }
-
- if len(runtime.Spec.Master.Resources.Limits) > 0 || len(runtime.Spec.Master.Resources.Requests) > 0 {
- value.Master.Resources = utils.TransformCoreV1ResourcesToInternalResources(runtime.Spec.Master.Resources)
- }
- if len(runtime.Spec.JobMaster.Resources.Limits) > 0 || len(runtime.Spec.JobMaster.Resources.Requests) > 0 {
- value.JobMaster.Resources = utils.TransformCoreV1ResourcesToInternalResources(runtime.Spec.JobMaster.Resources)
- }
- if len(runtime.Spec.Master.Resources.Limits) == 0 && len(runtime.Spec.Master.Resources.Requests) == 0 {
- return
- }
-
- value.Master.Resources = utils.TransformCoreV1ResourcesToInternalResources(runtime.Spec.Master.Resources)
-}
-
-func (e *GooseFSEngine) transformResourcesForWorker(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
-
- if runtime.Spec.Worker.Resources.Limits == nil {
- e.Log.Info("skip setting memory limit")
- return
- }
-
- if _, found := runtime.Spec.Worker.Resources.Limits[corev1.ResourceMemory]; !found {
- e.Log.Info("skip setting memory limit")
- return
- }
-
- value.Worker.Resources = utils.TransformCoreV1ResourcesToInternalResources(runtime.Spec.Worker.Resources)
-
- // for job worker
- if len(runtime.Spec.JobWorker.Resources.Limits) > 0 || len(runtime.Spec.JobWorker.Resources.Requests) > 0 {
- value.JobWorker.Resources = utils.TransformCoreV1ResourcesToInternalResources(runtime.Spec.JobWorker.Resources)
- }
-
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- e.Log.Error(err, "failed to transformResourcesForWorker")
- }
- storageMap := tieredstore.GetLevelStorageMap(runtimeInfo)
-
- e.Log.Info("transformResourcesForWorker", "storageMap", storageMap)
-
- // TODO(iluoeli): it should be xmx + direct memory
- memLimit := resource.MustParse("20Gi")
- if quantity, exists := runtime.Spec.Worker.Resources.Limits[corev1.ResourceMemory]; exists && !quantity.IsZero() {
- memLimit = quantity
- }
-
- for key, requirement := range storageMap {
- if value.Worker.Resources.Limits == nil {
- value.Worker.Resources.Limits = make(common.ResourceList)
- }
- if key == common.MemoryCacheStore {
- req := requirement.DeepCopy()
-
- memLimit.Add(req)
-
- e.Log.Info("update the requirement for memory", "requirement", memLimit)
-
- }
- // } else if key == common.DiskCacheStore {
- // req := requirement.DeepCopy()
-
- // e.Log.Info("update the requiremnet for disk", "requirement", req)
-
- // value.Worker.Resources.Limits[corev1.ResourceEphemeralStorage] = req.String()
- // }
- }
-
- value.Worker.Resources.Limits[corev1.ResourceMemory] = memLimit.String()
-}
-
-func (e *GooseFSEngine) transformResourcesForFuse(runtime *datav1alpha1.GooseFSRuntime, value *GooseFS) {
-
- if runtime.Spec.Fuse.Resources.Limits == nil {
- e.Log.Info("skip setting memory limit")
- return
- }
-
- if _, found := runtime.Spec.Fuse.Resources.Limits[corev1.ResourceMemory]; !found {
- e.Log.Info("skip setting memory limit")
- return
- }
-
- value.Fuse.Resources = utils.TransformCoreV1ResourcesToInternalResources(runtime.Spec.Fuse.Resources)
-
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- e.Log.Error(err, "failed to transformResourcesForFuse")
- }
- storageMap := tieredstore.GetLevelStorageMap(runtimeInfo)
-
- e.Log.Info("transformFuse", "storageMap", storageMap)
-
- // TODO(iluoeli): it should be xmx + direct memory
- memLimit := resource.MustParse("50Gi")
- if quantity, exists := runtime.Spec.Fuse.Resources.Limits[corev1.ResourceMemory]; exists && !quantity.IsZero() {
- memLimit = quantity
- }
-
- for key, requirement := range storageMap {
- if value.Fuse.Resources.Limits == nil {
- value.Fuse.Resources.Limits = make(common.ResourceList)
- }
- if key == common.MemoryCacheStore {
- req := requirement.DeepCopy()
-
- memLimit.Add(req)
-
- e.Log.Info("update the requiremnet for memory", "requirement", memLimit)
-
- }
- // } else if key == common.DiskCacheStore {
- // req := requirement.DeepCopy()
- // e.Log.Info("update the requiremnet for disk", "requirement", req)
- // value.Fuse.Resources.Limits[corev1.ResourceEphemeralStorage] = req.String()
- // }
- }
- if value.Fuse.Resources.Limits != nil {
- value.Fuse.Resources.Limits[corev1.ResourceMemory] = memLimit.String()
- }
-
-}
-
-func (e *GooseFSEngine) transformTolerations(dataset *datav1alpha1.Dataset, value *GooseFS) {
- if len(dataset.Spec.Tolerations) > 0 {
- value.Tolerations = dataset.Spec.Tolerations
- }
-}
diff --git a/pkg/ddc/goosefs/transform_resources_test.go b/pkg/ddc/goosefs/transform_resources_test.go
deleted file mode 100644
index 5fd0fe15fbf..00000000000
--- a/pkg/ddc/goosefs/transform_resources_test.go
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
-)
-
-func TestTransformResourcesForMaster(t *testing.T) {
- testCases := map[string]struct {
- runtime *datav1alpha1.GooseFSRuntime
- got *GooseFS
- want *GooseFS
- }{
- "test goosefs master pass through resources with limits and request case 1": {
- runtime: mockGooseFSRuntimeForMaster(
- corev1.ResourceRequirements{
- Requests: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("100m"),
- corev1.ResourceMemory: resource.MustParse("100Mi"),
- },
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("400m"),
- corev1.ResourceMemory: resource.MustParse("400Mi"),
- },
- },
- ),
- got: &GooseFS{},
- want: &GooseFS{
- Master: Master{
- Resources: common.Resources{
- Requests: common.ResourceList{
- corev1.ResourceCPU: "100m",
- corev1.ResourceMemory: "100Mi",
- },
- Limits: common.ResourceList{
- corev1.ResourceCPU: "400m",
- corev1.ResourceMemory: "400Mi",
- },
- },
- },
- JobMaster: JobMaster{
- Resources: common.Resources{
- Requests: common.ResourceList{
- corev1.ResourceCPU: "100m",
- corev1.ResourceMemory: "100Mi",
- },
- Limits: common.ResourceList{
- corev1.ResourceCPU: "400m",
- corev1.ResourceMemory: "400Mi",
- },
- },
- },
- },
- },
- "test GooseFS master pass through resources with request case 1": {
- runtime: mockGooseFSRuntimeForMaster(
- corev1.ResourceRequirements{
- Requests: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("100m"),
- corev1.ResourceMemory: resource.MustParse("100Mi"),
- },
- },
- ),
- got: &GooseFS{},
- want: &GooseFS{
- Master: Master{
- Resources: common.Resources{
- Requests: common.ResourceList{
- corev1.ResourceCPU: "100m",
- corev1.ResourceMemory: "100Mi",
- },
- Limits: common.ResourceList{},
- },
- },
- JobMaster: JobMaster{
- Resources: common.Resources{
- Requests: common.ResourceList{
- corev1.ResourceCPU: "100m",
- corev1.ResourceMemory: "100Mi",
- },
- Limits: common.ResourceList{},
- },
- },
- },
- },
- "test goosefs master pass through resources without request and limit case 1": {
- runtime: mockGooseFSRuntimeForMaster(
- corev1.ResourceRequirements{
- Requests: corev1.ResourceList{},
- },
- ),
- got: &GooseFS{},
- want: &GooseFS{},
- },
- "test goosefs master pass through resources without request and limit case 2": {
- runtime: mockGooseFSRuntimeForMaster(corev1.ResourceRequirements{}),
- got: &GooseFS{},
- want: &GooseFS{},
- },
- "test goosefs master pass through resources without request and limit case 3": {
- runtime: mockGooseFSRuntimeForMaster(
- corev1.ResourceRequirements{
- Limits: corev1.ResourceList{},
- },
- ),
- got: &GooseFS{},
- want: &GooseFS{},
- },
- }
-
- engine := &GooseFSEngine{}
- for k, item := range testCases {
- engine.transformResourcesForMaster(item.runtime, item.got)
- if !reflect.DeepEqual(item.want.Master.Resources, item.got.Master.Resources) {
- t.Errorf("%s failure, want resource: %+v,got resource: %+v",
- k,
- item.want.Master.Resources,
- item.got.Master.Resources,
- )
- }
- }
-}
-
-func mockGooseFSRuntimeForMaster(res corev1.ResourceRequirements) *datav1alpha1.GooseFSRuntime {
- runtime := &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Master: datav1alpha1.GooseFSCompTemplateSpec{
- Resources: res,
- },
- JobMaster: datav1alpha1.GooseFSCompTemplateSpec{
- Resources: res,
- },
- },
- }
- return runtime
-
-}
-
-func TestTransformResourcesForWorkerNoValue(t *testing.T) {
- var tests = []struct {
- runtime *datav1alpha1.GooseFSRuntime
- goosefsValue *GooseFS
- }{
- {&datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }, &GooseFS{
- Properties: map[string]string{},
- }},
- }
- for _, test := range tests {
- engine := &GooseFSEngine{Log: fake.NullLogger()}
- engine.transformResourcesForWorker(test.runtime, test.goosefsValue)
- if result, found := test.goosefsValue.Worker.Resources.Limits[corev1.ResourceMemory]; found {
- t.Errorf("expected nil, got %v", result)
- }
- }
-}
-
-func TestTransformResourcesForWorkerWithValue(t *testing.T) {
-
- resources := corev1.ResourceRequirements{}
- resources.Limits = make(corev1.ResourceList)
- resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi")
- resources.Limits[corev1.ResourceCPU] = resource.MustParse("500m")
- resources.Requests = make(corev1.ResourceList)
- resources.Requests[corev1.ResourceMemory] = resource.MustParse("1Gi")
- resources.Requests[corev1.ResourceCPU] = resource.MustParse("500m")
-
- result := resource.MustParse("20Gi")
-
- var tests = []struct {
- runtime *datav1alpha1.GooseFSRuntime
- goosefsValue *GooseFS
- }{
- {&datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Worker: datav1alpha1.GooseFSCompTemplateSpec{
- Resources: resources,
- },
- JobWorker: datav1alpha1.GooseFSCompTemplateSpec{
- Resources: resources,
- },
- TieredStore: datav1alpha1.TieredStore{
- Levels: []datav1alpha1.Level{{
- MediumType: common.Memory,
- Quota: &result,
- }},
- },
- },
- }, &GooseFS{
- Properties: map[string]string{},
- Master: Master{},
- }},
- }
- for _, test := range tests {
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- Client: fake.NewFakeClient(),
- name: "test",
- namespace: "test",
- UnitTest: true,
- }
- engine.runtimeInfo, _ = base.BuildRuntimeInfo("test", "test", common.GooseFSRuntime, base.WithTieredStore(test.runtime.Spec.TieredStore))
- engine.transformResourcesForWorker(test.runtime, test.goosefsValue)
- if test.goosefsValue.Worker.Resources.Limits[corev1.ResourceMemory] != "22Gi" {
- t.Errorf("expected 22Gi, got %v", test.goosefsValue.Worker.Resources.Limits[corev1.ResourceMemory])
- }
- }
-}
-
-func TestTransformResourcesForFuseNoValue(t *testing.T) {
- var tests = []struct {
- runtime *datav1alpha1.GooseFSRuntime
- goosefsValue *GooseFS
- }{
- {&datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- }, &GooseFS{
- Properties: map[string]string{},
- }},
- }
- for _, test := range tests {
- engine := &GooseFSEngine{Log: fake.NullLogger()}
- engine.transformResourcesForFuse(test.runtime, test.goosefsValue)
- if result, found := test.goosefsValue.Fuse.Resources.Limits[corev1.ResourceMemory]; found {
- t.Errorf("expected nil, got %v", result)
- }
- }
-}
-
-func TestTransformResourcesForFuseWithValue(t *testing.T) {
-
- resources := corev1.ResourceRequirements{}
- resources.Limits = make(corev1.ResourceList)
- resources.Limits[corev1.ResourceMemory] = resource.MustParse("2Gi")
-
- result := resource.MustParse("20Gi")
-
- var tests = []struct {
- runtime *datav1alpha1.GooseFSRuntime
- goosefsValue *GooseFS
- }{
- {&datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{
- Resources: resources,
- },
- TieredStore: datav1alpha1.TieredStore{
- Levels: []datav1alpha1.Level{{
- MediumType: common.Memory,
- Quota: &result,
- }},
- },
- },
- }, &GooseFS{
- Properties: map[string]string{},
- Master: Master{},
- JobMaster: JobMaster{},
- }},
- }
- for _, test := range tests {
- engine := &GooseFSEngine{
- Log: fake.NullLogger(),
- Client: fake.NewFakeClient(),
- name: "test",
- namespace: "test",
- UnitTest: true,
- }
- engine.runtimeInfo, _ = base.BuildRuntimeInfo("test", "test", common.GooseFSRuntime, base.WithTieredStore(test.runtime.Spec.TieredStore))
- engine.transformResourcesForFuse(test.runtime, test.goosefsValue)
- if test.goosefsValue.Fuse.Resources.Limits[corev1.ResourceMemory] != "22Gi" {
- t.Errorf("expected 22Gi, got %v", test.goosefsValue.Fuse.Resources.Limits[corev1.ResourceMemory])
- }
- }
-}
diff --git a/pkg/ddc/goosefs/transform_test.go b/pkg/ddc/goosefs/transform_test.go
deleted file mode 100644
index 9351d1469e9..00000000000
--- a/pkg/ddc/goosefs/transform_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- fakeutils "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/log/zap"
-)
-
-var _ = Describe("TransformFuse", func() {
- BeforeEach(func() {
- ctrl.SetLogger(zap.New(func(o *zap.Options) {
- o.Development = true
- }))
- })
-
- type testCase struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- value *GooseFS
- expect []string
- }
-
- DescribeTable("should transform fuse configuration correctly",
- func(tc testCase) {
- runtimeInfo, err := base.BuildRuntimeInfo("test", "fluid", "goosefs")
- Expect(err).NotTo(HaveOccurred())
-
- engine := &GooseFSEngine{
- runtimeInfo: runtimeInfo,
- Client: fakeutils.NewFakeClientWithScheme(testScheme),
- Log: ctrl.Log,
- }
-
- err = engine.transformFuse(tc.runtime, tc.dataset, tc.value)
- Expect(err).NotTo(HaveOccurred())
- Expect(tc.value.Fuse.Args).To(Equal(tc.expect))
- },
- Entry("with owner UID and GID",
- func() testCase {
- var x int64 = 1000
- return testCase{
- runtime: &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- },
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "local:///mnt/test",
- Name: "test",
- }},
- Owner: &datav1alpha1.User{
- UID: &x,
- GID: &x,
- },
- },
- },
- value: &GooseFS{},
- expect: []string{"fuse", "--fuse-opts=rw,direct_io,uid=1000,gid=1000,allow_other"},
- }
- }(),
- ),
- )
-})
diff --git a/pkg/ddc/goosefs/transform_ufs.go b/pkg/ddc/goosefs/transform_ufs.go
deleted file mode 100644
index 81304bd788f..00000000000
--- a/pkg/ddc/goosefs/transform_ufs.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "strings"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-// transform dataset which has ufsPaths and ufsVolumes
-func (e *GooseFSEngine) transformDatasetToVolume(runtime *datav1alpha1.GooseFSRuntime, dataset *datav1alpha1.Dataset, value *GooseFS) {
-
- mounts := dataset.Spec.Mounts
- for _, mount := range mounts {
- // if mount.MountPoint
- if strings.HasPrefix(mount.MountPoint, common.PathScheme.String()) {
- if len(value.UFSPaths) == 0 {
- value.UFSPaths = []UFSPath{}
- }
-
- ufsPath := UFSPath{}
- ufsPath.Name = mount.Name
- ufsPath.ContainerPath = utils.UFSPathBuilder{}.GenLocalStoragePath(mount)
- ufsPath.HostPath = strings.TrimPrefix(mount.MountPoint, common.PathScheme.String())
- value.UFSPaths = append(value.UFSPaths, ufsPath)
-
- } else if strings.HasPrefix(mount.MountPoint, common.VolumeScheme.String()) {
- if len(value.UFSVolumes) == 0 {
- value.UFSVolumes = []UFSVolume{}
- }
-
- // Split MountPoint into PVC name and subpath (if it contains a subpath)
- parts := strings.SplitN(strings.TrimPrefix(mount.MountPoint, common.VolumeScheme.String()), "/", 2)
-
- if len(parts) > 1 {
- // MountPoint contains subpath
- value.UFSVolumes = append(value.UFSVolumes, UFSVolume{
- Name: parts[0],
- SubPath: parts[1],
- ContainerPath: utils.UFSPathBuilder{}.GenLocalStoragePath(mount),
- })
- } else {
- // MountPoint does not contain subpath
- value.UFSVolumes = append(value.UFSVolumes, UFSVolume{
- Name: parts[0],
- ContainerPath: utils.UFSPathBuilder{}.GenLocalStoragePath(mount),
- })
- }
- }
- }
-
- if len(value.UFSPaths) > 0 {
- // fmt.Println("UFSPaths length 1")
- if dataset.Spec.NodeAffinity != nil {
- value.Master.Affinity = Affinity{
- NodeAffinity: translateCacheToNodeAffinity(dataset.Spec.NodeAffinity),
- }
- }
- }
-
-}
diff --git a/pkg/ddc/goosefs/transform_ufs_test.go b/pkg/ddc/goosefs/transform_ufs_test.go
deleted file mode 100644
index de28be03891..00000000000
--- a/pkg/ddc/goosefs/transform_ufs_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- v1 "k8s.io/api/core/v1"
-)
-
-var _ = Describe("TransformDatasetToVolume", Label("pkg.ddc.goosefs.transform_ufs_test.go"), func() {
- Describe("with local mount", func() {
- type testCase struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- expectPath string
- expectHostPath string
- }
-
- DescribeTable("should transform dataset to volume correctly",
- func(tc testCase) {
- value := &GooseFS{}
- engine := &GooseFSEngine{}
- engine.transformDatasetToVolume(tc.runtime, tc.dataset, value)
-
- Expect(value.UFSPaths).To(HaveLen(1))
- Expect(value.UFSPaths[0].HostPath).To(Equal(tc.expectHostPath))
- Expect(value.UFSPaths[0].ContainerPath).To(Equal(tc.expectPath))
- },
- Entry("local mount without path",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "local:///mnt/test",
- Name: "test",
- }},
- },
- },
- expectPath: "/underFSStorage/test",
- expectHostPath: "/mnt/test",
- },
- ),
- Entry("local mount with root path",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "local:///mnt/test",
- Name: "test",
- Path: "/",
- }},
- },
- },
- expectPath: "/underFSStorage",
- expectHostPath: "/mnt/test",
- },
- ),
- )
- })
-
- Describe("with PVC mount", func() {
- type testCase struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- expectName string
- expectPath string
- expectSubPath string
- }
-
- DescribeTable("should transform dataset to PVC correctly",
- func(tc testCase) {
- value := &GooseFS{}
- engine := &GooseFSEngine{}
- engine.transformDatasetToVolume(tc.runtime, tc.dataset, value)
-
- Expect(value.UFSVolumes).To(HaveLen(1))
- Expect(value.UFSVolumes[0].Name).To(Equal(tc.expectName))
- Expect(value.UFSVolumes[0].ContainerPath).To(Equal(tc.expectPath))
- Expect(value.UFSVolumes[0].SubPath).To(Equal(tc.expectSubPath))
- },
- Entry("PVC mount without path",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "pvc://test",
- Name: "test",
- }},
- },
- },
- expectName: "test",
- expectPath: "/underFSStorage/test",
- expectSubPath: "",
- },
- ),
- Entry("PVC mount with root path",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "pvc://test1",
- Name: "test1",
- Path: "/",
- }},
- },
- },
- expectName: "test1",
- expectPath: "/underFSStorage",
- expectSubPath: "",
- },
- ),
- Entry("PVC mount with subpath",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "pvc://test2/subpath",
- Name: "test2",
- }},
- },
- },
- expectName: "test2",
- expectPath: "/underFSStorage/test2",
- expectSubPath: "subpath",
- },
- ),
- Entry("PVC mount with subpath and root path",
- testCase{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "pvc://test3/subpath",
- Name: "test3",
- Path: "/",
- }},
- },
- },
- expectName: "test3",
- expectPath: "/underFSStorage",
- expectSubPath: "subpath",
- },
- ),
- )
- })
-
- Describe("with node affinity", func() {
- It("should set master affinity from dataset", func() {
- runtime := &datav1alpha1.GooseFSRuntime{}
- dataset := &datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{{
- MountPoint: "local:///mnt/test",
- Name: "test",
- }},
- NodeAffinity: &datav1alpha1.CacheableNodeAffinity{
- Required: &v1.NodeSelector{
- NodeSelectorTerms: []v1.NodeSelectorTerm{
- {
- MatchExpressions: []v1.NodeSelectorRequirement{
- {
- Operator: v1.NodeSelectorOpIn,
- Values: []string{"test-label-value"},
- },
- },
- },
- },
- },
- },
- },
- }
- value := &GooseFS{}
-
- engine := &GooseFSEngine{}
- engine.transformDatasetToVolume(runtime, dataset, value)
-
- Expect(value.Master.Affinity.NodeAffinity).NotTo(BeNil())
- })
- })
-})
diff --git a/pkg/ddc/goosefs/types.go b/pkg/ddc/goosefs/types.go
deleted file mode 100644
index 6a9e773af87..00000000000
--- a/pkg/ddc/goosefs/types.go
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "time"
-
- "github.com/fluid-cloudnative/fluid/pkg/common"
- v1 "k8s.io/api/core/v1"
-)
-
-// The value yaml file
-type GooseFS struct {
- FullnameOverride string `yaml:"fullnameOverride"`
- OwnerDatasetId string `yaml:"ownerDatasetId"`
-
- common.ImageInfo `yaml:",inline"`
- common.UserInfo `yaml:",inline"`
-
- NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
- JvmOptions []string `yaml:"jvmOptions,omitempty"`
-
- Properties map[string]string `yaml:"properties,omitempty"`
-
- Master Master `yaml:"master,omitempty"`
-
- JobMaster JobMaster `yaml:"jobMaster,omitempty"`
-
- Worker Worker `yaml:"worker,omitempty"`
-
- JobWorker JobWorker `yaml:"jobWorker,omitempty"`
-
- Fuse Fuse `yaml:"fuse,omitempty"`
-
- APIGateway APIGateway `yaml:"apiGateway,omitempty"`
-
- Tieredstore Tieredstore `yaml:"tieredstore,omitempty"`
-
- Metastore Metastore `yaml:"metastore,omitempty"`
-
- Journal Journal `yaml:"journal,omitempty"`
-
- ShortCircuit ShortCircuit `yaml:"shortCircuit,omitempty"`
- // Enablefluid bool `yaml:"enablefluid,omitempty"`
-
- UFSPaths []UFSPath `yaml:"ufsPaths,omitempty"`
-
- UFSVolumes []UFSVolume `yaml:"ufsVolumes,omitempty"`
-
- InitUsers common.InitUsers `yaml:"initUsers,omitempty"`
-
- Monitoring string `yaml:"monitoring,omitempty"`
-
- HadoopConfig HadoopConfig `yaml:"hadoopConfig,omitempty"`
-
- Tolerations []v1.Toleration `yaml:"tolerations,omitempty"`
-
- PlacementMode string `yaml:"placement,omitempty"`
-}
-
-type HadoopConfig struct {
- ConfigMap string `yaml:"configMap"`
- IncludeHdfsSite bool `yaml:"includeHdfsSite"`
- IncludeCoreSite bool `yaml:"includeCoreSite"`
-}
-
-type UFSPath struct {
- HostPath string `yaml:"hostPath"`
- UFSVolume `yaml:",inline"`
-}
-
-type UFSVolume struct {
- Name string `yaml:"name"`
- SubPath string `yaml:"subPath,omitempty"`
- ContainerPath string `yaml:"containerPath"`
-}
-
-type Metastore struct {
- VolumeType string `yaml:"volumeType,omitempty"`
- Size string `yaml:"size,omitempty"`
-}
-
-type Journal struct {
- VolumeType string `yaml:"volumeType,omitempty"`
- Size string `yaml:"size,omitempty"`
-}
-
-type ShortCircuit struct {
- Enable bool `yaml:"enable,omitempty"`
- Policy string `yaml:"policy,omitempty"`
- VolumeType string `yaml:"volumeType,omitempty"`
-}
-
-type Ports struct {
- Rpc int `yaml:"rpc,omitempty"`
- Web int `yaml:"web,omitempty"`
- Embedded int `yaml:"embedded,omitempty"`
- Data int `yaml:"data,omitempty"`
- Rest int `yaml:"rest,omitempty"`
-}
-
-type APIGateway struct {
- Enabled bool `yaml:"enabled,omitempty"`
- Ports Ports `yaml:"ports,omitempty"`
-}
-
-type JobMaster struct {
- Ports Ports `yaml:"ports,omitempty"`
- Resources common.Resources `yaml:"resources,omitempty"`
-}
-
-type JobWorker struct {
- Ports Ports `yaml:"ports,omitempty"`
- Resources common.Resources `yaml:"resources,omitempty"`
-}
-
-type Worker struct {
- JvmOptions []string `yaml:"jvmOptions,omitempty"`
- Env map[string]string `yaml:"env,omitempty"`
- NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
- Properties map[string]string `yaml:"properties,omitempty"`
- HostNetwork bool `yaml:"hostNetwork,omitempty"`
- Resources common.Resources `yaml:"resources,omitempty"`
- Ports Ports `yaml:"ports,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-type Master struct {
- JvmOptions []string `yaml:"jvmOptions,omitempty"`
- Env map[string]string `yaml:"env,omitempty"`
- Affinity Affinity `yaml:"affinity"`
- NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
- Properties map[string]string `yaml:"properties,omitempty"`
- Replicas int32 `yaml:"replicaCount,omitempty"`
- HostNetwork bool `yaml:"hostNetwork,omitempty"`
- Resources common.Resources `yaml:"resources,omitempty"`
- Ports Ports `yaml:"ports,omitempty"`
- BackupPath string `yaml:"backupPath,omitempty"`
- Restore Restore `yaml:"restore,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-type Restore struct {
- Enabled bool `yaml:"enabled,omitempty"`
- Path string `yaml:"path,omitempty"`
- PVCName string `yaml:"pvcName,omitempty"`
-}
-
-type Fuse struct {
- Image string `yaml:"image,omitempty"`
- NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
- ImageTag string `yaml:"imageTag,omitempty"`
- ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
- Properties map[string]string `yaml:"properties,omitempty"`
- Env map[string]string `yaml:"env,omitempty"`
- JvmOptions []string `yaml:"jvmOptions,omitempty"`
- MountPath string `yaml:"mountPath,omitempty"`
- ShortCircuitPolicy string `yaml:"shortCircuitPolicy,omitempty"`
- Args []string `yaml:"args,omitempty"`
- HostNetwork bool `yaml:"hostNetwork,omitempty"`
- HostPID bool `json:"hostPID,omitempty"`
- Enabled bool `yaml:"enabled,omitempty"`
- Resources common.Resources `yaml:"resources,omitempty"`
- Global bool `yaml:"global,omitempty"`
- CriticalPod bool `yaml:"criticalPod,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
-type Tieredstore struct {
- Levels []Level `yaml:"levels,omitempty"`
-}
-
-type Level struct {
- Alias string `yaml:"alias,omitempty"`
- Level int `yaml:"level"`
- Mediumtype string `yaml:"mediumtype,omitempty"`
- Type string `yaml:"type,omitempty"`
- Path string `yaml:"path,omitempty"`
- Quota string `yaml:"quota,omitempty"`
- High string `yaml:"high,omitempty"`
- Low string `yaml:"low,omitempty"`
-}
-
-type Affinity struct {
- NodeAffinity *NodeAffinity `yaml:"nodeAffinity"`
-}
-
-type cacheHitStates struct {
- cacheHitRatio string
- localHitRatio string
- remoteHitRatio string
-
- localThroughputRatio string
- remoteThroughputRatio string
- cacheThroughputRatio string
-
- bytesReadLocal int64
- bytesReadRemote int64
- bytesReadUfsAll int64
-
- timestamp time.Time
-}
-
-type cacheStates struct {
- cacheCapacity string
- // cacheable string
- // lowWaterMark string
- // highWaterMark string
- cached string
- cachedPercentage string
- cacheHitStates cacheHitStates
- // nonCacheable string
-}
-
-func (value *GooseFS) getTiredStoreLevel0Path(name, namespace string) (path string) {
- path = fmt.Sprintf("/dev/shm/%s/%s", namespace, name)
- if value == nil {
- return
- }
- for _, level := range value.Tieredstore.Levels {
- if level.Level == 0 {
- path = level.Path
- break
- }
- }
- return
-}
diff --git a/pkg/ddc/goosefs/types_selector.go b/pkg/ddc/goosefs/types_selector.go
deleted file mode 100644
index f38fae178e1..00000000000
--- a/pkg/ddc/goosefs/types_selector.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
-)
-
-// NodeSelectorRequirement is a selector that contains values, a key, and an operator
-// that relates the key and values.
-type NodeSelectorRequirement struct {
- // The label key that the selector applies to.
- Key string `yaml:"key,omitempty"`
- // Represents a key's relationship to a set of values.
- // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
- Operator string `yaml:"operator,omitempty"`
- // +optional
- Values []string `yaml:"values,omitempty"`
-}
-
-// NodeSelectorTerm represents expressions and fields required to select nodes.
-// A null or empty node selector term matches no objects. The requirements of
-// them are ANDed.
-// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
-type NodeSelectorTerm struct {
- // A list of node selector requirements by node's labels.
- MatchExpressions []NodeSelectorRequirement `yaml:"matchExpressions"`
-}
-
-// NodeSelector represents the union of the results of one or more label queries
-// over a set of nodes; that is, it represents the OR of the selectors represented
-// by the node selector terms.
-type NodeSelector struct {
- //Required. A list of node selector terms.
- NodeSelectorTerms []NodeSelectorTerm `yaml:"nodeSelectorTerms"`
-}
-
-type NodeAffinity struct {
- // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
- // If the affinity requirements specified by this field are not met at
- // scheduling time, the pod will not be scheduled onto the node.
- // If the affinity requirements specified by this field cease to be met
- // at some point during pod execution (e.g. due to an update), the system
- // will try to eventually evict the pod from its node.
- // +optional
- // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector
-
- // If the affinity requirements specified by this field are not met at
- // scheduling time, the pod will not be scheduled onto the node.
- // If the affinity requirements specified by this field cease to be met
- // at some point during pod execution (e.g. due to an update), the system
- // may or may not try to eventually evict the pod from its node.
- // +optional
- RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `yaml:"requiredDuringSchedulingIgnoredDuringExecution"`
-}
-
-func translateCacheToNodeAffinity(dataAffinity *datav1alpha1.CacheableNodeAffinity) (nodeAffinity *NodeAffinity) {
- nodeAffinity = nil
- if dataAffinity == nil || dataAffinity.Required == nil {
- return
- }
-
- nodeAffinity = &NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &NodeSelector{
- NodeSelectorTerms: []NodeSelectorTerm{},
- },
- }
-
- for _, srcTerm := range dataAffinity.Required.NodeSelectorTerms {
- dstTerm := NodeSelectorTerm{
- MatchExpressions: []NodeSelectorRequirement{},
- }
-
- for _, srcMatch := range srcTerm.MatchExpressions {
-
- dstMatch := NodeSelectorRequirement{
- Key: srcMatch.Key,
- Operator: string(srcMatch.Operator),
- Values: srcMatch.Values,
- }
-
- dstTerm.MatchExpressions = append(dstTerm.MatchExpressions, dstMatch)
- }
- nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
- dstTerm)
-
- }
-
- return
-
-}
diff --git a/pkg/ddc/goosefs/types_test.go b/pkg/ddc/goosefs/types_test.go
deleted file mode 100644
index b541f487b3d..00000000000
--- a/pkg/ddc/goosefs/types_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("GooseFS Types", func() {
- Describe("getTiredStoreLevel0Path", func() {
- var (
- goosefs *GooseFS
- name string
- namespace string
- )
-
- BeforeEach(func() {
- name = "goosefs-01"
- namespace = "default"
- })
-
- Context("when tieredstore has level 0", func() {
- It("should return the configured path", func() {
- goosefs = &GooseFS{
- Tieredstore: Tieredstore{
- Levels: []Level{
- {
- Level: 0,
- Path: "/mnt/demo/data",
- },
- },
- },
- }
- got := goosefs.getTiredStoreLevel0Path(name, namespace)
- Expect(got).To(Equal("/mnt/demo/data"))
- })
- })
-
- Context("when tieredstore has only level 1", func() {
- It("should return the default shm path", func() {
- goosefs = &GooseFS{
- Tieredstore: Tieredstore{
- Levels: []Level{
- {
- Level: 1,
- Path: "/mnt/demo/data",
- },
- },
- },
- }
- got := goosefs.getTiredStoreLevel0Path(name, namespace)
- Expect(got).To(Equal("/dev/shm/default/goosefs-01"))
- })
- })
-
- Context("when tieredstore has multiple levels", func() {
- It("should return the level 0 path", func() {
- goosefs = &GooseFS{
- Tieredstore: Tieredstore{
- Levels: []Level{
- {
- Level: 1,
- Path: "/mnt/ssd/data",
- },
- {
- Level: 0,
- Path: "/mnt/mem/data",
- },
- {
- Level: 2,
- Path: "/mnt/hdd/data",
- },
- },
- },
- }
- got := goosefs.getTiredStoreLevel0Path(name, namespace)
- Expect(got).To(Equal("/mnt/mem/data"))
- })
- })
-
- Context("when tieredstore is empty", func() {
- It("should return the default shm path", func() {
- goosefs = &GooseFS{
- Tieredstore: Tieredstore{
- Levels: []Level{},
- },
- }
- got := goosefs.getTiredStoreLevel0Path(name, namespace)
- Expect(got).To(Equal("/dev/shm/default/goosefs-01"))
- })
- })
-
- Context("with different namespace and name", func() {
- It("should construct the correct default path", func() {
- goosefs = &GooseFS{
- Tieredstore: Tieredstore{},
- }
- got := goosefs.getTiredStoreLevel0Path("mydata", "production")
- Expect(got).To(Equal("/dev/shm/production/mydata"))
- })
- })
-
- Context("when goosefs is nil", func() {
- It("should return the default shm path without panicking", func() {
- var goosefs *GooseFS = nil
- // Note: A nil check will be needed in getTiredStoreLevel0Path for this to pass
- got := goosefs.getTiredStoreLevel0Path(name, namespace)
- Expect(got).To(Equal("/dev/shm/default/goosefs-01"))
- })
- })
- })
-})
diff --git a/pkg/ddc/goosefs/ufs.go b/pkg/ddc/goosefs/ufs.go
deleted file mode 100644
index 4e18842b9f1..00000000000
--- a/pkg/ddc/goosefs/ufs.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
-)
-
-// UsedStorageBytes returns used storage size of GooseFS in bytes
-func (e *GooseFSEngine) UsedStorageBytes() (value int64, err error) {
- // return e.usedStorageBytesInternal()
- return e.usedStorageBytesInternal()
-}
-
-// FreeStorageBytes returns free storage size of GooseFS in bytes
-func (e *GooseFSEngine) FreeStorageBytes() (value int64, err error) {
- // return e.freeStorageBytesInternal()
- return e.freeStorageBytesInternal()
-}
-
-// TotalStorageBytes return total storage size of GooseFS in bytes
-func (e *GooseFSEngine) TotalStorageBytes() (value int64, err error) {
- // return e.totalStorageBytesInternal()
- return e.totalStorageBytesInternal()
-}
-
-// TotalFileNums returns the total num of files in GooseFS
-func (e *GooseFSEngine) TotalFileNums() (value int64, err error) {
- // return e.totalFileNumsInternal()
- return e.totalFileNumsInternal()
-}
-
-// ShouldCheckUFS checks if it requires checking UFS
-func (e *GooseFSEngine) ShouldCheckUFS() (should bool, err error) {
- // For GooseFS Engine, always attempt to prepare UFS
- should = true
- return
-}
-
-// PrepareUFS does all the UFS preparations
-func (e *GooseFSEngine) PrepareUFS() (err error) {
- // 1. Mount UFS (Synchronous Operation)
- shouldMountUfs, err := e.shouldMountUFS()
- if err != nil {
- return
- }
- e.Log.Info("shouldMountUFS", "should", shouldMountUfs)
-
- if shouldMountUfs {
- err = e.mountUFS()
- if err != nil {
- return
- }
- }
- e.Log.Info("mountUFS")
-
- err = e.SyncMetadata()
- if err != nil {
- // just report this error and ignore it because SyncMetadata isn't on the critical path of Setup
- e.Log.Error(err, "SyncMetadata")
- return nil
- }
-
- return
-}
-
-func (e *GooseFSEngine) ShouldUpdateUFS() (ufsToUpdate *utils.UFSToUpdate) {
- // 1. get the dataset
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- e.Log.Error(err, "Failed to get the dataset")
- return
- }
-
- // 2.get the ufs to update
- ufsToUpdate = utils.NewUFSToUpdate(dataset)
- ufsToUpdate.AnalyzePathsDelta()
-
- return
-}
-
-func (e *GooseFSEngine) UpdateOnUFSChange(ufsToUpdate *utils.UFSToUpdate) (updateReady bool, err error) {
- // 1. check if need to update ufs
- if !ufsToUpdate.ShouldUpdate() {
- e.Log.Info("no need to update ufs",
- "namespace", e.namespace,
- "name", e.name)
- return
- }
-
- // 2. set update status to updating
- err = utils.UpdateMountStatus(e.Client, e.name, e.namespace, datav1alpha1.UpdatingDatasetPhase)
- if err != nil {
- e.Log.Error(err, "Failed to update dataset status to updating")
- return
- }
-
- // 3. process added and removed
- err = e.processUpdatingUFS(ufsToUpdate)
- if err != nil {
- e.Log.Error(err, "Failed to add or remove mount points")
- return
- }
- updateReady = true
- return
-}
-
-func (e *GooseFSEngine) ShouldSyncDatasetMounts() (should bool, err error) {
- return false, nil
-}
-
-func (e *GooseFSEngine) SyncDatasetMounts() (err error) {
- return nil
-}
diff --git a/pkg/ddc/goosefs/ufs_internal.go b/pkg/ddc/goosefs/ufs_internal.go
deleted file mode 100644
index a1ff175a7ab..00000000000
--- a/pkg/ddc/goosefs/ufs_internal.go
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "fmt"
- "reflect"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- securityutil "github.com/fluid-cloudnative/fluid/pkg/utils/security"
- "github.com/pkg/errors"
-)
-
-func (e *GooseFSEngine) usedStorageBytesInternal() (value int64, err error) {
- return
-}
-
-func (e *GooseFSEngine) freeStorageBytesInternal() (value int64, err error) {
- return
-}
-
-func (e *GooseFSEngine) totalStorageBytesInternal() (total int64, err error) {
- podName, containerName := e.getMasterPodInfo()
-
- fileUitls := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- _, _, total, err = fileUitls.Count("/")
- if err != nil {
- return
- }
-
- return
-}
-
-func (e *GooseFSEngine) totalFileNumsInternal() (fileCount int64, err error) {
- podName, containerName := e.getMasterPodInfo()
-
- fileUitls := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
- fileCount, err = fileUitls.GetFileCount()
- if err != nil {
- return
- }
-
- return
-}
-
-// shouldMountUFS checks if there's any UFS that need to be mounted
-func (e *GooseFSEngine) shouldMountUFS() (should bool, err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return should, err
- }
- e.Log.Info("get dataset info", "dataset", dataset)
-
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
-
- ready := fileUtils.Ready()
- if !ready {
- should = false
- err = fmt.Errorf("the UFS is not ready")
- return should, err
- }
-
- // Check if any of the Mounts has not been mounted in GooseFS
- for _, mount := range dataset.Spec.Mounts {
- if common.IsFluidNativeScheme(mount.MountPoint) {
- // No need for a mount point with Fluid native scheme('local://' and 'pvc://') to be mounted
- continue
- }
- goosefsPath := utils.UFSPathBuilder{}.GenUFSPathInUnifiedNamespace(mount)
- mounted, err := fileUtils.IsMounted(goosefsPath)
- if err != nil {
- should = false
- return should, err
- }
- if !mounted {
- e.Log.Info("Found dataset that is not mounted.", "dataset", dataset)
- should = true
- return should, err
- }
- }
-
- return should, err
-
-}
-
-// getMounts get slice of mounted paths and expected mount paths
-func (e *GooseFSEngine) getMounts() (resultInCtx []string, resultHaveMounted []string, err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- e.Log.V(1).Info("get dataset info", "dataset", dataset)
- if err != nil {
- return resultInCtx, resultHaveMounted, err
- }
-
- podName, containerName := e.getMasterPodInfo()
- fileUitls := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
-
- ready := fileUitls.Ready()
- if !ready {
- err = fmt.Errorf("the UFS is not ready")
- return resultInCtx, resultHaveMounted, err
- }
-
- // Check if any of the Mounts has not been mounted in GooseFS
- for _, mount := range dataset.Spec.Mounts {
- if common.IsFluidNativeScheme(mount.MountPoint) {
- // No need for a mount point with Fluid native scheme('local://' and 'pvc://') to be mounted
- continue
- }
- goosefsPathInCtx := utils.UFSPathBuilder{}.GenUFSPathInUnifiedNamespace(mount)
- resultInCtx = append(resultInCtx, goosefsPathInCtx)
- }
-
- // get the mount points have been mountted
- for _, mount := range dataset.Status.Mounts {
- if common.IsFluidNativeScheme(mount.MountPoint) {
- // No need for a mount point with Fluid native scheme('local://' and 'pvc://') to be mounted
- continue
- }
- goosefsPathHaveMountted := utils.UFSPathBuilder{}.GenUFSPathInUnifiedNamespace(mount)
- resultHaveMounted = append(resultHaveMounted, goosefsPathHaveMountted)
- }
-
- return resultInCtx, resultHaveMounted, err
-
-}
-
-// calculateMountPointsChanges will compare diff of spec mount and status mount,
-// to find need to be added mount point and removed mount point
-func (e *GooseFSEngine) calculateMountPointsChanges(mountsHaveMountted []string, mountsInContext []string) ([]string, []string) {
- removed := []string{}
- added := []string{}
-
- for _, v := range mountsHaveMountted {
- if !ContainsString(mountsInContext, v) {
- removed = append(removed, v)
- }
- }
-
- for _, v := range mountsInContext {
- if !ContainsString(mountsHaveMountted, v) {
- added = append(added, v)
- }
- }
-
- return added, removed
-}
-
-// ContainsString returns true if a string is present in a iteratee.
-func ContainsString(s []string, v string) bool {
- for _, vv := range s {
- if vv == v {
- return true
- }
- }
- return false
-}
-
-// processUpdatingUFS will mount needed mountpoint to ufs
-func (e *GooseFSEngine) processUpdatingUFS(ufsToUpdate *utils.UFSToUpdate) (err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return err
- }
-
- podName, containerName := e.getMasterPodInfo()
- fileUtils := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
-
- ready := fileUtils.Ready()
- if !ready {
- return fmt.Errorf("the UFS is not ready, namespace:%s,name:%s", e.namespace, e.name)
- }
-
- // Iterate all the mount points, do mount if the mount point is in added array
- // TODO: not allow to edit FluidNativeScheme MountPoint
- for _, mount := range dataset.Spec.Mounts {
- if common.IsFluidNativeScheme(mount.MountPoint) {
- continue
- }
-
- goosefsPath := utils.UFSPathBuilder{}.GenUFSPathInUnifiedNamespace(mount)
- if len(ufsToUpdate.ToAdd()) > 0 && utils.ContainsString(ufsToUpdate.ToAdd(), goosefsPath) {
- mountOptions := map[string]string{}
- for key, value := range dataset.Spec.SharedOptions {
- mountOptions[key] = value
- }
-
- for key, value := range mount.Options {
- mountOptions[key] = value
- }
-
- // Configure mountOptions using encryptOptions
- // If encryptOptions have the same key with options, it will overwrite the corresponding value
- mountOptions, err = e.genEncryptOptions(dataset.Spec.SharedEncryptOptions, mountOptions, mount.Name)
- if err != nil {
- return err
- }
- mountOptions, err = e.genEncryptOptions(mount.EncryptOptions, mountOptions, mount.Name)
- if err != nil {
- return err
- }
- err = fileUtils.Mount(goosefsPath, mount.MountPoint, mountOptions, mount.ReadOnly, mount.Shared)
- if err != nil {
- return err
- }
- }
- }
-
- // unmount the mount point in the removed array
- if len(ufsToUpdate.ToRemove()) > 0 {
- for _, mountRemove := range ufsToUpdate.ToRemove() {
- err = fileUtils.UnMount(mountRemove)
- if err != nil {
- return err
- }
- }
- }
- // need to reset ufsTotal to Calculating so that SyncMetadata will work
- datasetToUpdate := dataset.DeepCopy()
- datasetToUpdate.Status.UfsTotal = MetadataSyncNotDoneMsg
- if !reflect.DeepEqual(dataset.Status, datasetToUpdate.Status) {
- err = e.Client.Status().Update(context.TODO(), datasetToUpdate)
- if err != nil {
- e.Log.Error(err, "fail to update ufsTotal of dataset to Calculating")
- }
- }
-
- err = e.SyncMetadata()
- if err != nil {
- // just report this error and ignore it because SyncMetadata isn't on the critical path of Setup
- e.Log.Error(err, "SyncMetadata", "dataset", e.name)
- return nil
- }
-
- return nil
-}
-
-// mountUFS() mount all UFSs to GooseFS according to mount points in `dataset.Spec`. If a mount point is Fluid-native, mountUFS() will skip it.
-func (e *GooseFSEngine) mountUFS() (err error) {
- dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
- if err != nil {
- return err
- }
-
- podName, containerName := e.getMasterPodInfo()
- fileUitls := operations.NewGooseFSFileUtils(podName, containerName, e.namespace, e.Log)
-
- ready := fileUitls.Ready()
- if !ready {
- return fmt.Errorf("the UFS is not ready")
- }
-
- // Iterate all the mount points, do mount if the mount point is not Fluid-native(e.g. Hostpath or PVC)
- for _, mount := range dataset.Spec.Mounts {
- mount := mount
- if common.IsFluidNativeScheme(mount.MountPoint) {
- continue
- }
-
- goosefsPath := utils.UFSPathBuilder{}.GenUFSPathInUnifiedNamespace(mount)
- mounted, err := fileUitls.IsMounted(goosefsPath)
- e.Log.Info("Check if the goosefs path is mounted.", "goosefsPath", goosefsPath, "mounted", mounted)
- if err != nil {
- return err
- }
-
- mOptions, err := e.genUFSMountOptions(mount, dataset.Spec.SharedOptions, dataset.Spec.SharedEncryptOptions)
- if err != nil {
- return errors.Wrapf(err, "gen ufs mount options by spec mount item failure,mount name:%s", mount.Name)
- }
- if !mounted {
- err = fileUitls.Mount(goosefsPath, mount.MountPoint, mOptions, mount.ReadOnly, mount.Shared)
- if err != nil {
- return err
- }
- }
-
- }
- return nil
-}
-
-// goosefs mount options
-func (e *GooseFSEngine) genUFSMountOptions(m datav1alpha1.Mount, SharedOptions map[string]string, SharedEncryptOptions []datav1alpha1.EncryptOption) (map[string]string, error) {
-
- // initialize goosefs mount options
- mOptions := map[string]string{}
- if len(SharedOptions) > 0 {
- mOptions = SharedOptions
- }
- for key, value := range m.Options {
- mOptions[key] = value
- }
-
- var err error
- mOptions, err = e.genEncryptOptions(SharedEncryptOptions, mOptions, m.Name)
- if err != nil {
- return mOptions, err
- }
-
- //gen public encryptOptions
- mOptions, err = e.genEncryptOptions(m.EncryptOptions, mOptions, m.Name)
- if err != nil {
- return mOptions, err
- }
-
- return mOptions, nil
-}
-
-// goosefs encrypt mount options
-func (e *GooseFSEngine) genEncryptOptions(EncryptOptions []datav1alpha1.EncryptOption, mOptions map[string]string, name string) (map[string]string, error) {
- for _, item := range EncryptOptions {
-
- if _, ok := mOptions[item.Name]; ok {
- err := fmt.Errorf("the option %s is set more than one times, please double check the dataset's option and encryptOptions", item.Name)
- return mOptions, err
- }
-
- securityutil.UpdateSensitiveKey(item.Name)
- sRef := item.ValueFrom.SecretKeyRef
- secret, err := kubeclient.GetSecret(e.Client, sRef.Name, e.namespace)
- if err != nil {
- e.Log.Error(err, "get secret by mount encrypt options failed", "name", item.Name)
- return mOptions, err
- }
-
- e.Log.Info("get value from secret", "mount name", name, "secret key", sRef.Key)
-
- v := secret.Data[sRef.Key]
- mOptions[item.Name] = string(v)
- }
-
- return mOptions, nil
-}
diff --git a/pkg/ddc/goosefs/ufs_internal_test.go b/pkg/ddc/goosefs/ufs_internal_test.go
deleted file mode 100644
index c5a8ed657f8..00000000000
--- a/pkg/ddc/goosefs/ufs_internal_test.go
+++ /dev/null
@@ -1,971 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "reflect"
- "testing"
-
- . "github.com/agiledragon/gomonkey/v2"
- corev1 "k8s.io/api/core/v1"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "github.com/go-logr/logr"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func mockGooseFSFileUtilsCount() (value string, err error) {
- r := `File Count Folder Count Folder Size
- 6 1 571808905`
- return r, nil
-}
-
-func TestGooseFSEngine_calculateMountPointsChanges(t *testing.T) {
-
- testCases := map[string]struct {
- mounted []string
- current []string
- expect map[string][]string
- }{
- "calculate mount point changes test case 1": {
- mounted: []string{"hadoop3.3.0"},
- current: []string{"hadoopcurrent", "hadoop3.3.0"},
- expect: map[string][]string{"added": {"hadoopcurrent"}, "removed": {}},
- },
- "calculate mount point changes test case 2": {
- mounted: []string{"hadoopcurrent", "hadoop3.3.0"},
- current: []string{"hadoop3.3.0"},
- expect: map[string][]string{"added": {}, "removed": {"hadoopcurrent"}},
- },
- "calculate mount point changes test case 3": {
- mounted: []string{"hadoopcurrent", "hadoop3.2.2"},
- current: []string{"hadoop3.3.0", "hadoop3.2.2"},
- expect: map[string][]string{"added": {"hadoop3.3.0"}, "removed": {"hadoopcurrent"}},
- },
- "calculate mount point changes test case 4": {
- mounted: []string{"hadoop3.3.0"},
- current: []string{"hadoop3.3.0"},
- expect: map[string][]string{"added": {}, "removed": {}},
- },
- "calculate mount point changes test case 5": {
- mounted: []string{"hadoopcurrent", "hadoop3.2.2"},
- current: []string{"hadoop3.3.0", "hadoop3.2.2", "hadoop3.3.1"},
- expect: map[string][]string{"added": {"hadoop3.3.0", "hadoop3.3.1"}, "removed": {"hadoopcurrent"}},
- },
- }
-
- for _, item := range testCases {
- engine := &GooseFSEngine{}
- added, removed := engine.calculateMountPointsChanges(item.mounted, item.current)
-
- if !ArrayEqual(added, item.expect["added"]) {
- t.Errorf("expected added %v, got %v", item.expect["added"], added)
- }
- if !ArrayEqual(removed, item.expect["removed"]) {
- t.Errorf("expected removed %v, got %v", item.expect["removed"], removed)
- }
- }
-
-}
-
-func ArrayEqual(a, b []string) bool {
- if (a == nil) != (b == nil) {
- return false
- }
-
- if len(a) != len(b) {
- return false
- }
-
- for key, val := range a {
- if val != b[key] {
- return false
- }
- }
- return true
-}
-
-func TestUsedStorageBytesInternal(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Log logr.Logger
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantValue int64
- wantErr bool
- }{
- // TODO: Add test cases.
- {
- name: "todo",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: tt.fields.Client,
- }
- gotValue, err := e.usedStorageBytesInternal()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.usedStorageBytesInternal() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotValue != tt.wantValue {
- t.Errorf("GooseFSEngine.usedStorageBytesInternal() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
-func TestFreeStorageBytesInternal(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Log logr.Logger
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantValue int64
- wantErr bool
- }{
- // TODO: Add test cases.
- {
- name: "todo",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: tt.fields.Client,
- }
- gotValue, err := e.freeStorageBytesInternal()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.freeStorageBytesInternal() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotValue != tt.wantValue {
- t.Errorf("GooseFSEngine.freeStorageBytesInternal() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
-func TestTotalStorageBytesInternal(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- wantTotal int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- name: "spark",
- namespace: "defaut",
- Log: fake.NullLogger(),
- },
- wantTotal: 571808905,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- }
-
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- summary, err := mockGooseFSFileUtilsCount()
- return summary, "", err
- })
- defer patch1.Reset()
- gotTotal, err := e.totalStorageBytesInternal()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.totalStorageBytesInternal() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotTotal != tt.wantTotal {
- t.Errorf("GooseFSEngine.totalStorageBytesInternal() = %v, want %v", gotTotal, tt.wantTotal)
- }
- })
- }
-}
-
-func TestTotalFileNumsInternal(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- wantFileCount int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- name: "spark",
- namespace: "defaut",
- Log: fake.NullLogger(),
- },
- wantFileCount: 6,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- }
-
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- summary, err := mockGooseFSFileUtilsCount()
- return summary, "", err
- })
- defer patch1.Reset()
- gotFileCount, err := e.totalFileNumsInternal()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.totalFileNumsInternal() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotFileCount != tt.wantFileCount {
- t.Errorf("GooseFSEngine.totalFileNumsInternal() = %v, want %v", gotFileCount, tt.wantFileCount)
- }
- })
- }
-}
-
-func TestShouldMountUFS(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantShould bool
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantShould: false,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, tt.fields.runtime, tt.fields.dataset)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: client,
- }
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- summary := mockGooseFSReportSummary()
- return summary, "", nil
- })
- defer patch1.Reset()
- gotShould, err := e.shouldMountUFS()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.shouldMountUFS() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotShould != tt.wantShould {
- t.Errorf("GooseFSEngine.shouldMountUFS() = %v, want %v", gotShould, tt.wantShould)
- }
- })
- }
-}
-
-func TestGetMounts(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantResultInCtx []string
- wantResultHaveMounted []string
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/spec",
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/spec",
- },
- },
- },
- Status: datav1alpha1.DatasetStatus{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/status",
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/status",
- },
- },
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantResultInCtx: []string{"/spec", "/spec"},
- wantResultHaveMounted: []string{"/status", "/status"},
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, tt.fields.runtime, tt.fields.dataset)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: client,
- }
- var goosefsFileUtils operations.GooseFSFileUtils
- patch1 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Ready", func(_ operations.GooseFSFileUtils) bool {
- return true
- })
- defer patch1.Reset()
-
- gotResultInCtx, gotResultHaveMounted, err := e.getMounts()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.getMounts() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(gotResultInCtx, tt.wantResultInCtx) {
- t.Errorf("GooseFSEngine.getMounts() gotResultInCtx = %v, want %v", gotResultInCtx, tt.wantResultInCtx)
- }
- if !reflect.DeepEqual(gotResultHaveMounted, tt.wantResultHaveMounted) {
- t.Errorf("GooseFSEngine.getMounts() gotResultHaveMounted = %v, want %v", gotResultHaveMounted, tt.wantResultHaveMounted)
- }
- })
- }
-}
-
-func TestContainsString(t *testing.T) {
- type args struct {
- s []string
- v string
- }
- tests := []struct {
- name string
- args args
- want bool
- }{
- {
- name: "test",
- args: args{
- s: []string{"1", "2"},
- v: "1",
- },
- want: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := ContainsString(tt.args.s, tt.args.v); got != tt.want {
- t.Errorf("ContainsString() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestProcessUpdatingUFS(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "test0",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/spec",
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/spec",
- },
- },
- },
- Status: datav1alpha1.DatasetStatus{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/status",
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/status",
- },
- },
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- },
- {
- name: "test1",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "hbase",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "hbase",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/spec",
- Options: map[string]string{"fs.cosn.bucket.region": "ap-shanghai",
- "fs.cosn.impl": "org.apache.hadoop.fs.CosFileSystem",
- "fs.AbstractFileSystem.cosn.impl": "org.apache.hadoop.fs.CosN",
- "fs.cos.app.id": "1251707795"},
- EncryptOptions: []datav1alpha1.EncryptOption{
- {
- Name: "access-key",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "test0",
- Key: "access-key",
- }},
- }, {
- Name: "secret-key",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "test0",
- Key: "secret-key",
- }},
- }, {
- Name: "metaurl",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "test0",
- Key: "metaurl",
- }},
- },
- },
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/spec",
- },
- },
- },
- Status: datav1alpha1.DatasetStatus{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/status",
- },
- },
- },
- },
- name: "hbase",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- },
- {
- name: "test2",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "hadoop",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "hadoop",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/spec",
- Options: map[string]string{"fs.cosn.bucket.region": "ap-shanghai",
- "fs.cosn.impl": "org.apache.hadoop.fs.CosFileSystem",
- "fs.AbstractFileSystem.cosn.impl": "org.apache.hadoop.fs.CosN",
- "fs.cos.app.id": "1251707795"},
- EncryptOptions: []datav1alpha1.EncryptOption{
- {
- Name: "access-key",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "test0",
- Key: "access-key",
- }},
- }, {
- Name: "secret-key",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "test0",
- Key: "secret-key",
- }},
- }, {
- Name: "metaurl",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "test0",
- Key: "metaurl",
- }},
- },
- },
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/spec",
- },
- },
- },
- Status: datav1alpha1.DatasetStatus{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/status",
- },
- {
- Name: "test1",
- MountPoint: "cos://test1",
- Path: "/status",
- },
- {
- Name: "test2",
- MountPoint: "cos://test2",
- Path: "/status",
- },
- },
- },
- },
- name: "hadoop",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- secret := corev1.Secret{
- ObjectMeta: v1.ObjectMeta{
- Name: "test0",
- Namespace: "default",
- },
- Data: map[string][]byte{
- "fs.cosn.userinfo.secretKey": []byte("key"),
- "fs.cosn.userinfo.secretId": []byte("id"),
- },
- }
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, tt.fields.runtime, tt.fields.dataset, &secret)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: client,
- }
- var goosefsFileUtils operations.GooseFSFileUtils
- patch1 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Ready", func(_ operations.GooseFSFileUtils) bool {
- return true
- })
- defer patch1.Reset()
- patch2 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Mount", func(_ operations.GooseFSFileUtils, goosefsPath string,
- ufsPath string,
- options map[string]string,
- readOnly bool,
- shared bool) error {
- return nil
- })
- defer patch2.Reset()
- patch3 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "UnMount", func(_ operations.GooseFSFileUtils, goosefsPath string) error {
- return nil
- })
- defer patch3.Reset()
- patch4 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "LoadMetadataWithoutTimeout", func(_ operations.GooseFSFileUtils, goosefsPath string) error {
- return nil
- })
- defer patch4.Reset()
-
- ufs := e.ShouldUpdateUFS()
- err := utils.UpdateMountStatus(client, tt.fields.name, tt.fields.namespace, datav1alpha1.UpdatingDatasetPhase)
- if err != nil {
- t.Error("GooseFSEngine.UpdateMountStatus()")
- }
- if err := e.processUpdatingUFS(ufs); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.processUpdatingUFS() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestMountUFS(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/spec",
- },
- },
- },
- Status: datav1alpha1.DatasetStatus{
- Mounts: []datav1alpha1.Mount{
- {
- Name: "test0",
- MountPoint: "cos://test0",
- Path: "/status",
- },
- },
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, tt.fields.runtime, tt.fields.dataset)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: client,
- }
- var goosefsFileUtils operations.GooseFSFileUtils
- patch1 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Ready", func(_ operations.GooseFSFileUtils) bool {
- return true
- })
- defer patch1.Reset()
- patch2 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Mount", func(_ operations.GooseFSFileUtils, goosefsPath string,
- ufsPath string,
- options map[string]string,
- readOnly bool,
- shared bool) error {
- return nil
- })
- defer patch2.Reset()
- patch3 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "IsMounted", func(_ operations.GooseFSFileUtils, goosefsPath string,
- ) (bool, error) {
- return false, nil
- })
- defer patch3.Reset()
- if err := e.mountUFS(); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.mountUFS() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestGenUFSMountOptions(t *testing.T) {
- type fields struct {
- name string
- namespace string
- Log logr.Logger
- }
- type args struct {
- m datav1alpha1.Mount
- pm map[string]string
- pme []datav1alpha1.EncryptOption
- }
- tests := []struct {
- name string
- fields fields
- args args
- want map[string]string
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- args: args{
- pm: map[string]string{
- "key1": "value1",
- },
- pme: []datav1alpha1.EncryptOption{
- {
- Name: "key2",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "mysecret",
- Key: "key2",
- },
- },
- },
- },
- m: datav1alpha1.Mount{
- Options: map[string]string{"fs.cosn.bucket.region": "ap-shanghai",
- "fs.cosn.impl": "org.apache.hadoop.fs.CosFileSystem",
- "fs.AbstractFileSystem.cosn.impl": "org.apache.hadoop.fs.CosN",
- "fs.cos.app.id": "1251707795"},
- EncryptOptions: []datav1alpha1.EncryptOption{
- {
- Name: "fs.cosn.userinfo.secretKey",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "mysecret",
- Key: "fs.cosn.userinfo.secretKey",
- },
- },
- },
- {
- Name: "fs.cosn.userinfo.secretId",
- ValueFrom: datav1alpha1.EncryptOptionSource{
- SecretKeyRef: datav1alpha1.SecretKeySelector{
- Name: "mysecret",
- Key: "fs.cosn.userinfo.secretId",
- },
- },
- },
- },
- },
- },
- want: map[string]string{"fs.cosn.bucket.region": "ap-shanghai",
- "fs.cosn.impl": "org.apache.hadoop.fs.CosFileSystem",
- "fs.AbstractFileSystem.cosn.impl": "org.apache.hadoop.fs.CosN",
- "fs.cos.app.id": "1251707795",
- "fs.cosn.userinfo.secretKey": "key",
- "fs.cosn.userinfo.secretId": "id",
- "key1": "value1",
- "key2": "value2"},
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- secret := corev1.Secret{
- ObjectMeta: v1.ObjectMeta{
- Name: "mysecret",
- Namespace: "default",
- },
- Data: map[string][]byte{
- "fs.cosn.userinfo.secretKey": []byte("key"),
- "fs.cosn.userinfo.secretId": []byte("id"),
- "key2": []byte("value2"),
- },
- }
- testObjs := []runtime.Object{}
- testObjs = append(testObjs, &secret)
- client := fake.NewFakeClientWithScheme(testScheme, testObjs...)
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: client,
- }
- got, err := e.genUFSMountOptions(tt.args.m, tt.args.pm, tt.args.pme)
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.genUFSMountOptions() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("GooseFSEngine.genUFSMountOptions() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/pkg/ddc/goosefs/ufs_test.go b/pkg/ddc/goosefs/ufs_test.go
deleted file mode 100644
index 6f73fdb63e0..00000000000
--- a/pkg/ddc/goosefs/ufs_test.go
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "testing"
-
- "reflect"
-
- . "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/goosefs/operations"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "github.com/go-logr/logr"
- corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func mockExecCommandInContainerForTotalStorageBytes() (stdout string, stderr string, err error) {
- r := `File Count Folder Count Folder Size
- 50000 1000 6706560319`
- return r, "", nil
-}
-
-func mockExecCommandInContainerForTotalFileNums() (stdout string, stderr string, err error) {
- r := `Master.FilesCompleted (Type: COUNTER, Value: 1,331,167)`
- return r, "", nil
-}
-
-func TestUsedStorageBytes(t *testing.T) {
- type fields struct {
- }
- tests := []struct {
- name string
- fields fields
- wantValue int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{},
- wantValue: 0,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{}
- gotValue, err := e.UsedStorageBytes()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.UsedStorageBytes() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotValue != tt.wantValue {
- t.Errorf("GooseFSEngine.UsedStorageBytes() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
-func TestFreeStorageBytes(t *testing.T) {
- type fields struct {
- }
- tests := []struct {
- name string
- fields fields
- wantValue int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{},
- wantValue: 0,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{}
- gotValue, err := e.FreeStorageBytes()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.FreeStorageBytes() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotValue != tt.wantValue {
- t.Errorf("GooseFSEngine.FreeStorageBytes() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
-func TestTotalStorageBytes(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- }
- tests := []struct {
- name string
- fields fields
- wantValue int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- },
- },
- },
- wantValue: 6706560319,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- }
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- stdout, stderr, err := mockExecCommandInContainerForTotalStorageBytes()
- return stdout, stderr, err
- })
- defer patch1.Reset()
- gotValue, err := e.TotalStorageBytes()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.TotalStorageBytes() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotValue != tt.wantValue {
- t.Errorf("GooseFSEngine.TotalStorageBytes() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
-func TestTotalFileNums(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- }
- tests := []struct {
- name string
- fields fields
- wantValue int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- },
- },
- },
- wantValue: 1331167,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- }
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- stdout, stderr, err := mockExecCommandInContainerForTotalFileNums()
- return stdout, stderr, err
- })
- defer patch1.Reset()
- gotValue, err := e.TotalFileNums()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.TotalFileNums() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotValue != tt.wantValue {
- t.Errorf("GooseFSEngine.TotalFileNums() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
-func TestShouldCheckUFS(t *testing.T) {
- tests := []struct {
- name string
- wantShould bool
- wantErr bool
- }{
- {
- name: "test",
- wantShould: true,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{}
- gotShould, err := e.ShouldCheckUFS()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.ShouldCheckUFS() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotShould != tt.wantShould {
- t.Errorf("GooseFSEngine.ShouldCheckUFS() = %v, want %v", gotShould, tt.wantShould)
- }
- })
- }
-}
-
-func TestPrepareUFS(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- MetadataSyncDoneCh chan base.MetadataSyncResult
- }
- tests := []struct {
- name string
- fields fields
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{},
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- MountPoint: "cosn://imagenet-1234567/",
- },
- },
- DataRestoreLocation: &datav1alpha1.DataRestoreLocation{
- Path: "local:///tmp/restore",
- NodeName: "192.168.0.1",
- },
- },
- Status: datav1alpha1.DatasetStatus{
- UfsTotal: "",
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.dataset)
- _ = corev1.AddToScheme(s)
- mockClient := fake.NewFakeClientWithScheme(s, tt.fields.runtime, tt.fields.dataset)
-
- var gfsUtils operations.GooseFSFileUtils
- patch1 := ApplyMethod(reflect.TypeOf(gfsUtils), "Ready", func(_ operations.GooseFSFileUtils) bool {
- return true
- })
- defer patch1.Reset()
-
- patch2 := ApplyMethod(reflect.TypeOf(gfsUtils), "IsMounted", func(_ operations.GooseFSFileUtils, goosefsPath string) (bool, error) {
- return false, nil
- })
- defer patch2.Reset()
-
- patch3 := ApplyMethod(reflect.TypeOf(gfsUtils), "Mount", func(_ operations.GooseFSFileUtils, goosefsPath string, ufsPath string, options map[string]string, readOnly bool, shared bool) error {
- return nil
- })
- defer patch3.Reset()
-
- patch4 := ApplyMethod(reflect.TypeOf(gfsUtils), "QueryMetaDataInfoIntoFile", func(_ operations.GooseFSFileUtils, key operations.KeyOfMetaDataFile, filename string) (string, error) {
- return "10000", nil
- })
- defer patch4.Reset()
-
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: mockClient,
- MetadataSyncDoneCh: tt.fields.MetadataSyncDoneCh,
- }
- if err := e.PrepareUFS(); (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.PrepareUFS() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
-func TestShouldUpdateUFS(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- wantAdd []string
- wantRemove []string
- }{
- {
- name: "test0",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- MountPoint: "cosn://imagenet-1234567/",
- },
- },
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantAdd: []string{"/"},
- wantRemove: []string{},
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.dataset)
- _ = corev1.AddToScheme(s)
- mockClient := fake.NewFakeClientWithScheme(s, tt.fields.runtime, tt.fields.dataset)
-
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: mockClient,
- }
-
- gotUfsToUpdate := e.ShouldUpdateUFS()
- if !reflect.DeepEqual(gotUfsToUpdate.ToAdd(), tt.wantAdd) {
- t.Errorf("GooseFSEngine.ShouldUpdateUFS add = %v, want %v", gotUfsToUpdate.ToAdd(), tt.wantAdd)
- }
-
- if !reflect.DeepEqual(gotUfsToUpdate.ToRemove(), tt.wantRemove) {
- t.Errorf("GooseFSEngine.ShouldUpdateUFS() remove = %v, want %v", gotUfsToUpdate.ToRemove(), tt.wantRemove)
- }
- })
- }
-}
-
-func TestUpdateOnUFSChange(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- dataset *datav1alpha1.Dataset
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- wantUpdateReady bool
- wantErr bool
- should bool
- notMount bool
- Ready bool
- }{
- {
- name: "test0",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- MountPoint: "cosn://imagenet-1234567/",
- },
- },
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- wantUpdateReady: true,
- should: true,
- Ready: true,
- },
- {
- name: "test1",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "hadoop",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "hadoop",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- MountPoint: "cosn://imagenet-1234567/",
- },
- },
- },
- },
- name: "hadoop",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: false,
- wantUpdateReady: false,
- should: false,
- Ready: true,
- },
- {
- name: "test2",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "hbase",
- Namespace: "default",
- },
- },
- dataset: &datav1alpha1.Dataset{
- ObjectMeta: v1.ObjectMeta{
- Name: "hbase",
- Namespace: "default",
- },
- Spec: datav1alpha1.DatasetSpec{
- Mounts: []datav1alpha1.Mount{
- {
- MountPoint: "cosn://imagenet-1234567/",
- },
- },
- },
- },
- name: "hbase",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- wantErr: true,
- wantUpdateReady: false,
- should: true,
- notMount: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.dataset)
- _ = corev1.AddToScheme(s)
- mockClient := fake.NewFakeClientWithScheme(s, tt.fields.runtime, tt.fields.dataset)
-
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- Client: mockClient,
- }
-
- ufs := utils.NewUFSToUpdate(tt.fields.dataset)
- patch1 := ApplyMethod(reflect.TypeOf(ufs), "ShouldUpdate",
- func(_ *utils.UFSToUpdate) bool {
- return tt.should
- })
- defer patch1.Reset()
-
- var goosefsFileUtils operations.GooseFSFileUtils
- patch2 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Ready", func(_ operations.GooseFSFileUtils) bool {
- return tt.Ready
- })
- defer patch2.Reset()
-
- patch3 := ApplyMethod(reflect.TypeOf(goosefsFileUtils), "Mount", func(_ operations.GooseFSFileUtils, goosefsPath string,
- ufsPath string,
- options map[string]string,
- readOnly bool,
- shared bool) error {
- if tt.notMount {
- return fmt.Errorf("Mount Error")
- } else {
- return nil
- }
- })
- defer patch3.Reset()
- gotUpdateReady, err := e.UpdateOnUFSChange(ufs)
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.UpdateOnUFSChange() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotUpdateReady != tt.wantUpdateReady {
- t.Errorf("GooseFSEngine.UpdateOnUFSChange() = %v, want %v", gotUpdateReady, tt.wantUpdateReady)
- }
- })
- }
-}
diff --git a/pkg/ddc/goosefs/utils.go b/pkg/ddc/goosefs/utils.go
deleted file mode 100644
index fa7b749db7b..00000000000
--- a/pkg/ddc/goosefs/utils.go
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
-Copyright 2022 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
-
- appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/types"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- cdatabackup "github.com/fluid-cloudnative/fluid/pkg/databackup"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/docker"
-)
-
-func (e *GooseFSEngine) getDataSetFileNum() (string, error) {
- fileCount, err := e.TotalFileNums()
- if err != nil {
- return "", err
- }
- return strconv.FormatInt(fileCount, 10), err
-}
-
-// getRuntime gets the goosefs runtime
-func (e *GooseFSEngine) getRuntime() (*datav1alpha1.GooseFSRuntime, error) {
-
- key := types.NamespacedName{
- Name: e.name,
- Namespace: e.namespace,
- }
-
- var runtime datav1alpha1.GooseFSRuntime
- if err := e.Get(context.TODO(), key, &runtime); err != nil {
- return nil, err
- }
- return &runtime, nil
-}
-
-func (e *GooseFSEngine) getMasterStatefulset(name string, namespace string) (master *appsv1.StatefulSet, err error) {
- master = &appsv1.StatefulSet{}
- err = e.Client.Get(context.TODO(), types.NamespacedName{
- Namespace: namespace,
- Name: name,
- }, master)
-
- return master, err
-}
-
-func (e *GooseFSEngine) getDaemonset(name string, namespace string) (daemonset *appsv1.DaemonSet, err error) {
- daemonset = &appsv1.DaemonSet{}
- err = e.Client.Get(context.TODO(), types.NamespacedName{
- Name: name,
- Namespace: namespace,
- }, daemonset)
-
- return daemonset, err
-}
-
-// func (e *GooseFSEngine) getConfigMap(name string, namespace string) (configMap *corev1.ConfigMap, err error) {
-// configMap = &corev1.ConfigMap{}
-// err = e.Client.Get(context.TODO(), types.NamespacedName{
-// Name: name,
-// Namespace: namespace,
-// }, configMap)
-
-// return configMap, err
-// }
-
-func (e *GooseFSEngine) getMasterPodInfo() (podName string, containerName string) {
- podName = e.name + "-master-0"
- containerName = "goosefs-master"
-
- return
-}
-
-func (e *GooseFSEngine) getMasterName() (dsName string) {
- return e.name + "-master"
-}
-
-func (e *GooseFSEngine) getWorkerName() (dsName string) {
- return e.name + "-worker"
-}
-
-func (e *GooseFSEngine) getFuseName() (dsName string) {
- return e.name + "-fuse"
-}
-
-// getRunningPodsOfDaemonset gets worker pods
-//func (e *GooseFSEngine) getRunningPodsOfDaemonset(dsName, namespace string) (pods []corev1.Pod, err error) {
-//
-// ds, err := e.getDaemonset(dsName, namespace)
-// if err != nil {
-// return pods, err
-// }
-//
-// selector := ds.Spec.Selector.MatchLabels
-// // labels := selector.MatchLabels
-//
-// pods = []corev1.Pod{}
-// podList := &corev1.PodList{}
-// err = e.Client.List(context.TODO(), podList, options.InNamespace(namespace), options.MatchingLabels(selector))
-// if err != nil {
-// return pods, err
-// }
-//
-// for _, pod := range podList.Items {
-// if !podutil.IsPodReady(&pod) {
-// e.Log.Info("Skip the pod because it's not ready", "pod", pod.Name, "namespace", pod.Namespace)
-// continue
-// }
-// pods = append(pods, pod)
-// }
-//
-// return pods, nil
-//
-//}
-
-func (e *GooseFSEngine) getMountPoint() (mountPath string) {
- mountRoot := getMountRoot()
- e.Log.Info("mountRoot", "path", mountRoot)
- return fmt.Sprintf("%s/%s/%s/goosefs-fuse", mountRoot, e.namespace, e.name)
-}
-
-func (e *GooseFSEngine) getInitUserDir() string {
- dir := fmt.Sprintf("/tmp/fluid/%s/%s", e.namespace, e.name)
- e.Log.Info("Generate InitUser dir")
- return dir
-}
-
-// Init tierPaths when running as a non-root user: chmod on each path
-// Example: "/dev/shm:/var/lib/docker/goosefs:/dev/ssd"
-func (e *GooseFSEngine) getInitTierPathsEnv(runtime *datav1alpha1.GooseFSRuntime) string {
- var tierPaths []string
- for _, level := range runtime.Spec.TieredStore.Levels {
- paths := strings.Split(level.Path, ",")
- tierPaths = append(tierPaths, paths...)
- }
- return strings.Join(tierPaths, ":")
-}
-
-// getMountRoot returns the default path, if it's not set
-func getMountRoot() (path string) {
- path, err := utils.GetMountRoot()
- if err != nil {
- path = "/" + common.GooseFSRuntime
- } else {
- path = path + "/" + common.GooseFSRuntime
- }
- // e.Log.Info("Mount root", "path", path)
- return
-
-}
-
-func isPortInUsed(port int, usedPorts []int) bool {
- for _, usedPort := range usedPorts {
- if port == usedPort {
- return true
- }
- }
- return false
-}
-
-func (e *GooseFSEngine) parseRuntimeImage(image string, tag string, imagePullPolicy string) (string, string, string) {
- if len(imagePullPolicy) == 0 {
- imagePullPolicy = common.DefaultImagePullPolicy
- }
-
- if len(image) == 0 {
- image = docker.GetImageRepoFromEnv(common.GooseFSRuntimeImageEnv)
- if len(image) == 0 {
- runtimeImageInfo := strings.Split(common.DefaultGooseFSRuntimeImage, ":")
- if len(runtimeImageInfo) < 1 {
- panic("invalid default goosefs runtime image!")
- } else {
- image = runtimeImageInfo[0]
- }
- }
- }
-
- if len(tag) == 0 {
- tag = docker.GetImageTagFromEnv(common.GooseFSRuntimeImageEnv)
- if len(tag) == 0 {
- runtimeImageInfo := strings.Split(common.DefaultGooseFSRuntimeImage, ":")
- if len(runtimeImageInfo) < 2 {
- panic("invalid default goosefs runtime image!")
- } else {
- tag = runtimeImageInfo[1]
- }
- }
- }
-
- return image, tag, imagePullPolicy
-}
-
-func (e *GooseFSEngine) parseFuseImage(image string, tag string, imagePullPolicy string) (string, string, string) {
- if len(imagePullPolicy) == 0 {
- imagePullPolicy = common.DefaultImagePullPolicy
- }
-
- if len(image) == 0 {
- image = docker.GetImageRepoFromEnv(common.GooseFSFuseImageEnv)
- if len(image) == 0 {
- fuseImageInfo := strings.Split(common.DefaultGooseFSFuseImage, ":")
- if len(fuseImageInfo) < 1 {
- panic("invalid default goosefs fuse image!")
- } else {
- image = fuseImageInfo[0]
- }
- }
- }
-
- if len(tag) == 0 {
- tag = docker.GetImageTagFromEnv(common.GooseFSFuseImageEnv)
- if len(tag) == 0 {
- fuseImageInfo := strings.Split(common.DefaultGooseFSFuseImage, ":")
- if len(fuseImageInfo) < 2 {
- panic("invalid default init image!")
- } else {
- tag = fuseImageInfo[1]
- }
- }
- }
-
- return image, tag, imagePullPolicy
-}
-
-func (e *GooseFSEngine) GetMetadataInfoFile() string {
- return cdatabackup.GooseFSBackupPathPod + "/" + e.GetMetadataInfoFileName()
-}
-func (e *GooseFSEngine) GetMetadataFileName() string {
- return "metadata-backup-" + e.name + "-" + e.namespace + ".gz"
-}
-func (e *GooseFSEngine) GetMetadataInfoFileName() string {
- return e.name + "-" + e.namespace + ".yaml"
-}
-
-// GetWorkerUsedCapacity gets cache capacity usage for each worker as a map.
-// It parses result from stdout when executing `goosefs fsadmin report capacity` command
-// and extracts worker name(IP or hostname) along with used capacity for that worker
-func (e *GooseFSEngine) GetWorkerUsedCapacity() (map[string]int64, error) {
- // 2. run clean action
- capacityReport, err := e.reportCapacity()
- if err != nil {
- return nil, err
- }
-
- // An Example of capacityReport:
- /////////////////////////////////////////////////////////////////
- // Capacity information for all workers:
- // Total Capacity: 4096.00MB
- // Tier: MEM Size: 4096.00MB
- // Used Capacity: 443.89MB
- // Tier: MEM Size: 443.89MB
- // Used Percentage: 10%
- // Free Percentage: 90%
- //
- // Worker Name Last Heartbeat Storage MEM
- // 192.168.1.147 0 capacity 2048.00MB
- // used 443.89MB (21%)
- // 192.168.1.146 0 capacity 2048.00MB
- // used 0B (0%)
- /////////////////////////////////////////////////////////////////
- lines := strings.Split(capacityReport, "\n")
- startIdx := -1
- for i, line := range lines {
- if strings.HasPrefix(line, "Worker Name") {
- startIdx = i + 1
- break
- }
- }
-
- if startIdx == -1 {
- return nil, fmt.Errorf("can't parse result form goosefs fsadmin report capacity")
- }
-
- worker2UsedCapacityMap := make(map[string]int64)
- lenLines := len(lines)
- for lineIdx := startIdx; lineIdx < lenLines; lineIdx += 2 {
- // e.g. ["192.168.1.147", "0", "capacity", "2048.00MB", "used", "443.89MB", "(21%)"]
- workerInfoFields := append(strings.Fields(lines[lineIdx]), strings.Fields(lines[lineIdx+1])...)
- workerName := workerInfoFields[0]
- usedCapacity, _ := utils.FromHumanSize(workerInfoFields[5])
- worker2UsedCapacityMap[workerName] = usedCapacity
- }
-
- return worker2UsedCapacityMap, nil
-}
-
-// lookUpUsedCapacity looks up used capacity for a given node in a map.
-func lookUpUsedCapacity(node v1.Node, usedCapacityMap map[string]int64) int64 {
- var ip, hostname string
- for _, addr := range node.Status.Addresses {
- if addr.Type == v1.NodeInternalIP {
- ip = addr.Address
- }
- if addr.Type == v1.NodeInternalDNS {
- hostname = addr.Address
- }
- }
-
- if len(ip) != 0 {
- if usedCapacity, found := usedCapacityMap[ip]; found {
- return usedCapacity
- }
- }
-
- if len(hostname) != 0 {
- if usedCapacity, found := usedCapacityMap[hostname]; found {
- return usedCapacity
- }
- }
- // no info stored in GooseFS master. Scale in such node first.
- return 0
-}
diff --git a/pkg/ddc/goosefs/utils_test.go b/pkg/ddc/goosefs/utils_test.go
deleted file mode 100644
index 54591463c36..00000000000
--- a/pkg/ddc/goosefs/utils_test.go
+++ /dev/null
@@ -1,1021 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "fmt"
- "os"
- "reflect"
- "testing"
-
- . "github.com/agiledragon/gomonkey/v2"
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- "github.com/fluid-cloudnative/fluid/pkg/utils/kubeclient"
- "github.com/go-logr/logr"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func TestIsFluidNativeScheme(t *testing.T) {
-
- var tests = []struct {
- mountPoint string
- expect bool
- }{
- {"local:///test",
- true},
- {
- "pvc://test",
- true,
- }, {
- "oss://test",
- false,
- },
- }
- for _, test := range tests {
- result := common.IsFluidNativeScheme(test.mountPoint)
- if result != test.expect {
- t.Errorf("expect %v for %s, but got %v", test.expect, test.mountPoint, result)
- }
- }
-}
-
-func TestGooseFSEngine_getInitUserDir(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- runtimeType string
- Log logr.Logger
- Client client.Client
- gracefulShutdownLimits int32
- retryShutdown int32
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- {name: "test",
- fields: fields{runtime: &datav1alpha1.GooseFSRuntime{
- TypeMeta: v1.TypeMeta{},
- ObjectMeta: v1.ObjectMeta{},
- Spec: datav1alpha1.GooseFSRuntimeSpec{},
- Status: datav1alpha1.RuntimeStatus{},
- }, name: "test", namespace: "default", runtimeType: "goosefs", Log: fake.NullLogger()},
- want: fmt.Sprintf("/tmp/fluid/%s/%s", "default", "test"),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- runtimeType: tt.fields.runtimeType,
- Log: tt.fields.Log,
- Client: tt.fields.Client,
- gracefulShutdownLimits: tt.fields.gracefulShutdownLimits,
- retryShutdown: tt.fields.retryShutdown,
- }
- if got := e.getInitUserDir(); got != tt.want {
- t.Errorf("GooseFSEngine.getInitUserDir() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGooseFSEngine_getInitUsersArgs(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- Log logr.Logger
- Client client.Client
- }
- f := func(s int64) *int64 {
- return &s
- }
- tests := []struct {
- name string
- fields fields
- want []string
- }{
- {name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- TypeMeta: v1.TypeMeta{},
- ObjectMeta: v1.ObjectMeta{},
- Spec: datav1alpha1.GooseFSRuntimeSpec{RunAs: &datav1alpha1.User{UID: f(int64(1000)), GID: f(int64(1000)),
- UserName: "test", GroupName: "a"}},
- Status: datav1alpha1.RuntimeStatus{},
- },
- },
- want: []string{"1000:test:1000", "1000:a"}},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := utils.GetInitUsersArgs(tt.fields.runtime.Spec.RunAs)
- var ne bool
- for i, src := range got {
- if src != tt.want[i] {
- ne = false
- }
- }
- if ne {
- t.Errorf("GooseFSEngine.getInitUsersArgs() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestMountRootWithEnvSet(t *testing.T) {
- var testCases = []struct {
- input string
- expected string
- }{
- {"/var/lib/mymount", "/var/lib/mymount/goosefs"},
- }
- for _, tc := range testCases {
- t.Setenv(utils.MountRoot, tc.input)
- if tc.expected != getMountRoot() {
- t.Errorf("expected %#v, got %#v",
- tc.expected, getMountRoot())
- }
- }
-}
-
-func TestMountRootWithoutEnvSet(t *testing.T) {
- var testCases = []struct {
- input string
- expected string
- }{
- {"/var/lib/mymount", "/goosefs"},
- }
-
- for _, tc := range testCases {
- _ = os.Unsetenv(utils.MountRoot)
- if tc.expected != getMountRoot() {
- t.Errorf("expected %#v, got %#v",
- tc.expected, getMountRoot())
- }
- }
-}
-func Test_isPortInUsed(t *testing.T) {
- type args struct {
- port int
- usedPorts []int
- }
- tests := []struct {
- name string
- args args
- want bool
- }{
- {name: "test",
- args: args{
- port: 20000,
- usedPorts: []int{20000, 20001, 20002, 20003, 20004, 20005, 20006, 20007, 20008},
- },
- want: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := isPortInUsed(tt.args.port, tt.args.usedPorts); got != tt.want {
- t.Errorf("isPortInUsed() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_lookUpUsedCapacity(t *testing.T) {
- type args struct {
- node corev1.Node
- usedCapacityMap map[string]int64
- }
-
- internalIP := "192.168.1.147"
- var usageForInternalIP int64 = 1024
-
- internalHost := "slave001"
- var usageForInternalHost int64 = 4096
-
- usedCapacityMap := map[string]int64{}
- usedCapacityMap[internalIP] = usageForInternalIP
- usedCapacityMap[internalHost] = usageForInternalHost
-
- tests := []struct {
- name string
- args args
- want int64
- }{
- {
- name: "test_lookUpUsedCapacity_ip",
- args: args{
- node: corev1.Node{
- Status: corev1.NodeStatus{
- Addresses: []corev1.NodeAddress{
- {
- Type: corev1.NodeInternalIP,
- Address: internalIP,
- },
- },
- },
- },
- usedCapacityMap: usedCapacityMap,
- },
- want: usageForInternalIP,
- },
- {
- name: "test_lookUpUsedCapacity_hostname",
- args: args{
- node: corev1.Node{
- Status: corev1.NodeStatus{
- Addresses: []corev1.NodeAddress{
- {
- Type: corev1.NodeInternalDNS,
- Address: internalHost,
- },
- },
- },
- },
- usedCapacityMap: usedCapacityMap,
- },
- want: usageForInternalHost,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := lookUpUsedCapacity(tt.args.node, tt.args.usedCapacityMap); got != tt.want {
- t.Errorf("lookUpUsedCapacity() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func mockExecCommandInContainerForGetFileCount() (stdout string, stderr string, err error) {
- r := `Master.FilesCompleted (Type: COUNTER, Value: 1,000)`
- return r, "", nil
-}
-
-func mockExecCommandInContainerForWorkerUsedCapacity() (stdout string, stderr string, err error) {
- r := `Capacity information for all workers:
- Total Capacity: 4096.00MB
- Tier: MEM Size: 4096.00MB
- Used Capacity: 443.89MB
- Tier: MEM Size: 443.89MB
- Used Percentage: 10%
- Free Percentage: 90%
-
-Worker Name Last Heartbeat Storage MEM
-192.168.1.147 0 capacity 2048.00MB
- used 443.89MB (21%)
-192.168.1.146 0 capacity 2048.00MB
- used 0B (0%)`
- return r, "", nil
-}
-
-func TestGetDataSetFileNum(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- want string
- wantErr bool
- }{
- {
- name: "test0",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- want: "1000",
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- }
-
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- stdout, stderr, err := mockExecCommandInContainerForGetFileCount()
- return stdout, stderr, err
- })
- defer patch1.Reset()
-
- got, err := e.getDataSetFileNum()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.getDataSetFileNum() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("GooseFSEngine.getDataSetFileNum() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetRuntime(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- want *datav1alpha1.GooseFSRuntime
- wantErr bool
- }{
- // TODO: Add test cases.
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- name: "spark",
- namespace: "default",
- },
- want: &datav1alpha1.GooseFSRuntime{
- TypeMeta: v1.TypeMeta{
- Kind: "GooseFSRuntime",
- APIVersion: "data.fluid.io/v1alpha1",
- },
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- _ = corev1.AddToScheme(s)
- mockClient := fake.NewFakeClientWithScheme(s, tt.want)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Client: mockClient,
- }
- got, err := e.getRuntime()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.getRuntime() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("GooseFSEngine.getRuntime() = %#v, want %#v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetMasterStatefulset(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- want *appsv1.StatefulSet
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark-master",
- Namespace: "default",
- },
- },
- name: "spark-master",
- namespace: "default",
- },
- want: &appsv1.StatefulSet{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark-master",
- Namespace: "default",
- },
- TypeMeta: v1.TypeMeta{
- Kind: "StatefulSet",
- APIVersion: "apps/v1",
- },
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(appsv1.SchemeGroupVersion, &appsv1.StatefulSet{})
- _ = corev1.AddToScheme(s)
- mockClient := fake.NewFakeClientWithScheme(s, tt.fields.runtime, tt.want)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Client: mockClient,
- }
- gotMaster, err := e.getMasterStatefulset(tt.fields.name, tt.fields.namespace)
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.getMasterStatefulset() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(gotMaster, tt.want) {
- t.Errorf("GooseFSEngine.getMasterStatefulset() = %#v, want %#v", gotMaster, tt.want)
- }
- })
- }
-}
-
-func TestGetDaemonset(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Client client.Client
- }
- tests := []struct {
- name string
- fields fields
- wantDaemonset *appsv1.DaemonSet
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark-master",
- Namespace: "default",
- },
- },
- name: "spark-master",
- namespace: "default",
- },
- wantDaemonset: &appsv1.DaemonSet{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark-master",
- Namespace: "default",
- },
- TypeMeta: v1.TypeMeta{
- Kind: "DaemonSet",
- APIVersion: "apps/v1",
- },
- },
- wantErr: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(appsv1.SchemeGroupVersion, &appsv1.DaemonSet{})
- _ = corev1.AddToScheme(s)
- mockClient := fake.NewFakeClientWithScheme(s, tt.fields.runtime, tt.wantDaemonset)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Client: mockClient,
- }
- gotDaemonset, err := e.getDaemonset(tt.fields.name, tt.fields.namespace)
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.getDaemonset() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(gotDaemonset, tt.wantDaemonset) {
- t.Errorf("GooseFSEngine.getDaemonset() = %#v, want %#v", gotDaemonset, tt.wantDaemonset)
- }
- })
- }
-}
-
-func TestGetMasterPodInfo(t *testing.T) {
- type fields struct {
- name string
- }
- tests := []struct {
- name string
- fields fields
- wantPodName string
- wantContainerName string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- },
- wantPodName: "spark-master-0",
- wantContainerName: "goosefs-master",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- }
- gotPodName, gotContainerName := e.getMasterPodInfo()
- if gotPodName != tt.wantPodName {
- t.Errorf("GooseFSEngine.getMasterPodInfo() gotPodName = %v, want %v", gotPodName, tt.wantPodName)
- }
- if gotContainerName != tt.wantContainerName {
- t.Errorf("GooseFSEngine.getMasterPodInfo() gotContainerName = %v, want %v", gotContainerName, tt.wantContainerName)
- }
- })
- }
-}
-
-func TestGetMasterStatefulsetName(t *testing.T) {
- type fields struct {
- name string
- }
- tests := []struct {
- name string
- fields fields
- wantDsName string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- },
- wantDsName: "spark-master",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- }
- if gotDsName := e.getMasterName(); gotDsName != tt.wantDsName {
- t.Errorf("GooseFSEngine.getMasterStatefulsetName() = %v, want %v", gotDsName, tt.wantDsName)
- }
- })
- }
-}
-
-func TestGetWorkerDaemonsetName(t *testing.T) {
- type fields struct {
- name string
- }
- tests := []struct {
- name string
- fields fields
- wantDsName string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- },
- wantDsName: "spark-worker",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- }
- if gotDsName := e.getWorkerName(); gotDsName != tt.wantDsName {
- t.Errorf("GooseFSEngine.getWorkerDaemonsetName() = %v, want %v", gotDsName, tt.wantDsName)
- }
- })
- }
-}
-
-func TestGetFuseDaemonsetName(t *testing.T) {
- type fields struct {
- name string
- }
- tests := []struct {
- name string
- fields fields
- wantDsName string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- },
- wantDsName: "spark-fuse",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- }
- if gotDsName := e.getFuseName(); gotDsName != tt.wantDsName {
- t.Errorf("GooseFSEngine.getFuseName() = %v, want %v", gotDsName, tt.wantDsName)
- }
- })
- }
-}
-
-func TestGetMountPoint(t *testing.T) {
- type fields struct {
- name string
- namespace string
- Log logr.Logger
- MountRoot string
- }
- tests := []struct {
- name string
- fields fields
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- MountRoot: "/tmp",
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- Log: tt.fields.Log,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- }
- t.Setenv("MOUNT_ROOT", tt.fields.MountRoot)
- wantMountPath := fmt.Sprintf("%s/%s/%s/goosefs-fuse", tt.fields.MountRoot+"/goosefs", tt.fields.namespace, e.name)
- if gotMountPath := e.getMountPoint(); gotMountPath != wantMountPath {
- t.Errorf("GooseFSEngine.getMountPoint() = %v, want %v", gotMountPath, wantMountPath)
- }
- })
- }
-}
-
-func TestGetInitTierPathsEnv(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- // TODO: Add test cases.
- {
- name: "test",
- fields: fields{
- &datav1alpha1.GooseFSRuntime{
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- TieredStore: datav1alpha1.TieredStore{
- Levels: []datav1alpha1.Level{
- {
- Path: "/mnt/goosefs0",
- },
- {
- Path: "/mnt/goosefs1",
- },
- },
- },
- },
- },
- },
- want: "/mnt/goosefs0:/mnt/goosefs1",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- }
- if got := e.getInitTierPathsEnv(tt.fields.runtime); got != tt.want {
- t.Errorf("GooseFSEngine.getInitTierPathsEnv() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetMountRoot(t *testing.T) {
- tests := []struct {
- name string
- wantPath string
- }{
- {
- name: "test",
- wantPath: "/tmp/goosefs",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- t.Setenv("MOUNT_ROOT", "/tmp")
- if gotPath := getMountRoot(); gotPath != tt.wantPath {
- t.Errorf("getMountRoot() = %v, want %v", gotPath, tt.wantPath)
- }
- })
- }
-}
-
-func TestParseRuntimeImage(t *testing.T) {
- type args struct {
- image string
- tag string
- imagePullPolicy string
- }
- tests := []struct {
- name string
- args args
- want string
- want1 string
- want2 string
- }{
- {
- name: "test0",
- args: args{
- image: "ccr.ccs.tencentyun.com/qcloud/goosefs",
- tag: "v1.2.0",
- imagePullPolicy: "IfNotPresent",
- },
- want: "ccr.ccs.tencentyun.com/qcloud/goosefs",
- want1: "v1.2.0",
- want2: "IfNotPresent",
- },
- {
- name: "test0",
- args: args{
- image: "",
- tag: "",
- imagePullPolicy: "IfNotPresent",
- },
- want: "ccr.ccs.tencentyun.com/qcloud/goosefs",
- want1: "v1.2.0",
- want2: "IfNotPresent",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{}
- t.Setenv(common.GooseFSRuntimeImageEnv, "ccr.ccs.tencentyun.com/qcloud/goosefs:v1.2.0")
- got, got1, got2 := e.parseRuntimeImage(tt.args.image, tt.args.tag, tt.args.imagePullPolicy)
- if got != tt.want {
- t.Errorf("GooseFSEngine.parseRuntimeImage() got = %v, want %v", got, tt.want)
- }
- if got1 != tt.want1 {
- t.Errorf("GooseFSEngine.parseRuntimeImage() got1 = %v, want %v", got1, tt.want1)
- }
- if got2 != tt.want2 {
- t.Errorf("GooseFSEngine.parseRuntimeImage() got2 = %v, want %v", got2, tt.want2)
- }
- })
- }
-}
-
-func TestParseFuseImage(t *testing.T) {
- type args struct {
- image string
- tag string
- imagePullPolicy string
- }
- tests := []struct {
- name string
- args args
- want string
- want1 string
- want2 string
- }{
- {
- name: "test0",
- args: args{
- image: "ccr.ccs.tencentyun.com/qcloud/goosefs-fuse",
- tag: "v1.2.0",
- imagePullPolicy: "IfNotPresent",
- },
- want: "ccr.ccs.tencentyun.com/qcloud/goosefs-fuse",
- want1: "v1.2.0",
- want2: "IfNotPresent",
- },
- {
- name: "test0",
- args: args{
- image: "",
- tag: "",
- imagePullPolicy: "IfNotPresent",
- },
- want: "ccr.ccs.tencentyun.com/qcloud/goosefs-fuse",
- want1: "v1.2.0",
- want2: "IfNotPresent",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{}
- t.Setenv(common.GooseFSFuseImageEnv, "ccr.ccs.tencentyun.com/qcloud/goosefs-fuse:v1.2.0")
- got, got1, got2 := e.parseFuseImage(tt.args.image, tt.args.tag, tt.args.imagePullPolicy)
- if got != tt.want {
- t.Errorf("GooseFSEngine.parseFuseImage() got = %v, want %v", got, tt.want)
- }
- if got1 != tt.want1 {
- t.Errorf("GooseFSEngine.parseFuseImage() got1 = %v, want %v", got1, tt.want1)
- }
- if got2 != tt.want2 {
- t.Errorf("GooseFSEngine.parseFuseImage() got2 = %v, want %v", got2, tt.want2)
- }
- })
- }
-}
-
-func TestGetMetadataInfoFile(t *testing.T) {
- type fields struct {
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- namespace: "default",
- },
- want: fmt.Sprintf("/goosefs_backups/%s-%s.yaml", "spark", "default"),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- }
- if got := e.GetMetadataInfoFile(); got != tt.want {
- t.Errorf("GooseFSEngine.GetMetadataInfoFile() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetMetadataFileName(t *testing.T) {
- type fields struct {
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- namespace: "default",
- },
- want: fmt.Sprintf("metadata-backup-%s-%s.gz", "spark", "default"),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- }
- if got := e.GetMetadataFileName(); got != tt.want {
- t.Errorf("GooseFSEngine.GetMetadataFileName() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetMetadataInfoFileName(t *testing.T) {
- type fields struct {
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- {
- name: "test",
- fields: fields{
- name: "spark",
- namespace: "default",
- },
- want: fmt.Sprintf("%s-%s.yaml", "spark", "default"),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- }
- if got := e.GetMetadataInfoFileName(); got != tt.want {
- t.Errorf("GooseFSEngine.GetMetadataInfoFileName() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetWorkerUsedCapacity(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- name string
- namespace string
- Log logr.Logger
- }
- tests := []struct {
- name string
- fields fields
- want map[string]int64
- wantErr bool
- }{
- {
- name: "test",
- fields: fields{
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: v1.ObjectMeta{
- Name: "spark",
- Namespace: "default",
- },
- },
- name: "spark",
- namespace: "default",
- Log: fake.NullLogger(),
- },
- want: map[string]int64{"192.168.1.146": 0, "192.168.1.147": 465452400},
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: tt.fields.Log,
- }
-
- patch1 := ApplyFunc(kubeclient.ExecCommandInContainer, func(podName string, containerName string, namespace string, cmd []string) (string, string, error) {
- stdout, stderr, err := mockExecCommandInContainerForWorkerUsedCapacity()
- return stdout, stderr, err
- })
- defer patch1.Reset()
- got, err := e.GetWorkerUsedCapacity()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.GetWorkerUsedCapacity() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("GooseFSEngine.GetWorkerUsedCapacity() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/pkg/ddc/goosefs/validate.go b/pkg/ddc/goosefs/validate.go
deleted file mode 100644
index b55a2952f89..00000000000
--- a/pkg/ddc/goosefs/validate.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2024 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- cruntime "github.com/fluid-cloudnative/fluid/pkg/runtime"
-)
-
-func (e *GooseFSEngine) Validate(ctx cruntime.ReconcileRequestContext) (err error) {
- // XXXEngine.runtimeInfo must have full information about the bound dataset for further reconcilation.
- // getRuntimeInfo() here is a refresh to make sure the information is correctly set
- runtimeInfo, err := e.getRuntimeInfo()
- if err != nil {
- return err
- }
-
- err = base.ValidateRuntimeInfo(runtimeInfo)
- if err != nil {
- return err
- }
-
- // TODO: impl validation logic for GooseFSEngine
- return nil
-}
diff --git a/pkg/ddc/goosefs/worker.go b/pkg/ddc/goosefs/worker.go
deleted file mode 100644
index f67589f4ee0..00000000000
--- a/pkg/ddc/goosefs/worker.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-Copyright 2023 The Fluid Author
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/util/retry"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// SetupWorkers checks the desired and current replicas of workers and makes an update
-// over the status by setting phases and conditions. The function
-// calls for a status update and finally returns error if anything unexpected happens.
-func (e *GooseFSEngine) SetupWorkers() (err error) {
-
- err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- workers, err := ctrl.GetWorkersAsStatefulset(e.Client,
- types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()})
- if err != nil {
- return err
- }
- runtime, err := e.getRuntime()
- if err != nil {
- return err
- }
-
- runtimeToUpdate := runtime.DeepCopy()
- return e.Helper.SetupWorkers(runtimeToUpdate, runtimeToUpdate.Status, workers)
- })
-
- if err != nil {
- _ = utils.LoggingErrorExceptConflict(e.Log, err, "Failed to setup workers", types.NamespacedName{Namespace: e.namespace, Name: e.name})
- return err
- }
-
- return
-}
-
-// ShouldSetupWorkers checks if we need setup the workers
-func (e *GooseFSEngine) ShouldSetupWorkers() (should bool, err error) {
- runtime, err := e.getRuntime()
- if err != nil {
- return
- }
-
- switch runtime.Status.WorkerPhase {
- case datav1alpha1.RuntimePhaseNone:
- should = true
- default:
- should = false
- }
-
- return
-}
-
-// CheckWorkersReady check the workers ready.
-func (e *GooseFSEngine) CheckWorkersReady() (ready bool, err error) {
- getRuntimeFn := func(client client.Client) (base.RuntimeInterface, error) {
- return utils.GetGooseFSRuntime(client, e.name, e.namespace)
- }
-
- ready, err = e.Helper.CheckAndSyncWorkerStatus(getRuntimeFn, types.NamespacedName{Namespace: e.namespace, Name: e.getWorkerName()})
- if err != nil {
- e.Log.Error(err, "fail to check and update worker status")
- return
- }
-
- if !ready {
- e.Log.Info("workers are not ready")
- }
-
- return
-}
-
-// getWorkerSelectors gets the selector of the worker
-func (e *GooseFSEngine) getWorkerSelectors() string {
- labels := map[string]string{
- "release": e.name,
- common.PodRoleType: WokrerPodRole,
- "app": common.GooseFSRuntime,
- }
- labelSelector := &metav1.LabelSelector{
- MatchLabels: labels,
- }
-
- selectorValue := ""
- selector, err := metav1.LabelSelectorAsSelector(labelSelector)
- if err != nil {
- e.Log.Error(err, "Failed to parse the labelSelector of the runtime", "labels", labels)
- } else {
- selectorValue = selector.String()
- }
- return selectorValue
-}
diff --git a/pkg/ddc/goosefs/worker_test.go b/pkg/ddc/goosefs/worker_test.go
deleted file mode 100644
index 2d73044e58c..00000000000
--- a/pkg/ddc/goosefs/worker_test.go
+++ /dev/null
@@ -1,501 +0,0 @@
-/*
-Copyright 2023 The Fluid Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package goosefs
-
-import (
- "testing"
-
- datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
- "github.com/fluid-cloudnative/fluid/pkg/common"
- ctrlhelper "github.com/fluid-cloudnative/fluid/pkg/ctrl"
- "github.com/fluid-cloudnative/fluid/pkg/ddc/base"
- "github.com/fluid-cloudnative/fluid/pkg/utils/fake"
- appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/utils/ptr"
- ctrl "sigs.k8s.io/controller-runtime"
-)
-
-func TestSetupWorkers(t *testing.T) {
-
- // runtimeInfoSpark tests create worker in exclusive mode.
-
- runtimeInfoSpark, err := base.BuildRuntimeInfo("spark", "big-data", common.GooseFSRuntime)
-
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
- runtimeInfoSpark.SetupWithDataset(&datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ExclusiveMode},
- })
-
- // runtimeInfoSpark tests create worker in shareMode mode.
- runtimeInfoHadoop, err := base.BuildRuntimeInfo("hadoop", "big-data", common.GooseFSRuntime)
- if err != nil {
- t.Errorf("fail to create the runtimeInfo with error %v", err)
- }
- runtimeInfoHadoop.SetupWithDataset(&datav1alpha1.Dataset{
- Spec: datav1alpha1.DatasetSpec{PlacementMode: datav1alpha1.ShareMode},
- })
- nodeSelector := map[string]string{
- "node-select": "true",
- }
- runtimeInfoHadoop.SetFuseNodeSelector(nodeSelector)
-
- type fields struct {
- replicas int32
- nodeInputs []*v1.Node
- worker *appsv1.StatefulSet
- deprecatedWorker *appsv1.DaemonSet
- runtime *datav1alpha1.GooseFSRuntime
- runtimeInfo base.RuntimeInfoInterface
- name string
- namespace string
- deprecated bool
- }
- tests := []struct {
- name string
- fields fields
- wantedNodeLabels map[string]map[string]string
- }{
- {
- name: "test0",
- fields: fields{
- replicas: 1,
- nodeInputs: []*v1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-node-spark",
- },
- },
- },
- worker: &appsv1.StatefulSet{
-
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-worker",
- Namespace: "big-data",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- },
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "big-data",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- },
- },
- runtimeInfo: runtimeInfoSpark,
- name: "spark",
- namespace: "big-data",
- },
- wantedNodeLabels: map[string]map[string]string{
- "test-node-spark": {
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-big-data-spark": "true",
- "fluid.io/s-big-data-spark": "true",
- "fluid.io/s-h-goosefs-t-big-data-spark": "0B",
- "fluid_exclusive": "big-data_spark",
- },
- },
- },
- {
- name: "test1",
- fields: fields{
- replicas: 1,
- worker: &appsv1.StatefulSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop-worker",
- Namespace: "big-data",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- },
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "big-data",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- },
- },
- runtimeInfo: runtimeInfoHadoop,
- name: "hadoop",
- namespace: "big-data",
- },
- wantedNodeLabels: map[string]map[string]string{
- "test-node-hadoop": {
- "fluid.io/dataset-num": "1",
- "fluid.io/s-goosefs-big-data-hadoop": "true",
- "fluid.io/s-big-data-hadoop": "true",
- "fluid.io/s-h-goosefs-t-big-data-hadoop": "0B",
- },
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- runtimeObjs := []runtime.Object{}
- for _, nodeInput := range tt.fields.nodeInputs {
- runtimeObjs = append(runtimeObjs, nodeInput.DeepCopy())
- }
-
- runtimeObjs = append(runtimeObjs, tt.fields.worker.DeepCopy())
- s := runtime.NewScheme()
- data := &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: tt.fields.name,
- Namespace: tt.fields.namespace,
- },
- }
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(datav1alpha1.GroupVersion, data)
- s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker)
- if tt.fields.deprecatedWorker != nil {
- s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.deprecatedWorker)
- }
- _ = v1.AddToScheme(s)
- runtimeObjs = append(runtimeObjs, tt.fields.runtime)
- if tt.fields.deprecatedWorker != nil {
- runtimeObjs = append(runtimeObjs, tt.fields.deprecatedWorker)
- }
- runtimeObjs = append(runtimeObjs, data)
- mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...)
-
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- runtimeInfo: tt.fields.runtimeInfo,
- Client: mockClient,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Log: ctrl.Log.WithName(tt.fields.name),
- }
- e.Helper = ctrlhelper.BuildHelper(tt.fields.runtimeInfo, mockClient, e.Log)
- err := e.SetupWorkers()
- if err != nil {
- t.Errorf("testCase %s GooseFSEngine.SetupWorkers() error = %v", tt.name, err)
- }
- if !tt.fields.deprecated {
- if tt.fields.replicas != *tt.fields.worker.Spec.Replicas {
- t.Errorf("Failed to scale %v for %v", tt.name, tt.fields)
- }
- }
- })
- }
-}
-
-func TestShouldSetupWorkers(t *testing.T) {
- type fields struct {
- name string
- namespace string
- runtime *datav1alpha1.GooseFSRuntime
- }
- tests := []struct {
- name string
- fields fields
- wantShould bool
- wantErr bool
- }{
- {
- name: "test0",
- fields: fields{
- name: "spark",
- namespace: "big-data",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "big-data",
- },
- Status: datav1alpha1.RuntimeStatus{
- WorkerPhase: datav1alpha1.RuntimePhaseNone,
- },
- },
- },
- wantShould: true,
- wantErr: false,
- },
- {
- name: "test1",
- fields: fields{
- name: "hadoop",
- namespace: "big-data",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hadoop",
- Namespace: "big-data",
- },
- Status: datav1alpha1.RuntimeStatus{
- WorkerPhase: datav1alpha1.RuntimePhaseNotReady,
- },
- },
- },
- wantShould: false,
- wantErr: false,
- },
- {
- name: "test2",
- fields: fields{
- name: "hbase",
- namespace: "big-data",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "big-data",
- },
- Status: datav1alpha1.RuntimeStatus{
- WorkerPhase: datav1alpha1.RuntimePhasePartialReady,
- },
- },
- },
- wantShould: false,
- wantErr: false,
- },
- {
- name: "test3",
- fields: fields{
- name: "tensorflow",
- namespace: "ml",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "tensorflow",
- Namespace: "ml",
- },
- Status: datav1alpha1.RuntimeStatus{
- WorkerPhase: datav1alpha1.RuntimePhaseReady,
- },
- },
- },
- wantShould: false,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- runtimeObjs := []runtime.Object{}
- data := &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: tt.fields.name,
- Namespace: tt.fields.namespace,
- },
- }
-
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(datav1alpha1.GroupVersion, data)
- _ = v1.AddToScheme(s)
- runtimeObjs = append(runtimeObjs, tt.fields.runtime, data)
- mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...)
- e := &GooseFSEngine{
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- runtime: tt.fields.runtime,
- Client: mockClient,
- }
-
- gotShould, err := e.ShouldSetupWorkers()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.ShouldSetupWorkers() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotShould != tt.wantShould {
- t.Errorf("GooseFSEngine.ShouldSetupWorkers() = %v, want %v", gotShould, tt.wantShould)
- }
- })
- }
-}
-
-func TestCheckWorkersReady(t *testing.T) {
- type fields struct {
- runtime *datav1alpha1.GooseFSRuntime
- worker *appsv1.StatefulSet
- fuse *appsv1.DaemonSet
- name string
- namespace string
- }
- tests := []struct {
- name string
- fields fields
- wantReady bool
- wantErr bool
- }{
- {
- name: "test0",
- fields: fields{
- name: "spark",
- namespace: "big-data",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark",
- Namespace: "big-data",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- },
- worker: &appsv1.StatefulSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-worker",
- Namespace: "big-data",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 1,
- },
- },
- fuse: &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: "spark-fuse",
- Namespace: "big-data",
- },
- Status: appsv1.DaemonSetStatus{
- NumberAvailable: 1,
- DesiredNumberScheduled: 1,
- CurrentNumberScheduled: 1,
- },
- },
- },
- wantReady: true,
- wantErr: false,
- },
- {
- name: "test1",
- fields: fields{
- name: "hbase",
- namespace: "big-data",
- runtime: &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase",
- Namespace: "big-data",
- },
- Spec: datav1alpha1.GooseFSRuntimeSpec{
- Replicas: 1,
- Fuse: datav1alpha1.GooseFSFuseSpec{},
- },
- },
- worker: &appsv1.StatefulSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-worker",
- Namespace: "big-data",
- },
- Spec: appsv1.StatefulSetSpec{
- Replicas: ptr.To[int32](1),
- },
- Status: appsv1.StatefulSetStatus{
- ReadyReplicas: 0,
- },
- },
- fuse: &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: "hbase-fuse",
- Namespace: "big-data",
- },
- Status: appsv1.DaemonSetStatus{
- NumberAvailable: 0,
- DesiredNumberScheduled: 1,
- CurrentNumberScheduled: 0,
- },
- },
- },
- wantReady: false,
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- runtimeObjs := []runtime.Object{}
- data := &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: tt.fields.name,
- Namespace: tt.fields.namespace,
- },
- }
-
- s := runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.runtime)
- s.AddKnownTypes(datav1alpha1.GroupVersion, data)
- s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker)
- s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.fuse)
- _ = v1.AddToScheme(s)
-
- runtimeObjs = append(runtimeObjs, tt.fields.runtime, data, tt.fields.worker, tt.fields.fuse)
- mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...)
- e := &GooseFSEngine{
- runtime: tt.fields.runtime,
- name: tt.fields.name,
- namespace: tt.fields.namespace,
- Client: mockClient,
- Log: ctrl.Log.WithName(tt.fields.name),
- }
-
- runtimeInfo, err := base.BuildRuntimeInfo(tt.fields.name, tt.fields.namespace, common.GooseFSRuntime)
- if err != nil {
- t.Errorf("GooseFSEngine.CheckWorkersReady() error = %v", err)
- }
-
- e.Helper = ctrlhelper.BuildHelper(runtimeInfo, mockClient, e.Log)
-
- gotReady, err := e.CheckWorkersReady()
- if (err != nil) != tt.wantErr {
- t.Errorf("GooseFSEngine.CheckWorkersReady() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotReady != tt.wantReady {
- t.Errorf("GooseFSEngine.CheckWorkersReady() = %v, want %v", gotReady, tt.wantReady)
- }
- })
- }
-}
-
-func TestGetWorkerSelectors(t *testing.T) {
- type fields struct {
- name string
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- {
- name: "test0",
- fields: fields{
- name: "spark",
- },
- want: "app=goosefs,release=spark,role=goosefs-worker",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- e := &GooseFSEngine{
- name: tt.fields.name,
- }
- if got := e.getWorkerSelectors(); got != tt.want {
- t.Errorf("GooseFSEngine.getWorkerSelectors() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/pkg/ddc/thin/referencedataset/cm.go b/pkg/ddc/thin/referencedataset/cm.go
index 8f3a03de4f9..90a8e3d8e8d 100644
--- a/pkg/ddc/thin/referencedataset/cm.go
+++ b/pkg/ddc/thin/referencedataset/cm.go
@@ -116,13 +116,6 @@ func (e *ReferenceDatasetEngine) createConfigMapForRefDataset(client client.Clie
if err != nil {
return err
}
- case common.GooseFSRuntime:
- configMapName := physicalRuntimeName + "-config"
- err := kubeclient.CopyConfigMap(client, types.NamespacedName{Name: configMapName, Namespace: physicalRuntimeNamespace},
- types.NamespacedName{Name: configMapName, Namespace: refNameSpace}, ownerReference)
- if err != nil {
- return err
- }
case common.JindoRuntime:
clientConfigMapName := physicalRuntimeName + "-jindofs-client-config"
err := kubeclient.CopyConfigMap(client, types.NamespacedName{Name: clientConfigMapName, Namespace: physicalRuntimeNamespace},
diff --git a/pkg/ddc/thin/referencedataset/cm_test.go b/pkg/ddc/thin/referencedataset/cm_test.go
index b350d1460a6..f295124fb57 100644
--- a/pkg/ddc/thin/referencedataset/cm_test.go
+++ b/pkg/ddc/thin/referencedataset/cm_test.go
@@ -330,44 +330,6 @@ var _ = Describe("ConfigMap Operations", func() {
})
})
- Context("when physical runtime is GooseFSRuntime", func() {
- It("should copy config configmap successfully", func() {
- configMap := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "goosefs-config",
- Namespace: "source-ns",
- },
- }
-
- refDataset := &datav1alpha1.Dataset{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ref-dataset",
- Namespace: "ref-ns",
- UID: types.UID("test-uid"),
- },
- TypeMeta: metav1.TypeMeta{
- APIVersion: "data.fluid.io/v1alpha1",
- Kind: "Dataset",
- },
- }
-
- testObjs = append(testObjs, configMap)
- fakeClient = fake.NewFakeClientWithScheme(testScheme, testObjs...)
- engine.Client = fakeClient
-
- runtimeInfo, err := base.BuildRuntimeInfo("goosefs", "source-ns", common.GooseFSRuntime)
- Expect(err).NotTo(HaveOccurred())
-
- err = engine.createConfigMapForRefDataset(fakeClient, refDataset, runtimeInfo)
- Expect(err).NotTo(HaveOccurred())
-
- var cmList corev1.ConfigMapList
- err = fakeClient.List(context.TODO(), &cmList, client.InNamespace("ref-ns"))
- Expect(err).NotTo(HaveOccurred())
- Expect(cmList.Items).To(HaveLen(1))
- })
- })
-
Context("when physical runtime is JindoRuntime", func() {
It("should copy both client and jindofs configmaps successfully", func() {
clientConfigMap := &corev1.ConfigMap{
diff --git a/pkg/utils/cmdguard/exec_pipes.go b/pkg/utils/cmdguard/exec_pipes.go
index c230940be4a..a5f0ba68795 100644
--- a/pkg/utils/cmdguard/exec_pipes.go
+++ b/pkg/utils/cmdguard/exec_pipes.go
@@ -43,7 +43,6 @@ var allowedFirstCommands = map[string]CommandValidater{
"df": PrefixMatch,
"mount": PrefixMatch,
"alluxio": PrefixMatch,
- "goosefs": PrefixMatch,
"ddc-helm": PrefixMatch,
}
diff --git a/pkg/utils/cmdguard/exec_pipes_test.go b/pkg/utils/cmdguard/exec_pipes_test.go
index 6017417f6a5..8e61c81c2be 100644
--- a/pkg/utils/cmdguard/exec_pipes_test.go
+++ b/pkg/utils/cmdguard/exec_pipes_test.go
@@ -62,11 +62,6 @@ var _ = Describe("ValidateShellPipeString", func() {
Expect(err).NotTo(HaveOccurred())
})
- It("should accept command with goosefs as first command", func() {
- err := validateShellPipeString("goosefs fs ls / | grep data")
- Expect(err).NotTo(HaveOccurred())
- })
-
// Test for allowed piped commands
It("should accept command with wc -l", func() {
err := validateShellPipeString("ls file | wc -l")
@@ -248,12 +243,6 @@ var _ = Describe("ShellCommand", func() {
Expect(cmd).NotTo(BeNil())
})
- It("should accept allowed commands like goosefs", func() {
- cmd, err := ShellCommand("bash", "-c", "goosefs fs ls /")
- Expect(err).NotTo(HaveOccurred())
- Expect(cmd).NotTo(BeNil())
- })
-
It("should accept allowed commands like ddc-helm", func() {
cmd, err := ShellCommand("bash", "-c", "ddc-helm version")
Expect(err).NotTo(HaveOccurred())
diff --git a/pkg/utils/databackup.go b/pkg/utils/databackup.go
index 42b1431eff3..12f2d596ba6 100644
--- a/pkg/utils/databackup.go
+++ b/pkg/utils/databackup.go
@@ -101,7 +101,7 @@ func GetRpcPortFromMasterContainer(container *v1.Container) (rpcPort int32) {
if container == nil {
return
}
- if container.Name == "alluxio-master" || container.Name == "goosefs-master" {
+ if container.Name == "alluxio-master" {
for _, port := range container.Ports {
if port.Name == "rpc" {
rpcPort = port.HostPort
diff --git a/pkg/utils/databackup_test.go b/pkg/utils/databackup_test.go
index 394fe9a00c0..720a10451af 100644
--- a/pkg/utils/databackup_test.go
+++ b/pkg/utils/databackup_test.go
@@ -223,25 +223,6 @@ func TestGetRpcPortFromMasterContainer(t *testing.T) {
},
wantRpcPort: 34,
},
- {
- name: "goosefs-test",
- args: args{
- container: &corev1.Container{
- Name: "goosefs-master",
- Ports: []corev1.ContainerPort{
- {
- Name: "rpc",
- HostPort: 44,
- },
- {
- Name: "rpc-test",
- HostPort: 5202,
- },
- },
- },
- },
- wantRpcPort: 44,
- },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/utils/kubeclient/volume_mount.go b/pkg/utils/kubeclient/volume_mount.go
index a0d10cbb517..fed4d04e6ed 100644
--- a/pkg/utils/kubeclient/volume_mount.go
+++ b/pkg/utils/kubeclient/volume_mount.go
@@ -78,7 +78,6 @@ func GetFuseMountInContainer(mountType string, container corev1.Container) (volu
common.JindoRuntime: common.JindoChartName,
common.AlluxioMountType: common.AlluxioChart,
common.AlluxioRuntime: common.AlluxioChart,
- common.GooseFSMountType: common.GooseFSChart,
common.JuiceFSMountType: common.JuiceFSChart,
common.JuiceFSRuntime: common.JuiceFSChart,
}
@@ -114,7 +113,6 @@ func GetMountPathInContainer(container corev1.Container) (string, error) {
kv := map[string]string{
common.JindoChartName: "jindofs-fuse",
common.AlluxioChart: "alluxio-fuse",
- common.GooseFSChart: "goosefs-fuse",
common.JuiceFSChart: "juicefs-fuse",
}
// consider the env FLUID_FUSE_MOUNTPOINT
diff --git a/pkg/utils/kubeclient/volume_mount_test.go b/pkg/utils/kubeclient/volume_mount_test.go
index 336c4b59486..f95ecb4fefe 100644
--- a/pkg/utils/kubeclient/volume_mount_test.go
+++ b/pkg/utils/kubeclient/volume_mount_test.go
@@ -288,20 +288,6 @@ func TestGetMountPathInContainer(t *testing.T) {
want: "/test/jfs",
wantErr: false,
},
- {
- name: "test-goosefs",
- args: args{
- container: corev1.Container{
- Name: "test",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "goosefs-fuse-mount",
- MountPath: "/runtime-mnt/goosefs/default/test",
- }},
- },
- },
- want: "/runtime-mnt/goosefs/default/test/goosefs-fuse",
- wantErr: false,
- },
{
name: "test-alluxio",
args: args{
diff --git a/pkg/utils/quantity.go b/pkg/utils/quantity.go
index e6170e450de..6b78389f494 100644
--- a/pkg/utils/quantity.go
+++ b/pkg/utils/quantity.go
@@ -50,20 +50,6 @@ func TransformQuantityToJindoUnit(q *resource.Quantity) (value string) {
return
}
-// TransformQuantityToGooseFSUnit transform a given input quantity to another one
-// that can be recognized by GooseFS. This is necessary because GooseFS takes decimal byte units(e.g. KB, MB, GB, etc.)
-// as binary byte units(e.g. Ki, Mi, Gi)
-func TransformQuantityToGooseFSUnit(q *resource.Quantity) (value string) {
- value = q.String()
-
- if strings.HasSuffix(value, "i") {
- value = strings.ReplaceAll(value, "i", "B")
- }
- return
- // return units.BytesSize(units.BytesSize(float64(q.Value())))
-
-}
-
// TransformQuantityToEFCUnit transform a given input quantity to another one
// that can be recognized by EFC. This is necessary because EFC takes decimal byte units(e.g. KB, MB, GB, etc.)
// as binary byte units(e.g. Ki, Mi, Gi)
diff --git a/pkg/utils/quantity_test.go b/pkg/utils/quantity_test.go
index 4e1e6bdeddc..4609622d8e7 100644
--- a/pkg/utils/quantity_test.go
+++ b/pkg/utils/quantity_test.go
@@ -109,35 +109,6 @@ func TestTransformQuantityToJindoUnit(t *testing.T) {
}
}
-func TestTransformQuantityToGooseFSUnit(t *testing.T) {
- testQuantity1 := resource.MustParse("10Gi")
- testQuantity2 := resource.MustParse("10M")
-
- tests := []struct {
- name string
- quantity *resource.Quantity
- wantValue string
- }{
- {
- name: "test1 for TransformQuantityToGooseFSUnit",
- quantity: &testQuantity1,
- wantValue: "10GB",
- },
- {
- name: "test2 for TransformQuantityToGooseFSUnit",
- quantity: &testQuantity2,
- wantValue: "10M",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if gotValue := TransformQuantityToGooseFSUnit(tt.quantity); gotValue != tt.wantValue {
- t.Errorf("TransformQuantityToGooseFSUnit() = %v, want %v", gotValue, tt.wantValue)
- }
- })
- }
-}
-
func TestTransformQuantityToEFCUnit(t *testing.T) {
testQuantity1 := resource.MustParse("10Gi")
testQuantity2 := resource.MustParse("10M")
diff --git a/pkg/utils/runtimes.go b/pkg/utils/runtimes.go
index 1f2383710ef..20de69d59ba 100644
--- a/pkg/utils/runtimes.go
+++ b/pkg/utils/runtimes.go
@@ -82,20 +82,6 @@ func GetJindoRuntime(client client.Reader, name, namespace string) (*datav1alpha
return &runtime, nil
}
-// GetGooseFSRuntime gets GooseFS Runtime object with the given name and namespace
-func GetGooseFSRuntime(client client.Reader, name, namespace string) (*datav1alpha1.GooseFSRuntime, error) {
-
- key := types.NamespacedName{
- Namespace: namespace,
- Name: name,
- }
- var runtime datav1alpha1.GooseFSRuntime
- if err := client.Get(context.TODO(), key, &runtime); err != nil {
- return nil, err
- }
- return &runtime, nil
-}
-
// GetJuiceFSRuntime gets JuiceFS Runtime object with the given name and namespace
func GetJuiceFSRuntime(client client.Reader, name, namespace string) (*datav1alpha1.JuiceFSRuntime, error) {
diff --git a/pkg/utils/runtimes_test.go b/pkg/utils/runtimes_test.go
index 0e8c054b1a0..7aabb683d99 100644
--- a/pkg/utils/runtimes_test.go
+++ b/pkg/utils/runtimes_test.go
@@ -144,45 +144,6 @@ var _ = Describe("GetJindoRuntime", func() {
)
})
-var _ = Describe("GetGooseFSRuntime", func() {
- var (
- s *runtime.Scheme
- runtimeName = "goosefs-runtime-1"
- runtimeNs = "default"
- goosefsRuntime *datav1alpha1.GooseFSRuntime
- )
-
- BeforeEach(func() {
- goosefsRuntime = &datav1alpha1.GooseFSRuntime{
- ObjectMeta: metav1.ObjectMeta{
- Name: runtimeName,
- Namespace: runtimeNs,
- },
- }
- s = runtime.NewScheme()
- s.AddKnownTypes(datav1alpha1.GroupVersion, goosefsRuntime)
- })
-
- DescribeTable("should handle runtime lookup",
- func(name, namespace, wantName string, notFound bool) {
- fakeClient := fake.NewFakeClientWithScheme(s, goosefsRuntime)
- gotRuntime, err := GetGooseFSRuntime(fakeClient, name, namespace)
-
- if notFound {
- Expect(err).To(HaveOccurred())
- Expect(gotRuntime).To(BeNil())
- Expect(apierrs.IsNotFound(err)).To(BeTrue())
- } else {
- Expect(err).NotTo(HaveOccurred())
- Expect(gotRuntime.Name).To(Equal(wantName))
- }
- },
- Entry("existing runtime", "goosefs-runtime-1", "default", "goosefs-runtime-1", false),
- Entry("non-existent name", "goosefs-runtime-1not-exist", "default", "", true),
- Entry("non-existent namespace", "goosefs-runtime-1", "defaultnot-exist", "", true),
- )
-})
-
var _ = Describe("GetThinRuntime", func() {
var (
s *runtime.Scheme
diff --git a/tools/diagnose-fluid-goosefs.sh b/tools/diagnose-fluid-goosefs.sh
deleted file mode 100755
index 1c16bdcfbe1..00000000000
--- a/tools/diagnose-fluid-goosefs.sh
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env bash
-set +x
-
-print_usage() {
- echo "Usage:"
- echo " ./diagnose-fluid-goosefs.sh COMMAND [OPTIONS]"
- echo "COMMAND:"
- echo " help"
- echo " Display this help message."
- echo " collect"
- echo " Collect pods logs of controller and runtime."
- echo "OPTIONS:"
- echo " -r, --name name"
- echo " Set the name of runtime."
- echo " -n, --namespace name"
- echo " Set the namespace of runtime."
-}
-
-run() {
- echo
- echo "-----------------run $*------------------"
- timeout 10s "$@"
- if [ $? != 0 ]; then
- echo "failed to collect info: $*"
- fi
- echo "------------End of ${1}----------------"
-}
-
-helm_get() {
- run helm get all -n ${runtime_namespace} "${1}" &>"$diagnose_dir/helm-${1}.yaml"
-}
-
-pod_status() {
- local namespace=${1:-"default"}
- run kubectl get po -owide -n ${namespace} &>"$diagnose_dir/pods-${namespace}.log"
-}
-
-fluid_pod_logs() {
- core_component "${fluid_namespace}" "manager" "control-plane=goosefsruntime-controller"
- core_component "${fluid_namespace}" "manager" "control-plane=dataset-controller"
- core_component "${fluid_namespace}" "plugins" "app=csi-nodeplugin-fluid"
- core_component "${fluid_namespace}" "node-driver-registrar" "app=csi-nodeplugin-fluid"
-}
-
-runtime_pod_logs() {
- core_component "${runtime_namespace}" "goosefs-master" "role=goosefs-master" "release=${runtime_name}"
- core_component "${runtime_namespace}" "goosefs-job-master" "role=goosefs-master" "release=${runtime_name}"
- core_component "${runtime_namespace}" "goosefs-worker" "role=goosefs-worker" "release=${runtime_name}"
- core_component "${runtime_namespace}" "goosefs-job-worker" "role=goosefs-worker" "release=${runtime_name}"
- core_component "${runtime_namespace}" "goosefs-fuse" "role=goosefs-fuse" "release=${runtime_name}"
-}
-
-core_component() {
- # namespace container selectors...
- local namespace="$1"
- local container="$2"
- shift 2
- local selectors="$*"
- local constrains
- local pods
- constrains=$(echo "${selectors}" | tr ' ' ',')
- if [[ -n ${constrains} ]]; then
- constrains="-l ${constrains}"
- fi
- mkdir -p "$diagnose_dir/pods-${namespace}"
- pods=$(kubectl get po -n ${namespace} "${constrains}" | awk '{print $1}' | grep -v NAME)
- for po in ${pods}; do
- if [[ "${namespace}"="${fluid_namesapce}" ]]; then
- kubectl logs "${po}" -c "$container" -n ${namespace} &>"$diagnose_dir/pods-${namespace}/${po}-${container}.log" 2>&1
- else
- kubectl cp "${namespace}/${po}":/opt/goosefs/logs -c "${container}" "$diagnose_dir/pods-${namespace}/${po}-${container}" 2>&1
- fi
- done
-}
-
-kubectl_resource() {
- # runtime, dataset, pv and pvc should have the same name
- kubectl describe dataset --namespace ${runtime_namespace} ${runtime_name} &>"${diagnose_dir}/dataset-${runtime_name}.yaml" 2>&1
- kubectl describe goosefsruntime --namespace ${runtime_namespace} ${name} &>"${diagnose_dir}/goosefsruntime-${runtime_name}.yaml" 2>&1
- kubectl describe pv ${runtime_namespace}-${runtime_name} &>"${diagnose_dir}/pv-${runtime_name}.yaml" 2>&1
- kubectl describe pvc ${runtime_name} --namespace ${runtime_namespace} &>"${diagnose_dir}/pvc-${runtime_name}.yaml" 2>&1
-}
-
-archive() {
- tar -zcvf "${current_dir}/diagnose_fluid_${timestamp}.tar.gz" "${diagnose_dir}"
- echo "please get diagnose_fluid_${timestamp}.tar.gz for diagnostics"
-}
-
-pd_collect() {
- echo "Start collecting, runtime-name=${runtime_name}, runtime-namespace=${runtime_namespace}"
- helm_get "${fluid_name}"
- helm_get "${runtime_name}"
- pod_status "${fluid_namespace}"
- pod_status "${runtime_namespace}"
- runtime_pod_logs
- fluid_pod_logs
- kubectl_resource
- archive
-}
-
-collect()
-{
- # ensure params
- fluid_name=${fluid_name:-"fluid"}
- fluid_namespace=${fluid_namespace:-"fluid-system"}
- runtime_name=${runtime_name:?"the name of runtime must be set"}
- runtime_namespace=${runtime_namespace:-"default"}
-
- current_dir=$(pwd)
- timestamp=$(date +%s)
- diagnose_dir="/tmp/diagnose_fluid_${timestamp}"
- mkdir -p "$diagnose_dir"
-
- pd_collect
-}
-
-main() {
- if [[ $# -eq 0 ]]; then
- print_usage
- exit 1
- fi
-
- action="help"
-
- while [[ $# -gt 0 ]]; do
- case $1 in
- -h|--help|"-?")
- print_usage
- exit 0;
- ;;
- collect|help)
- action=$1
- ;;
- -r|--name)
- runtime_name=$2
- shift
- ;;
- -n|--namespace)
- runtime_namespace=$2
- shift
- ;;
- *)
- echo "Error: unsupported option $1" >&2
- print_usage
- exit 1
- ;;
- esac
- shift
- done
-
- case ${action} in
- collect)
- collect
- ;;
- help)
- print_usage
- ;;
- esac
-}
-
-main "$@"