diff --git a/.github/scripts/build-all-images.sh b/.github/scripts/build-all-images.sh index 37c83dc54fa..1522c9db2bf 100755 --- a/.github/scripts/build-all-images.sh +++ b/.github/scripts/build-all-images.sh @@ -10,6 +10,8 @@ function get_image_tag() { } function build_images() { + minio_e2e_img=local/minio-e2e:latest + oss_emulator_img=${IMG_REPO}/oss-emulator:e2e images=( ${IMG_REPO}/dataset-controller:${IMAGE_TAG} ${IMG_REPO}/application-controller:${IMAGE_TAG} @@ -24,9 +26,18 @@ function build_images() { ${IMG_REPO}/fluid-csi:${IMAGE_TAG} ${IMG_REPO}/fluid-webhook:${IMAGE_TAG} ${IMG_REPO}/fluid-crd-upgrader:${IMAGE_TAG} + ${minio_e2e_img} + ${oss_emulator_img} ) make docker-build-all + tmpdir=$(mktemp -d) + cat > ${tmpdir}/Dockerfile <<'EOF' +FROM minio/minio:latest +EOF + docker build -t ${minio_e2e_img} ${tmpdir} + rm -rf ${tmpdir} + docker build -t ${oss_emulator_img} test/gha-e2e/jindo/oss-emulator for img in ${images[@]}; do echo "Loading image $img to kind cluster..." diff --git a/charts/jindocache/Chart.yaml b/charts/jindocache/Chart.yaml index 5caabf7c999..bb3566ca1f9 100755 --- a/charts/jindocache/Chart.yaml +++ b/charts/jindocache/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 appVersion: 6.2.0 -version: 1.0.4 +version: 1.0.5 description: FileSystem on the cloud based on Aliyun Object Storage aimed for data acceleration. 
home: https://help.aliyun.com/document_detail/164207.html diff --git a/charts/jindocache/templates/_helpers.tpl b/charts/jindocache/templates/_helpers.tpl index 4501842c4ba..64f1099b0d0 100755 --- a/charts/jindocache/templates/_helpers.tpl +++ b/charts/jindocache/templates/_helpers.tpl @@ -44,7 +44,32 @@ Distribute credential key and values with secret volume mounting on Jindo's pods Distribute credential key and values with secret volumes */}} {{- define "jindofs.cred.secret.volumes" -}} -{{- if .Values.UseStsToken }} +{{- if .Values.secretProjections }} +- name: jindofs-secret-token + projected: + sources: + {{- if and .Values.UseStsToken .Values.secret }} + - secret: + name: {{ .Values.secret }} + {{- else if .Values.secret }} + - secret: + name: {{ .Values.secret }} + items: + - key: {{ .Values.secretKey }} + path: AccessKeyId + - key: {{ .Values.secretValue }} + path: AccessKeySecret + {{- end }} + {{- range .Values.secretProjections }} + - secret: + name: {{ .name }} + items: + {{- range .items }} + - key: {{ .key }} + path: {{ .path }} + {{- end }} + {{- end }} +{{- else if .Values.UseStsToken }} - name: jindofs-secret-token secret: secretName: {{ .Values.secret }} diff --git a/charts/jindocache/templates/fuse/daemonset.yaml b/charts/jindocache/templates/fuse/daemonset.yaml index e20acf73eea..dd7157b75c4 100755 --- a/charts/jindocache/templates/fuse/daemonset.yaml +++ b/charts/jindocache/templates/fuse/daemonset.yaml @@ -150,7 +150,7 @@ spec: subPath: hdfs-site.xml {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumeMounts" . | nindent 12 }} {{- end }} {{- if .Values.ufsVolumes }} @@ -188,7 +188,7 @@ spec: hostPath: path: /dev/fuse type: CharDevice - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumes" . 
| nindent 8 }} {{- end }} {{- if .Values.ufsVolumes }} diff --git a/charts/jindocache/templates/master/statefulset.yaml b/charts/jindocache/templates/master/statefulset.yaml index 5201b6e89d8..e84f247ab60 100755 --- a/charts/jindocache/templates/master/statefulset.yaml +++ b/charts/jindocache/templates/master/statefulset.yaml @@ -175,7 +175,7 @@ spec: subPath: hdfs-site.xml {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumeMounts" . | nindent 12 }} {{- end }} {{- if .Values.ufsVolumes }} @@ -237,7 +237,7 @@ spec: name: {{ .Values.hadoopConfig.configMap }} {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumes" . | nindent 8 }} {{- end }} {{- if .Values.master.volumes }} diff --git a/charts/jindocache/templates/worker/statefulset.yaml b/charts/jindocache/templates/worker/statefulset.yaml index 1c9ec92a1bd..fbc7a86bc61 100755 --- a/charts/jindocache/templates/worker/statefulset.yaml +++ b/charts/jindocache/templates/worker/statefulset.yaml @@ -159,7 +159,7 @@ spec: subPath: hdfs-site.xml {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumeMounts" . | nindent 12 }} {{- end }} {{- if .Values.ufsVolumes }} @@ -222,7 +222,7 @@ spec: name: {{ .Values.hadoopConfig.configMap }} {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumes" . 
| nindent 8 }} {{- end }} - name: bigboot-config diff --git a/charts/jindofsx/Chart.yaml b/charts/jindofsx/Chart.yaml index 9e7918c1434..a4d0b66eadd 100755 --- a/charts/jindofsx/Chart.yaml +++ b/charts/jindofsx/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 appVersion: 4.6.8 -version: 1.0.4 +version: 1.0.5 description: FileSystem on the cloud based on Aliyun Object Storage aimed for data acceleration. home: https://help.aliyun.com/document_detail/164207.html diff --git a/charts/jindofsx/templates/_helpers.tpl b/charts/jindofsx/templates/_helpers.tpl index 4501842c4ba..64f1099b0d0 100755 --- a/charts/jindofsx/templates/_helpers.tpl +++ b/charts/jindofsx/templates/_helpers.tpl @@ -44,7 +44,32 @@ Distribute credential key and values with secret volume mounting on Jindo's pods Distribute credential key and values with secret volumes */}} {{- define "jindofs.cred.secret.volumes" -}} -{{- if .Values.UseStsToken }} +{{- if .Values.secretProjections }} +- name: jindofs-secret-token + projected: + sources: + {{- if and .Values.UseStsToken .Values.secret }} + - secret: + name: {{ .Values.secret }} + {{- else if .Values.secret }} + - secret: + name: {{ .Values.secret }} + items: + - key: {{ .Values.secretKey }} + path: AccessKeyId + - key: {{ .Values.secretValue }} + path: AccessKeySecret + {{- end }} + {{- range .Values.secretProjections }} + - secret: + name: {{ .name }} + items: + {{- range .items }} + - key: {{ .key }} + path: {{ .path }} + {{- end }} + {{- end }} +{{- else if .Values.UseStsToken }} - name: jindofs-secret-token secret: secretName: {{ .Values.secret }} diff --git a/charts/jindofsx/templates/fuse/daemonset.yaml b/charts/jindofsx/templates/fuse/daemonset.yaml index 55930d18289..58c4f07410e 100755 --- a/charts/jindofsx/templates/fuse/daemonset.yaml +++ b/charts/jindofsx/templates/fuse/daemonset.yaml @@ -162,7 +162,7 @@ spec: subPath: hdfs-site.xml {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include 
"jindofs.cred.secret.volumeMounts" . | nindent 12 }} {{- end }} {{- if .Values.ufsVolumes }} @@ -200,7 +200,7 @@ spec: hostPath: path: /dev/fuse type: CharDevice - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumes" . | nindent 8 }} {{- end }} {{- if .Values.ufsVolumes }} diff --git a/charts/jindofsx/templates/master/statefulset.yaml b/charts/jindofsx/templates/master/statefulset.yaml index 3225f907ecf..09ef3f87f2c 100755 --- a/charts/jindofsx/templates/master/statefulset.yaml +++ b/charts/jindofsx/templates/master/statefulset.yaml @@ -187,7 +187,7 @@ spec: subPath: hdfs-site.xml {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumeMounts" . | nindent 12 }} {{- end }} {{- if .Values.ufsVolumes }} @@ -249,7 +249,7 @@ spec: name: {{ .Values.hadoopConfig.configMap }} {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumes" . | nindent 8 }} {{- end }} {{- if .Values.master.volumes }} diff --git a/charts/jindofsx/templates/worker/statefulset.yaml b/charts/jindofsx/templates/worker/statefulset.yaml index d00f501682a..db2d3779370 100755 --- a/charts/jindofsx/templates/worker/statefulset.yaml +++ b/charts/jindofsx/templates/worker/statefulset.yaml @@ -171,7 +171,7 @@ spec: subPath: hdfs-site.xml {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumeMounts" . | nindent 12 }} {{- end }} {{- if .Values.ufsVolumes }} @@ -234,7 +234,7 @@ spec: name: {{ .Values.hadoopConfig.configMap }} {{- end }} {{- end }} - {{- if .Values.secret }} + {{- if or .Values.secret .Values.secretProjections }} {{ include "jindofs.cred.secret.volumes" . 
| nindent 8 }} {{- end }} - name: bigboot-config diff --git a/pkg/ddc/jindocache/transform.go b/pkg/ddc/jindocache/transform.go index bf7e491daec..6bda2863561 100644 --- a/pkg/ddc/jindocache/transform.go +++ b/pkg/ddc/jindocache/transform.go @@ -20,7 +20,6 @@ import ( "fmt" "maps" "os" - "regexp" "strconv" "strings" "time" @@ -46,6 +45,105 @@ type smartdataConfig struct { dnsServer string } +const ( + jindoOSSCredentialsProvider = "com.aliyun.jindodata.oss.auth.CustomCredentialsProvider" + jindoSecretProviderFormat = "JSON" + jindoSecretMountPath = "/token" + jindoCacheOSSBucketPrefix = "jindocache.oss.bucket." + jindoAccessKeyIDSuffix = ".accessKeyId" + jindoAccessKeySecretSuffix = ".accessKeySecret" + jindoEndpointSuffix = ".endpoint" + jindoDLSStorageEnableSuffix = ".data.lake.storage.enable" +) + +func buildBucketSecretURI(bucketName string) string { + return fmt.Sprintf("secrets://%s/%s/", jindoSecretMountPath, bucketName) +} + +func validateSecretKeyRef(secretKeyRef datav1alpha1.SecretKeySelector, optionName, mountPoint string) error { + if secretKeyRef.Name == "" || secretKeyRef.Key == "" { + return fmt.Errorf("encryptOption %s for mount %s must reference both secret name and key", optionName, mountPoint) + } + + return nil +} + +func getSecretDataValue(secret *corev1.Secret, secretName, secretKey string) (string, error) { + value, ok := secret.Data[secretKey] + if !ok { + return "", fmt.Errorf("secret %s does not contain key %s", secretName, secretKey) + } + + return string(value), nil +} + +func appendSecretProjection(projections []corev1.SecretProjection, secretName, secretKey, itemPath string) ([]corev1.SecretProjection, error) { + for i, projection := range projections { + for _, item := range projection.Items { + if item.Path != itemPath { + continue + } + if projection.Name == secretName && item.Key == secretKey { + return projections, nil + } + return nil, fmt.Errorf("conflicting secret projection for %s", itemPath) + } + if projection.Name == 
secretName { + projections[i].Items = append(projections[i].Items, corev1.KeyToPath{ + Key: secretKey, + Path: itemPath, + }) + return projections, nil + } + } + + return append(projections, corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretName, + }, + Items: []corev1.KeyToPath{{ + Key: secretKey, + Path: itemPath, + }}, + }), nil +} + +func setBucketSecretProviderProperties(properties map[string]string, prefix, bucketName, secretURI string) { + properties[fmt.Sprintf("%s.oss.bucket.%s.credentials.provider", prefix, bucketName)] = jindoOSSCredentialsProvider + properties[fmt.Sprintf("%s.oss.bucket.%s.provider.endpoint", prefix, bucketName)] = secretURI + properties[fmt.Sprintf("%s.oss.bucket.%s.provider.format", prefix, bucketName)] = jindoSecretProviderFormat +} + +func buildBucketPropertyKey(prefix, bucketName, suffix string) string { + return prefix + bucketName + suffix +} + +func extractSingleOSSEndpoint(properties map[string]string, prefix string) (string, bool) { + var endpoint string + for key, value := range properties { + if !strings.HasPrefix(key, prefix) || !strings.HasSuffix(key, jindoEndpointSuffix) { + continue + } + if endpoint == "" { + endpoint = value + continue + } + if endpoint != value { + return "", false + } + } + return endpoint, endpoint != "" +} + +func parseOSSMountBucket(mountPoint string) (string, error) { + bucket, _, _ := strings.Cut(strings.TrimPrefix(mountPoint, "oss://"), "/") + if bucket == "" { + return "", fmt.Errorf("incorrect oss mountPoint with %v, please check your path is dir or file ", mountPoint) + } + + return bucket, nil +} + func (e *JindoCacheEngine) transform(runtime *datav1alpha1.JindoRuntime) (value *Jindo, err error) { if runtime == nil { err = fmt.Errorf("the jindoRuntime is null") @@ -409,32 +507,44 @@ func (e *JindoCacheEngine) transformMaster(runtime *datav1alpha1.JindoRuntime, m } mountType := "oss" + ossBucketName := "" if strings.HasPrefix(mount.MountPoint, 
"oss://") { - var re = regexp.MustCompile(`(oss://(.*?))(/)`) - rm := re.FindStringSubmatch(mount.MountPoint) - if len(rm) < 3 { - err = fmt.Errorf("incorrect oss mountPoint with %v, please check your path is dir or file ", mount.MountPoint) + ossBucketName, err = parseOSSMountBucket(mount.MountPoint) + if err != nil { e.Log.Error(err, "mount.MountPoint", mount.MountPoint) return err } - bucketName := rm[2] if mount.Options["fs.oss.endpoint"] == "" { err = fmt.Errorf("oss endpoint can not be null, please check option") e.Log.Error(err, "oss endpoint can not be null") return err } - propertiesFileStore["jindocache.oss.bucket."+bucketName+".endpoint"] = mount.Options["fs.oss.endpoint"] + propertiesFileStore[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, ossBucketName, jindoEndpointSuffix)] = mount.Options["fs.oss.endpoint"] if strings.Contains(mount.Options["fs.oss.endpoint"], "dls") { - propertiesFileStore["jindocache.oss.bucket."+bucketName+".data.lake.storage.enable"] = "true" - if os.Getenv("jindocache.internal.test") == "true" { - if mount.Options["fs.oss.accessKeyId"] != "" { - propertiesFileStore["jindocache.oss.bucket."+bucketName+".accessKeyId"] = mount.Options["fs.oss.accessKeyId"] - } - if mount.Options["fs.oss.accessKeySecret"] != "" { - propertiesFileStore["jindocache.oss.bucket."+bucketName+".accessKeySecret"] = mount.Options["fs.oss.accessKeySecret"] - } + propertiesFileStore[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, ossBucketName, jindoDLSStorageEnableSuffix)] = "true" + } + + hasOSSAccessKeyIDRef := false + hasOSSAccessKeySecretRef := false + for _, encryptOption := range mount.EncryptOptions { + switch encryptOption.Name { + case "fs.oss.accessKeyId": + hasOSSAccessKeyIDRef = true + case "fs.oss.accessKeySecret": + hasOSSAccessKeySecretRef = true } } + if secretMountSupport && hasOSSAccessKeyIDRef != hasOSSAccessKeySecretRef { + err = fmt.Errorf("oss bucket %s must configure both fs.oss.accessKeyId and fs.oss.accessKeySecret in 
encryptOptions when secret mount is enabled", ossBucketName) + e.Log.Error(err, "mount.MountPoint", mount.MountPoint) + return err + } + if !hasOSSAccessKeyIDRef && mount.Options["fs.oss.accessKeyId"] != "" { + propertiesFileStore[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, ossBucketName, jindoAccessKeyIDSuffix)] = mount.Options["fs.oss.accessKeyId"] + } + if !hasOSSAccessKeySecretRef && mount.Options["fs.oss.accessKeySecret"] != "" { + propertiesFileStore[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, ossBucketName, jindoAccessKeySecretSuffix)] = mount.Options["fs.oss.accessKeySecret"] + } } // support s3 @@ -487,30 +597,81 @@ func (e *JindoCacheEngine) transformMaster(runtime *datav1alpha1.JindoRuntime, m for _, encryptOption := range mount.EncryptOptions { key := encryptOption.Name secretKeyRef := encryptOption.ValueFrom.SecretKeyRef + if err = validateSecretKeyRef(secretKeyRef, key, mount.MountPoint); err != nil { + e.Log.Error(err, "invalid encryptOption secret reference", "key", key, "mountPoint", mount.MountPoint) + return err + } + if mountType == "oss" && ossBucketName != "" { + if secretMountSupport { + itemPath := "" + switch key { + case "fs.oss.accessKeyId": + itemPath = ossBucketName + "/AccessKeyId" + case "fs.oss.accessKeySecret": + itemPath = ossBucketName + "/AccessKeySecret" + } + if itemPath != "" { + secretURI := buildBucketSecretURI(ossBucketName) + if value.BucketSecretPaths == nil { + value.BucketSecretPaths = map[string]string{} + } + value.BucketSecretPaths[ossBucketName] = secretURI + value.SecretProjections, err = appendSecretProjection(value.SecretProjections, secretKeyRef.Name, secretKeyRef.Key, itemPath) + if err != nil { + return err + } + e.Log.Info("Configure OSS bucket credential projection", "bucket", ossBucketName, "secretName", secretKeyRef.Name, "key", key) + } + continue + } + + secret, err := kubeclient.GetSecret(e.Client, secretKeyRef.Name, e.namespace) + if err != nil { + e.Log.Error(err, "can't get the input 
secret from dataset", "secretName", secretKeyRef.Name) + return err + } + secretValue, err := getSecretDataValue(secret, secretKeyRef.Name, secretKeyRef.Key) + if err != nil { + e.Log.Error(err, "missing key in referenced secret", "secretName", secretKeyRef.Name, "key", key) + return err + } + if key == "fs.oss.accessKeyId" { + propertiesFileStore[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, ossBucketName, jindoAccessKeyIDSuffix)] = secretValue + } + if key == "fs.oss.accessKeySecret" { + propertiesFileStore[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, ossBucketName, jindoAccessKeySecretSuffix)] = secretValue + } + e.Log.Info("Get OSS bucket credential from Secret successfully", "bucket", ossBucketName, "secretName", secretKeyRef.Name, "key", key) + continue + } if secretMountSupport { value.Secret = secretKeyRef.Name if key == "fs."+mountType+".accessKeyId" { value.SecretKey = secretKeyRef.Key - e.Log.Info(fmt.Sprintf("Get %s From %s!", key, secretKeyRef.Name)) + e.Log.Info("Get credential from Secret successfully", "key", key, "secretName", secretKeyRef.Name) } if key == "fs."+mountType+".accessKeySecret" { value.SecretValue = secretKeyRef.Key - e.Log.Info(fmt.Sprintf("Get %s From %s!", key, secretKeyRef.Name)) + e.Log.Info("Get credential from Secret successfully", "key", key, "secretName", secretKeyRef.Name) } } else { secret, err := kubeclient.GetSecret(e.Client, secretKeyRef.Name, e.namespace) if err != nil { e.Log.Error(err, "can't get the input secret from dataset", "secretName", secretKeyRef.Name) - break + return err + } + secretValue, err := getSecretDataValue(secret, secretKeyRef.Name, secretKeyRef.Key) + if err != nil { + e.Log.Error(err, "missing key in referenced secret", "secretName", secretKeyRef.Name, "key", key) + return err } - value := secret.Data[secretKeyRef.Key] if key == "fs."+mountType+".accessKeyId" { - propertiesFileStore["jindocache."+mountType+".accessKeyId"] = string(value) + 
propertiesFileStore["jindocache."+mountType+".accessKeyId"] = secretValue } if key == "fs."+mountType+".accessKeySecret" { - propertiesFileStore["jindocache."+mountType+".accessKeySecret"] = string(value) + propertiesFileStore["jindocache."+mountType+".accessKeySecret"] = secretValue } - e.Log.Info("Get Credential From Secret Successfully") + e.Log.Info("Get credential from Secret successfully", "key", key, "secretName", secretKeyRef.Name) } } value.MountType = mountType @@ -660,10 +821,25 @@ func (e *JindoCacheEngine) transformFuse(runtime *datav1alpha1.JindoRuntime, val } // set secret if len(value.Secret) != 0 { - properties["fs."+value.MountType+".credentials.provider"] = "com.aliyun.jindodata.oss.auth.CustomCredentialsProvider" + properties["fs."+value.MountType+".credentials.provider"] = jindoOSSCredentialsProvider properties["aliyun."+value.MountType+".provider.url"] = "secrets:///token/" properties["fs."+value.MountType+".provider.endpoint"] = "secrets:///token/" - properties["fs."+value.MountType+".provider.format"] = "JSON" + properties["fs."+value.MountType+".provider.format"] = jindoSecretProviderFormat + } + if len(value.BucketSecretPaths) != 0 { + properties["fs."+value.MountType+".credentials.provider"] = jindoOSSCredentialsProvider + properties["aliyun."+value.MountType+".provider.url"] = "secrets:///token/" + properties["fs."+value.MountType+".provider.endpoint"] = "secrets:///token/" + properties["fs."+value.MountType+".provider.format"] = jindoSecretProviderFormat + } + for bucketName, secretURI := range value.BucketSecretPaths { + setBucketSecretProviderProperties(properties, "fs", bucketName, secretURI) + properties["aliyun.oss.bucket."+bucketName+".provider.url"] = secretURI + } + if endpoint, ok := extractSingleOSSEndpoint(value.Master.FileStoreProperties, jindoCacheOSSBucketPrefix); ok { + properties["fs.oss.endpoint"] = endpoint + properties["jindocache.oss.endpoint"] = endpoint + properties["aliyun.oss.endpoint"] = endpoint } if 
len(runtime.Spec.Fuse.Properties) > 0 { @@ -754,6 +930,22 @@ func (e *JindoCacheEngine) transformLogConfig(runtime *datav1alpha1.JindoRuntime fuseProperties[k] = v } } + if len(value.BucketSecretPaths) != 0 { + fuseProperties["aliyun.oss.provider.url"] = "secrets:///token/" + fuseProperties["fs.oss.provider.endpoint"] = "secrets:///token/" + fuseProperties["fs.oss.provider.format"] = jindoSecretProviderFormat + for bucketName, secretURI := range value.BucketSecretPaths { + setBucketSecretProviderProperties(fuseProperties, "fs", bucketName, secretURI) + fuseProperties["aliyun.oss.bucket."+bucketName+".provider.url"] = secretURI + if endpoint, ok := value.Master.FileStoreProperties[buildBucketPropertyKey(jindoCacheOSSBucketPrefix, bucketName, jindoEndpointSuffix)]; ok { + fuseProperties["fs.oss.bucket."+bucketName+".endpoint"] = endpoint + } + } + if endpoint, ok := extractSingleOSSEndpoint(value.Master.FileStoreProperties, jindoCacheOSSBucketPrefix); ok { + fuseProperties["fs.oss.endpoint"] = endpoint + fuseProperties["aliyun.oss.endpoint"] = endpoint + } + } value.LogConfig = fsxProperties value.FuseLogConfig = fuseProperties @@ -856,7 +1048,9 @@ func (e *JindoCacheEngine) transformFuseArg(runtime *datav1alpha1.JindoRuntime, fuseArgs = append(fuseArgs, "-oentry_timeout=0") fuseArgs = append(fuseArgs, "-onegative_timeout=0") } - fuseArgs = append(fuseArgs, "-ono_symlink") + if len(dataset.Spec.Mounts) <= 1 { + fuseArgs = append(fuseArgs, "-ono_symlink") + } } if runtime.Spec.Master.Disabled && runtime.Spec.Worker.Disabled { fuseArgs = append(fuseArgs, "-ouri="+dataset.Spec.Mounts[0].MountPoint) @@ -952,6 +1146,17 @@ func (e *JindoCacheEngine) transformToken(value *Jindo) { } else { properties["default.credential.provider"] = "none" } + if len(value.BucketSecretPaths) != 0 { + properties["default.credential.provider"] = "secrets:///token/" + properties["jindocache."+value.MountType+".provider.endpoint"] = "secrets:///token/" + 
properties["jindocache."+value.MountType+".provider.format"] = jindoSecretProviderFormat + } + for bucketName, secretURI := range value.BucketSecretPaths { + setBucketSecretProviderProperties(properties, "jindocache", bucketName, secretURI) + } + if endpoint, ok := extractSingleOSSEndpoint(value.Master.FileStoreProperties, jindoCacheOSSBucketPrefix); ok { + properties["jindocache.oss.endpoint"] = endpoint + } value.Master.TokenProperties = properties } diff --git a/pkg/ddc/jindocache/transform_fuse_test.go b/pkg/ddc/jindocache/transform_fuse_test.go index bc0bf4429ed..560cd92d10b 100644 --- a/pkg/ddc/jindocache/transform_fuse_test.go +++ b/pkg/ddc/jindocache/transform_fuse_test.go @@ -135,32 +135,80 @@ func TestTransformSecret(t *testing.T) { func TestTransformFuseArg(t *testing.T) { var tests = []struct { - runtime *datav1alpha1.JindoRuntime - dataset *datav1alpha1.Dataset - jindoValue *Jindo - expect string + name string + runtime *datav1alpha1.JindoRuntime + dataset *datav1alpha1.Dataset + expect string + contains bool }{ - {&datav1alpha1.JindoRuntime{ - Spec: datav1alpha1.JindoRuntimeSpec{ - Secret: "secret", - Fuse: datav1alpha1.JindoFuseSpec{ - Args: []string{"-okernel_cache"}, + { + name: "preserve explicit args", + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Secret: "secret", + Fuse: datav1alpha1.JindoFuseSpec{ + Args: []string{"-okernel_cache"}, + }, }, }, - }, &datav1alpha1.Dataset{ - Spec: datav1alpha1.DatasetSpec{ - Mounts: []datav1alpha1.Mount{{ - MountPoint: "local:///mnt/test", - Name: "test", - Path: "/", - }}, - }}, &Jindo{}, "-okernel_cache"}, + dataset: &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "local:///mnt/test", + Name: "test", + Path: "/", + }}, + }, + }, + expect: "-okernel_cache", + contains: true, + }, + { + name: "default single-mount args disable symlink", + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{}, + }, + 
dataset: &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/", + Name: "bucketa", + Path: "/bucketa", + }}, + }, + }, + expect: "-ono_symlink", + contains: true, + }, + { + name: "default multi-mount args keep symlink support", + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{}, + }, + dataset: &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + {MountPoint: "oss://bucket-a/", Name: "bucketa", Path: "/bucketa"}, + {MountPoint: "oss://bucket-b/", Name: "bucketb", Path: "/bucketb"}, + }, + }, + }, + expect: "-ono_symlink", + contains: false, + }, } for _, test := range tests { engine := &JindoCacheEngine{Log: fake.NullLogger()} properties := engine.transformFuseArg(test.runtime, test.dataset) - if properties[0] != test.expect { - t.Errorf("expected value %v, but got %v", test.expect, test.jindoValue.Fuse.RunAs) + found := false + for _, property := range properties { + if property == test.expect { + found = true + break + } + } + if found != test.contains { + t.Errorf("%s: expected contains=%v for %q, got %v in %#v", test.name, test.contains, test.expect, found, properties) } } } diff --git a/pkg/ddc/jindocache/transform_test.go b/pkg/ddc/jindocache/transform_test.go index fdaf0c7658a..a1c08c693f1 100644 --- a/pkg/ddc/jindocache/transform_test.go +++ b/pkg/ddc/jindocache/transform_test.go @@ -986,6 +986,38 @@ func TestTransformLogConfig(t *testing.T) { } } +func TestTransformLogConfigWithBucketSecretPaths(t *testing.T) { + engine := &JindoCacheEngine{Log: fake.NullLogger()} + value := &Jindo{ + BucketSecretPaths: map[string]string{ + "bucket-a": "secrets:///token/bucket-a/", + }, + Master: Master{ + FileStoreProperties: map[string]string{ + "jindocache.oss.bucket.bucket-a.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + }, + } + + engine.transformLogConfig(&datav1alpha1.JindoRuntime{}, value) + + if got := 
value.FuseLogConfig["fs.oss.provider.endpoint"]; got != "secrets:///token/" { + t.Fatalf("expected generic oss provider endpoint, got %q", got) + } + if got := value.FuseLogConfig["fs.oss.provider.format"]; got != jindoSecretProviderFormat { + t.Fatalf("expected generic oss provider format %q, got %q", jindoSecretProviderFormat, got) + } + if got := value.FuseLogConfig["fs.oss.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected generic fuse log endpoint, got %q", got) + } + if got := value.FuseLogConfig["fs.oss.bucket.bucket-a.provider.endpoint"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a fuse log provider endpoint, got %q", got) + } + if got := value.FuseLogConfig["aliyun.oss.bucket.bucket-a.provider.url"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a fuse log provider url, got %q", got) + } +} + func TestJindoCacheEngine_transformEnvVariables(t *testing.T) { type args struct { runtime *datav1alpha1.JindoRuntime @@ -1601,6 +1633,629 @@ func TestJindoEngine_transformSecret(t *testing.T) { } } +func TestJindoCacheEngine_transformMasterWithMultipleOSSEncryptOptions(t *testing.T) { + secretA := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-a", + Namespace: "fluid", + }, + Data: map[string][]byte{ + "fs.oss.accessKeyId": []byte("ak-a"), + "fs.oss.accessKeySecret": []byte("sk-a"), + }, + } + secretB := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-b", + Namespace: "fluid", + }, + Data: map[string][]byte{ + "fs.oss.accessKeyId": []byte("ak-b"), + "fs.oss.accessKeySecret": []byte("sk-b"), + }, + } + + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + client := fake.NewFakeClientWithScheme(s, secretA.DeepCopy(), secretB.DeepCopy()) + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: 
&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + { + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + { + MountPoint: "oss://bucket-b/data", + Name: "mount-b", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + }, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if _, ok := value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-a.accessKeyId"]; ok { + t.Fatalf("expected bucket-a accessKeyId to stay out of fileStoreProperties") + } + if _, ok := value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-a.accessKeySecret"]; ok { + t.Fatalf("expected bucket-a accessKeySecret to stay out of fileStoreProperties") + } + if _, ok := 
value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-a.provider.endpoint"]; ok { + t.Fatalf("expected bucket-a provider endpoint to stay out of fileStoreProperties") + } + if _, ok := value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-b.provider.endpoint"]; ok { + t.Fatalf("expected bucket-b provider endpoint to stay out of fileStoreProperties") + } + if value.Secret != "" || value.SecretKey != "" || value.SecretValue != "" { + t.Fatalf("expected mount-level OSS encryptOptions not to populate single secret fields, got secret=%q secretKey=%q secretValue=%q", value.Secret, value.SecretKey, value.SecretValue) + } + if len(value.SecretProjections) != 2 { + t.Fatalf("expected 2 grouped secret projections, got %d", len(value.SecretProjections)) + } + projectedPaths := map[string]string{} + for _, projection := range value.SecretProjections { + for _, item := range projection.Items { + projectedPaths[item.Path] = projection.Name + ":" + item.Key + } + } + if projectedPaths["bucket-a/AccessKeyId"] != "secret-a:fs.oss.accessKeyId" { + t.Fatalf("unexpected projection for bucket-a AccessKeyId: %q", projectedPaths["bucket-a/AccessKeyId"]) + } + if projectedPaths["bucket-a/AccessKeySecret"] != "secret-a:fs.oss.accessKeySecret" { + t.Fatalf("unexpected projection for bucket-a AccessKeySecret: %q", projectedPaths["bucket-a/AccessKeySecret"]) + } + if projectedPaths["bucket-b/AccessKeyId"] != "secret-b:fs.oss.accessKeyId" { + t.Fatalf("unexpected projection for bucket-b AccessKeyId: %q", projectedPaths["bucket-b/AccessKeyId"]) + } + if projectedPaths["bucket-b/AccessKeySecret"] != "secret-b:fs.oss.accessKeySecret" { + t.Fatalf("unexpected projection for bucket-b AccessKeySecret: %q", projectedPaths["bucket-b/AccessKeySecret"]) + } + + engine.transformToken(value) + if got := value.Master.TokenProperties["jindocache.oss.bucket.bucket-a.provider.endpoint"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a token provider endpoint, got %q", 
got) + } + if got := value.Master.TokenProperties["jindocache.oss.bucket.bucket-a.provider.format"]; got != jindoSecretProviderFormat { + t.Fatalf("expected bucket-a token provider format %q, got %q", jindoSecretProviderFormat, got) + } + if got := value.Master.TokenProperties["jindocache.oss.bucket.bucket-b.provider.endpoint"]; got != "secrets:///token/bucket-b/" { + t.Fatalf("expected bucket-b token provider endpoint, got %q", got) + } + + value.Worker.Port.Rpc = 6101 + engine.transformFuse(engine.runtime, value) + if got := value.Fuse.FuseProperties["fs.oss.provider.endpoint"]; got != "secrets:///token/" { + t.Fatalf("expected generic fuse provider endpoint, got %q", got) + } + if got := value.Fuse.FuseProperties["fs.oss.provider.format"]; got != jindoSecretProviderFormat { + t.Fatalf("expected generic fuse provider format %q, got %q", jindoSecretProviderFormat, got) + } + if got := value.Fuse.FuseProperties["fs.oss.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected generic fuse endpoint, got %q", got) + } + if got := value.Fuse.FuseProperties["aliyun.oss.bucket.bucket-a.provider.url"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a fuse provider url, got %q", got) + } + if got := value.Fuse.FuseProperties["fs.oss.bucket.bucket-b.credentials.provider"]; got != jindoOSSCredentialsProvider { + t.Fatalf("expected bucket-b fuse credentials provider %q, got %q", jindoOSSCredentialsProvider, got) + } +} + +func TestJindoCacheEngine_transformMasterDedupesSameBucketSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := 
&datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + { + MountPoint: "oss://bucket-a/data-1", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + { + MountPoint: "oss://bucket-a/data-2", + Name: "mount-b", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + }, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if len(value.SecretProjections) != 1 { + t.Fatalf("expected same-bucket projections to dedupe to 1 grouped entry, got %d", len(value.SecretProjections)) + } +} + +func TestJindoCacheEngine_transformMasterRejectsConflictingSameBucketSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: 
fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + { + MountPoint: "oss://bucket-a/data-1", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + { + MountPoint: "oss://bucket-a/data-2", + Name: "mount-b", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + }, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, true); err == nil { + t.Fatalf("expected transformMaster() to reject conflicting same-bucket secret projections") + } +} + +func TestJindoCacheEngine_transformMasterSupportsInlineOSSCredentialsCompatibility(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: 
fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + "fs.oss.accessKeyId": "inline-ak", + "fs.oss.accessKeySecret": "inline-sk", + }, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if got := value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-a.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected bucket-a endpoint to be preserved, got %q", got) + } + if got := value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-a.accessKeyId"]; got != "inline-ak" { + t.Fatalf("expected inline bucket-a accessKeyId to be preserved, got %q", got) + } + if got := value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-a.accessKeySecret"]; got != "inline-sk" { + t.Fatalf("expected inline bucket-a accessKeySecret to be preserved, got %q", got) + } + if len(value.SecretProjections) != 0 { + t.Fatalf("expected no secret projections for inline credentials, got %d", len(value.SecretProjections)) + } +} + +func TestJindoCacheEngine_transformMasterIgnoresNonOSSCredentialEncryptOptionForBucketSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, 
+ }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.sessionToken", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "token"}, + }, + }}, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if len(value.SecretProjections) != 0 { + t.Fatalf("expected no secret projections for non-AK/SK encryptOptions, got %d", len(value.SecretProjections)) + } + if len(value.BucketSecretPaths) != 0 { + t.Fatalf("expected no bucket secret paths for non-AK/SK encryptOptions, got %#v", value.BucketSecretPaths) + } +} + +func TestJindoCacheEngine_transformMasterReturnsErrorWhenReferencedSecretMissing(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "missing-secret", Key: "fs.oss.accessKeyId"}, + }, + }}, + }}, + }, + 
} + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, false); err == nil { + t.Fatalf("expected transformMaster() to fail when the referenced secret is missing") + } +} + +func TestJindoCacheEngine_transformMasterRejectsEmptyReferencedSecretKey(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, true); err == nil { + t.Fatalf("expected transformMaster() to fail when the referenced secret key is empty") + } +} + +func TestJindoCacheEngine_transformMasterReturnsErrorWhenReferencedSecretDataKeyMissing(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-a", + Namespace: "fluid", + }, + Data: map[string][]byte{ + "unexpected-key": 
[]byte("ak-a"), + }, + } + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s, secret.DeepCopy()), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }}, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, false); err == nil { + t.Fatalf("expected transformMaster() to fail when the referenced secret key is missing from secret data") + } +} + +func TestJindoCacheEngine_transformMasterAcceptsBucketRootMountPoint(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-root", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if got := 
value.Master.FileStoreProperties["jindocache.oss.bucket.bucket-root.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected bucket-root endpoint to be preserved, got %q", got) + } +} + +func TestJindoCacheEngine_transformMasterRejectsMixedOSSInlineAndSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoCacheEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + "fs.oss.accessKeySecret": "inline-sk", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "custom-ak"}, + }, + }}, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, true); err == nil { + t.Fatalf("expected transformMaster() to reject mixed inline and secret-projected OSS credentials") + } +} + func TestJindoEngine_transformMountpoint(t *testing.T) { jindocacheSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/ddc/jindocache/types.go b/pkg/ddc/jindocache/types.go index 0515726510d..518f0aceed8 100644 --- a/pkg/ddc/jindocache/types.go +++ b/pkg/ddc/jindocache/types.go @@ -51,6 +51,8 @@ type Jindo struct { UFSVolumes []UFSVolume `json:"ufsVolumes,omitempty"` SecretKey string `json:"secretKey,omitempty"` SecretValue string `json:"secretValue,omitempty"` + SecretProjections []corev1.SecretProjection 
`json:"secretProjections,omitempty"` + BucketSecretPaths map[string]string `json:"bucketSecretPaths,omitempty"` UseStsToken bool `json:"UseStsToken"` MountType string `json:"mountType,omitempty"` ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` diff --git a/pkg/ddc/jindofsx/transform.go b/pkg/ddc/jindofsx/transform.go index 0ba531baf19..f9b3a1231ce 100644 --- a/pkg/ddc/jindofsx/transform.go +++ b/pkg/ddc/jindofsx/transform.go @@ -22,7 +22,6 @@ import ( "fmt" "os" "reflect" - "regexp" "strconv" "strings" "time" @@ -50,6 +49,105 @@ type smartdataConfig struct { dnsServer string } +const ( + jindoOSSCredentialsProvider = "com.aliyun.jindodata.oss.auth.CustomCredentialsProvider" + jindoSecretProviderFormat = "JSON" + jindoSecretMountPath = "/token" + jindoFSxOSSBucketPrefix = "jindofsx.oss.bucket." + jindoAccessKeyIDSuffix = ".accessKeyId" + jindoAccessKeySecretSuffix = ".accessKeySecret" + jindoEndpointSuffix = ".endpoint" + jindoDLSStorageEnableSuffix = ".data.lake.storage.enable" +) + +func buildBucketSecretURI(bucketName string) string { + return fmt.Sprintf("secrets://%s/%s/", jindoSecretMountPath, bucketName) +} + +func validateSecretKeyRef(secretKeyRef datav1alpha1.SecretKeySelector, optionName, mountPoint string) error { + if secretKeyRef.Name == "" || secretKeyRef.Key == "" { + return fmt.Errorf("encryptOption %s for mount %s must reference both secret name and key", optionName, mountPoint) + } + + return nil +} + +func getSecretDataValue(secret *corev1.Secret, secretName, secretKey string) (string, error) { + value, ok := secret.Data[secretKey] + if !ok { + return "", fmt.Errorf("secret %s does not contain key %s", secretName, secretKey) + } + + return string(value), nil +} + +func appendSecretProjection(projections []corev1.SecretProjection, secretName, secretKey, itemPath string) ([]corev1.SecretProjection, error) { + for i, projection := range projections { + for _, item := range projection.Items { + if item.Path != 
itemPath { + continue + } + if projection.Name == secretName && item.Key == secretKey { + return projections, nil + } + return nil, fmt.Errorf("conflicting secret projection for %s", itemPath) + } + if projection.Name == secretName { + projections[i].Items = append(projections[i].Items, corev1.KeyToPath{ + Key: secretKey, + Path: itemPath, + }) + return projections, nil + } + } + + return append(projections, corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretName, + }, + Items: []corev1.KeyToPath{{ + Key: secretKey, + Path: itemPath, + }}, + }), nil +} + +func setBucketSecretProviderProperties(properties map[string]string, prefix, bucketName, secretURI string) { + properties[fmt.Sprintf("%s.oss.bucket.%s.credentials.provider", prefix, bucketName)] = jindoOSSCredentialsProvider + properties[fmt.Sprintf("%s.oss.bucket.%s.provider.endpoint", prefix, bucketName)] = secretURI + properties[fmt.Sprintf("%s.oss.bucket.%s.provider.format", prefix, bucketName)] = jindoSecretProviderFormat +} + +func buildBucketPropertyKey(prefix, bucketName, suffix string) string { + return prefix + bucketName + suffix +} + +func extractSingleOSSEndpoint(properties map[string]string, prefix string) (string, bool) { + var endpoint string + for key, value := range properties { + if !strings.HasPrefix(key, prefix) || !strings.HasSuffix(key, jindoEndpointSuffix) { + continue + } + if endpoint == "" { + endpoint = value + continue + } + if endpoint != value { + return "", false + } + } + return endpoint, endpoint != "" +} + +func parseOSSMountBucket(mountPoint string) (string, error) { + bucket, _, _ := strings.Cut(strings.TrimPrefix(mountPoint, "oss://"), "/") + if bucket == "" { + return "", fmt.Errorf("incorrect oss mountPoint with %v, please check your path is dir or file ", mountPoint) + } + + return bucket, nil +} + func (e *JindoFSxEngine) transform(runtime *datav1alpha1.JindoRuntime) (value *Jindo, err error) { if runtime == nil { err = 
fmt.Errorf("the jindoRuntime is null") @@ -309,29 +407,43 @@ func (e *JindoFSxEngine) transformMaster(runtime *datav1alpha1.JindoRuntime, met } mountType := "oss" + ossBucketName := "" if strings.HasPrefix(mount.MountPoint, "oss://") { - var re = regexp.MustCompile(`(oss://(.*?))(/)`) - rm := re.FindStringSubmatch(mount.MountPoint) - if len(rm) < 3 { - err = fmt.Errorf("incorrect oss mountPoint with %v, please check your path is dir or file ", mount.MountPoint) + ossBucketName, err = parseOSSMountBucket(mount.MountPoint) + if err != nil { e.Log.Error(err, "mount.MountPoint", mount.MountPoint) return } - bucketName := rm[2] - if mount.Options["fs.oss.accessKeyId"] != "" { - propertiesFileStore["jindofsx.oss.bucket."+bucketName+".accessKeyId"] = mount.Options["fs.oss.accessKeyId"] - } - if mount.Options["fs.oss.accessKeySecret"] != "" { - propertiesFileStore["jindofsx.oss.bucket."+bucketName+".accessKeySecret"] = mount.Options["fs.oss.accessKeySecret"] - } if mount.Options["fs.oss.endpoint"] == "" { - err = fmt.Errorf("oss endpoint can not be null, please check option") + err = fmt.Errorf("oss endpoint can not be null, please check option") e.Log.Error(err, "oss endpoint can not be null") return } - propertiesFileStore["jindofsx.oss.bucket."+bucketName+".endpoint"] = mount.Options["fs.oss.endpoint"] + propertiesFileStore[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, ossBucketName, jindoEndpointSuffix)] = mount.Options["fs.oss.endpoint"] if strings.Contains(mount.Options["fs.oss.endpoint"], "dls") { - propertiesFileStore["jindofsx.oss.bucket."+bucketName+".data.lake.storage.enable"] = "true" + propertiesFileStore[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, ossBucketName, jindoDLSStorageEnableSuffix)] = "true" + } + + hasOSSAccessKeyIDRef := false + hasOSSAccessKeySecretRef := false + for _, encryptOption := range mount.EncryptOptions { + switch encryptOption.Name { + case "fs.oss.accessKeyId": + hasOSSAccessKeyIDRef = true + case "fs.oss.accessKeySecret": + 
hasOSSAccessKeySecretRef = true + } + } + if secretMountSupport && hasOSSAccessKeyIDRef != hasOSSAccessKeySecretRef { + err = fmt.Errorf("oss bucket %s must configure both fs.oss.accessKeyId and fs.oss.accessKeySecret in encryptOptions when secret mount is enabled", ossBucketName) + e.Log.Error(err, "mount.MountPoint", mount.MountPoint) + return + } + if !hasOSSAccessKeyIDRef && mount.Options["fs.oss.accessKeyId"] != "" { + propertiesFileStore[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, ossBucketName, jindoAccessKeyIDSuffix)] = mount.Options["fs.oss.accessKeyId"] + } + if !hasOSSAccessKeySecretRef && mount.Options["fs.oss.accessKeySecret"] != "" { + propertiesFileStore[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, ossBucketName, jindoAccessKeySecretSuffix)] = mount.Options["fs.oss.accessKeySecret"] } } @@ -392,30 +504,81 @@ func (e *JindoFSxEngine) transformMaster(runtime *datav1alpha1.JindoRuntime, met for _, encryptOption := range mount.EncryptOptions { key := encryptOption.Name secretKeyRef := encryptOption.ValueFrom.SecretKeyRef + if err = validateSecretKeyRef(secretKeyRef, key, mount.MountPoint); err != nil { + e.Log.Error(err, "invalid encryptOption secret reference", "key", key, "mountPoint", mount.MountPoint) + return err + } + if mountType == "oss" && ossBucketName != "" { + if secretMountSupport { + itemPath := "" + switch key { + case "fs.oss.accessKeyId": + itemPath = ossBucketName + "/AccessKeyId" + case "fs.oss.accessKeySecret": + itemPath = ossBucketName + "/AccessKeySecret" + } + if itemPath != "" { + secretURI := buildBucketSecretURI(ossBucketName) + if value.BucketSecretPaths == nil { + value.BucketSecretPaths = map[string]string{} + } + value.BucketSecretPaths[ossBucketName] = secretURI + value.SecretProjections, err = appendSecretProjection(value.SecretProjections, secretKeyRef.Name, secretKeyRef.Key, itemPath) + if err != nil { + return err + } + e.Log.Info("Configure OSS bucket credential projection", "bucket", ossBucketName, 
"secretName", secretKeyRef.Name, "key", key) + } + continue + } + + secret, err := kubeclient.GetSecret(e.Client, secretKeyRef.Name, e.namespace) + if err != nil { + e.Log.Error(err, "can't get the input secret from dataset", "secretName", secretKeyRef.Name) + return err + } + secretValue, err := getSecretDataValue(secret, secretKeyRef.Name, secretKeyRef.Key) + if err != nil { + e.Log.Error(err, "missing key in referenced secret", "secretName", secretKeyRef.Name, "key", key) + return err + } + if key == "fs.oss.accessKeyId" { + propertiesFileStore[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, ossBucketName, jindoAccessKeyIDSuffix)] = secretValue + } + if key == "fs.oss.accessKeySecret" { + propertiesFileStore[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, ossBucketName, jindoAccessKeySecretSuffix)] = secretValue + } + e.Log.Info("Get OSS bucket credential from Secret successfully", "bucket", ossBucketName, "secretName", secretKeyRef.Name, "key", key) + continue + } if secretMountSupport { value.Secret = secretKeyRef.Name if key == "fs."+mountType+".accessKeyId" { - value.SecretKey = key - e.Log.Info("Get %s From %s!", key, secretKeyRef.Name) + value.SecretKey = secretKeyRef.Key + e.Log.Info("Get credential from Secret successfully", "key", key, "secretName", secretKeyRef.Name) } if key == "fs."+mountType+".accessKeySecret" { - value.SecretValue = key - e.Log.Info("Get %s From %s!", key, secretKeyRef.Name) + value.SecretValue = secretKeyRef.Key + e.Log.Info("Get credential from Secret successfully", "key", key, "secretName", secretKeyRef.Name) } } else { secret, err := kubeclient.GetSecret(e.Client, secretKeyRef.Name, e.namespace) if err != nil { - e.Log.Info("can't get the input secret from dataset", secretKeyRef.Name) - break + e.Log.Error(err, "can't get the input secret from dataset", "secretName", secretKeyRef.Name) + return err + } + secretValue, err := getSecretDataValue(secret, secretKeyRef.Name, secretKeyRef.Key) + if err != nil { + e.Log.Error(err, 
"missing key in referenced secret", "secretName", secretKeyRef.Name, "key", key) + return err } - value := secret.Data[secretKeyRef.Key] if key == "fs."+mountType+".accessKeyId" { - propertiesFileStore["jindofsx."+mountType+".accessKeyId"] = string(value) + propertiesFileStore["jindofsx."+mountType+".accessKeyId"] = secretValue } if key == "fs."+mountType+".accessKeySecret" { - propertiesFileStore["jindofsx."+mountType+".accessKeySecret"] = string(value) + propertiesFileStore["jindofsx."+mountType+".accessKeySecret"] = secretValue } - e.Log.Info("Get Credential From Secret Successfully") + e.Log.Info("Get credential from Secret successfully", "key", key, "secretName", secretKeyRef.Name) } } value.MountType = mountType @@ -728,10 +891,25 @@ func (e *JindoFSxEngine) transformFuse(runtime *datav1alpha1.JindoRuntime, value } // set secret if len(value.Secret) != 0 { - properties["fs."+value.MountType+".credentials.provider"] = "com.aliyun.jindodata.oss.auth.CustomCredentialsProvider" + properties["fs."+value.MountType+".credentials.provider"] = jindoOSSCredentialsProvider + properties["aliyun."+value.MountType+".provider.url"] = "secrets:///token/" + properties["fs."+value.MountType+".provider.endpoint"] = "secrets:///token/" + properties["fs."+value.MountType+".provider.format"] = jindoSecretProviderFormat + } + if len(value.BucketSecretPaths) != 0 { + properties["fs."+value.MountType+".credentials.provider"] = jindoOSSCredentialsProvider properties["aliyun."+value.MountType+".provider.url"] = "secrets:///token/" properties["fs."+value.MountType+".provider.endpoint"] = "secrets:///token/" - properties["fs."+value.MountType+".provider.format"] = "JSON" + properties["fs."+value.MountType+".provider.format"] = jindoSecretProviderFormat + } + for bucketName, secretURI := range value.BucketSecretPaths { + setBucketSecretProviderProperties(properties, "fs", bucketName, secretURI) + properties["aliyun.oss.bucket."+bucketName+".provider.url"] = secretURI + } + if endpoint, ok 
:= extractSingleOSSEndpoint(value.Master.FileStoreProperties, jindoFSxOSSBucketPrefix); ok { + properties["fs.oss.endpoint"] = endpoint + properties["jindofsx.oss.endpoint"] = endpoint + properties["aliyun.oss.endpoint"] = endpoint } if len(runtime.Spec.Fuse.Properties) > 0 { @@ -778,6 +956,22 @@ func (e *JindoFSxEngine) transformLogConfig(runtime *datav1alpha1.JindoRuntime, fusePropreties[k] = v } } + if len(value.BucketSecretPaths) != 0 { + fusePropreties["aliyun.oss.provider.url"] = "secrets:///token/" + fusePropreties["fs.oss.provider.endpoint"] = "secrets:///token/" + fusePropreties["fs.oss.provider.format"] = jindoSecretProviderFormat + for bucketName, secretURI := range value.BucketSecretPaths { + setBucketSecretProviderProperties(fusePropreties, "fs", bucketName, secretURI) + fusePropreties["aliyun.oss.bucket."+bucketName+".provider.url"] = secretURI + if endpoint, ok := value.Master.FileStoreProperties[buildBucketPropertyKey(jindoFSxOSSBucketPrefix, bucketName, jindoEndpointSuffix)]; ok { + fusePropreties["fs.oss.bucket."+bucketName+".endpoint"] = endpoint + } + } + if endpoint, ok := extractSingleOSSEndpoint(value.Master.FileStoreProperties, jindoFSxOSSBucketPrefix); ok { + fusePropreties["fs.oss.endpoint"] = endpoint + fusePropreties["aliyun.oss.endpoint"] = endpoint + } + } value.LogConfig = fsxProperties value.FuseLogConfig = fusePropreties @@ -976,6 +1170,17 @@ func (e *JindoFSxEngine) transformToken(value *Jindo) { } else { properties["default.credential.provider"] = "none" } + if len(value.BucketSecretPaths) != 0 { + properties["default.credential.provider"] = "secrets:///token/" + properties["jindofsx."+value.MountType+".provider.endpoint"] = "secrets:///token/" + properties["jindofsx."+value.MountType+".provider.format"] = jindoSecretProviderFormat + } + for bucketName, secretURI := range value.BucketSecretPaths { + setBucketSecretProviderProperties(properties, "jindofsx", bucketName, secretURI) + } + if endpoint, ok := 
extractSingleOSSEndpoint(value.Master.FileStoreProperties, jindoFSxOSSBucketPrefix); ok { + properties["jindofsx.oss.endpoint"] = endpoint + } value.Master.TokenProperties = properties } diff --git a/pkg/ddc/jindofsx/transform_master_test.go b/pkg/ddc/jindofsx/transform_master_test.go index e0ab854c921..401d987d512 100644 --- a/pkg/ddc/jindofsx/transform_master_test.go +++ b/pkg/ddc/jindofsx/transform_master_test.go @@ -24,6 +24,9 @@ import ( datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1" "github.com/fluid-cloudnative/fluid/pkg/utils/fake" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) func TestTransformToken(t *testing.T) { @@ -85,3 +88,685 @@ func TestTransformMasterMountPath(t *testing.T) { } } } + +func TestJindoFSxEngine_transformMasterWithMultipleOSSEncryptOptions(t *testing.T) { + secretA := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-a", + Namespace: "fluid", + }, + Data: map[string][]byte{ + "fs.oss.accessKeyId": []byte("ak-a"), + "fs.oss.accessKeySecret": []byte("sk-a"), + }, + } + secretB := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-b", + Namespace: "fluid", + }, + Data: map[string][]byte{ + "fs.oss.accessKeyId": []byte("ak-b"), + "fs.oss.accessKeySecret": []byte("sk-b"), + }, + } + + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + client := fake.NewFakeClientWithScheme(s, secretA.DeepCopy(), secretB.DeepCopy()) + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: client, + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + { + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: 
map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + { + MountPoint: "oss://bucket-b/data", + Name: "mount-b", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + }, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if _, ok := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-a.accessKeyId"]; ok { + t.Fatalf("expected bucket-a accessKeyId to stay out of fileStoreProperties") + } + if _, ok := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-a.accessKeySecret"]; ok { + t.Fatalf("expected bucket-a accessKeySecret to stay out of fileStoreProperties") + } + if _, ok := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-a.provider.endpoint"]; ok { + t.Fatalf("expected bucket-a provider endpoint to stay out of fileStoreProperties") + } + if _, ok := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-b.provider.endpoint"]; ok { + t.Fatalf("expected bucket-b provider endpoint to stay 
out of fileStoreProperties") + } + if value.Secret != "" || value.SecretKey != "" || value.SecretValue != "" { + t.Fatalf("expected mount-level OSS encryptOptions not to populate single secret fields, got secret=%q secretKey=%q secretValue=%q", value.Secret, value.SecretKey, value.SecretValue) + } + if len(value.SecretProjections) != 2 { + t.Fatalf("expected 2 grouped secret projections, got %d", len(value.SecretProjections)) + } + projectedPaths := map[string]string{} + for _, projection := range value.SecretProjections { + for _, item := range projection.Items { + projectedPaths[item.Path] = projection.Name + ":" + item.Key + } + } + if projectedPaths["bucket-a/AccessKeyId"] != "secret-a:fs.oss.accessKeyId" { + t.Fatalf("unexpected projection for bucket-a AccessKeyId: %q", projectedPaths["bucket-a/AccessKeyId"]) + } + if projectedPaths["bucket-a/AccessKeySecret"] != "secret-a:fs.oss.accessKeySecret" { + t.Fatalf("unexpected projection for bucket-a AccessKeySecret: %q", projectedPaths["bucket-a/AccessKeySecret"]) + } + if projectedPaths["bucket-b/AccessKeyId"] != "secret-b:fs.oss.accessKeyId" { + t.Fatalf("unexpected projection for bucket-b AccessKeyId: %q", projectedPaths["bucket-b/AccessKeyId"]) + } + if projectedPaths["bucket-b/AccessKeySecret"] != "secret-b:fs.oss.accessKeySecret" { + t.Fatalf("unexpected projection for bucket-b AccessKeySecret: %q", projectedPaths["bucket-b/AccessKeySecret"]) + } + + engine.transformToken(value) + if got := value.Master.TokenProperties["jindofsx.oss.bucket.bucket-a.provider.endpoint"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a token provider endpoint, got %q", got) + } + if got := value.Master.TokenProperties["jindofsx.oss.bucket.bucket-a.provider.format"]; got != jindoSecretProviderFormat { + t.Fatalf("expected bucket-a token provider format %q, got %q", jindoSecretProviderFormat, got) + } + if got := value.Master.TokenProperties["jindofsx.oss.bucket.bucket-b.provider.endpoint"]; got != 
"secrets:///token/bucket-b/" { + t.Fatalf("expected bucket-b token provider endpoint, got %q", got) + } + + value.Worker.Port.Rpc = 6101 + engine.transformFuse(engine.runtime, value) + if got := value.Fuse.FuseProperties["fs.oss.provider.endpoint"]; got != "secrets:///token/" { + t.Fatalf("expected generic fuse provider endpoint, got %q", got) + } + if got := value.Fuse.FuseProperties["fs.oss.provider.format"]; got != jindoSecretProviderFormat { + t.Fatalf("expected generic fuse provider format %q, got %q", jindoSecretProviderFormat, got) + } + if got := value.Fuse.FuseProperties["fs.oss.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected generic fuse endpoint, got %q", got) + } + if got := value.Fuse.FuseProperties["aliyun.oss.bucket.bucket-a.provider.url"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a fuse provider url, got %q", got) + } + if got := value.Fuse.FuseProperties["fs.oss.bucket.bucket-b.credentials.provider"]; got != jindoOSSCredentialsProvider { + t.Fatalf("expected bucket-b fuse credentials provider %q, got %q", jindoOSSCredentialsProvider, got) + } +} + +func TestJindoFSxEngine_transformMasterDedupesSameBucketSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + { + MountPoint: "oss://bucket-a/data-1", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: 
datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + { + MountPoint: "oss://bucket-a/data-2", + Name: "mount-b", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + }, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if len(value.SecretProjections) != 1 { + t.Fatalf("expected same-bucket projections to dedupe to 1 grouped entry, got %d", len(value.SecretProjections)) + } +} + +func TestJindoFSxEngine_transformMasterRejectsConflictingSameBucketSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{ + { + MountPoint: "oss://bucket-a/data-1", + Name: "mount-a", + Options: map[string]string{ + 
"fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + { + MountPoint: "oss://bucket-a/data-2", + Name: "mount-b", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeyId"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-b", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }, + }, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, true); err == nil { + t.Fatalf("expected transformMaster() to reject conflicting same-bucket secret projections") + } +} + +func TestJindoFSxEngine_transformMasterSupportsInlineOSSCredentialsCompatibility(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: 
map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + "fs.oss.accessKeyId": "inline-ak", + "fs.oss.accessKeySecret": "inline-sk", + }, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if got := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-a.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected bucket-a endpoint to be preserved, got %q", got) + } + if got := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-a.accessKeyId"]; got != "inline-ak" { + t.Fatalf("expected inline bucket-a accessKeyId to be preserved, got %q", got) + } + if got := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-a.accessKeySecret"]; got != "inline-sk" { + t.Fatalf("expected inline bucket-a accessKeySecret to be preserved, got %q", got) + } + if len(value.SecretProjections) != 0 { + t.Fatalf("expected no secret projections for inline credentials, got %d", len(value.SecretProjections)) + } +} + +func TestJindoFSxEngine_transformMasterIgnoresNonOSSCredentialEncryptOptionForBucketSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.sessionToken", + ValueFrom: 
datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "token"}, + }, + }}, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if len(value.SecretProjections) != 0 { + t.Fatalf("expected no secret projections for non-AK/SK encryptOptions, got %d", len(value.SecretProjections)) + } + if len(value.BucketSecretPaths) != 0 { + t.Fatalf("expected no bucket secret paths for non-AK/SK encryptOptions, got %#v", value.BucketSecretPaths) + } +} + +func TestJindoFSxEngine_transformMasterUsesReferencedSecretKeysForNonOSSMounts(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "s3://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.s3.endpoint": "s3.us-west-1.amazonaws.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.s3.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "custom-ak"}, + }, + }, + { + Name: "fs.s3.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "custom-sk"}, + }, + }, + }, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if 
value.Secret != "secret-a" { + t.Fatalf("expected secret name %q, got %q", "secret-a", value.Secret) + } + if value.SecretKey != "custom-ak" { + t.Fatalf("expected SecretKey to use referenced key %q, got %q", "custom-ak", value.SecretKey) + } + if value.SecretValue != "custom-sk" { + t.Fatalf("expected SecretValue to use referenced key %q, got %q", "custom-sk", value.SecretValue) + } +} + +func TestJindoFSxEngine_transformMasterReturnsErrorWhenReferencedSecretMissing(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "missing-secret", Key: "fs.oss.accessKeyId"}, + }, + }}, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, false); err == nil { + t.Fatalf("expected transformMaster() to fail when the referenced secret is missing") + } +} + +func TestJindoFSxEngine_transformMasterRejectsEmptyReferencedSecretKey(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: 
&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: []datav1alpha1.EncryptOption{ + { + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a"}, + }, + }, + { + Name: "fs.oss.accessKeySecret", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeySecret"}, + }, + }, + }, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, true); err == nil { + t.Fatalf("expected transformMaster() to fail when the referenced secret key is empty") + } +} + +func TestJindoFSxEngine_transformMasterReturnsErrorWhenReferencedSecretDataKeyMissing(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-a", + Namespace: "fluid", + }, + Data: map[string][]byte{ + "unexpected-key": []byte("ak-a"), + }, + } + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s, secret.DeepCopy()), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + EncryptOptions: 
[]datav1alpha1.EncryptOption{{ + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "fs.oss.accessKeyId"}, + }, + }}, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, false); err == nil { + t.Fatalf("expected transformMaster() to fail when the referenced secret key is missing from secret data") + } +} + +func TestJindoFSxEngine_transformMasterAcceptsBucketRootMountPoint(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: &datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-root", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + }}, + }, + } + + value := &Jindo{} + if err := engine.transformMaster(engine.runtime, "/test", value, dataset, true); err != nil { + t.Fatalf("transformMaster() error = %v", err) + } + + if got := value.Master.FileStoreProperties["jindofsx.oss.bucket.bucket-root.endpoint"]; got != "oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected bucket-root endpoint to be preserved, got %q", got) + } +} + +func TestJindoFSxEngine_transformMasterRejectsMixedOSSInlineAndSecretProjection(t *testing.T) { + s := runtime.NewScheme() + s.AddKnownTypes(datav1alpha1.GroupVersion, &datav1alpha1.JindoRuntime{}, &datav1alpha1.Dataset{}) + _ = corev1.AddToScheme(s) + + engine := JindoFSxEngine{ + name: "test", + namespace: "fluid", + Client: fake.NewFakeClientWithScheme(s), + Log: fake.NullLogger(), + runtime: 
&datav1alpha1.JindoRuntime{ + Spec: datav1alpha1.JindoRuntimeSpec{ + Fuse: datav1alpha1.JindoFuseSpec{}, + }, + }, + } + + dataset := &datav1alpha1.Dataset{ + Spec: datav1alpha1.DatasetSpec{ + Mounts: []datav1alpha1.Mount{{ + MountPoint: "oss://bucket-a/data", + Name: "mount-a", + Options: map[string]string{ + "fs.oss.endpoint": "oss-cn-shanghai.aliyuncs.com", + "fs.oss.accessKeySecret": "inline-sk", + }, + EncryptOptions: []datav1alpha1.EncryptOption{{ + Name: "fs.oss.accessKeyId", + ValueFrom: datav1alpha1.EncryptOptionSource{ + SecretKeyRef: datav1alpha1.SecretKeySelector{Name: "secret-a", Key: "custom-ak"}, + }, + }}, + }}, + }, + } + + if err := engine.transformMaster(engine.runtime, "/test", &Jindo{}, dataset, true); err == nil { + t.Fatalf("expected transformMaster() to reject mixed inline and secret-projected OSS credentials") + } +} diff --git a/pkg/ddc/jindofsx/transform_test.go b/pkg/ddc/jindofsx/transform_test.go index 694e5a11128..4acc1995a12 100644 --- a/pkg/ddc/jindofsx/transform_test.go +++ b/pkg/ddc/jindofsx/transform_test.go @@ -941,6 +941,38 @@ func TestTransformLogConfig(t *testing.T) { } } +func TestTransformLogConfigWithBucketSecretPaths(t *testing.T) { + engine := &JindoFSxEngine{Log: fake.NullLogger()} + value := &Jindo{ + BucketSecretPaths: map[string]string{ + "bucket-a": "secrets:///token/bucket-a/", + }, + Master: Master{ + FileStoreProperties: map[string]string{ + "jindofsx.oss.bucket.bucket-a.endpoint": "oss-cn-shanghai.aliyuncs.com", + }, + }, + } + + engine.transformLogConfig(&datav1alpha1.JindoRuntime{}, value) + + if got := value.FuseLogConfig["fs.oss.provider.endpoint"]; got != "secrets:///token/" { + t.Fatalf("expected generic oss provider endpoint, got %q", got) + } + if got := value.FuseLogConfig["fs.oss.provider.format"]; got != jindoSecretProviderFormat { + t.Fatalf("expected generic oss provider format %q, got %q", jindoSecretProviderFormat, got) + } + if got := value.FuseLogConfig["fs.oss.endpoint"]; got != 
"oss-cn-shanghai.aliyuncs.com" { + t.Fatalf("expected generic fuse log endpoint, got %q", got) + } + if got := value.FuseLogConfig["fs.oss.bucket.bucket-a.provider.endpoint"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a fuse log provider endpoint, got %q", got) + } + if got := value.FuseLogConfig["aliyun.oss.bucket.bucket-a.provider.url"]; got != "secrets:///token/bucket-a/" { + t.Fatalf("expected bucket-a fuse log provider url, got %q", got) + } +} + func TestJindoFSxEngine_transformEnvVariables(t *testing.T) { type args struct { runtime *datav1alpha1.JindoRuntime diff --git a/pkg/ddc/jindofsx/types.go b/pkg/ddc/jindofsx/types.go index 0e8650b2eb6..dc41b642032 100644 --- a/pkg/ddc/jindofsx/types.go +++ b/pkg/ddc/jindofsx/types.go @@ -51,6 +51,8 @@ type Jindo struct { UFSVolumes []UFSVolume `json:"ufsVolumes,omitempty"` SecretKey string `json:"secretKey,omitempty"` SecretValue string `json:"secretValue,omitempty"` + SecretProjections []corev1.SecretProjection `json:"secretProjections,omitempty"` + BucketSecretPaths map[string]string `json:"bucketSecretPaths,omitempty"` UseStsToken bool `json:"UseStsToken"` MountType string `json:"mountType,omitempty"` ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` diff --git a/test/gha-e2e/jindo/job.yaml b/test/gha-e2e/jindo/job.yaml index f65d6ea30ce..56732b9f7ae 100644 --- a/test/gha-e2e/jindo/job.yaml +++ b/test/gha-e2e/jindo/job.yaml @@ -9,14 +9,25 @@ spec: restartPolicy: Never containers: - name: busybox - image: busybox + image: registry-cn-hongkong.ack.aliyuncs.com/acs/smartdata:6.9.1-202509151826 + imagePullPolicy: IfNotPresent resources: limits: ephemeral-storage: "5Gi" command: ["/bin/sh"] args: - -c - - set -ex; test -n "$(ls /data)" && test "$(cat /data/testfile)" = "helloworld" + - | + set -eu + for i in $(seq 1 24); do + if test -n "$(ls /data 2>/dev/null)" && test "$(cat /data/testfile 2>/dev/null)" = "helloworld"; then + exit 0 + fi + sleep 5 + done + ls 
-al /data >&2 || true + cat /data/testfile >&2 || true + exit 1 volumeMounts: - mountPath: /data name: fluid-vol diff --git a/test/gha-e2e/jindo/minio.yaml b/test/gha-e2e/jindo/minio.yaml index 0dc594505c6..ee20ed2ccc0 100644 --- a/test/gha-e2e/jindo/minio.yaml +++ b/test/gha-e2e/jindo/minio.yaml @@ -2,6 +2,9 @@ apiVersion: v1 kind: Service metadata: name: minio + labels: + app: minio + app.kubernetes.io/part-of: jindo-e2e-minio spec: type: ClusterIP clusterIP: None @@ -11,17 +14,22 @@ spec: protocol: TCP selector: app: minio + app.kubernetes.io/part-of: jindo-e2e-minio --- apiVersion: apps/v1 kind: Deployment metadata: # This name uniquely identifies the Deployment name: minio + labels: + app: minio + app.kubernetes.io/part-of: jindo-e2e-minio spec: replicas: 1 selector: matchLabels: app: minio + app.kubernetes.io/part-of: jindo-e2e-minio strategy: type: Recreate template: @@ -29,13 +37,15 @@ spec: labels: # Label is used as selector in the service. app: minio + app.kubernetes.io/part-of: jindo-e2e-minio spec: hostname: mybucket subdomain: minio containers: - name: minio # Pulls the default Minio image from Docker Hub - image: minio/minio + image: local/minio-e2e:latest + imagePullPolicy: IfNotPresent resources: limits: memory: "512Mi" @@ -57,5 +67,138 @@ spec: value: "minioadmin" ports: - containerPort: 9000 - hostPort: 9000 + automountServiceAccountToken: false +--- +apiVersion: v1 +kind: Service +metadata: + name: minio-a + labels: + app: minio-a + app.kubernetes.io/part-of: jindo-e2e-minio +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + selector: + app: minio-a + app.kubernetes.io/part-of: jindo-e2e-minio +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio-a + labels: + app: minio-a + app.kubernetes.io/part-of: jindo-e2e-minio +spec: + replicas: 1 + selector: + matchLabels: + app: minio-a + app.kubernetes.io/part-of: jindo-e2e-minio + strategy: + type: Recreate + template: + metadata: + 
labels: + app: minio-a + app.kubernetes.io/part-of: jindo-e2e-minio + spec: + hostname: bucketa + subdomain: minio-a + containers: + - name: minio + image: local/minio-e2e:latest + imagePullPolicy: IfNotPresent + resources: + limits: + memory: "512Mi" + args: + - server + - /data + startupProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 1 + periodSeconds: 3 + env: + - name: MINIO_DOMAIN + value: minio-a.default.svc.cluster.local + - name: MINIO_ROOT_USER + value: "bucketaadmin" + - name: MINIO_ROOT_PASSWORD + value: "bucketasecret" + ports: + - containerPort: 9000 + automountServiceAccountToken: false +--- +apiVersion: v1 +kind: Service +metadata: + name: minio-b + labels: + app: minio-b + app.kubernetes.io/part-of: jindo-e2e-minio +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + selector: + app: minio-b + app.kubernetes.io/part-of: jindo-e2e-minio +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio-b + labels: + app: minio-b + app.kubernetes.io/part-of: jindo-e2e-minio +spec: + replicas: 1 + selector: + matchLabels: + app: minio-b + app.kubernetes.io/part-of: jindo-e2e-minio + strategy: + type: Recreate + template: + metadata: + labels: + app: minio-b + app.kubernetes.io/part-of: jindo-e2e-minio + spec: + hostname: bucketb + subdomain: minio-b + containers: + - name: minio + image: local/minio-e2e:latest + imagePullPolicy: IfNotPresent + resources: + limits: + memory: "512Mi" + args: + - server + - /data + startupProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 1 + periodSeconds: 3 + env: + - name: MINIO_DOMAIN + value: minio-b.default.svc.cluster.local + - name: MINIO_ROOT_USER + value: "bucketbadmin" + - name: MINIO_ROOT_PASSWORD + value: "bucketbsecret" + ports: + - containerPort: 9000 automountServiceAccountToken: false diff --git a/test/gha-e2e/jindo/multi-oss-dataset.yaml b/test/gha-e2e/jindo/multi-oss-dataset.yaml new file mode 100644 index 00000000000..e74f8c3194c --- 
/dev/null +++ b/test/gha-e2e/jindo/multi-oss-dataset.yaml @@ -0,0 +1,88 @@ +apiVersion: v1 +kind: Secret +metadata: + name: access-key-a + namespace: default +type: Opaque +stringData: + fs.oss.accessKeyId: __ACCESS_KEY_ID_A__ + fs.oss.accessKeySecret: __ACCESS_KEY_SECRET_A__ +--- +apiVersion: v1 +kind: Secret +metadata: + name: access-key-b + namespace: default +type: Opaque +stringData: + fs.oss.accessKeyId: __ACCESS_KEY_ID_B__ + fs.oss.accessKeySecret: __ACCESS_KEY_SECRET_B__ +--- +apiVersion: data.fluid.io/v1alpha1 +kind: Dataset +metadata: + name: jindo-multi-oss-demo +spec: + sharedOptions: + fs.oss.endpoint: "__ENDPOINT_A__" + mounts: + - mountPoint: oss://__BUCKET_A__/ + options: + fs.oss.endpoint: "__ENDPOINT_A__" + encryptOptions: + - name: fs.oss.accessKeyId + valueFrom: + secretKeyRef: + name: access-key-a + key: fs.oss.accessKeyId + - name: fs.oss.accessKeySecret + valueFrom: + secretKeyRef: + name: access-key-a + key: fs.oss.accessKeySecret + name: bucketa + path: /bucketa + - mountPoint: oss://__BUCKET_B__/ + options: + fs.oss.endpoint: "__ENDPOINT_B__" + encryptOptions: + - name: fs.oss.accessKeyId + valueFrom: + secretKeyRef: + name: access-key-b + key: fs.oss.accessKeyId + - name: fs.oss.accessKeySecret + valueFrom: + secretKeyRef: + name: access-key-b + key: fs.oss.accessKeySecret + name: bucketb + path: /bucketb + accessModes: + - ReadWriteMany +--- +apiVersion: data.fluid.io/v1alpha1 +kind: JindoRuntime +metadata: + name: jindo-multi-oss-demo + annotations: + data.fluid.io/metadataList: | + [{"Labels":{"fluid.io/node-publish-method":"symlink"},"selector":{"kind":"PersistentVolume"}}] +spec: + jindoVersion: + imagePullPolicy: IfNotPresent + fuse: + args: + - -oauto_cache + - -oattr_timeout=0 + - -oentry_timeout=0 + - -onegative_timeout=0 + imagePullPolicy: IfNotPresent + replicas: 1 + tieredstore: + levels: + - mediumtype: SSD + path: /var/lib/docker/jindo + quota: 1Gi + high: "0.99" + low: "0.95" diff --git 
a/test/gha-e2e/jindo/multi-oss-job.yaml b/test/gha-e2e/jindo/multi-oss-job.yaml new file mode 100644 index 00000000000..cd3f85cd9e9 --- /dev/null +++ b/test/gha-e2e/jindo/multi-oss-job.yaml @@ -0,0 +1,50 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: fluid-multi-oss-test +spec: + backoffLimit: 1 + template: + spec: + restartPolicy: Never + containers: + - name: busybox + image: registry-cn-hongkong.ack.aliyuncs.com/acs/smartdata:6.9.1-202509151826 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: "100m" + memory: "128Mi" + ephemeral-storage: "1Gi" + limits: + cpu: "500m" + memory: "256Mi" + ephemeral-storage: "5Gi" + command: ["/bin/sh"] + args: + - -c + - | + set -eu + for i in $(seq 1 48); do + value_a="$(cat /data/bucketa/__OBJECT_KEY_A__ 2>/dev/null || true)" + value_b="$(cat /data/bucketb/__OBJECT_KEY_B__ 2>/dev/null || true)" + if test "$value_a" = "__EXPECTED_DATA_A__" && test "$value_b" = "__EXPECTED_DATA_B__"; then + echo "verified both OSS mount paths from application pod" + exit 0 + fi + sleep 5 + done + ls -al /data >&2 || true + ls -al /data/bucketa >&2 || true + ls -al /data/bucketb >&2 || true + echo "bucket-a: ${value_a:-}" >&2 + echo "bucket-b: ${value_b:-}" >&2 + exit 1 + volumeMounts: + - mountPath: /data + name: fluid-vol + automountServiceAccountToken: false + volumes: + - name: fluid-vol + persistentVolumeClaim: + claimName: jindo-multi-oss-demo diff --git a/test/gha-e2e/jindo/oss-emulator.yaml b/test/gha-e2e/jindo/oss-emulator.yaml new file mode 100644 index 00000000000..d3b7d758c8f --- /dev/null +++ b/test/gha-e2e/jindo/oss-emulator.yaml @@ -0,0 +1,107 @@ +apiVersion: v1 +kind: Service +metadata: + name: oss + labels: + app.kubernetes.io/part-of: jindo-e2e-oss +spec: + clusterIP: None + ports: + - port: 9000 + targetPort: 9000 + selector: + app.kubernetes.io/part-of: jindo-e2e-oss +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: oss-a + labels: + app: oss-a + app.kubernetes.io/part-of: jindo-e2e-oss +spec: 
+ replicas: 1 + selector: + matchLabels: + app: oss-a + app.kubernetes.io/part-of: jindo-e2e-oss + template: + metadata: + labels: + app: oss-a + app.kubernetes.io/part-of: jindo-e2e-oss + spec: + hostname: bucketa + subdomain: oss + containers: + - name: emulator + image: fluidcloudnative/oss-emulator:e2e + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "250m" + memory: "128Mi" + env: + - name: BUCKET_NAME + value: bucketa + - name: ACCESS_KEY_ID + value: bucketaadmin + - name: OBJECT_KEY + value: testfile + - name: OBJECT_ALIAS + value: bucketatestfile + - name: OBJECT_VALUE + value: bucket-a-data + ports: + - containerPort: 9000 + automountServiceAccountToken: false +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: oss-b + labels: + app: oss-b + app.kubernetes.io/part-of: jindo-e2e-oss +spec: + replicas: 1 + selector: + matchLabels: + app: oss-b + app.kubernetes.io/part-of: jindo-e2e-oss + template: + metadata: + labels: + app: oss-b + app.kubernetes.io/part-of: jindo-e2e-oss + spec: + hostname: bucketb + subdomain: oss + containers: + - name: emulator + image: fluidcloudnative/oss-emulator:e2e + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "250m" + memory: "128Mi" + env: + - name: BUCKET_NAME + value: bucketb + - name: ACCESS_KEY_ID + value: bucketbadmin + - name: OBJECT_KEY + value: testfile + - name: OBJECT_ALIAS + value: bucketbtestfile + - name: OBJECT_VALUE + value: bucket-b-data + ports: + - containerPort: 9000 + automountServiceAccountToken: false diff --git a/test/gha-e2e/jindo/oss-emulator/Dockerfile b/test/gha-e2e/jindo/oss-emulator/Dockerfile new file mode 100644 index 00000000000..70c9a915a28 --- /dev/null +++ b/test/gha-e2e/jindo/oss-emulator/Dockerfile @@ -0,0 +1,9 @@ +FROM golang:1.24.12-bookworm AS builder +WORKDIR /src +COPY main.go . 
+RUN CGO_ENABLED=0 go build -o /oss-emulator main.go + +FROM scratch +COPY --from=builder /oss-emulator /oss-emulator +EXPOSE 9000 +ENTRYPOINT ["/oss-emulator"] diff --git a/test/gha-e2e/jindo/oss-emulator/main.go b/test/gha-e2e/jindo/oss-emulator/main.go new file mode 100644 index 00000000000..b47c4370f63 --- /dev/null +++ b/test/gha-e2e/jindo/oss-emulator/main.go @@ -0,0 +1,251 @@ +package main + +import ( + "encoding/xml" + "fmt" + "log" + "net/http" + "os" + "strings" + "time" +) + +type listBucketResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix,omitempty"` + Delimiter string `xml:"Delimiter,omitempty"` + Marker string `xml:"Marker,omitempty"` + MaxKeys int `xml:"MaxKeys"` + KeyCount int `xml:"KeyCount,omitempty"` + IsTrunc bool `xml:"IsTruncated"` + Contents []objectEntry `xml:"Contents"` +} + +type objectEntry struct { + Key string `xml:"Key"` + LastModified string `xml:"LastModified"` + ETag string `xml:"ETag"` + Type string `xml:"Type"` + Size int `xml:"Size"` + StorageClass string `xml:"StorageClass"` +} + +type errorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId,omitempty"` + HostID string `xml:"HostId,omitempty"` +} + +const emulatorLastModified = "Tue, 20 Apr 2026 00:00:00 GMT" + +func main() { + bucketName := getenv("BUCKET_NAME", "bucket-a") + objectKey := getenv("OBJECT_KEY", "testfile") + objectAlias := getenv("OBJECT_ALIAS", "") + objectValue := getenv("OBJECT_VALUE", "bucket-a-data") + accessKeyID := getenv("ACCESS_KEY_ID", "") + addr := getenv("LISTEN_ADDR", ":9000") + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.Printf("request method=%s host=%s path=%s rawQuery=%s", r.Method, r.Host, r.URL.Path, r.URL.RawQuery) + + if !authorizeRequest(w, r, accessKeyID) { + return + } + + if r.URL.Query().Get("location") != "" { + 
w.Header().Set("Content-Type", "application/xml") + fmt.Fprintf(w, "oss-cn-hangzhou") + return + } + + if r.Method == http.MethodHead || r.Method == http.MethodGet { + path := strings.TrimPrefix(r.URL.Path, "/") + if path == "" { + if r.Method == http.MethodHead { + w.WriteHeader(http.StatusOK) + return + } + + prefix := strings.TrimPrefix(r.URL.Query().Get("prefix"), "/") + if prefix != "" && !strings.HasSuffix(prefix, "/") { + prefix += "/" + } + + var contents []objectEntry + if strings.HasPrefix(objectKey, prefix) || prefix == "" { + contents = append(contents, objectEntry{ + Key: objectKey, + LastModified: "2026-04-20T00:00:00.000Z", + ETag: "\"dummy-etag\"", + Type: "Normal", + Size: len(objectValue), + StorageClass: "Standard", + }) + } + if objectAlias != "" && objectAlias != objectKey && (strings.HasPrefix(objectAlias, prefix) || prefix == "") { + contents = append(contents, objectEntry{ + Key: objectAlias, + LastModified: "2026-04-20T00:00:00.000Z", + ETag: "\"dummy-etag\"", + Type: "Normal", + Size: len(objectValue), + StorageClass: "Standard", + }) + } + + w.Header().Set("Content-Type", "application/xml") + if err := xml.NewEncoder(w).Encode(listBucketResult{ + Name: bucketName, + Prefix: r.URL.Query().Get("prefix"), + Delimiter: r.URL.Query().Get("delimiter"), + MaxKeys: 1000, + KeyCount: len(contents), + IsTrunc: false, + Contents: contents, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + if path == objectKey || path == objectAlias { + writeObjectHeaders(w.Header(), len(objectValue), "Normal") + if r.Method == http.MethodGet { + _, _ = w.Write([]byte(objectValue)) + } + return + } + + trimmedObjectDir := strings.TrimSuffix(objectKey, "/") + objectDir := trimmedObjectDir + if idx := strings.LastIndex(trimmedObjectDir, "/"); idx >= 0 { + objectDir = trimmedObjectDir[:idx+1] + } + if path == objectDir || path == strings.TrimSuffix(objectDir, "/") { + writeObjectHeaders(w.Header(), 0, "Directory") + if 
r.Method == http.MethodHead { + w.WriteHeader(http.StatusOK) + return + } + w.Header().Set("Content-Type", "application/xml") + if err := xml.NewEncoder(w).Encode(listBucketResult{ + Name: bucketName, + Prefix: objectDir, + Delimiter: r.URL.Query().Get("delimiter"), + MaxKeys: 1000, + KeyCount: 1, + IsTrunc: false, + Contents: []objectEntry{{ + Key: objectKey, + LastModified: "2026-04-20T00:00:00.000Z", + ETag: "\"dummy-etag\"", + Type: "Normal", + Size: len(objectValue), + StorageClass: "Standard", + }}, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + http.NotFound(w, r) + return + } + + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + }) + + log.Printf("starting local oss emulator for bucket=%s key=%s on %s", bucketName, objectKey, addr) + log.Fatal(http.ListenAndServe(addr, handler)) +} + +func getenv(key, fallback string) string { + if value := os.Getenv(key); value != "" { + return value + } + + return fallback +} + +func writeObjectHeaders(header http.Header, contentLength int, objectType string) { + header.Set("Content-Type", "application/octet-stream") + header.Set("Content-Length", fmt.Sprintf("%d", contentLength)) + header.Set("Content-MD5", "1B2M2Y8AsgTpgAmY7PhCfg==") + header.Set("Last-Modified", emulatorLastModified) + header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + header.Set("Server", "AliyunOSS") + header.Set("Accept-Ranges", "bytes") + header.Set("ETag", "\"dummy-etag\"") + header.Set("X-Oss-Request-Id", "oss-emulator") + header.Set("X-Oss-Version-Id", "null") + header.Set("X-Oss-Last-Access-Time", emulatorLastModified) + header.Set("X-Oss-Object-Type", objectType) + header.Set("X-Oss-Storage-Class", "Standard") + header.Set("X-Oss-Hash-Crc64ecma", "123456789") +} + +func authorizeRequest(w http.ResponseWriter, r *http.Request, expectedAccessKeyID string) bool { + if expectedAccessKeyID == "" { + return true + } + + accessKeyID := extractAccessKeyID(r) + if 
accessKeyID == expectedAccessKeyID { + return true + } + + log.Printf( + "reject unauthorized request: invalid access key id method=%s host=%s path=%s key_present=%t", + r.Method, + r.Host, + r.URL.Path, + accessKeyID != "", + ) + writeAuthError(w, "InvalidAccessKeyId", "The OSS emulator rejected the provided AccessKeyId.") + return false +} + +func extractAccessKeyID(r *http.Request) string { + if accessKeyID := r.URL.Query().Get("OSSAccessKeyId"); accessKeyID != "" { + return accessKeyID + } + + authHeader := strings.TrimSpace(r.Header.Get("Authorization")) + if strings.HasPrefix(authHeader, "OSS ") { + credential := strings.TrimPrefix(authHeader, "OSS ") + accessKeyID, _, _ := strings.Cut(credential, ":") + return strings.TrimSpace(accessKeyID) + } + + if strings.HasPrefix(authHeader, "OSS4-HMAC-SHA256 ") { + for _, part := range strings.Split(strings.TrimPrefix(authHeader, "OSS4-HMAC-SHA256 "), ",") { + part = strings.TrimSpace(part) + if !strings.HasPrefix(part, "Credential=") { + continue + } + credential := strings.TrimPrefix(part, "Credential=") + accessKeyID, _, _ := strings.Cut(credential, "/") + return strings.TrimSpace(accessKeyID) + } + } + + return "" +} + +func writeAuthError(w http.ResponseWriter, code, message string) { + w.Header().Set("Content-Type", "application/xml") + w.WriteHeader(http.StatusForbidden) + if err := xml.NewEncoder(w).Encode(errorResponse{ + Code: code, + Message: message, + RequestID: "oss-emulator", + HostID: "oss-emulator", + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git a/test/gha-e2e/jindo/test.sh b/test/gha-e2e/jindo/test.sh index faf76d42e04..62119426799 100644 --- a/test/gha-e2e/jindo/test.sh +++ b/test/gha-e2e/jindo/test.sh @@ -1,9 +1,35 @@ #!/bin/bash -testname="jindoruntime basic e2e" +testname="jindoruntime secret e2e" -dataset_name="jindo-demo" -job_name="fluid-test" +s3_dataset_name="jindo-demo" +s3_dataset_file="test/gha-e2e/jindo/dataset.yaml" 
+s3_job_name="fluid-test" +s3_job_file="test/gha-e2e/jindo/job.yaml" + +multi_oss_dataset_name="jindo-multi-oss-demo" +multi_oss_dataset_template="test/gha-e2e/jindo/multi-oss-dataset.yaml" +multi_oss_job_name="fluid-multi-oss-test" +multi_oss_job_template="test/gha-e2e/jindo/multi-oss-job.yaml" + +multi_oss_bucket_a="${JINDO_E2E_OSS_BUCKET_A:-bucketa}" +multi_oss_bucket_b="${JINDO_E2E_OSS_BUCKET_B:-bucketb}" +multi_oss_endpoint_a="${JINDO_E2E_OSS_ENDPOINT_A:-oss.default.svc.cluster.local:9000}" +multi_oss_endpoint_b="${JINDO_E2E_OSS_ENDPOINT_B:-oss.default.svc.cluster.local:9000}" +multi_oss_access_key_a="${JINDO_E2E_OSS_ACCESS_KEY_ID_A:-bucketaadmin}" +multi_oss_secret_key_a="${JINDO_E2E_OSS_ACCESS_KEY_SECRET_A:-bucketasecret}" +multi_oss_access_key_b="${JINDO_E2E_OSS_ACCESS_KEY_ID_B:-bucketbadmin}" +multi_oss_secret_key_b="${JINDO_E2E_OSS_ACCESS_KEY_SECRET_B:-bucketbsecret}" +multi_oss_object_key_a="${JINDO_E2E_OSS_OBJECT_KEY_A:-testfile}" +multi_oss_object_key_b="${JINDO_E2E_OSS_OBJECT_KEY_B:-testfile}" +multi_oss_expected_data_a="${JINDO_E2E_OSS_EXPECTED_DATA_A:-bucket-a-data}" +multi_oss_expected_data_b="${JINDO_E2E_OSS_EXPECTED_DATA_B:-bucket-b-data}" +multi_oss_seed_endpoint="" +multi_oss_backend="${JINDO_E2E_MULTI_OSS_BACKEND:-emulator}" + +rendered_dir="" +multi_oss_dataset_file="" +multi_oss_job_file="" function syslog() { echo ">>> $1" @@ -15,16 +41,257 @@ function panic() { exit 1 } +function is_real_oss_mode() { + [[ -n "${JINDO_E2E_OSS_ENDPOINT:-}" ]] +} + +function should_skip_s3_scenario() { + [[ "${JINDO_E2E_SKIP_S3_SCENARIO:-}" == "1" ]] +} + +function should_use_minio_multi_oss() { + [[ "$multi_oss_backend" == "minio" ]] +} + +function should_use_emulator_multi_oss() { + [[ "$multi_oss_backend" == "emulator" ]] +} + +function require_env() { + var_name=$1 + if [[ -z "${!var_name:-}" ]]; then + panic "required environment variable $var_name is empty" + fi +} + +function normalize_mount_endpoint() { + endpoint=$1 + endpoint="${endpoint#http://}" + 
endpoint="${endpoint#https://}" + endpoint="${endpoint%/}" + echo "$endpoint" +} + +function ensure_python_oss2() { + if python3 - <<'PY' >/dev/null 2>&1 +import importlib.util +import sys +sys.exit(0 if importlib.util.find_spec("oss2") else 1) +PY + then + return + fi + + syslog "Installing python module oss2 for real OSS verification" + python3 -m pip install --user oss2 >/dev/null || panic "failed to install python module oss2" +} + +function ensure_python_boto3() { + if python3 - <<'PY' >/dev/null 2>&1 +import importlib.util +import sys +mods = ("boto3", "botocore") +sys.exit(0 if all(importlib.util.find_spec(m) for m in mods) else 1) +PY + then + return + fi + + syslog "Installing python module boto3 for MinIO verification" + python3 -m pip install --user boto3 >/dev/null || panic "failed to install python module boto3" +} + +function seed_real_oss_bucket() { + bucket_name=$1 + access_key=$2 + secret_key=$3 + bucket_object_key=$4 + file_content=$5 + + OSS_BUCKET_NAME="$bucket_name" \ + OSS_ENDPOINT="$multi_oss_seed_endpoint" \ + OSS_ACCESS_KEY="$access_key" \ + OSS_SECRET_KEY="$secret_key" \ + OSS_OBJECT_KEY="$bucket_object_key" \ + OSS_FILE_CONTENT="$file_content" \ + python3 - <<'PY' || exit 1 +import os +import sys + +import oss2 + +bucket = oss2.Bucket( + oss2.Auth(os.environ["OSS_ACCESS_KEY"], os.environ["OSS_SECRET_KEY"]), + os.environ["OSS_ENDPOINT"], + os.environ["OSS_BUCKET_NAME"], +) +bucket.put_object(os.environ["OSS_OBJECT_KEY"], os.environ["OSS_FILE_CONTENT"].encode()) +value = bucket.get_object(os.environ["OSS_OBJECT_KEY"]).read().decode() +if value != os.environ["OSS_FILE_CONTENT"]: + raise SystemExit(f"unexpected object content: {value!r}") +print(f"seeded {os.environ['OSS_BUCKET_NAME']}/{os.environ['OSS_OBJECT_KEY']}") +PY +} + +function setup_real_oss() { + require_env JINDO_E2E_OSS_ENDPOINT + require_env JINDO_E2E_OSS_BUCKET_A + require_env JINDO_E2E_OSS_BUCKET_B + require_env JINDO_E2E_OSS_ACCESS_KEY_ID_A + require_env 
JINDO_E2E_OSS_ACCESS_KEY_SECRET_A + require_env JINDO_E2E_OSS_ACCESS_KEY_ID_B + require_env JINDO_E2E_OSS_ACCESS_KEY_SECRET_B + + ensure_python_oss2 + + multi_oss_bucket_a="$JINDO_E2E_OSS_BUCKET_A" + multi_oss_bucket_b="$JINDO_E2E_OSS_BUCKET_B" + multi_oss_endpoint_a="$(normalize_mount_endpoint "$JINDO_E2E_OSS_ENDPOINT")" + multi_oss_endpoint_b="$(normalize_mount_endpoint "$JINDO_E2E_OSS_ENDPOINT")" + multi_oss_seed_endpoint="${JINDO_E2E_OSS_SEED_ENDPOINT:-$JINDO_E2E_OSS_ENDPOINT}" + if [[ "$multi_oss_seed_endpoint" != http://* && "$multi_oss_seed_endpoint" != https://* ]]; then + multi_oss_seed_endpoint="https://$multi_oss_seed_endpoint" + fi + multi_oss_access_key_a="$JINDO_E2E_OSS_ACCESS_KEY_ID_A" + multi_oss_secret_key_a="$JINDO_E2E_OSS_ACCESS_KEY_SECRET_A" + multi_oss_access_key_b="$JINDO_E2E_OSS_ACCESS_KEY_ID_B" + multi_oss_secret_key_b="$JINDO_E2E_OSS_ACCESS_KEY_SECRET_B" + + seed_real_oss_bucket "$multi_oss_bucket_a" "$multi_oss_access_key_a" "$multi_oss_secret_key_a" "$multi_oss_object_key_a" "$multi_oss_expected_data_a" || panic "failed to seed real oss bucket $multi_oss_bucket_a" + seed_real_oss_bucket "$multi_oss_bucket_b" "$multi_oss_access_key_b" "$multi_oss_secret_key_b" "$multi_oss_object_key_b" "$multi_oss_expected_data_b" || panic "failed to seed real oss bucket $multi_oss_bucket_b" +} + +function render_multi_oss_files() { + rendered_dir=$(mktemp -d) + multi_oss_dataset_file="$rendered_dir/multi-oss-dataset.yaml" + multi_oss_job_file="$rendered_dir/multi-oss-job.yaml" + endpoint_a="$(normalize_mount_endpoint "$multi_oss_endpoint_a")" + endpoint_b="$(normalize_mount_endpoint "$multi_oss_endpoint_b")" + + sed \ + -e "s|__ACCESS_KEY_ID_A__|$multi_oss_access_key_a|g" \ + -e "s|__ACCESS_KEY_SECRET_A__|$multi_oss_secret_key_a|g" \ + -e "s|__ACCESS_KEY_ID_B__|$multi_oss_access_key_b|g" \ + -e "s|__ACCESS_KEY_SECRET_B__|$multi_oss_secret_key_b|g" \ + -e "s|__BUCKET_A__|$multi_oss_bucket_a|g" \ + -e "s|__BUCKET_B__|$multi_oss_bucket_b|g" \ + -e 
"s|__ENDPOINT_A__|$endpoint_a|g" \
+        -e "s|__ENDPOINT_B__|$endpoint_b|g" \
+        "$multi_oss_dataset_template" >"$multi_oss_dataset_file"
+
+    sed \
+        -e "s|__EXPECTED_DATA_A__|$multi_oss_expected_data_a|g" \
+        -e "s|__EXPECTED_DATA_B__|$multi_oss_expected_data_b|g" \
+        -e "s|__OBJECT_KEY_A__|$multi_oss_object_key_a|g" \
+        -e "s|__OBJECT_KEY_B__|$multi_oss_object_key_b|g" \
+        "$multi_oss_job_template" >"$multi_oss_job_file"
+}
+
+function seed_minio_bucket() {
+    app_name=$1
+    access_key=$2
+    secret_key=$3
+    bucket_name=$4
+    object_key=$5
+    file_content=$6
+    local_port=$7
+    port_forward_log="/tmp/${app_name}-port-forward.log"
+
+    ensure_python_boto3
+
+    kubectl port-forward service/${app_name} ${local_port}:9000 >${port_forward_log} 2>&1 &
+    pf_pid=$!
+
+    for i in $(seq 1 20); do
+        if python3 - <<PY >/dev/null 2>&1
+import socket
+s = socket.socket()
+s.settimeout(1)
+try:
+    s.connect(("127.0.0.1", ${local_port}))
+finally:
+    s.close()
+PY
+        then
+            break
+        fi
+        if ! kill -0 ${pf_pid} >/dev/null 2>&1; then
+            cat ${port_forward_log} >&2 || true
+            panic "port-forward for ${app_name} exited unexpectedly"
+        fi
+        sleep 1
+    done
+
+    MINIO_ENDPOINT="http://127.0.0.1:${local_port}" \
+    MINIO_ACCESS_KEY="${access_key}" \
+    MINIO_SECRET_KEY="${secret_key}" \
+    MINIO_BUCKET_NAME="${bucket_name}" \
+    MINIO_OBJECT_KEY="${object_key}" \
+    MINIO_FILE_CONTENT="${file_content}" \
+    python3 - <<'PY'
+import os
+
+import boto3
+from botocore.client import Config
+from botocore.exceptions import ClientError
+
+client = boto3.client(
+    "s3",
+    endpoint_url=os.environ["MINIO_ENDPOINT"],
+    aws_access_key_id=os.environ["MINIO_ACCESS_KEY"],
+    aws_secret_access_key=os.environ["MINIO_SECRET_KEY"],
+    region_name="us-east-1",
+    config=Config(signature_version="s3v4", s3={"addressing_style": "path"}),
+)
+
+bucket = os.environ["MINIO_BUCKET_NAME"]
+key = os.environ["MINIO_OBJECT_KEY"]
+body = os.environ["MINIO_FILE_CONTENT"].encode()
+
+try:
+    client.head_bucket(Bucket=bucket)
+except ClientError:
+    
client.create_bucket(Bucket=bucket) + +client.put_object(Bucket=bucket, Key=key, Body=body) +value = client.get_object(Bucket=bucket, Key=key)["Body"].read().decode() +if value != os.environ["MINIO_FILE_CONTENT"]: + raise SystemExit(f"unexpected object content: {value!r}") +PY + rc=$? + + kill ${pf_pid} >/dev/null 2>&1 || true + wait ${pf_pid} 2>/dev/null || true + + if [[ ${rc} -ne 0 ]]; then + panic "failed to seed data into ${app_name}" + fi +} + function setup_minio() { kubectl create -f test/gha-e2e/jindo/minio.yaml - kubectl wait --for=condition=Ready --timeout=180s -l app=minio pod || panic "minio pod is not ready" + kubectl rollout status --timeout=180s deployment/minio || panic "minio deployment is not ready" + kubectl rollout status --timeout=180s deployment/minio-a || panic "minio-a deployment is not ready" + kubectl rollout status --timeout=180s deployment/minio-b || panic "minio-b deployment is not ready" - minio_pod=$(kubectl get pod -l app=minio -oname) - kubectl exec $minio_pod -- /bin/sh -c 'mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin && mc mb myminio/mybucket && echo "helloworld" > testfile && mc mv testfile myminio/mybucket/subpath/testfile && mc cat myminio/mybucket/subpath/testfile' || panic "failed to seed data into minio" + seed_minio_bucket minio minioadmin minioadmin mybucket testfile helloworld 19000 + if should_use_minio_multi_oss; then + seed_minio_bucket minio-a "$multi_oss_access_key_a" "$multi_oss_secret_key_a" "$multi_oss_bucket_a" "$multi_oss_object_key_a" "$multi_oss_expected_data_a" 19001 + seed_minio_bucket minio-b "$multi_oss_access_key_b" "$multi_oss_secret_key_b" "$multi_oss_bucket_b" "$multi_oss_object_key_b" "$multi_oss_expected_data_b" 19002 + fi +} + +function setup_oss_emulator() { + kubectl create -f test/gha-e2e/jindo/oss-emulator.yaml + kubectl rollout status --timeout=180s deployment/oss-a || panic "oss-a deployment is not ready" + kubectl rollout status --timeout=180s deployment/oss-b || panic 
"oss-b deployment is not ready" } function create_dataset() { - kubectl create -f test/gha-e2e/jindo/dataset.yaml + dataset_file=$1 + dataset_name=$2 + + kubectl create -f $dataset_file if [[ -z "$(kubectl get dataset $dataset_name -oname)" ]]; then panic "failed to create dataset $dataset_name" @@ -36,6 +303,7 @@ function create_dataset() { } function wait_dataset_bound() { + dataset_name=$1 deadline=180 # 3 minutes last_state="" log_interval=0 @@ -60,8 +328,106 @@ function wait_dataset_bound() { syslog "Found dataset $dataset_name status.phase==Bound" } +function wait_runtime_stable() { + runtime_name=$1 + deadline=300 + elapsed=0 + + while true; do + master_phase=$(kubectl get jindoruntime $runtime_name -ojsonpath='{@.status.masterPhase}' 2>/dev/null) + worker_phase=$(kubectl get jindoruntime $runtime_name -ojsonpath='{@.status.workerPhase}' 2>/dev/null) + fuse_phase=$(kubectl get jindoruntime $runtime_name -ojsonpath='{@.status.fusePhase}' 2>/dev/null) + fuse_pod=$(kubectl get pod -l release=$runtime_name,role=jindofs-fuse -ojsonpath='{.items[0].metadata.name}' 2>/dev/null) + fuse_restart_count="" + if [[ -n "$fuse_pod" ]]; then + fuse_restart_count=$(kubectl get pod $fuse_pod -ojsonpath='{@.status.containerStatuses[0].restartCount}' 2>/dev/null) + fi + + if [[ "$master_phase" == "Ready" && "$worker_phase" == "Ready" && "$fuse_phase" == "Ready" && -n "$fuse_pod" && -n "$fuse_restart_count" ]]; then + sleep 20 + fuse_restart_count_after=$(kubectl get pod $fuse_pod -ojsonpath='{@.status.containerStatuses[0].restartCount}' 2>/dev/null) + if [[ "$fuse_restart_count_after" == "$fuse_restart_count" ]]; then + syslog "Found runtime $runtime_name stable with fuse pod $fuse_pod (restartCount=$fuse_restart_count_after)" + break + fi + fi + + elapsed=$(expr $elapsed + 5) + if [[ "$elapsed" -ge "$deadline" ]]; then + panic "timeout waiting for jindoruntime $runtime_name to become stable" + fi + + sleep 5 + done +} + +function wait_runtime_components_ready() { + 
runtime_name=$1 + deadline=240 + elapsed=0 + + while true; do + master_phase=$(kubectl get jindoruntime $runtime_name -ojsonpath='{@.status.masterPhase}' 2>/dev/null) + worker_phase=$(kubectl get jindoruntime $runtime_name -ojsonpath='{@.status.workerPhase}' 2>/dev/null) + + if [[ "$master_phase" == "Ready" && "$worker_phase" == "Ready" ]]; then + syslog "Found runtime $runtime_name master/worker ready" + break + fi + + elapsed=$(expr $elapsed + 5) + if [[ "$elapsed" -ge "$deadline" ]]; then + panic "timeout waiting for jindoruntime $runtime_name master/worker to become ready" + fi + + sleep 5 + done +} + +function create_warmup_pod() { + dataset_name=$1 + warmup_name="${dataset_name}-warmup" + + cat </dev/null 2>&1 || true +} + +function wait_warmup_ready() { + dataset_name=$1 + kubectl wait --for=condition=Ready --timeout=180s pod/${dataset_name}-warmup >/dev/null || panic "warmup pod ${dataset_name}-warmup is not ready" +} + function create_job() { - kubectl create -f test/gha-e2e/jindo/job.yaml + job_file=$1 + job_name=$2 + + kubectl create -f $job_file if [[ -z "$(kubectl get job $job_name -oname)" ]]; then panic "failed to create job" @@ -69,10 +435,12 @@ function create_job() { } function wait_job_completed() { + job_name=$1 while true; do succeed=$(kubectl get job $job_name -ojsonpath='{@.status.succeeded}') failed=$(kubectl get job $job_name -ojsonpath='{@.status.failed}') if [[ "$failed" -ne "0" ]]; then + kubectl logs job/$job_name --all-containers --tail=-1 >/dev/stderr 2>&1 || true panic "job failed when accessing data" fi if [[ "$succeed" -eq "1" ]]; then @@ -83,20 +451,80 @@ function wait_job_completed() { syslog "Found succeeded job $job_name" } +function cleanup_scenario() { + dataset_file=$1 + dataset_name=$2 + job_file=$3 + + delete_warmup_pod $dataset_name + kubectl delete -f $job_file --ignore-not-found + kubectl delete -f $dataset_file --ignore-not-found + kubectl wait --for=delete --timeout=180s jindoruntime/$dataset_name >/dev/null 2>&1 || 
true +} + +function run_scenario() { + scenario_name=$1 + dataset_file=$2 + dataset_name=$3 + job_file=$4 + job_name=$5 + + syslog "Running scenario: $scenario_name" + create_dataset $dataset_file $dataset_name + wait_dataset_bound $dataset_name + if [[ "$dataset_name" == "$multi_oss_dataset_name" ]]; then + create_warmup_pod $dataset_name + wait_runtime_stable $dataset_name + wait_warmup_ready $dataset_name + else + wait_runtime_components_ready $dataset_name + fi + create_job $job_file $job_name + wait_job_completed $job_name + cleanup_scenario $dataset_file $dataset_name $job_file +} + function dump_env_and_clean_up() { - bash tools/diagnose-fluid-jindo.sh collect --name $dataset_name --namespace default --collect-path ./e2e-tmp/testcase-jindo.tgz + for dataset_name in $s3_dataset_name $multi_oss_dataset_name; do + if kubectl get dataset $dataset_name >/dev/null 2>&1; then + bash tools/diagnose-fluid-jindo.sh collect --name $dataset_name --namespace default --collect-path ./e2e-tmp/testcase-$dataset_name.tgz + fi + done syslog "Cleaning up resources for testcase $testname" - kubectl delete -f test/gha-e2e/jindo/ + kubectl delete -f test/gha-e2e/jindo/ --ignore-not-found + if [[ -n "$rendered_dir" && -d "$rendered_dir" ]]; then + rm -rf "$rendered_dir" + fi } function main() { syslog "[TESTCASE $testname STARTS AT $(date)]" - setup_minio - create_dataset + if is_real_oss_mode; then + multi_oss_backend="real" + elif ! should_use_minio_multi_oss && ! should_use_emulator_multi_oss; then + panic "unsupported JINDO_E2E_MULTI_OSS_BACKEND=${multi_oss_backend}, expected minio or emulator" + fi + + if ! 
should_skip_s3_scenario || should_use_minio_multi_oss; then + setup_minio + fi + if is_real_oss_mode; then + syslog "Using real OSS multi-mount verification as manual reinforcement" + setup_real_oss + elif should_use_minio_multi_oss; then + syslog "Using MinIO-backed multi-mount verification for optional compatibility checks" + multi_oss_endpoint_a="${JINDO_E2E_OSS_ENDPOINT_A:-minio-a.default.svc.cluster.local:9000}" + multi_oss_endpoint_b="${JINDO_E2E_OSS_ENDPOINT_B:-minio-b.default.svc.cluster.local:9000}" + else + syslog "Using emulator-backed multi-mount verification as the default CI gate" + setup_oss_emulator + fi + render_multi_oss_files trap dump_env_and_clean_up EXIT - wait_dataset_bound - create_job - wait_job_completed + if ! should_skip_s3_scenario; then + run_scenario "single-mount s3 secret" $s3_dataset_file $s3_dataset_name $s3_job_file $s3_job_name + fi + run_scenario "multi-mount oss secret projections" $multi_oss_dataset_file $multi_oss_dataset_name $multi_oss_job_file $multi_oss_job_name syslog "[TESTCASE $testname SUCCEEDED AT $(date)]" }