Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 28 additions & 4 deletions pkg/webhook/membercluster/membercluster_validating_webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,15 @@ import (
"fmt"
"net/http"

admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

fleetnetworkingv1alpha1 "go.goms.io/fleet-networking/api/v1alpha1"
clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1"
"go.goms.io/fleet/pkg/utils"
"go.goms.io/fleet/pkg/utils/validator"
Expand All @@ -22,26 +25,47 @@ var (
)

// memberClusterValidator validates admission requests for MemberCluster objects.
type memberClusterValidator struct {
	// client is used to read other resources on the hub (e.g. InternalServiceExports
	// in the reserved member namespace) while validating a request.
	client client.Client
	// decoder decodes the raw admission request payload into a MemberCluster object.
	decoder webhook.AdmissionDecoder
}

// Add registers the member cluster validating webhook with the manager's
// webhook server at ValidationPath. The handler is given the manager's client
// (to list related resources during validation) and a decoder built from the
// manager's scheme.
func Add(mgr manager.Manager) error {
	hookServer := mgr.GetWebhookServer()
	hookServer.Register(ValidationPath, &webhook.Admission{Handler: &memberClusterValidator{client: mgr.GetClient(), decoder: admission.NewDecoder(mgr.GetScheme())}})
	return nil
}

// Handle memberClusterValidator checks to see if member cluster has valid fields.
func (v *memberClusterValidator) Handle(_ context.Context, req admission.Request) admission.Response {
func (v *memberClusterValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Another naming nit: the added logic might no longer belong in a validator? It's more of a guard 😄

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That is, the logic seems to be more aligned with those in fleetresourcehandler pkg?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So far, the fleetresourcehandler pkg aims to validate CR permissions only. Do you feel this package is a better fit for this logic?

mcObjectName := types.NamespacedName{Name: req.Name, Namespace: req.Namespace}
klog.V(2).InfoS("Validating webhook handling member cluster", "operation", req.Operation, "memberCluster", mcObjectName)

if req.Operation == admissionv1.Delete { // Will reject the requests whenever the serviceExport is not deleted
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have some random thoughts, these are not blocker per se, just something that occurred to me when reading the code:

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We already have a webhook that guards against deletion attempts on Azure Fleet Manager from users (or any source other than RP, AKS support, etc.). So technically speaking the added logic is added to prevent RP (in most cases) from triggering the deletion. How would these components interact I guess? The concern is mostly that users won't see the error message that reminds them to delete service exports.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For upstream users, they have full control over the cluster, so we generally assume that this protection is not strictly required.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the RP side, we should surface this error to the user. Will test that.

klog.V(2).InfoS("Validating webhook member cluster DELETE", "memberCluster", mcObjectName)
namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, mcObjectName.Name)
internalServiceExportList := &fleetnetworkingv1alpha1.InternalServiceExportList{}
if err := v.client.List(ctx, internalServiceExportList, client.InNamespace(namespaceName)); err != nil {
klog.ErrorS(err, "Failed to list internalServiceExportList when validating")
return admission.Errored(http.StatusInternalServerError, fmt.Errorf("failed to list internalServiceExportList, please retry the request: %w", err))
}
for _, internalServiceExport := range internalServiceExportList.Items {
if internalServiceExport.DeletionTimestamp.IsZero() {
klog.Warning("ServiceExport exists in the member cluster, request is denied", "operation", req.Operation, "memberCluster", mcObjectName)
return admission.Denied(fmt.Sprintf("Please delete serviceExport %s in the member cluster before leaving, request is denied", internalServiceExport.Spec.ServiceReference.NamespacedName))
}
}
return admission.Allowed("Member cluster is ready to leave")
}

var mc clusterv1beta1.MemberCluster
klog.V(2).InfoS("Validating webhook handling member cluster", "operation", req.Operation, "namespacedName", types.NamespacedName{Name: req.Name})
if err := v.decoder.Decode(req, &mc); err != nil {
klog.ErrorS(err, "Failed to decode member cluster object for validating fields", "userName", req.UserInfo.Username, "groups", req.UserInfo.Groups)
return admission.Errored(http.StatusBadRequest, err)
}

if err := validator.ValidateMemberCluster(mc); err != nil {
klog.V(2).ErrorS(err, "Member cluster has invalid fields, request is denied", "operation", req.Operation, "namespacedName", types.NamespacedName{Name: mc.Name})
klog.V(2).ErrorS(err, "Member cluster has invalid fields, request is denied", "operation", req.Operation, "memberCluster", mcObjectName)
return admission.Denied(err.Error())
}
return admission.Allowed("Member cluster has valid fields")
Expand Down
1 change: 1 addition & 0 deletions pkg/webhook/webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -306,6 +306,7 @@ func (w *Config) buildFleetValidatingWebhooks() []admv1.ValidatingWebhook {
Operations: []admv1.OperationType{
admv1.Create,
admv1.Update,
admv1.Delete,
},
Rule: createRule([]string{clusterv1beta1.GroupVersion.Group}, []string{clusterv1beta1.GroupVersion.Version}, []string{memberClusterResourceName}, &clusterScope),
},
Expand Down
28 changes: 21 additions & 7 deletions test/e2e/fleet_guard_rail_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -789,24 +789,26 @@ var _ = Describe("fleet guard rail networking E2Es", Serial, Ordered, func() {
mcName := fmt.Sprintf(mcNameTemplate, GinkgoParallelProcess())
iseName := fmt.Sprintf(internalServiceExportNameTemplate, GinkgoParallelProcess())
imcNamespace := fmt.Sprintf(utils.NamespaceNameFormat, mcName)
var ise fleetnetworkingv1alpha1.InternalServiceExport

BeforeEach(func() {
createMemberCluster(mcName, "random-user", nil, map[string]string{fleetClusterResourceIDAnnotationKey: clusterID1})
checkInternalMemberClusterExists(mcName, imcNamespace)
ise := internalServiceExport(iseName, imcNamespace)
ise = internalServiceExport(iseName, imcNamespace)
// can return no kind match error.
Eventually(func(g Gomega) error {
return hubClient.Create(ctx, &ise)
}, eventuallyDuration, eventuallyInterval).Should(Succeed())
})

AfterEach(func() {
Expect(hubClient.Delete(ctx, &ise)).Should(Succeed(), "failed to delete Internal Service Export")

ensureMemberClusterAndRelatedResourcesDeletion(mcName)
})

It("should deny update operation on Internal service export resource in fleet-member namespace for user not in member cluster identity", func() {
Eventually(func(g Gomega) error {
var ise fleetnetworkingv1alpha1.InternalServiceExport
err := hubClient.Get(ctx, types.NamespacedName{Name: iseName, Namespace: imcNamespace}, &ise)
if err != nil {
return err
Expand Down Expand Up @@ -841,37 +843,49 @@ var _ = Describe("fleet guard rail networking E2Es", Serial, Ordered, func() {
ise := internalServiceExport(iseName, imcNamespace)
By("expecting successful CREATE of Internal Service Export")
Expect(impersonateHubClient.Create(ctx, &ise)).Should(Succeed())

By("deleting Internal Service Export")
// Cleanup the internalServiceExport so that it won't block the member deletion.
Expect(impersonateHubClient.Delete(ctx, &ise)).Should(Succeed(), "failed to delete Internal Service Export")
Comment thread
zhiying-lin marked this conversation as resolved.
})

It("should allow CREATE operation on Internal service import resource in fleet-member namespace for user in member cluster identity", func() {
ise := internalServiceImport(isiName, imcNamespace)
isi := internalServiceImport(isiName, imcNamespace)
By("expecting successful CREATE of Internal Service Import")
Expect(impersonateHubClient.Create(ctx, &ise)).Should(Succeed())
Expect(impersonateHubClient.Create(ctx, &isi)).Should(Succeed())

By("deleting Internal Service Import")
Expect(impersonateHubClient.Delete(ctx, &isi)).Should(Succeed(), "failed to delete Internal Service Import")
})

It("should allow CREATE operation on Endpoint slice export resource in fleet-member namespace for user in member cluster identity", func() {
ise := endpointSliceExport(epName, imcNamespace)
esx := endpointSliceExport(epName, imcNamespace)
By("expecting successful CREATE of Endpoint slice export")
Expect(impersonateHubClient.Create(ctx, &ise)).Should(Succeed())
Expect(impersonateHubClient.Create(ctx, &esx)).Should(Succeed())

By("deleting Endpoint Slice Export")
Expect(impersonateHubClient.Delete(ctx, &esx)).Should(Succeed(), "failed to delete EndpointSlice Export")
})
})

Context("allow request to modify network resources in fleet member namespaces, for user in member cluster identity", func() {
mcName := fmt.Sprintf(mcNameTemplate, GinkgoParallelProcess())
iseName := fmt.Sprintf(internalServiceExportNameTemplate, GinkgoParallelProcess())
imcNamespace := fmt.Sprintf(utils.NamespaceNameFormat, mcName)
var ise fleetnetworkingv1alpha1.InternalServiceExport

BeforeEach(func() {
createMemberCluster(mcName, testUser, nil, map[string]string{fleetClusterResourceIDAnnotationKey: clusterID1})
checkInternalMemberClusterExists(mcName, imcNamespace)
ise := internalServiceExport(iseName, imcNamespace)
ise = internalServiceExport(iseName, imcNamespace)
// can return no kind match error.
Eventually(func(g Gomega) error {
return hubClient.Create(ctx, &ise)
}, eventuallyDuration, eventuallyInterval).Should(Succeed())
})

AfterEach(func() {
Expect(hubClient.Delete(ctx, &ise)).Should(Succeed(), "failed to delete Internal Service Export")
ensureMemberClusterAndRelatedResourcesDeletion(mcName)
})

Expand Down
86 changes: 84 additions & 2 deletions test/e2e/join_and_leave_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,9 @@ Licensed under the MIT license.
package e2e

import (
"errors"
"fmt"
"reflect"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
Expand All @@ -18,6 +20,8 @@ import (
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"

fleetnetworkingv1alpha1 "go.goms.io/fleet-networking/api/v1alpha1"

clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1"
placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
"go.goms.io/fleet/pkg/utils"
Expand All @@ -31,6 +35,7 @@ const (
var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, func() {
crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
internalServiceExportName := fmt.Sprintf("internal-service-export-%d", GinkgoParallelProcess())
var wantSelectedResources []placementv1beta1.ResourceIdentifier
BeforeAll(func() {
// Create the test resources.
Expand Down Expand Up @@ -95,9 +100,86 @@ var _ = Describe("Test member cluster join and leave flow", Ordered, Serial, fun
}
})

It("Should be able to unjoin a cluster with crp still running", func() {
By("remove all the clusters without deleting the CRP")
It("create a dummy internalServiceExport in the reserved member namespace", func() {
Comment thread
zhiying-lin marked this conversation as resolved.
for idx := range allMemberClusterNames {
memberCluster := allMemberClusters[idx]
namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName)
internalServiceExport := &fleetnetworkingv1alpha1.InternalServiceExport{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespaceName,
Name: internalServiceExportName,
// Add a custom finalizer; this would allow us to better observe
// the behavior of the controllers.
Finalizers: []string{customDeletionBlockerFinalizer},
},
Spec: fleetnetworkingv1alpha1.InternalServiceExportSpec{
ServiceReference: fleetnetworkingv1alpha1.ExportedObjectReference{
NamespacedName: "test-namespace/test-svc",
ClusterID: memberCluster.ClusterName,
Kind: "Service",
Namespace: "test-namespace",
Name: "test-svc",
ResourceVersion: "0",
UID: "00000000-0000-0000-0000-000000000000",
},
Ports: []fleetnetworkingv1alpha1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
Port: 4848,
},
},
},
}
Expect(hubClient.Create(ctx, internalServiceExport)).To(Succeed(), "Failed to create internalServiceExport")
}
})

It("Should fail the unjoin requests", func() {
for idx := range allMemberClusters {
memberCluster := allMemberClusters[idx]
mcObj := &clusterv1beta1.MemberCluster{
ObjectMeta: metav1.ObjectMeta{
Name: memberCluster.ClusterName,
},
}
err := hubClient.Delete(ctx, mcObj)
Expect(err).ShouldNot(Succeed(), "Want the deletion to be denied")
var statusErr *apierrors.StatusError
Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete memberCluster call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&apierrors.StatusError{})))
Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("Please delete serviceExport test-namespace/test-svc in the member cluster before leaving, request is denied"))
}
})

It("Deleting the internalServiceExports", func() {
for idx := range allMemberClusterNames {
memberCluster := allMemberClusters[idx]
namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName)

internalSvcExportKey := types.NamespacedName{Namespace: namespaceName, Name: internalServiceExportName}
var export fleetnetworkingv1alpha1.InternalServiceExport
Expect(hubClient.Get(ctx, internalSvcExportKey, &export)).Should(Succeed(), "Failed to get internalServiceExport")
Expect(hubClient.Delete(ctx, &export)).To(Succeed(), "Failed to delete internalServiceExport")
}
})

It("Should be able to trigger the member cluster DELETE", func() {
setAllMemberClustersToLeave()
})

It("Removing the finalizer from the internalServiceExport", func() {
for idx := range allMemberClusterNames {
memberCluster := allMemberClusters[idx]
namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName)

internalSvcExportKey := types.NamespacedName{Namespace: namespaceName, Name: internalServiceExportName}
var export fleetnetworkingv1alpha1.InternalServiceExport
Expect(hubClient.Get(ctx, internalSvcExportKey, &export)).Should(Succeed(), "Failed to get internalServiceExport")
export.Finalizers = nil
Expect(hubClient.Update(ctx, &export)).To(Succeed(), "Failed to update internalServiceExport")
}
})

It("Should be able to unjoin a cluster with crp still running", func() {
checkIfAllMemberClustersHaveLeft()
})

Expand Down
Loading