Commit ff5a1fe

feat: enhance rollback functionality to clean up CANARY and BASELINE variants (#6660)
Signed-off-by: Mohammed Firdous <124298708+mohammedfirdouss@users.noreply.github.com>
1 parent 0130b1f commit ff5a1fe

2 files changed: 189 additions & 6 deletions

pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/rollback.go

Lines changed: 25 additions & 6 deletions
@@ -147,8 +147,10 @@ func (p *Plugin) rollback(ctx context.Context, input *sdk.ExecuteStageInput[kube
 	// When addVariantLabelToSelector is true, ensure that all workloads
 	// have the variant label in their selector.
 	var (
-		variantLabel   = cfg.Spec.VariantLabel.Key
-		primaryVariant = cfg.Spec.VariantLabel.PrimaryValue
+		variantLabel    = cfg.Spec.VariantLabel.Key
+		primaryVariant  = cfg.Spec.VariantLabel.PrimaryValue
+		canaryVariant   = cfg.Spec.VariantLabel.CanaryValue
+		baselineVariant = cfg.Spec.VariantLabel.BaselineValue
 	)
 	// TODO: Consider other fields to configure whether to add a variant label to the selector
 	// because the rollback stage is executed in both quick sync and pipeline sync strategies.
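For context on the new variables: all four values come from the application's VariantLabel configuration. A minimal sketch of that config struct follows; the field names match what the diff reads, while the example defaults in the comments are an assumption based on PipeCD's documented defaults, not something this commit defines.

type KubernetesVariantLabel struct {
	Key           string // label key attached to variant resources, e.g. "pipecd.dev/variant" (assumed default)
	PrimaryValue  string // value for the stable variant, e.g. "primary" (assumed default)
	BaselineValue string // value for the baseline variant, e.g. "baseline" (assumed default)
	CanaryValue   string // value for the canary variant, e.g. "canary" (assumed default)
}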
@@ -186,17 +188,34 @@ func (p *Plugin) rollback(ctx context.Context, input *sdk.ExecuteStageInput[kube
 	}

 	// Create the applier for the target cluster.
-	applier := provider.NewApplier(provider.NewKubectl(kubectlPath), cfg.Spec.Input, deployTargetConfig, input.Logger)
+	kubectl := provider.NewKubectl(kubectlPath)
+	applier := provider.NewApplier(kubectl, cfg.Spec.Input, deployTargetConfig, input.Logger)

 	// Start applying all manifests to add or update running resources.
 	if err := applyManifests(ctx, applier, manifests, cfg.Spec.Input.Namespace, lp); err != nil {
 		lp.Errorf("Failed while applying manifests (%v)", err)
 		return sdk.StageStatusFailure
 	}

-	// TODO: implement prune resources
-	// TODO: delete all resources of CANARY variant
-	// TODO: delete all resources of BASELINE variant
+	var failed bool

+	lp.Info("Start removing CANARY variant resources if exists")
+	if err := deleteVariantResources(ctx, lp, kubectl, deployTargetConfig.KubeConfigPath, applier, input.Request.Deployment.ApplicationID, variantLabel, canaryVariant); err != nil {
+		lp.Errorf("Failed while deleting CANARY variant resources (%v)", err)
+		failed = true
+	}
+
+	lp.Info("Start removing BASELINE variant resources if exists")
+	if err := deleteVariantResources(ctx, lp, kubectl, deployTargetConfig.KubeConfigPath, applier, input.Request.Deployment.ApplicationID, variantLabel, baselineVariant); err != nil {
+		lp.Errorf("Failed while deleting BASELINE variant resources (%v)", err)
+		failed = true
+	}
+
+	// TODO: prune resources which don't exist in the running manifests but exist in the target manifests.
+	// This occurs when the user adds a new resource and the deployment pipeline fails.
+
+	if failed {
+		return sdk.StageStatusFailure
+	}
 	return sdk.StageStatusSuccess
 }
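deleteVariantResources is called above but defined outside this diff, so its body is not shown here. Below is a minimal sketch of the shape such a helper could take, assuming label-selector-based deletion. The variantDeleter and stageLogger interfaces and the "pipecd.dev/application" label key are illustrative assumptions, and the real call also passes the applier, which this sketch omits.

package sketch

import (
	"context"
	"fmt"
)

// variantDeleter abstracts the one kubectl capability this sketch needs:
// deleting every resource that matches a label selector.
type variantDeleter interface {
	DeleteBySelector(ctx context.Context, kubeConfigPath, selector string) error
}

// stageLogger is the subset of the stage log persister used here.
type stageLogger interface {
	Infof(format string, args ...any)
}

// deleteVariantResources removes all resources that belong to the given
// application and carry the given variant label value (e.g. "canary").
func deleteVariantResources(ctx context.Context, lp stageLogger, kubectl variantDeleter, kubeConfigPath, appID, variantLabel, variant string) error {
	// Build a selector such as "pipecd.dev/application=<appID>,pipecd.dev/variant=canary".
	// The application label key is an assumption for illustration only.
	selector := fmt.Sprintf("pipecd.dev/application=%s,%s=%s", appID, variantLabel, variant)
	lp.Infof("Deleting resources matching %q", selector)
	return kubectl.DeleteBySelector(ctx, kubeConfigPath, selector)
}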

pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/rollback_test.go

Lines changed: 164 additions & 0 deletions
@@ -617,3 +617,167 @@ func TestPlugin_executeK8sMultiRollbackStage_FailureRollback_when_at_least_one_o

 	assert.Equal(t, sdk.StageStatusSuccess, status)
 }
+
+func TestPlugin_executeK8sMultiRollbackStage_CleansUpCanaryVariant(t *testing.T) {
+	t.Parallel()
+
+	ctx := t.Context()
+	testRegistry := toolregistrytest.NewTestToolRegistry(t)
+	dtConfig, dynamicClient := setupTestDeployTargetConfigAndDynamicClient(t)
+
+	canaryDir := filepath.Join("testdata", "canary_rollout_without_create_service")
+	simpleDir := filepath.Join("testdata", "simple")
+
+	canaryCfg := sdk.LoadApplicationConfigForTest[kubeconfig.KubernetesApplicationSpec](t, filepath.Join(canaryDir, "app.pipecd.yaml"), "kubernetes_multicluster")
+	simpleCfg := sdk.LoadApplicationConfigForTest[kubeconfig.KubernetesApplicationSpec](t, filepath.Join(simpleDir, "app.pipecd.yaml"), "kubernetes_multicluster")
+
+	// Step 1: deploy canary variant so it exists in the cluster.
+	canaryInput := &sdk.ExecuteStageInput[kubeconfig.KubernetesApplicationSpec]{
+		Request: sdk.ExecuteStageRequest[kubeconfig.KubernetesApplicationSpec]{
+			StageName:   StageK8sMultiCanaryRollout,
+			StageConfig: []byte(`{"replicas": 1}`),
+			TargetDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      canaryDir,
+				CommitHash:                "target-hash",
+				ApplicationConfig:         canaryCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			Deployment: sdk.Deployment{
+				PipedID:       "piped-id",
+				ApplicationID: "app-id",
+			},
+		},
+		Client: sdk.NewClient(nil, "kubernetes_multicluster", "app-id", "stage-id", logpersistertest.NewTestLogPersister(t), testRegistry),
+		Logger: zaptest.NewLogger(t),
+	}
+
+	plugin := &Plugin{}
+	status := plugin.executeK8sMultiCanaryRolloutStage(ctx, canaryInput, []*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig]{
+		{Name: "default", Config: *dtConfig},
+	})
+	require.Equal(t, sdk.StageStatusSuccess, status)
+
+	// Verify canary deployment exists.
+	_, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).Namespace("default").Get(ctx, "simple-canary", metav1.GetOptions{})
+	require.NoError(t, err, "canary deployment should exist before rollback")
+
+	// Step 2: run rollback — should clean up the canary variant.
+	rollbackInput := &sdk.ExecuteStageInput[kubeconfig.KubernetesApplicationSpec]{
+		Request: sdk.ExecuteStageRequest[kubeconfig.KubernetesApplicationSpec]{
+			StageName:   "K8S_MULTI_ROLLBACK",
+			StageConfig: []byte(``),
+			RunningDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      simpleDir,
+				CommitHash:                "previous-hash",
+				ApplicationConfig:         simpleCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			TargetDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      simpleDir,
+				CommitHash:                "target-hash",
+				ApplicationConfig:         simpleCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			Deployment: sdk.Deployment{
+				PipedID:       "piped-id",
+				ApplicationID: "app-id",
+			},
+		},
+		Client: sdk.NewClient(nil, "kubernetes_multicluster", "app-id", "stage-id", logpersistertest.NewTestLogPersister(t), testRegistry),
+		Logger: zaptest.NewLogger(t),
+	}
+
+	status = plugin.executeK8sMultiRollbackStage(ctx, rollbackInput, []*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig]{
+		{Name: "default", Config: *dtConfig},
+	})
+	assert.Equal(t, sdk.StageStatusSuccess, status)
+
+	// Verify canary deployment was deleted.
+	_, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).Namespace("default").Get(ctx, "simple-canary", metav1.GetOptions{})
+	assert.True(t, apierrors.IsNotFound(err), "canary deployment should be deleted after rollback")
+}
+
+func TestPlugin_executeK8sMultiRollbackStage_CleansUpBaselineVariant(t *testing.T) {
+	t.Parallel()
+
+	ctx := t.Context()
+	testRegistry := toolregistrytest.NewTestToolRegistry(t)
+	dtConfig, dynamicClient := setupTestDeployTargetConfigAndDynamicClient(t)
+
+	baselineDir := filepath.Join("testdata", "baseline_rollout_without_create_service")
+	simpleDir := filepath.Join("testdata", "simple")
+
+	baselineCfg := sdk.LoadApplicationConfigForTest[kubeconfig.KubernetesApplicationSpec](t, filepath.Join(baselineDir, "app.pipecd.yaml"), "kubernetes_multicluster")
+	simpleCfg := sdk.LoadApplicationConfigForTest[kubeconfig.KubernetesApplicationSpec](t, filepath.Join(simpleDir, "app.pipecd.yaml"), "kubernetes_multicluster")
+
+	// Step 1: deploy baseline variant so it exists in the cluster.
+	baselineInput := &sdk.ExecuteStageInput[kubeconfig.KubernetesApplicationSpec]{
+		Request: sdk.ExecuteStageRequest[kubeconfig.KubernetesApplicationSpec]{
+			StageName:   StageK8sMultiBaselineRollout,
+			StageConfig: []byte(`{"replicas": 1}`),
+			RunningDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      baselineDir,
+				CommitHash:                "previous-hash",
+				ApplicationConfig:         baselineCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			TargetDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      baselineDir,
+				CommitHash:                "target-hash",
+				ApplicationConfig:         baselineCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			Deployment: sdk.Deployment{
+				PipedID:       "piped-id",
+				ApplicationID: "app-id",
+			},
+		},
+		Client: sdk.NewClient(nil, "kubernetes_multicluster", "app-id", "stage-id", logpersistertest.NewTestLogPersister(t), testRegistry),
+		Logger: zaptest.NewLogger(t),
+	}
+
+	plugin := &Plugin{}
+	status := plugin.executeK8sMultiBaselineRolloutStage(ctx, baselineInput, []*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig]{
+		{Name: "default", Config: *dtConfig},
+	})
+	require.Equal(t, sdk.StageStatusSuccess, status)
+
+	// Verify baseline deployment exists.
+	_, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).Namespace("default").Get(ctx, "simple-baseline", metav1.GetOptions{})
+	require.NoError(t, err, "baseline deployment should exist before rollback")
+
+	// Step 2: run rollback — should clean up the baseline variant.
+	rollbackInput := &sdk.ExecuteStageInput[kubeconfig.KubernetesApplicationSpec]{
+		Request: sdk.ExecuteStageRequest[kubeconfig.KubernetesApplicationSpec]{
+			StageName:   "K8S_MULTI_ROLLBACK",
+			StageConfig: []byte(``),
+			RunningDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      simpleDir,
+				CommitHash:                "previous-hash",
+				ApplicationConfig:         simpleCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			TargetDeploymentSource: sdk.DeploymentSource[kubeconfig.KubernetesApplicationSpec]{
+				ApplicationDirectory:      simpleDir,
+				CommitHash:                "target-hash",
+				ApplicationConfig:         simpleCfg,
+				ApplicationConfigFilename: "app.pipecd.yaml",
+			},
+			Deployment: sdk.Deployment{
+				PipedID:       "piped-id",
+				ApplicationID: "app-id",
+			},
+		},
+		Client: sdk.NewClient(nil, "kubernetes_multicluster", "app-id", "stage-id", logpersistertest.NewTestLogPersister(t), testRegistry),
+		Logger: zaptest.NewLogger(t),
+	}
+
+	status = plugin.executeK8sMultiRollbackStage(ctx, rollbackInput, []*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig]{
+		{Name: "default", Config: *dtConfig},
+	})
+	assert.Equal(t, sdk.StageStatusSuccess, status)
+
+	// Verify baseline deployment was deleted.
+	_, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).Namespace("default").Get(ctx, "simple-baseline", metav1.GetOptions{})
+	assert.True(t, apierrors.IsNotFound(err), "baseline deployment should be deleted after rollback")
+}
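The assertions above lean on two things visible in this diff: variant workloads are named with a -canary or -baseline suffix (hence "simple-canary" and "simple-baseline" for the "simple" app), and existence is checked through a dynamic client. For readers unfamiliar with that pattern, here is a self-contained sketch using the fake dynamic client from k8s.io/client-go; it is independent of the plugin's test helpers such as setupTestDeployTargetConfigAndDynamicClient.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	dynamicfake "k8s.io/client-go/dynamic/fake"
)

func main() {
	// Seed the fake cluster with a Deployment named like a canary variant.
	canary := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata": map[string]any{
			"name":      "simple-canary",
			"namespace": "default",
		},
	}}
	client := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme(), canary)

	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	// The same Get call the tests use to check existence before rollback.
	got, err := client.Resource(gvr).Namespace("default").Get(context.Background(), "simple-canary", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("found:", got.GetName())
}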
