This repository was archived by the owner on Apr 1, 2026. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 244
Expand file tree
/
Copy pathdelete.go
More file actions
265 lines (235 loc) · 10.1 KB
/
delete.go
File metadata and controls
265 lines (235 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
package delete
import (
	"context"
	"errors"
	"fmt"
	"path/filepath"

	"github.com/devfile/library/v2/pkg/devfile/parser"
	v1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/klog/v2"

	"github.com/redhat-developer/odo/pkg/component"
	"github.com/redhat-developer/odo/pkg/configAutomount"
	"github.com/redhat-developer/odo/pkg/exec"
	"github.com/redhat-developer/odo/pkg/kclient"
	odolabels "github.com/redhat-developer/odo/pkg/labels"
	"github.com/redhat-developer/odo/pkg/libdevfile"
	"github.com/redhat-developer/odo/pkg/log"
	clierrors "github.com/redhat-developer/odo/pkg/odo/cli/errors"
	odocontext "github.com/redhat-developer/odo/pkg/odo/context"
	"github.com/redhat-developer/odo/pkg/platform"
	"github.com/redhat-developer/odo/pkg/podman"
	"github.com/redhat-developer/odo/pkg/util"
)
// DeleteComponentClient is the concrete implementation of Client used to
// delete odo component resources, both from a Kubernetes cluster and from
// podman.
type DeleteComponentClient struct {
// kubeClient accesses Kubernetes cluster resources.
kubeClient kclient.ClientInterface
// podmanClient accesses pods managed by podman.
podmanClient podman.Client
// execClient runs commands inside containers (used for preStop events).
execClient exec.Client
// configAutomountClient resolves automounted volume configuration.
configAutomountClient configAutomount.Client
}
// Compile-time assertion that *DeleteComponentClient satisfies Client.
var _ Client = (*DeleteComponentClient)(nil)
// NewDeleteComponentClient builds a DeleteComponentClient wired with the
// given cluster, podman, exec and config-automount clients.
func NewDeleteComponentClient(
	kubeClient kclient.ClientInterface,
	podmanClient podman.Client,
	execClient exec.Client,
	configAutomountClient configAutomount.Client,
) *DeleteComponentClient {
	client := DeleteComponentClient{
		kubeClient:            kubeClient,
		podmanClient:          podmanClient,
		execClient:            execClient,
		configAutomountClient: configAutomountClient,
	}
	return &client
}
// ListClusterResourcesToDelete lists Kubernetes resources from cluster in namespace for a given odo component.
// It only returns resources not owned by another resource of the component, letting the garbage collector do its job.
func (do *DeleteComponentClient) ListClusterResourcesToDelete(
	ctx context.Context,
	componentName string,
	namespace string,
	mode string,
) ([]unstructured.Unstructured, error) {
	selector := odolabels.GetSelector(componentName, odocontext.GetApplication(ctx), mode, false)
	list, err := do.kubeClient.GetAllResourcesFromSelector(selector, namespace)
	if err != nil {
		return nil, err
	}
	var result []unstructured.Unstructured
	for _, resource := range list {
		// A resource already Terminating is not worth displaying or deleting.
		if resource.GetDeletionTimestamp() != nil {
			continue
		}
		// Skip resources owned by another resource of the same component:
		// deleting the owner lets the garbage collector remove them.
		ownedByListed := false
		for _, ownerRef := range resource.GetOwnerReferences() {
			if references(list, ownerRef) {
				ownedByListed = true
				break
			}
		}
		if ownedByListed {
			continue
		}
		result = append(result, resource)
	}
	return result, nil
}
// DeleteResources deletes the given resources, optionally waiting for each
// deletion, and returns the subset of resources that could not be deleted.
// A resource that is already gone (NotFound) counts as successfully deleted.
func (do *DeleteComponentClient) DeleteResources(resources []unstructured.Unstructured, wait bool) []unstructured.Unstructured {
	var failed []unstructured.Unstructured
	for _, resource := range resources {
		mapping, err := do.kubeClient.GetRestMappingFromUnstructured(resource)
		if err != nil {
			failed = append(failed, resource)
			continue
		}
		err = do.kubeClient.DeleteDynamicResource(resource.GetName(), mapping.Resource, wait)
		if err == nil || kerrors.IsNotFound(err) {
			continue
		}
		klog.V(3).Infof("failed to delete resource %q (%s.%s.%s): %v", resource.GetName(), mapping.Resource.Group, mapping.Resource.Version, mapping.Resource.Resource, err)
		failed = append(failed, resource)
	}
	return failed
}
// references reports whether ownerRef points at one of the resources in list,
// comparing by API version, kind and name.
func references(list []unstructured.Unstructured, ownerRef metav1.OwnerReference) bool {
	for i := range list {
		r := &list[i]
		sameTarget := ownerRef.APIVersion == r.GetAPIVersion() &&
			ownerRef.Kind == r.GetKind() &&
			ownerRef.Name == r.GetName()
		if sameTarget {
			return true
		}
	}
	return false
}
// ListClusterResourcesToDeleteFromDevfile parses all the devfile components and returns a list
// of resources that are present on the cluster and can be deleted. It also reports whether the
// inner-loop (Dev mode) Deployment is currently deployed.
// Returns a Warning if an error happens communicating with the cluster.
func (do *DeleteComponentClient) ListClusterResourcesToDeleteFromDevfile(devfileObj parser.DevfileObj, appName string, componentName string, mode string) (isInnerLoopDeployed bool, resources []unstructured.Unstructured, err error) {
	var deployment *v1.Deployment
	if mode == odolabels.ComponentDevMode || mode == odolabels.ComponentAnyMode {
		// Inner Loop: look for the Deployment created by `odo dev`.
		var deploymentName string
		deploymentName, err = util.NamespaceKubernetesObject(componentName, appName)
		if err != nil {
			return isInnerLoopDeployed, resources, fmt.Errorf("failed to get the resource %q name for component %q; cause: %w", kclient.DeploymentKind, componentName, err)
		}
		deployment, err = do.kubeClient.GetDeploymentByName(deploymentName)
		if err != nil && !kerrors.IsNotFound(err) {
			// Kubernetes cluster access fails, return with a warning only
			err = clierrors.NewWarning(fmt.Sprintf("failed to get deployment %q", deploymentName), err)
			return isInnerLoopDeployed, resources, err
		}
		// If the deployment is found on the cluster, convert it to an
		// unstructured.Unstructured object so that it can be appended to
		// resources; else continue to the outer loop. Guard against a nil
		// deployment on the NotFound path.
		if deployment != nil && deployment.Name != "" {
			isInnerLoopDeployed = true
			var unstructuredDeploy unstructured.Unstructured
			unstructuredDeploy, err = kclient.ConvertK8sResourceToUnstructured(deployment)
			if err != nil {
				return isInnerLoopDeployed, resources, fmt.Errorf("failed to parse the resource %q: %q; cause: %w", kclient.DeploymentKind, deploymentName, err)
			}
			resources = append(resources, unstructuredDeploy)
		}
	}
	// Parse the devfile for K8s resources; these may belong to either innerloop or outerloop
	localK8sResources, err := libdevfile.ListKubernetesComponents(devfileObj, filepath.Dir(devfileObj.Ctx.GetAbsPath()))
	if err != nil {
		return isInnerLoopDeployed, resources, fmt.Errorf("failed to gather resources for deletion: %w", err)
	}
	localOCResources, err := libdevfile.ListOpenShiftComponents(devfileObj, filepath.Dir(devfileObj.Ctx.GetAbsPath()))
	if err != nil {
		return isInnerLoopDeployed, resources, fmt.Errorf("failed to gather resources for deletion: %w", err)
	}
	localAllResources := []unstructured.Unstructured{}
	localAllResources = append(localAllResources, localOCResources...)
	localAllResources = append(localAllResources, localK8sResources...)
	for _, lr := range localAllResources {
		var gvr *meta.RESTMapping
		gvr, err = do.kubeClient.GetRestMappingFromUnstructured(lr)
		if err != nil {
			// Resources whose kind is unknown to the cluster cannot exist there.
			continue
		}
		// Try to fetch the resource from the cluster; if it exists, append it to the resources list
		var cr *unstructured.Unstructured
		cr, err = do.kubeClient.GetDynamicResource(gvr.Resource, lr.GetName())
		// If a specific mode is asked for, then make sure it matches with the cr's mode.
		if err != nil || (mode != odolabels.ComponentAnyMode && odolabels.GetMode(cr.GetLabels()) != mode) {
			if cr != nil {
				klog.V(4).Infof("Ignoring resource: %s/%s; its mode(%s) does not match with the given mode(%s)", gvr.Resource.Resource, lr.GetName(), odolabels.GetMode(cr.GetLabels()), mode)
			} else {
				klog.V(4).Infof("Ignoring resource: %s/%s; it does not exist on the cluster", gvr.Resource.Resource, lr.GetName())
			}
			continue
		}
		resources = append(resources, *cr)
	}
	return isInnerLoopDeployed, resources, nil
}
// ExecutePreStopEvents executes preStop events if any, as a precondition to deleting a devfile component deployment.
// Failures while running the events are logged but never fail the deletion itself; only the inability to determine
// whether the component exists is returned as an error.
func (do *DeleteComponentClient) ExecutePreStopEvents(ctx context.Context, devfileObj parser.DevfileObj, appName string, componentName string) error {
	if !libdevfile.HasPreStopEvents(devfileObj) {
		return nil
	}
	klog.V(4).Infof("Gathering information for component: %q", componentName)
	klog.V(3).Infof("Checking component status for %q", componentName)
	selector := odolabels.GetSelector(componentName, appName, odolabels.ComponentDevMode, false)
	pod, err := do.kubeClient.GetRunningPodFromSelector(selector)
	if err != nil {
		klog.V(1).Info("Component not found on the cluster.")
		if kerrors.IsForbidden(err) {
			klog.V(3).Infof("Resource for %q forbidden", componentName)
			log.Warningf("You are forbidden from accessing the resource. Please check if you have the right permissions and try again.")
			return nil
		}
		// Use errors.As so wrapped PodNotFoundError values are also recognized.
		var podNotFoundErr *platform.PodNotFoundError
		if errors.As(err, &podNotFoundErr) {
			klog.V(3).Infof("Resource for %q not found; cause: %v", componentName, podNotFoundErr)
			log.Warningf("Resources not found on the cluster. Run `odo delete component -v <DEBUG_LEVEL_0-9>` to know more.")
			return nil
		}
		// Wrap (not stringify) the cause so callers can inspect it with errors.Is/As.
		return fmt.Errorf("unable to determine if component %s exists; cause: %w", componentName, err)
	}
	klog.V(4).Infof("Executing %q event commands for component %q", libdevfile.PreStop, componentName)
	// ignore the failures if any; delete should not fail because preStop events failed to execute
	handler := component.NewRunHandler(
		ctx,
		do.kubeClient,
		do.execClient,
		do.configAutomountClient,
		// TODO(feloy) set these values when we want to support Apply Image commands for PreStop events
		nil, nil,
		component.HandlerOptions{
			PodName:           pod.Name,
			ContainersRunning: component.GetContainersNames(pod),
			Msg:               "Executing pre-stop command in container",
		},
	)
	err = libdevfile.ExecPreStopEvents(ctx, devfileObj, handler)
	if err != nil {
		log.Warningf("Failed to execute %q event commands for component %q, cause: %v", libdevfile.PreStop, componentName, err.Error())
	}
	return nil
}
// ListPodmanResourcesToDelete returns the pod definitions that would need to
// be deleted on podman for the given component, along with whether the
// inner-loop (dev) pod is currently deployed. Deploy mode never has podman
// resources.
func (do *DeleteComponentClient) ListPodmanResourcesToDelete(appName string, componentName string, mode string) (isInnerLoopDeployed bool, pods []*corev1.Pod, err error) {
	// Outer-loop (Deploy) resources never exist on podman.
	if mode == odolabels.ComponentDeployMode {
		return false, nil, nil
	}
	// Inner Loop: compute the pod name odo would have used for this component.
	podName, nameErr := util.NamespaceKubernetesObject(componentName, appName)
	if nameErr != nil {
		return false, nil, fmt.Errorf("failed to get the resource %q name for component %q; cause: %w", kclient.DeploymentKind, componentName, nameErr)
	}
	allPods, lsErr := do.podmanClient.PodLs()
	if lsErr != nil {
		// Podman access failure is surfaced as a warning-class error.
		return false, nil, clierrors.NewWarning("failed to get pods on podman", lsErr)
	}
	if _, running := allPods[podName]; !running {
		return false, nil, nil
	}
	podDef, genErr := do.podmanClient.KubeGenerate(podName)
	if genErr != nil {
		return false, nil, genErr
	}
	return true, []*corev1.Pod{podDef}, nil
}