package steps

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
	"sync"

	"github.com/cucumber/godog"
	"github.com/go-logr/logr"
	"github.com/spf13/pflag"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/component-base/featuregate"
	"k8s.io/klog/v2/textlogger"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/operator-framework/operator-controller/internal/operator-controller/features"
)

type resource struct {
	name      string
	kind      string
	namespace string
}

// deploymentRestore records the original state of a deployment so it can be
// rolled back after a test that modifies deployment configuration.
type deploymentRestore struct {
	name          string // deployment name
	namespace     string
	containerName string   // container to patch (for env var restores)
	patchedArgs   bool     // true when container args were modified (for TLS profile patches)
	originalArgs  []string // original container args; may be nil if args were unset
	originalEnv   []string // original env vars as "NAME=VALUE" (for proxy patches)
}

type scenarioContext struct {
	id                   string
	namespace            string
	clusterExtensionName string
	clusterObjectSetName string
	catalogs             map[string]string // user-chosen name -> ClusterCatalog resource name
	catalogPackageNames  map[string]string // original package name -> parameterized name
	addedResources       []resource
	removedResources     []unstructured.Unstructured
	metricsResponse      map[string]string
	leaderPods           map[string]string // component name -> leader pod name
	deploymentRestores   []deploymentRestore
	extensionObjects     []client.Object
	proxy                *recordingProxy
}

// GatherClusterExtensionObjects collects all resources related to the ClusterExtension, contained in
// either their Helm release Secret or ClusterObjectSet depending on the applier being used,
// and saves them into the context.
func (s *scenarioContext) GatherClusterExtensionObjects() error {
	objs, err := listExtensionResources(s.clusterExtensionName)
	if err != nil {
		return fmt.Errorf("failed to load extension resources into context: %w", err)
	}
	s.extensionObjects = objs
	return nil
}

// GetClusterExtensionObjects returns the ClusterExtension objects currently saved into the context.
// It always returns nil until GatherClusterExtensionObjects is called.
func (s *scenarioContext) GetClusterExtensionObjects() []client.Object {
	return s.extensionObjects
}

type contextKey string

const (
	scenarioContextKey contextKey = "scenario-context"
)

var (
	devMode      = false
	featureGates = map[featuregate.Feature]bool{
		features.WebhookProviderCertManager:        true,
		features.PreflightPermissions:              false,
		features.SingleOwnNamespaceInstallSupport:  false,
		features.SyntheticPermissions:              false,
		features.WebhookProviderOpenshiftServiceCA: false,
		features.HelmChartSupport:                  false,
		features.BoxcutterRuntime:                  false,
		features.DeploymentConfig:                  false,
		catalogdHAFeature:                          false,
	}
	logger logr.Logger
)

func init() {
	flagSet := pflag.CommandLine
	flagSet.BoolVar(&devMode, "log.debug", false, "print debug log level")
}
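
// RegisterHooks wires the scenario lifecycle hooks into a godog ScenarioContext:
// feature-gate tag checking and per-scenario context creation before each scenario,
// and resource cleanup after it. A sketch of how a test runner might wire this up
// (the real suite setup lives outside this file, so the surrounding names here are
// illustrative only):
//
//	status := godog.TestSuite{
//		TestSuiteInitializer: func(tsc *godog.TestSuiteContext) { tsc.BeforeSuite(steps.BeforeSuite) },
//		ScenarioInitializer:  func(sc *godog.ScenarioContext) { steps.RegisterHooks(sc) },
//	}.Run()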
func RegisterHooks(sc *godog.ScenarioContext) {
	sc.Before(CheckFeatureTags)
	sc.Before(CreateScenarioContext)
	sc.After(ScenarioCleanup)
}
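
// detectOLMDeployment lists Deployments across all namespaces carrying the
// app.kubernetes.io/part-of=olm label and returns the one named
// olmDeploymentName, or an error if no such Deployment exists.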
func detectOLMDeployment() (*appsv1.Deployment, error) {
	raw, err := k8sClient("get", "deployments", "-A", "-l", "app.kubernetes.io/part-of=olm", "-o", "jsonpath={.items}")
	if err != nil {
		return nil, err
	}
	dl := []appsv1.Deployment{}
	if err := json.Unmarshal([]byte(raw), &dl); err != nil {
		return nil, fmt.Errorf("failed to unmarshal OLM deployments: %v", err)
	}
	for _, d := range dl {
		if d.Name == olmDeploymentName {
			return &d, nil
		}
	}
	return nil, fmt.Errorf("failed to detect OLM Deployment")
}
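
// BeforeSuite configures the suite logger, enables the catalogd HA gate when the
// cluster has at least two nodes, and reads the --feature-gates arguments of the
// OLM manager container so that CheckFeatureTags can skip scenarios whose gates
// are disabled. It is meant to run once, before any scenario (for example via
// godog's TestSuiteContext.BeforeSuite).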
func BeforeSuite() {
	if devMode {
		logger = textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(1)))
	} else {
		logger = textlogger.NewLogger(textlogger.NewConfig())
	}

	// Enable HA scenarios when the cluster has at least 2 nodes. This runs
	// unconditionally so that upgrade scenarios (which install OLM in a Background
	// step and return early below) still get the gate set correctly.
	if out, err := k8sClient("get", "nodes", "--no-headers", "-o", "name"); err == nil &&
		len(strings.Fields(strings.TrimSpace(out))) >= 2 {
		featureGates[catalogdHAFeature] = true
	}

	olm, err := detectOLMDeployment()
	if err != nil {
		logger.Info("OLM deployments not found; skipping feature gate detection (upgrade scenarios will install OLM in Background)")
		return
	}
	olmNamespace = olm.Namespace

	featureGatePattern := regexp.MustCompile(`--feature-gates=([[:alnum:]]+)=(true|false)`)
	for _, c := range olm.Spec.Template.Spec.Containers {
		if c.Name == "manager" {
			for _, arg := range c.Args {
				if matches := featureGatePattern.FindStringSubmatch(arg); matches != nil {
					v, err := strconv.ParseBool(matches[2])
					if err != nil {
						panic(fmt.Errorf("failed to parse feature gate %q=%q: %v", matches[1], matches[2], err))
					}
					featureGates[featuregate.Feature(matches[1])] = v
				}
			}
		}
	}
	logger.Info(fmt.Sprintf("Enabled feature gates: %v", featureGates))
}
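
// CheckFeatureTags is a Before hook that skips a scenario when any of its tags
// (with the leading "@" stripped) names a known feature gate that is disabled.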
func CheckFeatureTags(ctx context.Context, sc *godog.Scenario) (context.Context, error) {
	for _, tag := range sc.Tags {
		if enabled, found := featureGates[featuregate.Feature(tag.Name[1:])]; found && !enabled {
			logger.Info(fmt.Sprintf("Skipping scenario %q because feature gate %q is disabled", sc.Name, tag.Name[1:]))
			return ctx, godog.ErrSkip
		}
	}
	return ctx, nil
}
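
// CreateScenarioContext is a Before hook that stores a fresh scenarioContext in
// the context, deriving the namespace, ClusterExtension, and ClusterObjectSet
// names from the scenario ID so concurrent scenarios do not collide.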
func CreateScenarioContext(ctx context.Context, sc *godog.Scenario) (context.Context, error) {
	scCtx := &scenarioContext{
		id:                   sc.Id,
		namespace:            fmt.Sprintf("ns-%s", sc.Id),
		clusterExtensionName: fmt.Sprintf("ce-%s", sc.Id),
		clusterObjectSetName: fmt.Sprintf("cos-%s", sc.Id),
		catalogs:             make(map[string]string),
		catalogPackageNames:  make(map[string]string),
		metricsResponse:      make(map[string]string),
		leaderPods:           make(map[string]string),
	}
	return context.WithValue(ctx, scenarioContextKey, scCtx), nil
}
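
// scenarioCtx returns the scenarioContext stored by CreateScenarioContext.
// It panics if CreateScenarioContext has not run for this context.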
func scenarioCtx(ctx context.Context) *scenarioContext {
	return ctx.Value(scenarioContextKey).(*scenarioContext)
}
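
// stderrOutput returns the captured stderr of an *exec.ExitError, or an empty
// string for any other error.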
func stderrOutput(err error) string {
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && exitErr != nil {
		return string(exitErr.Stderr)
	}
	return ""
}
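
// ScenarioCleanup is an After hook that undoes a scenario's side effects: it
// stops the recording proxy, restores any patched deployments, and, unless the
// scenario failed, deletes the scenario's resources concurrently.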
func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context.Context, error) {
	sc := scenarioCtx(ctx)

	// Stop the in-process recording proxy if one was started.
	if sc.proxy != nil {
		sc.proxy.stop()
	}

	// Restore any deployments that were modified during the scenario. Runs
	// unconditionally (even on failure) to prevent a misconfigured deployment
	// from bleeding into subsequent scenarios. Restored in LIFO order so that
	// multiple patches to the same deployment unwind to the true original.
	for i := len(sc.deploymentRestores) - 1; i >= 0; i-- {
		dr := sc.deploymentRestores[i]
		if dr.patchedArgs {
			if err2 := patchDeploymentArgs(dr.namespace, dr.name, dr.originalArgs); err2 != nil {
				logger.Info("Error restoring deployment args", "name", dr.name, "error", err2)
			} else if _, err2 := k8sClient("rollout", "status", "-n", dr.namespace,
				fmt.Sprintf("deployment/%s", dr.name), "--timeout=2m"); err2 != nil {
				logger.Info("Timeout waiting for deployment rollout after restore", "name", dr.name)
			}
		}
		if err2 := restoreDeployment(dr); err2 != nil {
			logger.Info("Error restoring deployment env", "deployment", dr.name, "namespace", dr.namespace, "error", err2)
		}
	}

	if err != nil {
		return ctx, err
	}

	forDeletion := sc.addedResources
	if sc.clusterExtensionName != "" {
		forDeletion = append(forDeletion, resource{name: sc.clusterExtensionName, kind: "clusterextension"})
	}
	if sc.clusterObjectSetName != "" && featureGates[features.BoxcutterRuntime] {
		forDeletion = append(forDeletion, resource{name: sc.clusterObjectSetName, kind: "clusterobjectset"})
	}
	for _, catalogName := range sc.catalogs {
		forDeletion = append(forDeletion, resource{name: catalogName, kind: "clustercatalog"})
	}
	forDeletion = append(forDeletion, resource{name: sc.namespace, kind: "namespace"})

	var wg sync.WaitGroup
	for _, r := range forDeletion {
		wg.Add(1)
		go func(res resource) {
			defer wg.Done()
			args := []string{"delete", res.kind, res.name, "--ignore-not-found=true"}
			if res.namespace != "" {
				args = append(args, "-n", res.namespace)
			}
			if _, err := k8sClient(args...); err != nil {
				logger.Info("Error deleting resource", "name", res.name, "namespace", res.namespace, "stderr", stderrOutput(err))
			}
		}(r)
	}
	wg.Wait()

	return ctx, nil
}