Skip to content

Commit 1d2b903

Browse files
committed
Add metadata for v0.15 and update e2e
Bump ORC used in e2e tests to 2.5.0. Set correct metadata and contract versions. Update clusterctl upgrade tests to start from the v1beta1 contract (CAPI v1.10). Add one test verifying CAPI v1beta2 backwards compatibility.

Signed-off-by: Lennart Jern <lennart.jern@est.tech>
1 parent 53815e2 commit 1d2b903

6 files changed

Lines changed: 226 additions & 35 deletions

File tree

metadata.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,3 +31,6 @@ releaseSeries:
3131
- major: 0
3232
minor: 14
3333
contract: v1beta1
34+
- major: 0
35+
minor: 15
36+
contract: v1beta2

test/e2e/data/e2e_conf.yaml

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ images:
1010
# Use local dev images built from the source tree;
1111
- name: gcr.io/k8s-staging-capi-openstack/capi-openstack-controller:e2e
1212
loadBehavior: mustLoad
13-
- name: quay.io/orc/openstack-resource-controller:v2.4.0
13+
- name: quay.io/orc/openstack-resource-controller:v2.5.0
1414
loadBehavior: tryLoad
1515

1616
providers:
@@ -36,6 +36,15 @@ providers:
3636
new: --metrics-addr=:8080
3737
files:
3838
- sourcePath: "../data/shared/capi/metadata.yaml"
39+
- name: "{go://sigs.k8s.io/cluster-api@v1.10}"
40+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/core-components.yaml"
41+
type: "url"
42+
contract: v1beta1
43+
replacements:
44+
- old: --metrics-addr=127.0.0.1:8080
45+
new: --metrics-addr=:8080
46+
files:
47+
- sourcePath: "../data/shared/capi/metadata.yaml"
3948
- name: kubeadm
4049
type: BootstrapProvider
4150
versions:
@@ -58,6 +67,15 @@ providers:
5867
new: --metrics-addr=:8080
5968
files:
6069
- sourcePath: "../data/shared/capi/metadata.yaml"
70+
- name: "{go://sigs.k8s.io/cluster-api@v1.10}"
71+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/bootstrap-components.yaml"
72+
type: "url"
73+
contract: v1beta1
74+
replacements:
75+
- old: --metrics-addr=127.0.0.1:8080
76+
new: --metrics-addr=:8080
77+
files:
78+
- sourcePath: "../data/shared/capi/metadata.yaml"
6179
- name: kubeadm
6280
type: ControlPlaneProvider
6381
versions:
@@ -80,6 +98,15 @@ providers:
8098
new: --metrics-addr=:8080
8199
files:
82100
- sourcePath: "../data/shared/capi/metadata.yaml"
101+
- name: "{go://sigs.k8s.io/cluster-api@v1.10}"
102+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/control-plane-components.yaml"
103+
type: "url"
104+
contract: v1beta1
105+
replacements:
106+
- old: --metrics-addr=127.0.0.1:8080
107+
new: --metrics-addr=:8080
108+
files:
109+
- sourcePath: "../data/shared/capi/metadata.yaml"
83110
- name: openstack
84111
type: InfrastructureProvider
85112
versions:
@@ -131,7 +158,7 @@ providers:
131158
- name: v0.15.99
132159
value: ../../../config/default
133160
# This is the upcoming version.
134-
contract: v1beta1
161+
contract: v1beta2
135162
files:
136163
- sourcePath: "../data/shared/provider/metadata.yaml"
137164
- sourcePath: "./infrastructure-openstack-no-artifact/cluster-template.yaml"
@@ -151,9 +178,9 @@ providers:
151178
- name: openstack-resource-controller
152179
type: RuntimeExtensionProvider # ORC isn't a provider but we fake it so it can be handled by the clusterctl machinery.
153180
versions:
154-
- name: v2.4.0
181+
- name: v2.5.0
155182
value: ../../../../cluster-api-provider-openstack/test/infrastructure/openstack-resource-controller/config/default
156-
contract: v1beta1
183+
contract: v1beta2
157184
files:
158185
- sourcePath: "../data/shared/openstack-resource-controller/metadata.yaml"
159186
replacements:

test/e2e/data/shared/openstack-resource-controller/metadata.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,3 +17,6 @@ releaseSeries:
1717
- major: 2
1818
minor: 4
1919
contract: v1beta1
20+
- major: 2
21+
minor: 5
22+
contract: v1beta2

test/e2e/data/shared/provider/metadata.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,4 +33,4 @@ releaseSeries:
3333
contract: v1beta1
3434
- major: 0
3535
minor: 15
36-
contract: v1beta1
36+
contract: v1beta2

test/e2e/suites/e2e/clusterctl_upgrade_test.go

Lines changed: 187 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,15 @@ package e2e
2020

2121
import (
2222
"context"
23+
"fmt"
24+
"io"
25+
"net/http"
26+
"strings"
2327

2428
. "github.com/onsi/ginkgo/v2"
2529
. "github.com/onsi/gomega"
2630
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
31+
capi_framework "sigs.k8s.io/cluster-api/test/framework"
2732
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
2833

2934
shared "sigs.k8s.io/cluster-api-provider-openstack/test/e2e/shared"
@@ -33,39 +38,76 @@ var (
3338
capoRelease012 string
3439
capoRelease013 string
3540
capoRelease014 string
41+
capiRelease110 string
3642
capiRelease112 string
3743
)
3844

45+
// NOTE: clusterctl v1.10 cannot handle RuntimeExtensionProvider in the local
46+
// filesystem repository created by the CAPI v1.13 test framework. And clusterctl
47+
// v1.12 refuses to operate against v1beta1 management clusters. Therefore, we
48+
// need to install ORC outside of the normal clusterctl init/upgrade by using
49+
// hooks instead.
50+
//
51+
// To also exercise the ORC upgrade path, we install ORC v1.0.2 (the version the
52+
// old CAPO releases were tested against) in PreInit and the current ORC version
53+
// in PreUpgrade. Both are installed by directly applying the upstream install
54+
// manifest, since clusterctl v1.10 cannot install ORC as a RuntimeExtensionProvider
55+
// from the local filesystem repository.
56+
//
57+
// The dedicated CAPO v0.14 + ORC upgrade test below uses clusterctl v1.12 with
58+
// CAPI v1.12 (creating a v1beta2 management cluster) so that clusterctl can
59+
// properly install and upgrade ORC as a RuntimeExtensionProvider via the normal
60+
// init/upgrade flow.
61+
62+
// orcInitVersion is the ORC version installed alongside the old CAPO release in
63+
// PreInit. v1.0.2 is the version the v0.12/v0.13/v0.14 CAPO releases were tested
64+
// against (see the previous InitWithRuntimeExtensionProviders entry).
65+
const orcInitVersion = "v1.0.2"
66+
3967
var _ = Describe("When testing clusterctl upgrades for CAPO (v0.12=>current) and ORC (v1.0.2=>current)[clusterctl-upgrade]", func() {
4068
BeforeEach(func(ctx context.Context) {
4169
// Note: This gives the version without the 'v' prefix, so we need to add it below.
4270
capoRelease012, err = clusterctl.ResolveRelease(ctx, "go://github.com/kubernetes-sigs/cluster-api-provider-openstack@v0.12")
4371
Expect(err).ToNot(HaveOccurred(), "failed to get stable release of CAPO")
4472
capoRelease012 = "v" + capoRelease012
4573
// Note: This gives the version without the 'v' prefix, so we need to add it below.
46-
capiRelease112, err = capi_e2e.GetStableReleaseOfMinor(ctx, "1.12")
74+
capiRelease110, err = capi_e2e.GetStableReleaseOfMinor(ctx, "1.10")
4775
Expect(err).ToNot(HaveOccurred(), "failed to get stable release of CAPI")
48-
capiRelease112 = "v" + capiRelease112
76+
capiRelease110 = "v" + capiRelease110
4977
})
5078

5179
capi_e2e.ClusterctlUpgradeSpec(context.TODO(), func() capi_e2e.ClusterctlUpgradeSpecInput {
5280
return capi_e2e.ClusterctlUpgradeSpecInput{
53-
E2EConfig: e2eCtx.E2EConfig,
54-
ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
55-
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
56-
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
57-
SkipCleanup: false,
58-
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/" + capiRelease112 + "/clusterctl-{OS}-{ARCH}",
59-
InitWithProvidersContract: "v1beta2",
60-
InitWithInfrastructureProviders: []string{"openstack:" + capoRelease012},
61-
InitWithCoreProvider: "cluster-api:" + capiRelease112,
62-
InitWithBootstrapProviders: []string{"kubeadm:" + capiRelease112},
63-
InitWithControlPlaneProviders: []string{"kubeadm:" + capiRelease112},
81+
E2EConfig: e2eCtx.E2EConfig,
82+
ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
83+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
84+
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
85+
SkipCleanup: false,
86+
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/" + capiRelease110 + "/clusterctl-{OS}-{ARCH}",
87+
InitWithProvidersContract: "v1beta1",
88+
InitWithInfrastructureProviders: []string{"openstack:" + capoRelease012},
89+
InitWithCoreProvider: "cluster-api:" + capiRelease110,
90+
InitWithBootstrapProviders: []string{"kubeadm:" + capiRelease110},
91+
InitWithControlPlaneProviders: []string{"kubeadm:" + capiRelease110},
92+
// Pass an explicit (non-nil) empty slice so the upstream framework does
93+
// NOT fall back to the latest v1beta1 RuntimeExtensionProvider from the
94+
// e2e config (openstack-resource-controller:v1.0.2) and try to install
95+
// it via `clusterctl init`. clusterctl v1.10 cannot handle a
96+
// RuntimeExtensionProvider in the local filesystem repository, so we
97+
// instead install ORC ourselves in the PreInit hook below.
98+
InitWithRuntimeExtensionProviders: []string{},
6499
MgmtFlavor: shared.FlavorDefault,
65100
WorkloadFlavor: shared.FlavorCapiV1Beta1,
66101
InitWithKubernetesVersion: e2eCtx.E2EConfig.MustGetVariable(shared.KubernetesKindVersion),
67-
InitWithRuntimeExtensionProviders: []string{"openstack-resource-controller:v1.0.2"},
68102
UseKindForManagementCluster: true,
103+
// Install ORC v1.0.2 before clusterctl init
104+
PreInit: func(managementClusterProxy capi_framework.ClusterProxy) {
105+
installORC(context.Background(), managementClusterProxy, orcInitVersion)
106+
},
107+
// Upgrade ORC to the current version before clusterctl upgrade
108+
PreUpgrade: func(managementClusterProxy capi_framework.ClusterProxy) {
109+
installLatestORC(context.Background(), managementClusterProxy, e2eCtx.E2EConfig)
110+
},
69111
}
70112
})
71113
})
@@ -77,34 +119,86 @@ var _ = Describe("When testing clusterctl upgrades for CAPO (v0.13=>current) and
77119
Expect(err).ToNot(HaveOccurred(), "failed to get stable release of CAPO")
78120
capoRelease013 = "v" + capoRelease013
79121
// Note: This gives the version without the 'v' prefix, so we need to add it below.
80-
capiRelease112, err = capi_e2e.GetStableReleaseOfMinor(ctx, "1.12")
122+
capiRelease110, err = capi_e2e.GetStableReleaseOfMinor(ctx, "1.10")
81123
Expect(err).ToNot(HaveOccurred(), "failed to get stable release of CAPI")
82-
capiRelease112 = "v" + capiRelease112
124+
capiRelease110 = "v" + capiRelease110
83125
})
84126

85127
capi_e2e.ClusterctlUpgradeSpec(context.TODO(), func() capi_e2e.ClusterctlUpgradeSpecInput {
86128
return capi_e2e.ClusterctlUpgradeSpecInput{
87-
E2EConfig: e2eCtx.E2EConfig,
88-
ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
89-
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
90-
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
91-
SkipCleanup: false,
92-
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/" + capiRelease112 + "/clusterctl-{OS}-{ARCH}",
93-
InitWithProvidersContract: "v1beta2",
94-
InitWithInfrastructureProviders: []string{"openstack:" + capoRelease013},
95-
InitWithCoreProvider: "cluster-api:" + capiRelease112,
96-
InitWithBootstrapProviders: []string{"kubeadm:" + capiRelease112},
97-
InitWithControlPlaneProviders: []string{"kubeadm:" + capiRelease112},
129+
E2EConfig: e2eCtx.E2EConfig,
130+
ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
131+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
132+
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
133+
SkipCleanup: false,
134+
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/" + capiRelease110 + "/clusterctl-{OS}-{ARCH}",
135+
InitWithProvidersContract: "v1beta1",
136+
InitWithInfrastructureProviders: []string{"openstack:" + capoRelease013},
137+
InitWithCoreProvider: "cluster-api:" + capiRelease110,
138+
InitWithBootstrapProviders: []string{"kubeadm:" + capiRelease110},
139+
InitWithControlPlaneProviders: []string{"kubeadm:" + capiRelease110},
140+
// See the v0.12 test above: explicit empty slice prevents the framework
141+
// from auto-installing ORC v1.0.2 via clusterctl v1.10 (which can't
142+
// handle RuntimeExtensionProviders in the local filesystem repository).
143+
InitWithRuntimeExtensionProviders: []string{},
98144
MgmtFlavor: shared.FlavorDefault,
99145
WorkloadFlavor: shared.FlavorCapiV1Beta1,
100146
InitWithKubernetesVersion: e2eCtx.E2EConfig.MustGetVariable(shared.KubernetesKindVersion),
101-
InitWithRuntimeExtensionProviders: []string{"openstack-resource-controller:v1.0.2"},
102147
UseKindForManagementCluster: true,
148+
PreInit: func(managementClusterProxy capi_framework.ClusterProxy) {
149+
installORC(context.Background(), managementClusterProxy, orcInitVersion)
150+
},
151+
PreUpgrade: func(managementClusterProxy capi_framework.ClusterProxy) {
152+
installLatestORC(context.Background(), managementClusterProxy, e2eCtx.E2EConfig)
153+
},
103154
}
104155
})
105156
})
106157

107158
var _ = Describe("When testing clusterctl upgrades for CAPO (v0.14=>current) and ORC (v1.0.2=>current)[clusterctl-upgrade]", func() {
159+
BeforeEach(func(ctx context.Context) {
160+
// Note: This gives the version without the 'v' prefix, so we need to add it below.
161+
capoRelease014, err = clusterctl.ResolveRelease(ctx, "go://github.com/kubernetes-sigs/cluster-api-provider-openstack@v0.14")
162+
Expect(err).ToNot(HaveOccurred(), "failed to get stable release of CAPO")
163+
capoRelease014 = "v" + capoRelease014
164+
// Note: This gives the version without the 'v' prefix, so we need to add it below.
165+
capiRelease110, err = capi_e2e.GetStableReleaseOfMinor(ctx, "1.10")
166+
Expect(err).ToNot(HaveOccurred(), "failed to get stable release of CAPI")
167+
capiRelease110 = "v" + capiRelease110
168+
})
169+
170+
capi_e2e.ClusterctlUpgradeSpec(context.TODO(), func() capi_e2e.ClusterctlUpgradeSpecInput {
171+
return capi_e2e.ClusterctlUpgradeSpecInput{
172+
E2EConfig: e2eCtx.E2EConfig,
173+
ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
174+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
175+
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
176+
SkipCleanup: false,
177+
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/" + capiRelease110 + "/clusterctl-{OS}-{ARCH}",
178+
InitWithProvidersContract: "v1beta1",
179+
InitWithInfrastructureProviders: []string{"openstack:" + capoRelease014},
180+
InitWithCoreProvider: "cluster-api:" + capiRelease110,
181+
InitWithBootstrapProviders: []string{"kubeadm:" + capiRelease110},
182+
InitWithControlPlaneProviders: []string{"kubeadm:" + capiRelease110},
183+
// See the v0.12 test above: explicit empty slice prevents the framework
184+
// from auto-installing ORC v1.0.2 via clusterctl v1.10 (which can't
185+
// handle RuntimeExtensionProviders in the local filesystem repository).
186+
InitWithRuntimeExtensionProviders: []string{},
187+
MgmtFlavor: shared.FlavorDefault,
188+
WorkloadFlavor: shared.FlavorCapiV1Beta1,
189+
InitWithKubernetesVersion: e2eCtx.E2EConfig.MustGetVariable(shared.KubernetesKindVersion),
190+
UseKindForManagementCluster: true,
191+
PreInit: func(managementClusterProxy capi_framework.ClusterProxy) {
192+
installORC(context.Background(), managementClusterProxy, orcInitVersion)
193+
},
194+
PreUpgrade: func(managementClusterProxy capi_framework.ClusterProxy) {
195+
installLatestORC(context.Background(), managementClusterProxy, e2eCtx.E2EConfig)
196+
},
197+
}
198+
})
199+
})
200+
201+
var _ = Describe("When testing clusterctl upgrades for CAPO (v0.14=>current) and ORC (v1.0.2=>current) with CAPI v1beta2 [clusterctl-upgrade]", func() {
108202
BeforeEach(func(ctx context.Context) {
109203
// Note: This gives the version without the 'v' prefix, so we need to add it below.
110204
capoRelease014, err = clusterctl.ResolveRelease(ctx, "go://github.com/kubernetes-sigs/cluster-api-provider-openstack@v0.14")
@@ -116,6 +210,9 @@ var _ = Describe("When testing clusterctl upgrades for CAPO (v0.14=>current) and
116210
capiRelease112 = "v" + capiRelease112
117211
})
118212

213+
// Here we use clusterctl v1.12 with CAPI v1.12 to create a v1beta2 management
214+
// cluster. This allows us to also install ORC as RuntimeExtensionProvider and test
215+
// upgrading both CAPO (v0.14=>current) and ORC (v1.0.2=>current).
119216
capi_e2e.ClusterctlUpgradeSpec(context.TODO(), func() capi_e2e.ClusterctlUpgradeSpecInput {
120217
return capi_e2e.ClusterctlUpgradeSpecInput{
121218
E2EConfig: e2eCtx.E2EConfig,
@@ -124,7 +221,6 @@ var _ = Describe("When testing clusterctl upgrades for CAPO (v0.14=>current) and
124221
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
125222
SkipCleanup: false,
126223
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/" + capiRelease112 + "/clusterctl-{OS}-{ARCH}",
127-
InitWithProvidersContract: "v1beta2",
128224
InitWithInfrastructureProviders: []string{"openstack:" + capoRelease014},
129225
InitWithCoreProvider: "cluster-api:" + capiRelease112,
130226
InitWithBootstrapProviders: []string{"kubeadm:" + capiRelease112},
@@ -137,3 +233,65 @@ var _ = Describe("When testing clusterctl upgrades for CAPO (v0.14=>current) and
137233
}
138234
})
139235
})
236+
237+
// installLatestORC downloads and applies the install manifest for the current/latest version of
238+
// the OpenStack Resource Controller (ORC) to the management cluster.
239+
//
240+
// The ORC version is derived from the e2e config (the latest version with contract v1beta2), so no
241+
// version hardcoding is required here — updating the provider entry in the e2e config is sufficient.
242+
//
243+
// After this function returns, the upgrade spec's own post-upgrade controller wait logic will handle
244+
// waiting for the ORC deployment to become available. This works because ORC resources carry the
245+
// "cluster.x-k8s.io/provider" label (added by the ORC kustomization), which causes them to be
246+
// discovered by framework.GetControllerDeployments.
247+
func installLatestORC(ctx context.Context, proxy capi_framework.ClusterProxy, e2eConfig *clusterctl.E2EConfig) {
248+
// GetProviderLatestVersionsByContract returns strings in the format "provider-name:version",
249+
// e.g. "openstack-resource-controller:v2.5.0".
250+
orcVersionStrings := e2eConfig.GetProviderLatestVersionsByContract("v1beta2", "openstack-resource-controller")
251+
Expect(orcVersionStrings).ToNot(BeEmpty(),
252+
"No ORC version with v1beta2 contract found in e2e config; cannot install ORC for upgrade")
253+
254+
parts := strings.SplitN(orcVersionStrings[0], ":", 2)
255+
Expect(parts).To(HaveLen(2),
256+
"Unexpected ORC provider version string format (expected 'name:version'): %q", orcVersionStrings[0])
257+
orcVersion := parts[1]
258+
259+
installORC(ctx, proxy, orcVersion)
260+
}
261+
262+
// installORC downloads and applies the upstream install manifest for the given version of the
263+
// OpenStack Resource Controller (ORC) to the management cluster.
264+
//
265+
// This is used to install ORC outside of the normal clusterctl init/upgrade flow, which is needed
266+
// for the upgrade tests that use clusterctl v1.10: clusterctl v1.10 cannot handle
267+
// RuntimeExtensionProvider in the local filesystem repository format used by the CAPI test
268+
// framework. CAPO has required ORC CRDs to be present at controller startup since v0.12, so the
269+
// initial ORC install must happen BEFORE clusterctl init runs the old CAPO controller (i.e. from
270+
// the PreInit hook).
271+
func installORC(ctx context.Context, proxy capi_framework.ClusterProxy, orcVersion string) {
272+
By(fmt.Sprintf("Installing ORC %s on the management cluster", orcVersion))
273+
274+
orcInstallURL := fmt.Sprintf(
275+
"https://github.com/k-orc/openstack-resource-controller/releases/download/%s/install.yaml",
276+
orcVersion,
277+
)
278+
By(fmt.Sprintf("Downloading ORC %s install manifest from %s", orcVersion, orcInstallURL))
279+
280+
req, err := http.NewRequestWithContext(ctx, http.MethodGet, orcInstallURL, http.NoBody)
281+
Expect(err).ToNot(HaveOccurred(), "Failed to create HTTP request for ORC install manifest")
282+
283+
resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // closed below via defer
284+
Expect(err).ToNot(HaveOccurred(),
285+
"Failed to download ORC %s install manifest from %s", orcVersion, orcInstallURL)
286+
defer resp.Body.Close()
287+
288+
Expect(resp.StatusCode).To(Equal(http.StatusOK),
289+
"Unexpected HTTP status %d when downloading ORC install manifest from %s", resp.StatusCode, orcInstallURL)
290+
291+
orcManifest, err := io.ReadAll(resp.Body)
292+
Expect(err).ToNot(HaveOccurred(), "Failed to read ORC install manifest response body")
293+
294+
By(fmt.Sprintf("Applying ORC %s install manifest to the management cluster", orcVersion))
295+
Expect(proxy.CreateOrUpdate(ctx, orcManifest)).To(Succeed(),
296+
"Failed to apply ORC %s install manifest to management cluster", orcVersion)
297+
}

0 commit comments

Comments
 (0)