Skip to content

Commit bccaddb

Browse files
authored
test: refactor e2e test suite (#1187)
- Use `t.Cleanup` for teardown
- `context.Background()` → `t.Context()` in tests/helpers
- `%w` error wrapping
- Standardize on testify
- Exponential backoff (1s → 30s cap) for `CreateService` + `WaitForHTTPAvailable`
1 parent a9d4851 commit bccaddb

File tree

4 files changed

+182
-161
lines changed

4 files changed

+182
-161
lines changed

tests/e2e/cloud_test.go

Lines changed: 82 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import (
1212
"time"
1313

1414
"github.com/stretchr/testify/assert"
15+
"github.com/stretchr/testify/require"
1516
corev1 "k8s.io/api/core/v1"
1617
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1718
"k8s.io/apimachinery/pkg/util/wait"
@@ -25,17 +26,12 @@ import (
2526
func TestNodeSetCorrectNodeLabelsAndIPAddresses(t *testing.T) {
2627
t.Parallel()
2728

28-
ctx := context.Background()
29+
node, err := testCluster.k8sClient.CoreV1().Nodes().Get(t.Context(), testCluster.ControlNodeName(), metav1.GetOptions{})
30+
require.NoError(t, err)
2931

30-
node, err := testCluster.k8sClient.CoreV1().Nodes().Get(ctx, testCluster.ControlNodeName(), metav1.GetOptions{})
31-
assert.NoError(t, err)
32-
33-
server, _, err := testCluster.hcloud.Server.Get(ctx, testCluster.ControlNodeName())
34-
if err != nil {
35-
return
36-
}
32+
server, _, err := testCluster.hcloud.Server.Get(t.Context(), testCluster.ControlNodeName())
33+
require.NoError(t, err)
3734

38-
labels := node.Labels
3935
expectedLabels := map[string]string{
4036
"node.kubernetes.io/instance-type": server.ServerType.Name,
4137
"topology.kubernetes.io/region": server.Location.Name,
@@ -46,26 +42,15 @@ func TestNodeSetCorrectNodeLabelsAndIPAddresses(t *testing.T) {
4642
"instance.hetzner.cloud/provided-by": "cloud",
4743
}
4844
for expectedLabel, expectedValue := range expectedLabels {
49-
if labelValue, ok := labels[expectedLabel]; !ok || labelValue != expectedValue {
50-
t.Errorf("node have a not expected label %s, ok: %v, given value %s, expected value %s", expectedLabel, ok, labelValue, expectedValue)
51-
}
52-
}
53-
54-
for _, address := range node.Status.Addresses {
55-
if address.Type == corev1.NodeExternalIP {
56-
expectedIP := server.PublicNet.IPv4.IP.String()
57-
if expectedIP != address.Address {
58-
t.Errorf("Got %s as NodeExternalIP but expected %s", address.Address, expectedIP)
59-
}
60-
}
45+
assert.Equal(t, expectedValue, node.Labels[expectedLabel], "unexpected value for label %s", expectedLabel)
6146
}
6247

6348
for _, address := range node.Status.Addresses {
64-
if address.Type == corev1.NodeInternalIP {
65-
expectedIP := server.PrivateNet[0].IP.String()
66-
if expectedIP != address.Address {
67-
t.Errorf("Got %s as NodeInternalIP but expected %s", address.Address, expectedIP)
68-
}
49+
switch address.Type {
50+
case corev1.NodeExternalIP:
51+
assert.Equal(t, server.PublicNet.IPv4.IP.String(), address.Address, "unexpected NodeExternalIP")
52+
case corev1.NodeInternalIP:
53+
assert.Equal(t, server.PrivateNet[0].IP.String(), address.Address, "unexpected NodeInternalIP")
6954
}
7055
}
7156
}
@@ -77,52 +62,58 @@ func TestServiceLoadBalancersMinimalSetup(t *testing.T) {
7762
t: t,
7863
podName: "loadbalancer-minimal",
7964
}
65+
t.Cleanup(func() {
66+
lbTest.TearDown()
67+
})
8068

81-
pod := lbTest.DeployTestPod()
69+
pod, err := lbTest.DeployTestPod()
70+
require.NoError(t, err)
8271

8372
lbSvc := lbTest.ServiceDefinition(pod, map[string]string{
8473
string(annotation.LBLocation): "nbg1",
8574
})
8675

87-
lbSvc, err := lbTest.CreateService(lbSvc)
88-
if assert.NoError(t, err, "deploying test svc") {
89-
WaitForHTTPAvailable(t, lbSvc.Status.LoadBalancer.Ingress[0].IP, false)
90-
}
76+
lbSvc, err = lbTest.CreateService(lbSvc)
77+
require.NoError(t, err)
9178

92-
lbTest.TearDown()
79+
err = lbTest.WaitForHTTPAvailable(lbSvc.Status.LoadBalancer.Ingress[0].IP, false)
80+
require.NoError(t, err)
9381
}
9482

9583
func TestServiceLoadBalancersHTTPS(t *testing.T) {
9684
t.Parallel()
9785

98-
cert := testCluster.CreateTLSCertificate(t, "loadbalancer-https")
9986
lbTest := lbTestHelper{
10087
t: t,
10188
podName: "loadbalancer-https",
10289
port: 443,
10390
}
91+
t.Cleanup(func() {
92+
lbTest.TearDown()
93+
})
94+
95+
cert, err := testCluster.CreateTLSCertificate(t, "loadbalancer-https")
96+
require.NoError(t, err)
10497

105-
pod := lbTest.DeployTestPod()
98+
pod, err := lbTest.DeployTestPod()
99+
require.NoError(t, err)
106100

107101
lbSvc := lbTest.ServiceDefinition(pod, map[string]string{
108102
string(annotation.LBLocation): "nbg1",
109103
string(annotation.LBSvcHTTPCertificates): cert.Name,
110104
string(annotation.LBSvcProtocol): "https",
111105
})
112106

113-
lbSvc, err := lbTest.CreateService(lbSvc)
114-
if assert.NoError(t, err, "deploying test svc") {
115-
WaitForHTTPAvailable(t, lbSvc.Status.LoadBalancer.Ingress[0].IP, true)
116-
}
107+
lbSvc, err = lbTest.CreateService(lbSvc)
108+
require.NoError(t, err)
117109

118-
lbTest.TearDown()
110+
err = lbTest.WaitForHTTPAvailable(lbSvc.Status.LoadBalancer.Ingress[0].IP, true)
111+
require.NoError(t, err)
119112
}
120113

121114
func TestServiceLoadBalancersHTTPSWithManagedCertificate(t *testing.T) {
122115
t.Parallel()
123116

124-
ctx := context.Background()
125-
126117
if testCluster.certDomain == "" {
127118
t.Skip("Skipping because CERT_DOMAIN is not set")
128119
}
@@ -133,8 +124,12 @@ func TestServiceLoadBalancersHTTPSWithManagedCertificate(t *testing.T) {
133124
podName: "loadbalancer-https",
134125
port: 443,
135126
}
127+
t.Cleanup(func() {
128+
lbTest.TearDown()
129+
})
136130

137-
pod := lbTest.DeployTestPod()
131+
pod, err := lbTest.DeployTestPod()
132+
require.NoError(t, err)
138133

139134
lbSvc := lbTest.ServiceDefinition(pod, map[string]string{
140135
string(annotation.LBLocation): "nbg1",
@@ -144,62 +139,66 @@ func TestServiceLoadBalancersHTTPSWithManagedCertificate(t *testing.T) {
144139
string(annotation.LBSvcHTTPManagedCertificateUseACMEStaging): "true",
145140
})
146141

147-
lbSvc, err := lbTest.CreateService(lbSvc)
148-
if assert.NoError(t, err, "deploying test svc") {
149-
certs, err := testCluster.hcloud.Certificate.AllWithOpts(ctx, hcloud.CertificateListOpts{
150-
ListOpts: hcloud.ListOpts{
151-
LabelSelector: fmt.Sprintf("%s=%s", hcops.LabelServiceUID, lbSvc.ObjectMeta.UID),
152-
},
153-
})
154-
assert.NoError(t, err)
155-
if assert.Len(t, certs, 1) {
156-
testCluster.certificates.Add(certs[0].ID)
157-
}
158-
}
142+
lbSvc, err = lbTest.CreateService(lbSvc)
143+
require.NoError(t, err)
159144

160-
lbTest.TearDown()
145+
certs, err := testCluster.hcloud.Certificate.AllWithOpts(t.Context(), hcloud.CertificateListOpts{
146+
ListOpts: hcloud.ListOpts{
147+
LabelSelector: fmt.Sprintf("%s=%s", hcops.LabelServiceUID, lbSvc.ObjectMeta.UID),
148+
},
149+
})
150+
assert.NoError(t, err)
151+
if assert.Len(t, certs, 1) {
152+
testCluster.certificates.Add(certs[0].ID)
153+
}
161154
}
162155

163156
func TestServiceLoadBalancersWithPrivateNetwork(t *testing.T) {
164157
t.Parallel()
165158

166159
lbTest := lbTestHelper{t: t, podName: "loadbalancer-private-network"}
160+
t.Cleanup(func() {
161+
lbTest.TearDown()
162+
})
167163

168-
pod := lbTest.DeployTestPod()
164+
pod, err := lbTest.DeployTestPod()
165+
require.NoError(t, err)
169166

170167
ipRange := &net.IPNet{
171168
IP: net.IPv4(10, 0, 0, 0),
172169
Mask: net.CIDRMask(24, 32),
173170
}
174171

175-
lbSvcDefinition := lbTest.ServiceDefinition(pod, map[string]string{
172+
lbSvc := lbTest.ServiceDefinition(pod, map[string]string{
176173
string(annotation.LBLocation): "nbg1",
177174
string(annotation.LBUsePrivateIP): "true",
178175
string(annotation.PrivateSubnetIPRange): ipRange.String(),
179176
})
180177

181-
lbSvc, err := lbTest.CreateService(lbSvcDefinition)
182-
if assert.NoError(t, err, "deploying test svc") {
183-
WaitForHTTPAvailable(t, lbSvc.Status.LoadBalancer.Ingress[0].IP, false)
178+
lbSvc, err = lbTest.CreateService(lbSvc)
179+
require.NoError(t, err)
184180

185-
anyInIPRange := slices.ContainsFunc(lbSvc.Status.LoadBalancer.Ingress, func(ingress corev1.LoadBalancerIngress) bool {
186-
ip := net.ParseIP(ingress.IP)
187-
if ip == nil {
188-
return false
189-
}
190-
return ipRange.Contains(ip)
191-
})
192-
193-
assert.True(t, anyInIPRange)
194-
}
181+
err = lbTest.WaitForHTTPAvailable(lbSvc.Status.LoadBalancer.Ingress[0].IP, false)
182+
require.NoError(t, err)
195183

196-
lbTest.TearDown()
184+
anyInIPRange := slices.ContainsFunc(lbSvc.Status.LoadBalancer.Ingress, func(ingress corev1.LoadBalancerIngress) bool {
185+
ip := net.ParseIP(ingress.IP)
186+
if ip == nil {
187+
return false
188+
}
189+
return ipRange.Contains(ip)
190+
})
191+
assert.True(t, anyInIPRange)
197192
}
198193

199194
func TestRouteNetworksPodIPsAreAccessible(t *testing.T) {
200195
t.Parallel()
201196

202-
err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
197+
var (
198+
nodeInternalIP string
199+
routeGateway string
200+
)
201+
err := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
203202
node, err := testCluster.k8sClient.CoreV1().Nodes().Get(ctx, testCluster.ControlNodeName(), metav1.GetOptions{})
204203
if err != nil {
205204
return false, err
@@ -210,18 +209,20 @@ func TestRouteNetworksPodIPsAreAccessible(t *testing.T) {
210209
return false, err
211210
}
212211
for _, route := range network.Routes {
213-
if route.Destination.String() == node.Spec.PodCIDR {
214-
for _, a := range node.Status.Addresses {
215-
if a.Type == corev1.NodeInternalIP {
216-
assert.Equal(t, a.Address, route.Gateway.String())
217-
}
212+
if route.Destination.String() != node.Spec.PodCIDR {
213+
continue
214+
}
215+
routeGateway = route.Gateway.String()
216+
for _, a := range node.Status.Addresses {
217+
if a.Type == corev1.NodeInternalIP {
218+
nodeInternalIP = a.Address
219+
break
218220
}
219-
return true, nil
220221
}
222+
return true, nil
221223
}
222224
return false, nil
223225
})
224-
if err != nil {
225-
t.Fatal(err)
226-
}
226+
require.NoError(t, err, "error waiting for pod IPs being accessible")
227+
assert.Equal(t, nodeInternalIP, routeGateway, "route gateway should match node internal IP")
227228
}

tests/e2e/e2e_test.go

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,13 @@
33
package e2e
44

55
import (
6-
"context"
76
"fmt"
87
"os"
98
"strings"
109
"testing"
1110

1211
"github.com/stretchr/testify/assert"
12+
"github.com/stretchr/testify/require"
1313
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1414
)
1515

@@ -34,8 +34,8 @@ func TestPodIsPresent(t *testing.T) {
3434
t.Parallel()
3535

3636
t.Run("hcloud-cloud-controller-manager pod is present in kube-system", func(t *testing.T) {
37-
pods, err := testCluster.k8sClient.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
38-
assert.NoError(t, err)
37+
pods, err := testCluster.k8sClient.CoreV1().Pods("kube-system").List(t.Context(), metav1.ListOptions{})
38+
require.NoError(t, err)
3939

4040
found := false
4141
for _, pod := range pods.Items {
@@ -44,20 +44,16 @@ func TestPodIsPresent(t *testing.T) {
4444
break
4545
}
4646
}
47-
if !found {
48-
t.Error("kube-system does not contain a pod named hcloud-cloud-controller-manager")
49-
}
47+
assert.True(t, found, "kube-system does not contain a pod named hcloud-cloud-controller-manager")
5048
})
5149

5250
t.Run("pod with app=hcloud-cloud-controller-manager is present in kube-system", func(t *testing.T) {
5351
pods, err := testCluster.k8sClient.CoreV1().Pods("kube-system").
54-
List(context.Background(), metav1.ListOptions{
52+
List(t.Context(), metav1.ListOptions{
5553
LabelSelector: "app.kubernetes.io/name=hcloud-cloud-controller-manager",
5654
})
57-
assert.NoError(t, err)
55+
require.NoError(t, err)
5856

59-
if len(pods.Items) == 0 {
60-
t.Fatal("kube-system does not contain a pod with label app=hcloud-cloud-controller-manager")
61-
}
57+
require.NotEmpty(t, pods.Items, "kube-system does not contain a pod with label app=hcloud-cloud-controller-manager")
6258
})
6359
}

0 commit comments

Comments (0)