Skip to content

Commit caa55f0

Browse files
committed
BUG/MEDIUM: preserve NodePort when reconciling Service ports
The Service port reconciler rebuilds .spec.ports from VirtualListeners on every event batch. VirtualListener-derived ports are emitted with no NodePort, so for Services of type NodePort the patch resets NodePort to 0 and Kubernetes re-allocates a random one on every reconcile. That makes the chart's NodePort defaults and any user-supplied nodePort from service.extraPorts effectively useless. This commit carries the existing NodePort through whenever the port number matches. That should cover both the steady-state case (preserve what Kubernetes allocated on the first reconcile) and the pre-seeded case (keep a user-provided nodePort when the chart-created Service's port number matches a Gateway listener, even if the port names differ). Signed-off-by: Pierre Cheynier <p.cheynier@criteo.com>
1 parent c3377c2 commit caa55f0

2 files changed

Lines changed: 92 additions & 11 deletions

File tree

k8s/gate/hugservice/hugservice.go

Lines changed: 34 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -96,15 +96,7 @@ func (*ServiceReconciler) buildVirtualListenerServicePorts(virtualListeners map[
9696
// reconcileServicePorts patches a single Service's port list if it differs from
9797
// the desired state (reserved ports + VirtualListener ports).
9898
func (r *ServiceReconciler) reconcileServicePorts(ctx context.Context, svc *corev1.Service, desiredVLPorts []corev1.ServicePort) {
99-
// Collect reserved ports from the current service (stat, metrics, …).
100-
var reservedPorts []corev1.ServicePort
101-
for _, p := range svc.Spec.Ports {
102-
if reservedPortNames[p.Name] {
103-
reservedPorts = append(reservedPorts, p)
104-
}
105-
}
106-
107-
desiredPorts := append(reservedPorts, desiredVLPorts...)
99+
desiredPorts := buildDesiredPorts(svc.Spec.Ports, desiredVLPorts)
108100

109101
if portsEqual(svc.Spec.Ports, desiredPorts) {
110102
return
@@ -127,10 +119,41 @@ func (r *ServiceReconciler) reconcileServicePorts(ctx context.Context, svc *core
127119
)
128120
}
129121

122+
// buildDesiredPorts merges the current Service ports with the desired set of
123+
// VirtualListener-derived ports (keeps reserved ports as-is, appends the
124+
// VL ports, carrying over the NodePort assigned on the existing Service
125+
// whenever the port number matches - to keep it stable).
126+
func buildDesiredPorts(existing, desiredVLPorts []corev1.ServicePort) []corev1.ServicePort {
127+
existingNodePortByPort := make(map[int32]int32, len(existing))
128+
var reservedPorts []corev1.ServicePort
129+
for _, p := range existing {
130+
if p.NodePort != 0 {
131+
existingNodePortByPort[p.Port] = p.NodePort
132+
}
133+
if reservedPortNames[p.Name] {
134+
reservedPorts = append(reservedPorts, p)
135+
}
136+
}
137+
138+
merged := make([]corev1.ServicePort, 0, len(reservedPorts)+len(desiredVLPorts))
139+
merged = append(merged, reservedPorts...)
140+
for _, dp := range desiredVLPorts {
141+
if dp.NodePort == 0 {
142+
if np, ok := existingNodePortByPort[dp.Port]; ok {
143+
dp.NodePort = np
144+
}
145+
}
146+
merged = append(merged, dp)
147+
}
148+
return merged
149+
}
150+
130151
// portsEqual returns true if both slices describe the same set of ports
131152
// (matched by name, port, and targetPort, order-independent).
132-
// NodePort is intentionally excluded: it is assigned by Kubernetes and not
133-
// managed by the reconciler.
153+
// NodePort is intentionally excluded from the comparison: it is assigned by
154+
// Kubernetes (or the user, via NodePort-typed Services) and carried over by
155+
// buildDesiredPorts, so a reconcile that only differs by NodePort should not
156+
// trigger a patch.
134157
func portsEqual(a, b []corev1.ServicePort) bool {
135158
if len(a) != len(b) {
136159
return false

k8s/gate/hugservice/hugservice_test.go

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
package hugservice
1515

1616
import (
17+
"reflect"
1718
"testing"
1819

1920
corev1 "k8s.io/api/core/v1"
@@ -24,6 +25,18 @@ func port(name string, port, nodePort int32) corev1.ServicePort {
2425
return corev1.ServicePort{Name: name, Port: port, NodePort: nodePort, TargetPort: intstr.FromInt32(port)}
2526
}
2627

28+
// vlPort builds a ServicePort as emitted by buildVirtualListenerServicePorts
29+
// (TCP, TargetPort==Port). nodePort=0 means "unset".
30+
func vlPort(name string, p, nodePort int32) corev1.ServicePort {
31+
return corev1.ServicePort{
32+
Name: name,
33+
Protocol: corev1.ProtocolTCP,
34+
Port: p,
35+
NodePort: nodePort,
36+
TargetPort: intstr.FromInt32(p),
37+
}
38+
}
39+
2740
func TestPortsEqual(t *testing.T) {
2841
tests := []struct {
2942
name string
@@ -111,3 +124,48 @@ func TestPortsEqual(t *testing.T) {
111124
})
112125
}
113126
}
127+
128+
// TestBuildDesiredPortsPreservesNodePort covers the two NodePort-preservation cases:
129+
// - a VL port matching an existing VL port by name/number.
130+
// - a VL port matching a pre-seeded entry only by port number.
131+
// Additionally, a brand-new listener must come out with NodePort=0 so
132+
// Kubernetes allocates one.
133+
func TestBuildDesiredPortsPreservesNodePort(t *testing.T) {
134+
existing := []corev1.ServicePort{
135+
port("stat", 31024, 31721), // reserved, kept verbatim
136+
vlPort("tls-31443", 31443, 31364), // matches desired by name
137+
port("kube-example", 32132, 32132), // matches desired by port number
138+
}
139+
desiredVL := []corev1.ServicePort{
140+
vlPort("tls-31443", 31443, 0),
141+
vlPort("tls-32132", 32132, 0),
142+
vlPort("tls-40000", 40000, 0), // new listener, no existing match
143+
}
144+
want := []corev1.ServicePort{
145+
port("stat", 31024, 31721),
146+
vlPort("tls-31443", 31443, 31364),
147+
vlPort("tls-32132", 32132, 32132),
148+
vlPort("tls-40000", 40000, 0),
149+
}
150+
151+
got := buildDesiredPorts(existing, desiredVL)
152+
if !reflect.DeepEqual(got, want) {
153+
t.Fatalf("buildDesiredPorts() mismatch\n got = %#v\n want = %#v", got, want)
154+
}
155+
}
156+
157+
// TestBuildDesiredPortsIsStable checks, after the first reconcile with
158+
// Kubernetes filling any unset NodePorts, that a second reconcile is a
159+
// no-op. This is what prevents the NodePort flip on each event batch.
160+
func TestBuildDesiredPortsIsStable(t *testing.T) {
161+
desiredVL := []corev1.ServicePort{vlPort("tls-31443", 31443, 0)}
162+
first := buildDesiredPorts([]corev1.ServicePort{port("stat", 31024, 31024)}, desiredVL)
163+
for i := range first {
164+
if first[i].NodePort == 0 {
165+
first[i].NodePort = 30000 // simulate Kubernetes allocation
166+
}
167+
}
168+
if second := buildDesiredPorts(first, desiredVL); !reflect.DeepEqual(first, second) {
169+
t.Fatalf("reconcile is not stable\n first = %#v\n second = %#v", first, second)
170+
}
171+
}

0 commit comments

Comments
 (0)