Skip to content

Commit da94c34

Browse files
tmshort and claude committed
tests: add unit and e2e tests for HTTPS_PROXY support
- Set Proxy: http.ProxyFromEnvironment on the custom http.Transport in BuildHTTPClient so HTTPS_PROXY/NO_PROXY env vars are honoured
- Add unit tests verifying the transport uses env-based proxy, tunnels connections through an HTTP CONNECT proxy, and fails when the proxy rejects the tunnel
- Add an in-process recording proxy and deployment patch helpers to the e2e step library
- Add two @httpproxy e2e scenarios: one verifying operator-controller blocks catalog fetches when the proxy is unreachable, one verifying CONNECT requests are routed through a configured proxy

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Signed-off-by: Todd Short <tshort@redhat.com>
1 parent 52bf60f commit da94c34

File tree

8 files changed

+715
-25
lines changed

8 files changed

+715
-25
lines changed

Makefile

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -254,10 +254,11 @@ $(eval $(call install-sh,standard,operator-controller-standard.yaml))
254254
.PHONY: test
255255
test: manifests generate fmt lint test-unit test-e2e test-regression #HELP Run all tests.
256256

257-
E2E_TIMEOUT ?= 10m
257+
E2E_TIMEOUT ?= 12m
258+
GODOG_ARGS ?=
258259
.PHONY: e2e
259260
e2e: #EXHELP Run the e2e tests.
260-
go test -count=1 -v ./test/e2e/features_test.go -timeout=$(E2E_TIMEOUT)
261+
go test -count=1 -v ./test/e2e/features_test.go -timeout=$(E2E_TIMEOUT) $(if $(GODOG_ARGS),-args $(GODOG_ARGS))
261262

262263
E2E_REGISTRY_NAME := docker-registry
263264
E2E_REGISTRY_NAMESPACE := operator-controller-e2e
@@ -331,7 +332,7 @@ test-experimental-e2e: COVERAGE_NAME := experimental-e2e
331332
test-experimental-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST)
332333
test-experimental-e2e: export INSTALL_DEFAULT_CATALOGS := false
333334
test-experimental-e2e: PROMETHEUS_VALUES := helm/prom_experimental.yaml
334-
test-experimental-e2e: E2E_TIMEOUT := 15m
335+
test-experimental-e2e: E2E_TIMEOUT := 17m
335336
test-experimental-e2e: run-internal image-registry prometheus e2e e2e-coverage kind-clean #HELP Run experimental e2e test suite on local kind cluster
336337

337338
.PHONY: prometheus

internal/shared/util/http/httputil.go

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,10 +18,13 @@ func BuildHTTPClient(cpw *CertPoolWatcher) (*http.Client, error) {
1818
RootCAs: pool,
1919
MinVersion: tls.VersionTLS12,
2020
}
21-
tlsTransport := &http.Transport{
21+
httpClient.Transport = &http.Transport{
2222
TLSClientConfig: tlsConfig,
23+
// Proxy must be set explicitly; a nil Proxy field means "no proxy" and
24+
// ignores HTTPS_PROXY/NO_PROXY env vars. Only http.DefaultTransport sets
25+
// this by default; custom transports must opt in.
26+
Proxy: http.ProxyFromEnvironment,
2327
}
24-
httpClient.Transport = tlsTransport
2528

2629
return httpClient, nil
2730
}
Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,203 @@
1+
package http_test
2+
3+
import (
4+
"context"
5+
"encoding/pem"
6+
"io"
7+
"net"
8+
"net/http"
9+
"net/http/httptest"
10+
"net/url"
11+
"os"
12+
"path/filepath"
13+
"testing"
14+
"time"
15+
16+
"github.com/stretchr/testify/require"
17+
"sigs.k8s.io/controller-runtime/pkg/log"
18+
19+
httputil "github.com/operator-framework/operator-controller/internal/shared/util/http"
20+
)
21+
22+
// startRecordingProxy starts a plain-HTTP CONNECT proxy that tunnels HTTPS
// connections and records the target host of each CONNECT request.
//
// The returned server must be closed by the caller. The proxied channel
// should be buffered: sends are non-blocking and extra CONNECTs beyond the
// buffer capacity are silently dropped (see the select below).
func startRecordingProxy(proxied chan<- string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodConnect {
			http.Error(w, "only CONNECT supported", http.StatusMethodNotAllowed)
			return
		}
		// Non-blocking: if there are unexpected extra CONNECT requests (retries,
		// parallel connections) we record the first one and drop the rest rather
		// than blocking the proxy handler goroutine.
		select {
		case proxied <- r.Host:
		default:
		}

		// Dial the tunnel target; failure surfaces to the client as 502,
		// which reads as a proxy error rather than a direct-connect failure.
		dst, err := net.Dial("tcp", r.Host)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		defer dst.Close()

		hj, ok := w.(http.Hijacker)
		if !ok {
			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
			return
		}
		conn, bufrw, err := hj.Hijack()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer conn.Close()

		// Hand-written status line: after Hijack the net/http machinery no
		// longer writes to this connection on our behalf.
		if _, err = conn.Write([]byte("HTTP/1.1 200 Connection established\r\n\r\n")); err != nil {
			return
		}

		// Bidirectional byte pump; each direction signals completion on done.
		done := make(chan struct{}, 2)
		tunnel := func(dst io.Writer, src io.Reader) {
			defer func() { done <- struct{}{} }()
			_, _ = io.Copy(dst, src)
			// Half-close the write side so the other direction sees EOF and
			// its io.Copy returns, preventing the goroutine from hanging.
			if cw, ok := dst.(interface{ CloseWrite() error }); ok {
				_ = cw.CloseWrite()
			}
		}
		// Use bufrw (not conn) as the client→dst source: Hijack may have
		// buffered bytes (e.g. the TLS ClientHello) that arrived together with
		// the CONNECT headers; reading from conn directly would lose them.
		go tunnel(dst, bufrw)
		go tunnel(conn, dst)
		<-done
		<-done // wait for both directions before closing connections
	}))
}
80+
81+
// certPoolWatcherForTLSServer creates a CertPoolWatcher that trusts the given
82+
// TLS test server's certificate.
83+
func certPoolWatcherForTLSServer(t *testing.T, server *httptest.Server) *httputil.CertPoolWatcher {
84+
t.Helper()
85+
86+
dir := t.TempDir()
87+
certPath := filepath.Join(dir, "server.pem")
88+
89+
certDER := server.TLS.Certificates[0].Certificate[0]
90+
f, err := os.Create(certPath)
91+
require.NoError(t, err)
92+
require.NoError(t, pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: certDER}))
93+
require.NoError(t, f.Close())
94+
95+
cpw, err := httputil.NewCertPoolWatcher(dir, log.FromContext(context.Background()))
96+
require.NoError(t, err)
97+
require.NotNil(t, cpw)
98+
t.Cleanup(cpw.Done)
99+
require.NoError(t, cpw.Start(context.Background()))
100+
return cpw
101+
}
102+
103+
// TestBuildHTTPClientTransportUsesProxyFromEnvironment verifies that the
104+
// transport returned by BuildHTTPClient has Proxy set to http.ProxyFromEnvironment
105+
// so that HTTPS_PROXY and NO_PROXY env vars are honoured at runtime.
106+
func TestBuildHTTPClientTransportUsesProxyFromEnvironment(t *testing.T) {
107+
// Use system certs (empty dir) — we only need a valid CertPoolWatcher.
108+
cpw, err := httputil.NewCertPoolWatcher("", log.FromContext(context.Background()))
109+
require.NoError(t, err)
110+
t.Cleanup(cpw.Done)
111+
require.NoError(t, cpw.Start(context.Background()))
112+
113+
client, err := httputil.BuildHTTPClient(cpw)
114+
require.NoError(t, err)
115+
116+
transport, ok := client.Transport.(*http.Transport)
117+
require.True(t, ok)
118+
require.NotNil(t, transport.Proxy,
119+
"BuildHTTPClient must set transport.Proxy so that HTTPS_PROXY env vars are respected; "+
120+
"a nil Proxy field means no proxy regardless of environment")
121+
}
122+
123+
// TestBuildHTTPClientProxyTunnelsConnections verifies end-to-end that the
124+
// HTTP client produced by BuildHTTPClient correctly tunnels HTTPS connections
125+
// through an HTTP CONNECT proxy.
126+
//
127+
// The test overrides transport.Proxy with http.ProxyURL rather than relying on
128+
// HTTPS_PROXY: httptest servers bind to 127.0.0.1, which http.ProxyFromEnvironment
129+
// silently excludes from proxying, and env-var changes within the same process
130+
// are unreliable due to sync.Once caching. Using http.ProxyURL directly exercises
131+
// the same tunnelling code path that HTTPS_PROXY triggers in production.
132+
func TestBuildHTTPClientProxyTunnelsConnections(t *testing.T) {
133+
targetServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
134+
w.WriteHeader(http.StatusOK)
135+
}))
136+
defer targetServer.Close()
137+
138+
proxied := make(chan string, 1)
139+
proxyServer := startRecordingProxy(proxied)
140+
defer proxyServer.Close()
141+
142+
proxyURL, err := url.Parse(proxyServer.URL)
143+
require.NoError(t, err)
144+
145+
cpw := certPoolWatcherForTLSServer(t, targetServer)
146+
client, err := httputil.BuildHTTPClient(cpw)
147+
require.NoError(t, err)
148+
149+
// Point the transport directly at our test proxy, bypassing the loopback
150+
// exclusion and env-var caching of http.ProxyFromEnvironment.
151+
transport, ok := client.Transport.(*http.Transport)
152+
require.True(t, ok)
153+
transport.Proxy = http.ProxyURL(proxyURL)
154+
155+
resp, err := client.Get(targetServer.URL)
156+
require.NoError(t, err)
157+
resp.Body.Close()
158+
159+
select {
160+
case host := <-proxied:
161+
require.Equal(t, targetServer.Listener.Addr().String(), host,
162+
"proxy must have received a CONNECT request for the target server address")
163+
case <-time.After(5 * time.Second):
164+
t.Fatal("HTTPS connection to target server did not go through the proxy")
165+
}
166+
}
167+
168+
// TestBuildHTTPClientProxyBlocksWhenRejected verifies that when the proxy
169+
// rejects the CONNECT tunnel, the client request fails rather than silently
170+
// falling back to a direct connection.
171+
func TestBuildHTTPClientProxyBlocksWhenRejected(t *testing.T) {
172+
targetServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
173+
w.WriteHeader(http.StatusOK)
174+
}))
175+
defer targetServer.Close()
176+
177+
// A proxy that returns 403 Forbidden for every CONNECT request.
178+
rejectingProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
179+
if r.Method == http.MethodConnect {
180+
http.Error(w, "proxy access denied", http.StatusForbidden)
181+
return
182+
}
183+
http.Error(w, "only CONNECT supported", http.StatusMethodNotAllowed)
184+
}))
185+
defer rejectingProxy.Close()
186+
187+
proxyURL, err := url.Parse(rejectingProxy.URL)
188+
require.NoError(t, err)
189+
190+
cpw := certPoolWatcherForTLSServer(t, targetServer)
191+
client, err := httputil.BuildHTTPClient(cpw)
192+
require.NoError(t, err)
193+
194+
transport, ok := client.Transport.(*http.Transport)
195+
require.True(t, ok)
196+
transport.Proxy = http.ProxyURL(proxyURL)
197+
198+
resp, err := client.Get(targetServer.URL)
199+
if resp != nil {
200+
resp.Body.Close()
201+
}
202+
require.Error(t, err, "request should fail when the proxy rejects the CONNECT tunnel")
203+
}

test/e2e/features/proxy.feature

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
Feature: HTTPS proxy support for outbound catalog requests

  OLM's operator-controller fetches catalog data from catalogd over HTTPS.
  When HTTPS_PROXY is set in the operator-controller's environment, all
  outbound HTTPS requests must be routed through the configured proxy.

  Background:
    Given OLM is available
    And ClusterCatalog "test" serves bundles
    And ServiceAccount "olm-sa" with needed permissions is available in test namespace

  @HTTPProxy
  Scenario: operator-controller respects HTTPS_PROXY when fetching catalog data
    # 127.0.0.1:39999 is an intentionally unreachable proxy address: the
    # catalog fetch must fail with a "proxyconnect" error, proving the request
    # was sent to the proxy rather than directly to catalogd.
    Given the "operator-controller" component is configured with HTTPS_PROXY "http://127.0.0.1:39999"
    When ClusterExtension is applied
      """
      apiVersion: olm.operatorframework.io/v1
      kind: ClusterExtension
      metadata:
        name: ${NAME}
      spec:
        namespace: ${TEST_NAMESPACE}
        serviceAccount:
          name: olm-sa
        source:
          sourceType: Catalog
          catalog:
            packageName: test
            selector:
              matchLabels:
                "olm.operatorframework.io/metadata.name": test-catalog
      """
    Then ClusterExtension reports Progressing as True with Reason Retrying and Message includes:
      """
      proxyconnect
      """

  @HTTPProxy
  Scenario: operator-controller sends catalog requests through a configured HTTPS proxy
    # The recording proxy runs on the host and cannot route to in-cluster service
    # addresses, so it responds 502 after recording the CONNECT. This is
    # intentional: the scenario only verifies that operator-controller respects
    # HTTPS_PROXY and sends catalog fetches through the proxy, not that the full
    # end-to-end request succeeds.
    Given the "operator-controller" component is configured with HTTPS_PROXY pointing to a recording proxy
    When ClusterExtension is applied
      """
      apiVersion: olm.operatorframework.io/v1
      kind: ClusterExtension
      metadata:
        name: ${NAME}
      spec:
        namespace: ${TEST_NAMESPACE}
        serviceAccount:
          name: olm-sa
        source:
          sourceType: Catalog
          catalog:
            packageName: test
            selector:
              matchLabels:
                "olm.operatorframework.io/metadata.name": test-catalog
      """
    Then the recording proxy received a CONNECT request for the catalogd service

test/e2e/steps/hooks.go

Lines changed: 28 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,15 @@ type resource struct {
2727
namespace string
2828
}
2929

30-
// deploymentRestore records the original container args of a deployment so that
31-
// it can be patched back to its pre-test state during scenario cleanup.
30+
// deploymentRestore records the original state of a deployment so it can be
31+
// rolled back after a test that modifies deployment configuration.
3232
type deploymentRestore struct {
33-
namespace string
34-
deploymentName string
35-
originalArgs []string
33+
name string // deployment name
34+
namespace string
35+
containerName string // container to patch (for env var restores)
36+
patchedArgs bool // true when container args were modified (for TLS profile patches)
37+
originalArgs []string // original container args; may be nil if args were unset
38+
originalEnv []string // original env vars as "NAME=VALUE" (for proxy patches)
3639
}
3740

3841
type scenarioContext struct {
@@ -47,8 +50,8 @@ type scenarioContext struct {
4750
metricsResponse map[string]string
4851
leaderPods map[string]string // component name -> leader pod name
4952
deploymentRestores []deploymentRestore
50-
51-
extensionObjects []client.Object
53+
extensionObjects []client.Object
54+
proxy *recordingProxy
5255
}
5356

5457
// GatherClusterExtensionObjects collects all resources related to the ClusterExtension container in
@@ -192,19 +195,27 @@ func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context
192195
}
193196
}
194197

195-
// Always restore deployments whose args were modified during the scenario,
196-
// even when the scenario failed, so that a misconfigured TLS profile does
197-
// not leak into subsequent scenarios. Restore in reverse order so that
198-
// multiple patches to the same deployment unwind back to the true original.
198+
// Stop the in-process recording proxy if one was started.
199+
if sc.proxy != nil {
200+
sc.proxy.stop()
201+
}
202+
203+
// Restore any deployments that were modified during the scenario. Runs
204+
// unconditionally (even on failure) to prevent a misconfigured deployment
205+
// from bleeding into subsequent scenarios. Restored in LIFO order so that
206+
// multiple patches to the same deployment unwind to the true original.
199207
for i := len(sc.deploymentRestores) - 1; i >= 0; i-- {
200208
dr := sc.deploymentRestores[i]
201-
if err2 := patchDeploymentArgs(dr.namespace, dr.deploymentName, dr.originalArgs); err2 != nil {
202-
logger.Info("Error restoring deployment args", "name", dr.deploymentName, "error", err2)
203-
continue
209+
if dr.patchedArgs {
210+
if err2 := patchDeploymentArgs(dr.namespace, dr.name, dr.originalArgs); err2 != nil {
211+
logger.Info("Error restoring deployment args", "name", dr.name, "error", err2)
212+
} else if _, err2 := k8sClient("rollout", "status", "-n", dr.namespace,
213+
fmt.Sprintf("deployment/%s", dr.name), "--timeout=2m"); err2 != nil {
214+
logger.Info("Timeout waiting for deployment rollout after restore", "name", dr.name)
215+
}
204216
}
205-
if _, err2 := k8sClient("rollout", "status", "-n", dr.namespace,
206-
fmt.Sprintf("deployment/%s", dr.deploymentName), "--timeout=2m"); err2 != nil {
207-
logger.Info("Timeout waiting for deployment rollout after restore", "name", dr.deploymentName)
217+
if err2 := restoreDeployment(dr); err2 != nil {
218+
logger.Info("Error restoring deployment env", "deployment", dr.name, "namespace", dr.namespace, "error", err2)
208219
}
209220
}
210221

0 commit comments

Comments
 (0)