Skip to content

Commit b754486

Browse files
tmshort and claude committed
tests: add unit and e2e tests for HTTPS_PROXY support
- Set Proxy: http.ProxyFromEnvironment on the custom http.Transport in BuildHTTPClient so HTTPS_PROXY/NO_PROXY env vars are honoured - Add unit tests verifying the transport uses env-based proxy, tunnels connections through an HTTP CONNECT proxy, and fails when the proxy rejects the tunnel - Add an in-process recording proxy and deployment patch helpers to the e2e step library - Add two @httpproxy e2e scenarios: one verifying operator-controller blocks catalog fetches when the proxy is unreachable, one verifying CONNECT requests are routed through a configured proxy Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> Signed-off-by: Todd Short <tshort@redhat.com>
1 parent e20da75 commit b754486

8 files changed

Lines changed: 702 additions & 23 deletions

File tree

Makefile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -255,9 +255,10 @@ $(eval $(call install-sh,standard,operator-controller-standard.yaml))
255255
test: manifests generate fmt lint test-unit test-e2e test-regression #HELP Run all tests.
256256

257257
E2E_TIMEOUT ?= 10m
258+
GODOG_ARGS ?=
258259
.PHONY: e2e
259260
e2e: #EXHELP Run the e2e tests.
260-
go test -count=1 -v ./test/e2e/features_test.go -timeout=$(E2E_TIMEOUT)
261+
go test -count=1 -v ./test/e2e/features_test.go -timeout=$(E2E_TIMEOUT) $(if $(GODOG_ARGS),-args $(GODOG_ARGS))
261262

262263
E2E_REGISTRY_NAME := docker-registry
263264
E2E_REGISTRY_NAMESPACE := operator-controller-e2e

internal/shared/util/http/httputil.go

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,10 +18,13 @@ func BuildHTTPClient(cpw *CertPoolWatcher) (*http.Client, error) {
1818
RootCAs: pool,
1919
MinVersion: tls.VersionTLS12,
2020
}
21-
tlsTransport := &http.Transport{
21+
httpClient.Transport = &http.Transport{
2222
TLSClientConfig: tlsConfig,
23+
// Proxy must be set explicitly; a nil Proxy field means "no proxy" and
24+
// ignores HTTPS_PROXY/NO_PROXY env vars. Only http.DefaultTransport sets
25+
// this by default; custom transports must opt in.
26+
Proxy: http.ProxyFromEnvironment,
2327
}
24-
httpClient.Transport = tlsTransport
2528

2629
return httpClient, nil
2730
}
Lines changed: 198 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,198 @@
1+
package http_test
2+
3+
import (
4+
"context"
5+
"encoding/pem"
6+
"io"
7+
"net"
8+
"net/http"
9+
"net/http/httptest"
10+
"net/url"
11+
"os"
12+
"path/filepath"
13+
"testing"
14+
"time"
15+
16+
"github.com/stretchr/testify/require"
17+
"sigs.k8s.io/controller-runtime/pkg/log"
18+
19+
httputil "github.com/operator-framework/operator-controller/internal/shared/util/http"
20+
)
21+
22+
// startRecordingProxy starts a plain-HTTP CONNECT proxy that tunnels HTTPS
23+
// connections and records the target host of each CONNECT request.
24+
func startRecordingProxy(proxied chan<- string) *httptest.Server {
25+
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
26+
if r.Method != http.MethodConnect {
27+
http.Error(w, "only CONNECT supported", http.StatusMethodNotAllowed)
28+
return
29+
}
30+
// Non-blocking: if there are unexpected extra CONNECT requests (retries,
31+
// parallel connections) we record the first one and drop the rest rather
32+
// than blocking the proxy handler goroutine.
33+
select {
34+
case proxied <- r.Host:
35+
default:
36+
}
37+
38+
dst, err := net.Dial("tcp", r.Host)
39+
if err != nil {
40+
http.Error(w, err.Error(), http.StatusBadGateway)
41+
return
42+
}
43+
defer dst.Close()
44+
45+
hj, ok := w.(http.Hijacker)
46+
if !ok {
47+
http.Error(w, "hijacking not supported", http.StatusInternalServerError)
48+
return
49+
}
50+
conn, bufrw, err := hj.Hijack()
51+
if err != nil {
52+
http.Error(w, err.Error(), http.StatusInternalServerError)
53+
return
54+
}
55+
defer conn.Close()
56+
57+
if _, err = conn.Write([]byte("HTTP/1.1 200 Connection established\r\n\r\n")); err != nil {
58+
return
59+
}
60+
61+
done := make(chan struct{}, 2)
62+
tunnel := func(dst io.Writer, src io.Reader) {
63+
defer func() { done <- struct{}{} }()
64+
_, _ = io.Copy(dst, src)
65+
}
66+
// Use bufrw (not conn) as the client→dst source: Hijack may have
67+
// buffered bytes (e.g. the TLS ClientHello) that arrived together with
68+
// the CONNECT headers; reading from conn directly would lose them.
69+
go tunnel(dst, bufrw)
70+
go tunnel(conn, dst)
71+
<-done
72+
<-done // wait for both directions before closing connections
73+
}))
74+
}
75+
76+
// certPoolWatcherForTLSServer creates a CertPoolWatcher that trusts the given
77+
// TLS test server's certificate.
78+
func certPoolWatcherForTLSServer(t *testing.T, server *httptest.Server) *httputil.CertPoolWatcher {
79+
t.Helper()
80+
81+
dir := t.TempDir()
82+
certPath := filepath.Join(dir, "server.pem")
83+
84+
certDER := server.TLS.Certificates[0].Certificate[0]
85+
f, err := os.Create(certPath)
86+
require.NoError(t, err)
87+
require.NoError(t, pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: certDER}))
88+
require.NoError(t, f.Close())
89+
90+
cpw, err := httputil.NewCertPoolWatcher(dir, log.FromContext(context.Background()))
91+
require.NoError(t, err)
92+
require.NotNil(t, cpw)
93+
t.Cleanup(cpw.Done)
94+
require.NoError(t, cpw.Start(context.Background()))
95+
return cpw
96+
}
97+
98+
// TestBuildHTTPClientTransportUsesProxyFromEnvironment verifies that the
99+
// transport returned by BuildHTTPClient has Proxy set to http.ProxyFromEnvironment
100+
// so that HTTPS_PROXY and NO_PROXY env vars are honoured at runtime.
101+
func TestBuildHTTPClientTransportUsesProxyFromEnvironment(t *testing.T) {
102+
// Use system certs (empty dir) — we only need a valid CertPoolWatcher.
103+
cpw, err := httputil.NewCertPoolWatcher("", log.FromContext(context.Background()))
104+
require.NoError(t, err)
105+
t.Cleanup(cpw.Done)
106+
require.NoError(t, cpw.Start(context.Background()))
107+
108+
client, err := httputil.BuildHTTPClient(cpw)
109+
require.NoError(t, err)
110+
111+
transport, ok := client.Transport.(*http.Transport)
112+
require.True(t, ok)
113+
require.NotNil(t, transport.Proxy,
114+
"BuildHTTPClient must set transport.Proxy so that HTTPS_PROXY env vars are respected; "+
115+
"a nil Proxy field means no proxy regardless of environment")
116+
}
117+
118+
// TestBuildHTTPClientProxyTunnelsConnections verifies end-to-end that the
119+
// HTTP client produced by BuildHTTPClient correctly tunnels HTTPS connections
120+
// through an HTTP CONNECT proxy.
121+
//
122+
// The test overrides transport.Proxy with http.ProxyURL rather than relying on
123+
// HTTPS_PROXY: httptest servers bind to 127.0.0.1, which http.ProxyFromEnvironment
124+
// silently excludes from proxying, and env-var changes within the same process
125+
// are unreliable due to sync.Once caching. Using http.ProxyURL directly exercises
126+
// the same tunnelling code path that HTTPS_PROXY triggers in production.
127+
func TestBuildHTTPClientProxyTunnelsConnections(t *testing.T) {
128+
targetServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
129+
w.WriteHeader(http.StatusOK)
130+
}))
131+
defer targetServer.Close()
132+
133+
proxied := make(chan string, 1)
134+
proxyServer := startRecordingProxy(proxied)
135+
defer proxyServer.Close()
136+
137+
proxyURL, err := url.Parse(proxyServer.URL)
138+
require.NoError(t, err)
139+
140+
cpw := certPoolWatcherForTLSServer(t, targetServer)
141+
client, err := httputil.BuildHTTPClient(cpw)
142+
require.NoError(t, err)
143+
144+
// Point the transport directly at our test proxy, bypassing the loopback
145+
// exclusion and env-var caching of http.ProxyFromEnvironment.
146+
transport, ok := client.Transport.(*http.Transport)
147+
require.True(t, ok)
148+
transport.Proxy = http.ProxyURL(proxyURL)
149+
150+
resp, err := client.Get(targetServer.URL)
151+
require.NoError(t, err)
152+
resp.Body.Close()
153+
154+
select {
155+
case host := <-proxied:
156+
require.Equal(t, targetServer.Listener.Addr().String(), host,
157+
"proxy must have received a CONNECT request for the target server address")
158+
case <-time.After(5 * time.Second):
159+
t.Fatal("HTTPS connection to target server did not go through the proxy")
160+
}
161+
}
162+
163+
// TestBuildHTTPClientProxyBlocksWhenRejected verifies that when the proxy
164+
// rejects the CONNECT tunnel, the client request fails rather than silently
165+
// falling back to a direct connection.
166+
func TestBuildHTTPClientProxyBlocksWhenRejected(t *testing.T) {
167+
targetServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
168+
w.WriteHeader(http.StatusOK)
169+
}))
170+
defer targetServer.Close()
171+
172+
// A proxy that returns 403 Forbidden for every CONNECT request.
173+
rejectingProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
174+
if r.Method == http.MethodConnect {
175+
http.Error(w, "proxy access denied", http.StatusForbidden)
176+
return
177+
}
178+
http.Error(w, "only CONNECT supported", http.StatusMethodNotAllowed)
179+
}))
180+
defer rejectingProxy.Close()
181+
182+
proxyURL, err := url.Parse(rejectingProxy.URL)
183+
require.NoError(t, err)
184+
185+
cpw := certPoolWatcherForTLSServer(t, targetServer)
186+
client, err := httputil.BuildHTTPClient(cpw)
187+
require.NoError(t, err)
188+
189+
transport, ok := client.Transport.(*http.Transport)
190+
require.True(t, ok)
191+
transport.Proxy = http.ProxyURL(proxyURL)
192+
193+
resp, err := client.Get(targetServer.URL)
194+
if resp != nil {
195+
resp.Body.Close()
196+
}
197+
require.Error(t, err, "request should fail when the proxy rejects the CONNECT tunnel")
198+
}

test/e2e/features/proxy.feature

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
Feature: HTTPS proxy support for outbound catalog requests
2+
3+
OLM's operator-controller fetches catalog data from catalogd over HTTPS.
4+
When HTTPS_PROXY is set in the operator-controller's environment, all
5+
outbound HTTPS requests must be routed through the configured proxy.
6+
7+
Background:
8+
Given OLM is available
9+
And ClusterCatalog "test" serves bundles
10+
And ServiceAccount "olm-sa" with needed permissions is available in test namespace
11+
12+
@HTTPProxy
13+
Scenario: operator-controller respects HTTPS_PROXY when fetching catalog data
14+
Given the "operator-controller" component is configured with HTTPS_PROXY "http://127.0.0.1:39999"
15+
When ClusterExtension is applied
16+
"""
17+
apiVersion: olm.operatorframework.io/v1
18+
kind: ClusterExtension
19+
metadata:
20+
name: ${NAME}
21+
spec:
22+
namespace: ${TEST_NAMESPACE}
23+
serviceAccount:
24+
name: olm-sa
25+
source:
26+
sourceType: Catalog
27+
catalog:
28+
packageName: test
29+
selector:
30+
matchLabels:
31+
"olm.operatorframework.io/metadata.name": test-catalog
32+
"""
33+
Then ClusterExtension reports Progressing as True with Reason Retrying and Message includes:
34+
"""
35+
proxyconnect
36+
"""
37+
38+
@HTTPProxy
39+
Scenario: operator-controller sends catalog requests through a configured HTTPS proxy
40+
# The recording proxy runs on the host and cannot route to in-cluster service
41+
# addresses, so it responds 502 after recording the CONNECT. This is
42+
# intentional: the scenario only verifies that operator-controller respects
43+
# HTTPS_PROXY and sends catalog fetches through the proxy, not that the full
44+
# end-to-end request succeeds.
45+
Given the "operator-controller" component is configured with HTTPS_PROXY pointing to a recording proxy
46+
When ClusterExtension is applied
47+
"""
48+
apiVersion: olm.operatorframework.io/v1
49+
kind: ClusterExtension
50+
metadata:
51+
name: ${NAME}
52+
spec:
53+
namespace: ${TEST_NAMESPACE}
54+
serviceAccount:
55+
name: olm-sa
56+
source:
57+
sourceType: Catalog
58+
catalog:
59+
packageName: test
60+
selector:
61+
matchLabels:
62+
"olm.operatorframework.io/metadata.name": test-catalog
63+
"""
64+
Then the recording proxy received a CONNECT request for the catalogd service

test/e2e/steps/hooks.go

Lines changed: 28 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,15 @@ type resource struct {
2727
namespace string
2828
}
2929

30-
// deploymentRestore records the original container args of a deployment so that
31-
// it can be patched back to its pre-test state during scenario cleanup.
30+
// deploymentRestore records the original state of a deployment so it can be
31+
// rolled back after a test that modifies deployment configuration.
3232
type deploymentRestore struct {
33-
namespace string
34-
deploymentName string
35-
originalArgs []string
33+
name string // deployment name
34+
namespace string
35+
containerName string // container to patch (for env var restores)
36+
patchedArgs bool // true when container args were modified (for TLS profile patches)
37+
originalArgs []string // original container args; may be nil if args were unset
38+
originalEnv []string // original env vars as "NAME=VALUE" (for proxy patches)
3639
}
3740

3841
type scenarioContext struct {
@@ -47,8 +50,8 @@ type scenarioContext struct {
4750
metricsResponse map[string]string
4851
leaderPods map[string]string // component name -> leader pod name
4952
deploymentRestores []deploymentRestore
50-
51-
extensionObjects []client.Object
53+
extensionObjects []client.Object
54+
proxy *recordingProxy
5255
}
5356

5457
// GatherClusterExtensionObjects collects all resources related to the ClusterExtension container in
@@ -192,19 +195,27 @@ func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context
192195
}
193196
}
194197

195-
// Always restore deployments whose args were modified during the scenario,
196-
// even when the scenario failed, so that a misconfigured TLS profile does
197-
// not leak into subsequent scenarios. Restore in reverse order so that
198-
// multiple patches to the same deployment unwind back to the true original.
198+
// Stop the in-process recording proxy if one was started.
199+
if sc.proxy != nil {
200+
sc.proxy.stop()
201+
}
202+
203+
// Restore any deployments that were modified during the scenario. Runs
204+
// unconditionally (even on failure) to prevent a misconfigured deployment
205+
// from bleeding into subsequent scenarios. Restored in LIFO order so that
206+
// multiple patches to the same deployment unwind to the true original.
199207
for i := len(sc.deploymentRestores) - 1; i >= 0; i-- {
200208
dr := sc.deploymentRestores[i]
201-
if err2 := patchDeploymentArgs(dr.namespace, dr.deploymentName, dr.originalArgs); err2 != nil {
202-
logger.Info("Error restoring deployment args", "name", dr.deploymentName, "error", err2)
203-
continue
209+
if dr.patchedArgs {
210+
if err2 := patchDeploymentArgs(dr.namespace, dr.name, dr.originalArgs); err2 != nil {
211+
logger.Info("Error restoring deployment args", "name", dr.name, "error", err2)
212+
} else if _, err2 := k8sClient("rollout", "status", "-n", dr.namespace,
213+
fmt.Sprintf("deployment/%s", dr.name), "--timeout=2m"); err2 != nil {
214+
logger.Info("Timeout waiting for deployment rollout after restore", "name", dr.name)
215+
}
204216
}
205-
if _, err2 := k8sClient("rollout", "status", "-n", dr.namespace,
206-
fmt.Sprintf("deployment/%s", dr.deploymentName), "--timeout=2m"); err2 != nil {
207-
logger.Info("Timeout waiting for deployment rollout after restore", "name", dr.deploymentName)
217+
if err2 := restoreDeployment(dr); err2 != nil {
218+
logger.Info("Error restoring deployment env", "deployment", dr.name, "namespace", dr.namespace, "error", err2)
208219
}
209220
}
210221

0 commit comments

Comments
 (0)