Commit 0522dc4
test: add e2e tests for cluster-health-analyzer operand
Add an end-to-end test that deploys a Monitoring UIPlugin with the
cluster-health-analyzer enabled, fires a synthetic PrometheusRule alert,
and verifies the corresponding incident metric appears.

Framework additions:

- SkipIfClusterVersionBelow: a semver gate that fails on an unknown
  version instead of silently skipping.
- DumpNamespaceDebug: an on-failure diagnostic dump of deployments,
  pods, and events.
- AssertPromQLResultWithOptions: configurable poll interval and timeout.

The UIPlugin setup is race-tolerant: deleteUIPluginIfExists issues a
direct Delete and treats IsNotFound as success, avoiding a TOCTOU window
between Get and Delete that could cause spurious failures.

Made-with: Cursor
1 parent cbd6ba3 commit 0522dc4

4 files changed

Lines changed: 338 additions & 3 deletions
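The race-tolerance note in the commit message is about the shape of the pre-test cleanup. A minimal sketch of the two shapes (the Get-then-Delete variant is hypothetical and not in this commit; ctx, key, and plugin stand in for the obvious setup): a Get-then-Delete sequence can fail spuriously if the object disappears between the two calls, while deleting unconditionally and treating IsNotFound as success closes that window.

    // Racy variant (hypothetical): the UIPlugin can vanish between Get and
    // Delete, e.g. when a previous run's cleanup is still in flight.
    if err := f.K8sClient.Get(ctx, key, plugin); err == nil {
        if err := f.K8sClient.Delete(ctx, plugin); err != nil {
            t.Fatalf("failed to delete existing UIPlugin: %v", err) // spurious on NotFound
        }
    }

    // This commit's approach: Delete directly; NotFound means already clean.
    if err := f.K8sClient.Delete(ctx, plugin); err != nil && !errors.IsNotFound(err) {
        t.Fatalf("failed to delete existing UIPlugin: %v", err)
    }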

test/e2e/framework/assertions.go

Lines changed: 16 additions & 1 deletion
@@ -423,11 +423,26 @@ func (f *Framework) GetPodMetrics(pod *v1.Pod, opts ...func(*HTTPOptions)) ([]by
 // the callback function for additional checks.
 func (f *Framework) AssertPromQLResult(t *testing.T, expr string, callback func(model.Value) error) error {
     t.Helper()
+    return f.AssertPromQLResultWithOptions(t, expr, callback)
+}
+
+// AssertPromQLResultWithOptions is like AssertPromQLResult but accepts
+// WithTimeout and WithPollInterval options to override the default polling
+// parameters.
+func (f *Framework) AssertPromQLResultWithOptions(t *testing.T, expr string, callback func(model.Value) error, fns ...OptionFn) error {
+    t.Helper()
+    option := AssertOption{
+        PollInterval: 20 * time.Second,
+        WaitTimeout:  3 * DefaultTestTimeout,
+    }
+    for _, fn := range fns {
+        fn(&option)
+    }
     var (
         pollErr error
         v       model.Value
     )
-    if err := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 3*DefaultTestTimeout, true, func(context.Context) (bool, error) {
+    if err := wait.PollUntilContextTimeout(context.Background(), option.PollInterval, option.WaitTimeout, true, func(context.Context) (bool, error) {
         v, pollErr = f.getPromQLResult(context.Background(), expr)
         if pollErr != nil {
             t.Logf("error from getPromQLResult(): %s", pollErr)
test/e2e/framework/framework.go

Lines changed: 95 additions & 0 deletions
@@ -14,6 +14,7 @@ import (
 
     configv1 "github.com/openshift/api/config/v1"
     "github.com/pkg/errors"
+    "golang.org/x/mod/semver"
     appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     v1 "k8s.io/api/core/v1"
@@ -25,6 +26,7 @@ import (
     "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/portforward"
     "k8s.io/client-go/transport/spdy"
+    "k8s.io/utils/ptr"
     "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -257,3 +259,96 @@ func (f *Framework) CleanUp(t *testing.T, cleanupFunc func()) {
         }
     })
 }
+
+// SkipIfClusterVersionBelow skips the test if the cluster version is below
+// minVersion. The minVersion string should be a semver-compatible version
+// (e.g. "4.19" or "v4.19").
+func (f *Framework) SkipIfClusterVersionBelow(t *testing.T, minVersion string) {
+    t.Helper()
+    cv := &configv1.ClusterVersion{}
+    err := f.K8sClient.Get(t.Context(), client.ObjectKey{Name: "version"}, cv)
+    if err != nil {
+        t.Fatalf("failed to determine cluster version: %v", err)
+        return
+    }
+
+    actual := cv.Status.Desired.Version
+    if actual == "" {
+        t.Fatal("cluster version is empty")
+        return
+    }
+    t.Logf("Detected cluster version: %s", actual)
+
+    if !strings.HasPrefix(actual, "v") {
+        actual = "v" + actual
+    }
+    if !strings.HasPrefix(minVersion, "v") {
+        minVersion = "v" + minVersion
+    }
+
+    canonicalActual := fmt.Sprintf("%s-0", semver.Canonical(actual))
+    canonicalMin := fmt.Sprintf("%s-0", semver.Canonical(minVersion))
+
+    if semver.Compare(canonicalActual, canonicalMin) < 0 {
+        t.Skipf("Skipping: cluster version %s is below minimum required %s", cv.Status.Desired.Version, minVersion)
+    }
+}
+
+// DumpNamespaceDebug logs deployments (with conditions), pods (with container
+// statuses), and events for the given namespace. Useful as a t.Cleanup or
+// on-failure diagnostic helper.
+func (f *Framework) DumpNamespaceDebug(t *testing.T, namespace string) {
+    t.Helper()
+    ctx := context.WithoutCancel(t.Context())
+
+    t.Log("=== BEGIN DEBUG DUMP ===")
+    defer t.Log("=== END DEBUG DUMP ===")
+
+    var deployments appsv1.DeploymentList
+    if err := f.K8sClient.List(ctx, &deployments, client.InNamespace(namespace)); err != nil {
+        t.Logf("Failed to list deployments in %s: %v", namespace, err)
+    } else {
+        t.Logf("Deployments in namespace %s: %d", namespace, len(deployments.Items))
+        for _, d := range deployments.Items {
+            t.Logf("  Deployment: name=%s replicas=%d readyReplicas=%d availableReplicas=%d",
+                d.Name, ptr.Deref(d.Spec.Replicas, 0), d.Status.ReadyReplicas, d.Status.AvailableReplicas)
+            for _, c := range d.Status.Conditions {
+                t.Logf("    condition: type=%s status=%s reason=%s message=%s",
+                    c.Type, c.Status, c.Reason, c.Message)
+            }
+        }
+    }
+
+    var pods corev1.PodList
+    if err := f.K8sClient.List(ctx, &pods, client.InNamespace(namespace)); err != nil {
+        t.Logf("Failed to list pods in %s: %v", namespace, err)
+    } else {
+        t.Logf("Pods in namespace %s: %d", namespace, len(pods.Items))
+        for _, p := range pods.Items {
+            t.Logf("  Pod: name=%s phase=%s", p.Name, p.Status.Phase)
+            for _, cs := range p.Status.ContainerStatuses {
+                switch {
+                case cs.State.Running != nil:
+                    t.Logf("    container=%s ready=%v restarts=%d state=Running", cs.Name, cs.Ready, cs.RestartCount)
+                case cs.State.Waiting != nil:
+                    t.Logf("    container=%s ready=%v restarts=%d state=Waiting reason=%s message=%s",
+                        cs.Name, cs.Ready, cs.RestartCount, cs.State.Waiting.Reason, cs.State.Waiting.Message)
+                case cs.State.Terminated != nil:
+                    t.Logf("    container=%s ready=%v restarts=%d state=Terminated reason=%s exitCode=%d",
+                        cs.Name, cs.Ready, cs.RestartCount, cs.State.Terminated.Reason, cs.State.Terminated.ExitCode)
+                }
+            }
+        }
+    }
+
+    var events corev1.EventList
+    if err := f.K8sClient.List(ctx, &events, client.InNamespace(namespace)); err != nil {
+        t.Logf("Failed to list events in %s: %v", namespace, err)
+    } else {
+        t.Logf("Events in namespace %s: %d", namespace, len(events.Items))
+        for _, e := range events.Items {
+            t.Logf("  Event: involvedObject=%s/%s reason=%s message=%s type=%s count=%d",
+                e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Reason, e.Message, e.Type, e.Count)
+        }
+    }
+}
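Taken together, the two new helpers are meant to bracket a version-gated test: gate at the top, dump on failure. A sketch of the intended call pattern, assuming the package-level framework instance f used elsewhere in the e2e suite; the namespace value is illustrative:

    func testSomething(t *testing.T) {
        // Fails the test on an unreadable or empty version, skips below 4.19.
        f.SkipIfClusterVersionBelow(t, "4.19")

        t.Cleanup(func() {
            if t.Failed() {
                // Log deployments, pods, and events only when the test failed.
                f.DumpNamespaceDebug(t, "some-operand-namespace")
            }
        })

        // ... test body ...
    }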
Lines changed: 221 additions & 0 deletions
@@ -0,0 +1,221 @@
+package e2e
+
+import (
+    "context"
+    "fmt"
+    "strconv"
+    "testing"
+    "time"
+
+    monv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+    "github.com/prometheus/common/model"
+    "gotest.tools/v3/assert"
+    appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+
+    uiv1 "github.com/rhobs/observability-operator/pkg/apis/uiplugin/v1alpha1"
+    "github.com/rhobs/observability-operator/test/e2e/framework"
+)
+
+const (
+    healthAnalyzerDeploymentName = "health-analyzer"
+    prometheusRuleNamespace      = "e2e-health-analyzer-rules"
+)
+
+func clusterHealthAnalyzer(t *testing.T) {
+    f.SkipIfClusterVersionBelow(t, "4.19")
+
+    err := monv1.AddToScheme(f.K8sClient.Scheme())
+    assert.NilError(t, err, "failed to add monv1 to scheme")
+
+    plugin := resetMonitoringUIPlugin(t)
+    err = f.K8sClient.Create(t.Context(), plugin)
+    assert.NilError(t, err, "failed to create monitoring UIPlugin")
+
+    t.Cleanup(func() {
+        if t.Failed() {
+            dumpClusterHealthAnalyzerDebug(t, plugin.Name)
+        }
+    })
+
+    t.Log("Waiting for health-analyzer deployment to become ready...")
+    haDeployment := appsv1.Deployment{}
+    f.GetResourceWithRetry(t, healthAnalyzerDeploymentName, uiPluginInstallNS, &haDeployment)
+    f.AssertDeploymentReady(healthAnalyzerDeploymentName, uiPluginInstallNS, framework.WithTimeout(5*time.Minute))(t)
+
+    suffix := strconv.FormatInt(time.Now().UnixNano()%100000, 10)
+    ruleName := "e2e-health-analyzer-" + suffix
+    alertName := "E2EHealthAnalyzer" + suffix
+
+    createRuleNamespace(t, prometheusRuleNamespace)
+
+    rule := newAlwaysFiringRule(t, ruleName, alertName)
+    err = f.K8sClient.Create(t.Context(), rule)
+    assert.NilError(t, err, "failed to create PrometheusRule")
+
+    t.Log("Waiting for alert to fire in Prometheus...")
+    alertQuery := fmt.Sprintf(`ALERTS{alertname="%s",alertstate="firing"}`, alertName)
+    err = f.AssertPromQLResultWithOptions(t, alertQuery,
+        func(v model.Value) error {
+            vec, ok := v.(model.Vector)
+            if !ok || len(vec) == 0 {
+                return fmt.Errorf("expected firing alert, got: %v", v)
+            }
+            return nil
+        },
+        framework.WithPollInterval(30*time.Second),
+        framework.WithTimeout(10*time.Minute),
+    )
+    assert.NilError(t, err, "alert %s never fired", alertName)
+
+    t.Log("Waiting for cluster-health-analyzer to expose incident metric...")
+    incidentQuery := fmt.Sprintf(`cluster_health_components_map{src_alertname="%s",src_severity="warning"}`, alertName)
+    err = f.AssertPromQLResultWithOptions(t, incidentQuery,
+        func(v model.Value) error {
+            vec, ok := v.(model.Vector)
+            if !ok || len(vec) == 0 {
+                return fmt.Errorf("expected incident metric, got: %v", v)
+            }
+            return nil
+        },
+        framework.WithPollInterval(30*time.Second),
+        framework.WithTimeout(15*time.Minute),
+    )
+    assert.NilError(t, err, "incident metric for %s never appeared", alertName)
+}
+
+func resetMonitoringUIPlugin(t *testing.T) *uiv1.UIPlugin {
+    plugin := &uiv1.UIPlugin{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "monitoring",
+        },
+        Spec: uiv1.UIPluginSpec{
+            Type: uiv1.TypeMonitoring,
+            Monitoring: &uiv1.MonitoringConfig{
+                ClusterHealthAnalyzer: &uiv1.ClusterHealthAnalyzerReference{
+                    Enabled: true,
+                },
+            },
+        },
+    }
+
+    deleteUIPluginIfExists(t, plugin.Name)
+
+    f.CleanUp(t, func() {
+        ctx := context.WithoutCancel(t.Context())
+        if err := f.K8sClient.Delete(ctx, plugin); err != nil && !errors.IsNotFound(err) {
+            t.Logf("warning: failed to delete UIPlugin during cleanup: %v", err)
+        }
+        waitForUIPluginDeletion(plugin)
+    })
+    return plugin
+}
+
+func deleteUIPluginIfExists(t *testing.T, name string) {
+    t.Helper()
+    plugin := &uiv1.UIPlugin{
+        ObjectMeta: metav1.ObjectMeta{Name: name},
+    }
+    err := f.K8sClient.Delete(t.Context(), plugin)
+    if err != nil {
+        if errors.IsNotFound(err) {
+            return
+        }
+        t.Fatalf("failed to delete existing UIPlugin: %v", err)
+    }
+    t.Log("UIPlugin already existed, waiting for deletion...")
+    waitForUIPluginDeletion(plugin)
+}
+
+func createRuleNamespace(t *testing.T, name string) {
+    t.Helper()
+    ns := &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+            Labels: map[string]string{
+                "openshift.io/cluster-monitoring": "true",
+            },
+        },
+    }
+    if err := f.K8sClient.Create(t.Context(), ns); err != nil && !errors.IsAlreadyExists(err) {
+        t.Fatalf("failed to create rule namespace %s: %v", name, err)
+    }
+    f.CleanUp(t, func() {
+        ctx := context.WithoutCancel(t.Context())
+        f.K8sClient.Delete(ctx, ns)
+    })
+}
+
+func newAlwaysFiringRule(t *testing.T, ruleName, alertName string) *monv1.PrometheusRule {
+    rule := &monv1.PrometheusRule{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      ruleName,
+            Namespace: prometheusRuleNamespace,
+        },
+        Spec: monv1.PrometheusRuleSpec{
+            Groups: []monv1.RuleGroup{{
+                Name: "health-analyzer-test-" + ruleName,
+                Rules: []monv1.Rule{{
+                    Alert:  alertName,
+                    Expr:   intstr.FromString(`vector(1)`),
+                    Labels: map[string]string{"severity": "warning"},
+                    Annotations: map[string]string{
+                        "summary": "E2E static test alert for cluster health analyzer.",
+                    },
+                }},
+            }},
+        },
+    }
+    f.CleanUp(t, func() {
+        ctx := context.WithoutCancel(t.Context())
+        if err := f.K8sClient.Delete(ctx, rule); err != nil && !errors.IsNotFound(err) {
+            t.Logf("warning: failed to delete PrometheusRule during cleanup: %v", err)
+        }
+    })
+    return rule
+}
+
+func dumpClusterHealthAnalyzerDebug(t *testing.T, pluginName string) {
+    t.Helper()
+    ctx := context.WithoutCancel(t.Context())
+
+    // UIPlugin-specific diagnostics
+    var plugin uiv1.UIPlugin
+    if err := f.K8sClient.Get(ctx, client.ObjectKey{Name: pluginName}, &plugin); err != nil {
+        t.Logf("Failed to get UIPlugin %q: %v", pluginName, err)
+    } else {
+        t.Logf("UIPlugin %q generation=%d, resourceVersion=%s", pluginName, plugin.Generation, plugin.ResourceVersion)
+        t.Logf("UIPlugin spec.type=%s", plugin.Spec.Type)
+        if plugin.Spec.Monitoring != nil {
+            if plugin.Spec.Monitoring.ClusterHealthAnalyzer != nil {
+                t.Logf("UIPlugin spec.monitoring.clusterHealthAnalyzer.enabled=%v", plugin.Spec.Monitoring.ClusterHealthAnalyzer.Enabled)
+            }
+            if plugin.Spec.Monitoring.Incidents != nil {
+                t.Logf("UIPlugin spec.monitoring.incidents.enabled=%v", plugin.Spec.Monitoring.Incidents.Enabled)
+            }
+        }
+        if len(plugin.Status.Conditions) == 0 {
+            t.Log("UIPlugin has no status conditions")
+        }
+        for _, c := range plugin.Status.Conditions {
+            t.Logf("UIPlugin condition: type=%s status=%s reason=%s message=%s", c.Type, c.Status, c.Reason, c.Message)
+        }
+    }
+
+    var plugins uiv1.UIPluginList
+    if err := f.K8sClient.List(ctx, &plugins); err != nil {
+        t.Logf("Failed to list UIPlugins: %v", err)
+    } else {
+        t.Logf("Total UIPlugins in cluster: %d", len(plugins.Items))
+        for _, p := range plugins.Items {
+            t.Logf("  UIPlugin: name=%s type=%s conditions=%d", p.Name, p.Spec.Type, len(p.Status.Conditions))
+        }
+    }
+
+    // Generic namespace diagnostics (deployments, pods, events)
+    f.DumpNamespaceDebug(t, uiPluginInstallNS)
+}
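Both PromQL assertions in the new test use the same "non-empty vector" callback. If more scenarios grow this shape, it could be factored out; a sketch under that assumption — expectNonEmptyVector is hypothetical and not part of this commit:

    // expectNonEmptyVector returns a callback for AssertPromQLResultWithOptions
    // that keeps the poll going until the query yields at least one sample.
    func expectNonEmptyVector(what string) func(model.Value) error {
        return func(v model.Value) error {
            vec, ok := v.(model.Vector)
            if !ok || len(vec) == 0 {
                return fmt.Errorf("expected %s, got: %v", what, v)
            }
            return nil
        }
    }

This would reduce each assertion to a single call, e.g. f.AssertPromQLResultWithOptions(t, alertQuery, expectNonEmptyVector("firing alert"), ...).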

test/e2e/uiplugin_test.go

Lines changed: 6 additions & 2 deletions
@@ -34,6 +34,10 @@ func TestUIPlugin(t *testing.T) {
             name:     "Create dashboards UIPlugin",
             scenario: dashboardsUIPlugin,
         },
+        {
+            name:     "Cluster health analyzer",
+            scenario: clusterHealthAnalyzer,
+        },
     }
 
     for _, tc := range ts {
@@ -63,13 +67,13 @@ func newDashboardsUIPlugin(t *testing.T) *uiv1.UIPlugin {
     }
     f.CleanUp(t, func() {
         f.K8sClient.Delete(context.Background(), db)
-        waitForDBUIPluginDeletion(db)
+        waitForUIPluginDeletion(db)
     })
 
     return db
 }
 
-func waitForDBUIPluginDeletion(db *uiv1.UIPlugin) error {
+func waitForUIPluginDeletion(db *uiv1.UIPlugin) error {
     return wait.PollUntilContextTimeout(context.Background(), 5*time.Second, wait.ForeverTestTimeout, true, func(ctx context.Context) (done bool, err error) {
         err = f.K8sClient.Get(context.Background(),
             client.ObjectKey{Name: db.Name},