Skip to content

Commit e300345

Browse files
RafalKorepta and claude
committed
operator: Replace otelutil/log with controller-runtime/pkg/log
Work around the noisy "ctx" key-value pair that common-go's otelutil/log appends to every log line, serializing the full context.Context wrapper chain (e.g. "context.Background.WithCancel.WithCancel.WithValue(...)"). Switch all call sites from log.Info(ctx, ...) / log.Error(ctx, err, ...) to the standard controller-runtime pattern: log := log.FromContext(ctx) followed by log.Info(...) / log.Error(err, ...). This is a temporary workaround until redpanda-data/common-go#160 lands, after which we can revert back to otelutil/log. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 51f5a3e commit e300345

File tree

21 files changed

+97
-77
lines changed

21 files changed

+97
-77
lines changed

harpoon/go.mod

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,6 @@ require (
141141
github.com/prometheus/common v0.67.5 // indirect
142142
github.com/prometheus/procfs v0.19.2 // indirect
143143
github.com/redpanda-data/common-go/goldenfile v0.0.0-20260109170727-1dd9f5d22ee1 // indirect
144-
github.com/redpanda-data/common-go/otelutil v0.0.0-20260109170727-1dd9f5d22ee1 // indirect
145144
github.com/rivo/uniseg v0.4.7 // indirect
146145
github.com/rogpeppe/go-internal v1.14.1 // indirect
147146
github.com/rubenv/sql-migrate v1.8.0 // indirect
@@ -166,10 +165,8 @@ require (
166165
go.opentelemetry.io/otel v1.40.0 // indirect
167166
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
168167
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
169-
go.opentelemetry.io/otel/log v0.16.0 // indirect
170168
go.opentelemetry.io/otel/metric v1.40.0 // indirect
171169
go.opentelemetry.io/otel/sdk v1.40.0 // indirect
172-
go.opentelemetry.io/otel/sdk/log v0.16.0 // indirect
173170
go.opentelemetry.io/otel/trace v1.40.0 // indirect
174171
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
175172
go.uber.org/multierr v1.11.0 // indirect

harpoon/go.sum

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -411,8 +411,6 @@ github.com/redpanda-data/common-go/goldenfile v0.0.0-20260109170727-1dd9f5d22ee1
411411
github.com/redpanda-data/common-go/goldenfile v0.0.0-20260109170727-1dd9f5d22ee1/go.mod h1:V3OBV2kcF/BDDytUZuKvIygbaXoGPT5VO3KmMAz+mBM=
412412
github.com/redpanda-data/common-go/kube v0.0.0-20260225221458-fec06b917c9a h1:Ce2o1IIQcR1UD8FX8sXwevI7JlEVnWoMtMLkawC5mbU=
413413
github.com/redpanda-data/common-go/kube v0.0.0-20260225221458-fec06b917c9a/go.mod h1:FMuuUG3IVwwSad+3da0WIPYHsUuvi3LHnjgSv8ovXug=
414-
github.com/redpanda-data/common-go/otelutil v0.0.0-20260109170727-1dd9f5d22ee1 h1:fLBU7IJZq8PR88AkEdI5G+yyBsftionGapvAOMPW9hg=
415-
github.com/redpanda-data/common-go/otelutil v0.0.0-20260109170727-1dd9f5d22ee1/go.mod h1:LzztzYWpD+e+A6hV6+eQhYY60zHQR7ePxhLgyMmRn/Q=
416414
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
417415
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
418416
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=

operator/api/redpanda/v1alpha2/zz_generated.deprecations_test.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,11 +30,12 @@ package v1alpha2
3030
import (
3131
"testing"
3232

33-
"github.com/redpanda-data/common-go/rp-controller-utils/deprecations"
3433
"github.com/stretchr/testify/require"
3534
"k8s.io/apimachinery/pkg/runtime"
3635
"k8s.io/utils/ptr"
3736
"sigs.k8s.io/controller-runtime/pkg/client"
37+
38+
"github.com/redpanda-data/common-go/rp-controller-utils/deprecations"
3839
)
3940

4041
func TestDeprecatedFieldWarnings(t *testing.T) {

operator/cmd/run/run.go

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@ import (
2121

2222
"github.com/cockroachdb/errors"
2323
"github.com/redpanda-data/common-go/kube"
24-
"github.com/redpanda-data/common-go/otelutil/log"
2524
"github.com/spf13/cobra"
2625
corev1 "k8s.io/api/core/v1"
2726
_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -30,6 +29,7 @@ import (
3029
"sigs.k8s.io/controller-runtime/pkg/certwatcher"
3130
"sigs.k8s.io/controller-runtime/pkg/client"
3231
"sigs.k8s.io/controller-runtime/pkg/healthz"
32+
controllerlog "sigs.k8s.io/controller-runtime/pkg/log"
3333
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
3434
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
3535
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -560,7 +560,8 @@ func Run(
560560
// runnables for the custom resources in the vectorized group, AKA the V1
561561
// operator.
562562
func setupVectorizedControllers(ctx context.Context, mgr ctrl.Manager, factory internalclient.ClientFactory, cloudExpander *pkgsecrets.CloudExpander, opts *RunOptions) error {
563-
log.Info(ctx, "Starting Vectorized (V1) Controllers")
563+
log := controllerlog.FromContext(ctx)
564+
log.Info("Starting Vectorized (V1) Controllers")
564565

565566
configurator := resources.ConfiguratorSettings{
566567
ConfiguratorBaseImage: opts.configuratorBaseImage,
@@ -589,20 +590,20 @@ func setupVectorizedControllers(ctx context.Context, mgr ctrl.Manager, factory i
589590
CloudSecretsExpander: cloudExpander,
590591
Timeout: opts.rpClientTimeout,
591592
}).WithClusterDomain(opts.clusterDomain).WithConfiguratorSettings(configurator).SetupWithManager(mgr); err != nil {
592-
log.Error(ctx, err, "Unable to create controller", "controller", "Cluster")
593+
log.Error(err, "Unable to create controller", "controller", "Cluster")
593594
return err
594595
}
595596

596597
if err := vectorizedcontrollers.NewClusterMetricsController(mgr.GetClient()).SetupWithManager(mgr); err != nil {
597-
log.Error(ctx, err, "Unable to create controller", "controller", "ClustersMetrics")
598+
log.Error(err, "Unable to create controller", "controller", "ClustersMetrics")
598599
return err
599600
}
600601

601602
// Setup webhooks
602603
if opts.webhookEnabled {
603-
log.Info(ctx, "Setup webhook")
604+
log.Info("Setup webhook")
604605
if err := (&vectorizedv1alpha1.Cluster{}).SetupWebhookWithManager(mgr); err != nil {
605-
log.Error(ctx, err, "Unable to create webhook", "webhook", "RedpandaCluster")
606+
log.Error(err, "Unable to create webhook", "webhook", "RedpandaCluster")
606607
return err
607608
}
608609
}
@@ -626,7 +627,7 @@ func setupVectorizedControllers(ctx context.Context, mgr ctrl.Manager, factory i
626627
)
627628

628629
if err := d.SetupWithManager(mgr); err != nil {
629-
log.Error(ctx, err, "unable to create controller", "controller", "StatefulSetDecommissioner")
630+
log.Error(err, "unable to create controller", "controller", "StatefulSetDecommissioner")
630631
return err
631632
}
632633
}

operator/cmd/supervisor/supervisor.go

Lines changed: 14 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,10 @@ import (
1919
"syscall"
2020
"time"
2121

22-
"github.com/redpanda-data/common-go/otelutil/log"
2322
"github.com/spf13/cobra"
2423
"golang.org/x/time/rate"
24+
//"github.com/redpanda-data/common-go/otelutil/log" bring back after https://github.com/redpanda-data/common-go/pull/160
25+
"sigs.k8s.io/controller-runtime/pkg/log"
2526
)
2627

2728
func Command() *cobra.Command {
@@ -51,6 +52,7 @@ The default retry parameters are tuned for a long running process, such as the
5152

5253
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
5354
defer cancel()
55+
log := log.FromContext(ctx)
5456

5557
limiter := rate.NewLimiter(rate.Every(retryRate), retryBurst)
5658
runner := Runner{
@@ -80,13 +82,13 @@ The default retry parameters are tuned for a a long running process, such as the
8082
delay := reservation.Delay()
8183

8284
if delay > 0 {
83-
log.Info(ctx, "backing off retries", "delay", delay.String())
85+
log.Info( "backing off retries", "delay", delay.String())
8486
}
8587

8688
// Otherwise wait until we have a retry "credit" available.
8789
select {
8890
case <-ctx.Done():
89-
log.Info(ctx, "shutting down")
91+
log.Info( "shutting down")
9092
return
9193

9294
case <-time.After(reservation.Delay()):
@@ -117,7 +119,8 @@ type Runner struct {
117119
func (r *Runner) Run(
118120
ctx context.Context,
119121
) (cont bool) {
120-
log.Info(ctx, "starting process", "command", r.Command)
122+
log := log.FromContext(ctx)
123+
log.Info("starting process", "command", r.Command)
121124

122125
//nolint:gosec // This is running with a users permissions, nothing to exploit here.
123126
subprocess := exec.Command(r.Command[0], r.Command[1:]...)
@@ -127,7 +130,7 @@ func (r *Runner) Run(
127130
subprocess.Stdout = r.Stdout
128131

129132
if err := subprocess.Start(); err != nil {
130-
log.Error(ctx, err, "failed to start process", "command", r.Command)
133+
log.Error(err, "failed to start process", "command", r.Command)
131134
// If we fail to startup the provided process, don't continue to retry
132135
// as it's exceptionally unlikely this type of issue will resolve
133136
// itself.
@@ -142,19 +145,19 @@ func (r *Runner) Run(
142145
for {
143146
select {
144147
case err := <-doneCh:
145-
log.Error(ctx, err, "process exited")
148+
log.Error(err, "process exited")
146149
return true // Continue to re-run on any process crashes.
147150

148151
case sig := <-r.Signals:
149152
// Forward signals onto our child process.
150-
log.Info(ctx, "forwarding signal", "signal", sig)
153+
log.Info("forwarding signal", "signal", sig)
151154
if err := subprocess.Process.Signal(sig); err != nil {
152-
log.Error(ctx, err, "failed to forward signal to subprocess", "signal", sig)
155+
log.Error(err, "failed to forward signal to subprocess", "signal", sig)
153156
}
154157
continue
155158

156159
case <-ctx.Done():
157-
log.Info(ctx, "terminating process")
160+
log.Info("terminating process")
158161

159162
// Initiate the shutdown process by sending SIGTERM to match
160163
// Kubernetes' behavior[1] (in most cases).
@@ -167,10 +170,10 @@ func (r *Runner) Run(
167170
// before KILL'ing it.
168171
select {
169172
case <-doneCh:
170-
log.Info(ctx, "process gracefully terminated")
173+
log.Info("process gracefully terminated")
171174
case <-time.After(r.GracePeriod):
172175
_ = subprocess.Process.Kill()
173-
log.Info(ctx, "process killed")
176+
log.Info("process killed")
174177
}
175178

176179
// If the context has been cancelled, don't continue the loop.

operator/cmd/syncclusterconfig/superusers.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,8 @@ import (
1717
"sort"
1818
"strings"
1919

20-
"github.com/redpanda-data/common-go/otelutil/log"
20+
//"github.com/redpanda-data/common-go/otelutil/log" bring back after https://github.com/redpanda-data/common-go/pull/160
21+
"sigs.k8s.io/controller-runtime/pkg/log"
2122
)
2223

2324
// NormalizeSuperusers de-duplicates and sorts the superusers

operator/internal/controller/console/controller.go

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@ import (
2222
"github.com/imdario/mergo"
2323
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
2424
"github.com/redpanda-data/common-go/kube"
25-
"github.com/redpanda-data/common-go/otelutil/log"
2625
appsv1 "k8s.io/api/apps/v1"
2726
corev1 "k8s.io/api/core/v1"
2827
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -32,6 +31,8 @@ import (
3231
ctrl "sigs.k8s.io/controller-runtime"
3332
"sigs.k8s.io/controller-runtime/pkg/client"
3433
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
34+
//"github.com/redpanda-data/common-go/otelutil/log" bring back after https://github.com/redpanda-data/common-go/pull/160
35+
"sigs.k8s.io/controller-runtime/pkg/log"
3536
mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
3637
mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile"
3738

@@ -134,7 +135,7 @@ func (c *Controller) Reconcile(ctx context.Context, req mcreconcile.Request) (ct
134135
// so we don't NEED to use a finalizer. It's here to prevent accidents if
135136
// we need cluster scoped resources one day.
136137
if !cr.DeletionTimestamp.IsZero() {
137-
log.Info(ctx, "GC'ing Console", "key", kube.AsKey(cr))
138+
log.FromContext(ctx).Info("GC'ing Console", "key", kube.AsKey(cr))
138139

139140
if controllerutil.RemoveFinalizer(cr, finalizerKey) {
140141
if _, err := syncer.DeleteAll(ctx); err != nil {
@@ -160,7 +161,7 @@ func (c *Controller) Reconcile(ctx context.Context, req mcreconcile.Request) (ct
160161
return ctrl.Result{}, nil
161162
}
162163

163-
log.Info(ctx, "reconciling Console", "key", kube.AsKey(cr))
164+
log.FromContext(ctx).Info("reconciling Console", "key", kube.AsKey(cr))
164165

165166
// Add the finalizer, if not present.
166167
if controllerutil.AddFinalizer(cr, finalizerKey) {
@@ -342,7 +343,7 @@ func (c *Controller) skipServiceMonitorWatchIfNotInstalled(ctx context.Context)
342343
if errors.Is(err, &meta.NoKindMatchError{}) {
343344
return true
344345
} else if err != nil {
345-
log.Error(ctx, err, "could not list ServiceMonitors")
346+
log.FromContext(ctx).Error(err, "could not list ServiceMonitors")
346347
return true
347348
}
348349
return false

operator/internal/controller/redpanda/multicluster_controller.go

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import (
3232
"sigs.k8s.io/controller-runtime/pkg/cluster"
3333
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
3434
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
35+
controllerlog "sigs.k8s.io/controller-runtime/pkg/log"
3536
"sigs.k8s.io/controller-runtime/pkg/reconcile"
3637
mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
3738
mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile"
@@ -82,7 +83,7 @@ type stretchClusterReconciliationFn func(ctx context.Context, state *stretchClus
8283

8384
func (r *MulticlusterReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (result ctrl.Result, err error) {
8485
start := time.Now()
85-
l := log.FromContext(ctx).WithName("MulticlusterReconciler.Reconcile")
86+
l := controllerlog.FromContext(ctx).WithName("MulticlusterReconciler.Reconcile")
8687

8788
l.V(1).Info("Starting reconcile loop")
8889
defer func() {
@@ -228,7 +229,7 @@ func (r *MulticlusterReconciler) Reconcile(ctx context.Context, req mcreconcile.
228229
// persists the status, and returns drifted=true so the caller can abort reconciliation.
229230
// When specs are aligned it sets SpecSynced=True.
230231
func (r *MulticlusterReconciler) checkSpecConsistency(ctx context.Context, state *stretchClusterReconciliationState, sc *redpandav1alpha2.StretchCluster) (drifted bool, _ ctrl.Result, _ error) {
231-
l := log.FromContext(ctx).WithName("checkSpecConsistency")
232+
l := controllerlog.FromContext(ctx).WithName("checkSpecConsistency")
232233

233234
localSpec := sc.Spec
234235
var driftDetails []string
@@ -285,7 +286,7 @@ func specDiffFields(a, b redpandav1alpha2.StretchClusterSpec) []string {
285286
}
286287

287288
func (r *MulticlusterReconciler) fetchInitialState(ctx context.Context, sc *redpandav1alpha2.StretchCluster) (*stretchClusterReconciliationState, error) {
288-
logger := log.FromContext(ctx)
289+
logger := controllerlog.FromContext(ctx)
289290
logger.V(log.DebugLevel).Info("fetchInitialState")
290291

291292
sccluster := lifecycle.NewStretchClusterWithPools(sc, r.Manager.GetClusterNames())
@@ -343,7 +344,7 @@ func bootstrapSecretName(sc *redpandav1alpha2.StretchCluster) string {
343344

344345
func (r *MulticlusterReconciler) syncBootstrapUser(ctx context.Context, state *stretchClusterReconciliationState, _ cluster.Cluster) (_ ctrl.Result, err error) {
345346
ctx, span := trace.Start(ctx, "syncBootstrapUser")
346-
logger := log.FromContext(ctx)
347+
logger := controllerlog.FromContext(ctx)
347348

348349
defer func() {
349350
if err != nil {
@@ -454,7 +455,7 @@ func (r *MulticlusterReconciler) syncBootstrapUser(ctx context.Context, state *s
454455

455456
func (r *MulticlusterReconciler) reconcileResources(ctx context.Context, state *stretchClusterReconciliationState, cluster cluster.Cluster) (_ ctrl.Result, err error) {
456457
ctx, span := trace.Start(ctx, "reconcileResources")
457-
logger := log.FromContext(ctx)
458+
logger := controllerlog.FromContext(ctx)
458459

459460
defer func() {
460461
if err != nil {
@@ -477,7 +478,7 @@ func (r *MulticlusterReconciler) reconcileResources(ctx context.Context, state *
477478

478479
func (r *MulticlusterReconciler) reconcilePools(ctx context.Context, state *stretchClusterReconciliationState, cluster cluster.Cluster) (_ ctrl.Result, err error) {
479480
ctx, span := trace.Start(ctx, "reconcilePools")
480-
logger := log.FromContext(ctx)
481+
logger := controllerlog.FromContext(ctx)
481482

482483
defer func() {
483484
if err != nil {
@@ -542,7 +543,7 @@ func (r *MulticlusterReconciler) initAdminClient(ctx context.Context, state *str
542543
return ctrl.Result{}, nil
543544
}
544545

545-
logger := log.FromContext(ctx)
546+
logger := controllerlog.FromContext(ctx)
546547
adminAPIEndpoints := r.LifecycleClient.GetAdminAPIEndpoints(state.cluster)
547548
if len(adminAPIEndpoints) == 0 {
548549
return ctrl.Result{}, fmt.Errorf("no admin API endpoints found for cluster %s", state.cluster.Name)
@@ -561,7 +562,7 @@ func (r *MulticlusterReconciler) reconcileDecommission(ctx context.Context, stat
561562
var health rpadmin.ClusterHealthOverview
562563

563564
ctx, span := trace.Start(ctx, "reconcileDecommission")
564-
logger := log.FromContext(ctx)
565+
logger := controllerlog.FromContext(ctx)
565566

566567
defer func() {
567568
if err != nil {
@@ -683,7 +684,7 @@ func (r *MulticlusterReconciler) reconcileLicense(ctx context.Context, state *st
683684
var license *redpandav1alpha2.RedpandaLicenseStatus
684685

685686
ctx, span := trace.Start(ctx, "reconcileLicense")
686-
logger := log.FromContext(ctx)
687+
logger := controllerlog.FromContext(ctx)
687688

688689
defer func() {
689690
if err != nil {
@@ -775,7 +776,7 @@ func (r *MulticlusterReconciler) reconcileLicense(ctx context.Context, state *st
775776

776777
func (r *MulticlusterReconciler) reconcileClusterConfig(ctx context.Context, state *stretchClusterReconciliationState, cluster cluster.Cluster) (_ reconcile.Result, err error) {
777778
ctx, span := trace.Start(ctx, "reconcileClusterConfig")
778-
logger := log.FromContext(ctx)
779+
logger := controllerlog.FromContext(ctx)
779780

780781
defer func() {
781782
if err != nil {
@@ -816,7 +817,7 @@ func (r *MulticlusterReconciler) reconcileClusterConfig(ctx context.Context, sta
816817
func (r *MulticlusterReconciler) syncStatus(ctx context.Context, cluster cluster.Cluster, state *stretchClusterReconciliationState, result ctrl.Result, err error) (ctrl.Result, error) {
817818
original := state.cluster.StretchCluster.Status.DeepCopy()
818819
if r.LifecycleClient.SetClusterStatus(state.cluster, state.status) {
819-
log.FromContext(ctx).V(log.TraceLevel).Info("setting cluster status from diff", "original", original, "new", state.cluster.StretchCluster.Status)
820+
controllerlog.FromContext(ctx).V(log.TraceLevel).Info("setting cluster status from diff", "original", original, "new", state.cluster.StretchCluster.Status)
820821
syncErr := cluster.GetClient().Status().Update(ctx, state.cluster.StretchCluster)
821822
err = errors.Join(syncErr, err)
822823
}
@@ -846,7 +847,7 @@ func (r *MulticlusterReconciler) fetchClusterHealth(ctx context.Context, admin *
846847
// decommissioning the broker with the last pod ordinal and then patching the statefulset with
847848
// a single less replica.
848849
func (r *MulticlusterReconciler) scaleDown(ctx context.Context, admin *rpadmin.AdminAPI, cluster *lifecycle.StretchClusterWithPools, set *lifecycle.ScaleDownSet, brokerMap map[string]int) (bool, error) {
849-
logger := log.FromContext(ctx).WithName(fmt.Sprintf("MulticlusterReconciler[%T].scaleDown", *cluster))
850+
logger := controllerlog.FromContext(ctx).WithName(fmt.Sprintf("MulticlusterReconciler[%T].scaleDown", *cluster))
850851
logger.V(log.TraceLevel).Info("starting StatefulSet scale down", "StatefulSet", client.ObjectKeyFromObject(set.StatefulSet).String())
851852

852853
brokerID, ok := brokerMap[set.LastPod.GetName()]
@@ -879,7 +880,7 @@ func (r *MulticlusterReconciler) scaleDown(ctx context.Context, admin *rpadmin.A
879880

880881
// decommissionBroker handles decommissioning a broker and waiting until it has finished decommissioning
881882
func (r *MulticlusterReconciler) decommissionBroker(ctx context.Context, admin *rpadmin.AdminAPI, cluster *lifecycle.StretchClusterWithPools, set *lifecycle.ScaleDownSet, brokerID int) (bool, error) {
882-
logger := log.FromContext(ctx).WithName(fmt.Sprintf("MulticlusterReconciler[%T].decommissionBroker", *cluster))
883+
logger := controllerlog.FromContext(ctx).WithName(fmt.Sprintf("MulticlusterReconciler[%T].decommissionBroker", *cluster))
883884
logger.V(log.TraceLevel).Info("checking decommissioning status for pod", "Pod", client.ObjectKeyFromObject(set.LastPod).String())
884885

885886
decommissionStatus, err := admin.DecommissionBrokerStatus(ctx, brokerID)

operator/internal/controller/redpanda/multicluster_controller_test.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ import (
1818
"time"
1919

2020
"github.com/redpanda-data/common-go/kube"
21-
"github.com/redpanda-data/common-go/otelutil/log"
2221
"github.com/redpanda-data/common-go/otelutil/trace"
2322
"github.com/stretchr/testify/suite"
2423
corev1 "k8s.io/api/core/v1"
@@ -28,6 +27,8 @@ import (
2827
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2928
"k8s.io/apimachinery/pkg/types"
3029
"sigs.k8s.io/controller-runtime/pkg/client"
30+
//"github.com/redpanda-data/common-go/otelutil/log" bring back after https://github.com/redpanda-data/common-go/pull/160
31+
"sigs.k8s.io/controller-runtime/pkg/log"
3132

3233
redpandav1alpha2 "github.com/redpanda-data/redpanda-operator/operator/api/redpanda/v1alpha2"
3334
crds "github.com/redpanda-data/redpanda-operator/operator/config/crd/bases"

0 commit comments

Comments
 (0)