
Commit 7607623

operator: Replace zap logger with global ContextFree logger
The multicluster command constructed its own ctrlzap.New() logger and passed it to RaftConfiguration.Logger, which forwarded it to the controller-runtime manager via ctrl.Options.Logger. Because this logger had a non-nil sink, controller-runtime skipped the global logger fallback:

    if options.Logger.GetSink() == nil {
        options.Logger = log.Log // <-- never reached
    }

The manager then used this raw zap sink to build per-reconciliation loggers in reconcileHandler:

    log := c.LogConstructor(&req)
    ctx = logf.IntoContext(ctx, log)

When reconciler code called otelutil/log.FromContext(ctx), which appends "ctx", ctx to keysAndValues, the zap sink serialized context.Context as the full wrapper chain:

    "ctx":"context.Background.WithCancel.WithCancel.WithValue(logr...)"

The global logger set by main.go via log.SetGlobals(logger.NewLogger()) wraps non-OTEL sinks with ContextFree, which strips context.Context values from keysAndValues before they reach the underlying sink. But the per-command zap logger bypassed SetGlobals entirely, so it never got the ContextFree wrapper.

Fix by replacing the standalone ctrlzap.New() with log.FromContext(ctx), which retrieves the global logger already wrapped with ContextFree. Remove the now-unused --log-level flag and parseLogLevel helper, as log level is already configured globally via the root command's logOptions.
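As an aside on the symptom quoted above: the "context.Background.WithCancel..." string is just what a derived context prints as, so any sink that stringifies values it does not recognize will emit the entire wrapper chain. A stdlib-only sketch of that behavior (the key type and value below are made up for illustration):

    package main

    import (
    	"context"
    	"fmt"
    )

    // ctxKey is a hypothetical context key type, used only for this demo.
    type ctxKey struct{}

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()
    	ctx = context.WithValue(ctx, ctxKey{}, "per-reconcile logger")

    	// Printing the context walks its String() chain, which is roughly what
    	// a raw zap sink ends up recording when handed a context.Context value.
    	fmt.Println(ctx)
    	// Prints something like:
    	// context.Background.WithCancel.WithValue(type main.ctxKey, val per-reconcile logger)
    }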
1 parent 51f5a3e commit 7607623

File tree

  operator/cmd/multicluster/multicluster.go (+3 -32)
  pkg/multicluster/raft.go (+10 -8)

2 files changed: +13 -40 lines

operator/cmd/multicluster/multicluster.go

Lines changed: 3 additions & 32 deletions
@@ -18,13 +18,12 @@ import (
 
 	"github.com/cockroachdb/errors"
 	"github.com/redpanda-data/common-go/license"
+	"github.com/redpanda-data/common-go/otelutil/log"
 	"github.com/spf13/cobra"
-	"go.uber.org/zap/zapcore"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
-	ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
 	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -60,7 +59,6 @@ type MulticlusterOptions struct {
 	KubernetesAPIServer string
 	KubeconfigNamespace string
 	KubeconfigName string
-	LogLevel string
 	BaseImage string
 	BaseTag string
 	HealthProbeBindAddress string
@@ -141,23 +139,6 @@ func peerFromFlag(value string) (RaftCluster, error) {
 	}, nil
 }
 
-func parseLogLevel(level string) (zapcore.Level, error) {
-	switch level {
-	case "debug":
-		return zapcore.DebugLevel, nil
-	case "info":
-		return zapcore.InfoLevel, nil
-	case "warn", "warning":
-		return zapcore.WarnLevel, nil
-	case "error":
-		return zapcore.ErrorLevel, nil
-	case "fatal":
-		return zapcore.FatalLevel, nil
-	default:
-		return zapcore.InfoLevel, fmt.Errorf("unknown log level: %s, defaulting to info", level)
-	}
-}
-
 func (o *MulticlusterOptions) BindFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&o.Name, "name", "", "raft node name")
 	cmd.Flags().StringVar(&o.Address, "raft-address", "", "raft node address")
@@ -170,7 +151,6 @@ func (o *MulticlusterOptions) BindFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&o.KubernetesAPIServer, "kubernetes-api-address", "", "raft kubernetes api server address")
 	cmd.Flags().StringVar(&o.KubeconfigNamespace, "kubeconfig-namespace", "default", "raft kubeconfig namespace")
 	cmd.Flags().StringVar(&o.KubeconfigName, "kubeconfig-name", "multicluster-kubeconfig", "raft kubeconfig name")
-	cmd.Flags().StringVar(&o.LogLevel, "log-level", "info", "log level")
 	cmd.Flags().StringVar(&o.WebhookCertPath, "webhook-cert-path", "", "path on disk to the webhook certificate, implies enabling webhooks")
 	cmd.Flags().StringVar(&o.WebhookKeyPath, "webhook-key-path", "", "path on disk to the webhook certificate key, implies enabling webhooks")
 	cmd.Flags().StringVar(&o.MetricsBindAddress, "metrics-bind-address", "", "address for binding metrics server")
@@ -207,7 +187,7 @@ func Run(
 	ctx context.Context,
 	opts *MulticlusterOptions,
 ) error {
-	setupLog := ctrl.LoggerFrom(ctx).WithName("setup")
+	setupLog := log.FromContext(ctx).WithName("setup")
 
 	if err := opts.validate(); err != nil {
 		return err
@@ -233,21 +213,12 @@ func Run(
 		return err
 	}
 
-	// Parse and configure log level
-	logLevel, err := parseLogLevel(opts.LogLevel)
-	if err != nil {
-		setupLog.Error(err, "failed to parse log level, using default")
-	}
-
-	// Create a logger with the specified log level
-	raftLogger := ctrlzap.New(ctrlzap.Level(logLevel)).WithName("raft")
-
 	config := multicluster.RaftConfiguration{
 		Name: opts.Name,
 		Address: opts.Address,
 		ElectionTimeout: opts.ElectionTimeout,
 		HeartbeatInterval: opts.HeartbeatInterval,
-		Logger: raftLogger,
+		Logger: log.FromContext(ctx).WithName(opts.Name),
 		RestConfig: k8sConfig,
 		Meta: []byte("node-name=" + opts.Name),
 		Scheme: controller.MulticlusterScheme,
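
For context on what the replacement logger provides: the global logger wraps its sink so that context.Context values never reach the encoder. The sketch below shows the general shape of such a wrapper using the logr API; it is illustrative only, and the package and type names (contextfree, contextFreeSink, Wrap) are hypothetical, not the otelutil implementation.

    // Package contextfree sketches a logr sink decorator in the spirit of the
    // ContextFree wrapper described in the commit message. Hypothetical code.
    package contextfree

    import (
    	"context"

    	"github.com/go-logr/logr"
    )

    // contextFreeSink delegates to an underlying sink after removing any
    // key/value pair whose value is a context.Context.
    type contextFreeSink struct {
    	logr.LogSink
    }

    // strip drops context.Context values from an alternating key/value slice.
    func strip(kvs []any) []any {
    	out := make([]any, 0, len(kvs))
    	for i := 0; i+1 < len(kvs); i += 2 {
    		if _, isCtx := kvs[i+1].(context.Context); isCtx {
    			continue
    		}
    		out = append(out, kvs[i], kvs[i+1])
    	}
    	return out
    }

    func (s contextFreeSink) Info(level int, msg string, kvs ...any) {
    	s.LogSink.Info(level, msg, strip(kvs)...)
    }

    func (s contextFreeSink) Error(err error, msg string, kvs ...any) {
    	s.LogSink.Error(err, msg, strip(kvs)...)
    }

    // WithName and WithValues re-wrap so the filter survives derivation.
    func (s contextFreeSink) WithName(name string) logr.LogSink {
    	return contextFreeSink{LogSink: s.LogSink.WithName(name)}
    }

    func (s contextFreeSink) WithValues(kvs ...any) logr.LogSink {
    	return contextFreeSink{LogSink: s.LogSink.WithValues(strip(kvs)...)}
    }

    // Wrap returns a logger whose sink filters out context values.
    func Wrap(l logr.Logger) logr.Logger {
    	return logr.New(contextFreeSink{LogSink: l.GetSink()})
    }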

pkg/multicluster/raft.go

Lines changed: 10 additions & 8 deletions
@@ -256,7 +256,7 @@ func NewRaftRuntimeManager(config RaftConfiguration) (Manager, error) {
 	}
 	c, err := cluster.New(kubeConfig, func(o *cluster.Options) {
 		o.Scheme = config.Scheme
-		o.Logger = config.Logger
+		o.Logger = config.Logger.WithName("clusterProvider")
 	})
 	if err != nil {
 		return nil, err
@@ -268,7 +268,7 @@ func NewRaftRuntimeManager(config RaftConfiguration) (Manager, error) {
 	if peer.Kubeconfig != nil {
 		c, err := cluster.New(peer.Kubeconfig, func(o *cluster.Options) {
 			o.Scheme = config.Scheme
-			o.Logger = config.Logger
+			o.Logger = config.Logger.WithName("clusterProvider")
 		})
 		if err != nil {
 			return nil, err
@@ -288,7 +288,7 @@ func NewRaftRuntimeManager(config RaftConfiguration) (Manager, error) {
 		ElectionTimeout: config.ElectionTimeout,
 		HeartbeatInterval: config.HeartbeatInterval,
 		GRPCMaxBackoff: config.GRPCMaxBackoff,
-		Logger: &raftLogr{logger: config.Logger},
+		Logger: &raftLogr{logger: config.Logger.WithName("raft")},
 	}
 
 	if config.Bootstrap {
@@ -403,7 +403,7 @@ func NewRaftRuntimeManager(config RaftConfiguration) (Manager, error) {
 		config.Logger.Info("initializing cluster for peer", "peer", peer.Name)
 		c, err := cluster.New(kubeConfig, func(o *cluster.Options) {
 			o.Scheme = config.Scheme
-			o.Logger = config.Logger
+			o.Logger = config.Logger.WithName("clusterProvider")
 		})
 		if err != nil {
 			config.Logger.Error(err, "initializing cluster for peer", "peer", peer.Name)
@@ -424,7 +424,7 @@ func NewRaftRuntimeManager(config RaftConfiguration) (Manager, error) {
 		}
 	}
 
-	manager, err := newManager(config.Name, config.LocalLeaderElection != nil, config.Logger, restConfig, clusterProvider, broadcaster, func() string {
+	manager, err := newManager(config.Name, config.LocalLeaderElection != nil, config.Logger.WithName("manager"), restConfig, clusterProvider, broadcaster, func() string {
 		return idsToNames[currentLeader.Load()]
 	}, func() map[string]cluster.Cluster {
 		clusters := map[string]cluster.Cluster{}
@@ -492,7 +492,9 @@ func (m *raftManager) Health(req *http.Request) error {
 }
 
 func newManager(localClusterName string, localLeaderElection bool, logger logr.Logger, config *rest.Config, provider multicluster.Provider, broadcaster *restartBroadcaster, getLeader func() string, getClusters func() map[string]cluster.Cluster, addOrReplaceCluster func(ctx context.Context, clusterName string, cl cluster.Cluster) error, manager *leaderelection.LeaderManager, opts manager.Options) (Manager, error) {
-	mgr, err := mcmanager.New(config, provider, opts)
+	optsCopy := opts
+	optsCopy.Logger = opts.Logger.WithName("manager")
+	mgr, err := mcmanager.New(config, provider, optsCopy)
 	if err != nil {
 		return nil, err
 	}
@@ -504,11 +506,11 @@ func newManager(localClusterName string, localLeaderElection bool, logger logr.L
 		return nil
 	})
 
-	runnable := &leaderRunnable{manager: manager, logger: logger, broadcaster: broadcaster, getClusters: getClusters, needsLocalLeaderElection: localLeaderElection}
+	runnable := &leaderRunnable{manager: manager, logger: logger.WithName("leader-runnable"), broadcaster: broadcaster, getClusters: getClusters, needsLocalLeaderElection: localLeaderElection}
 	if err := mgr.Add(runnable); err != nil {
 		return nil, err
 	}
-	return &raftManager{Manager: mgr, manager: manager, runnable: runnable, logger: logger, localClusterName: localClusterName, getLeader: getLeader, getClusters: getClusters, addOrReplaceCluster: addOrReplaceCluster}, nil
+	return &raftManager{Manager: mgr, manager: manager, runnable: runnable, logger: logger.WithName("raft-manager"), localClusterName: localClusterName, getLeader: getLeader, getClusters: getClusters, addOrReplaceCluster: addOrReplaceCluster}, nil
 }
 
 func (m *raftManager) Add(r mcmanager.Runnable) error {
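
The raft.go changes are only about attribution: each consumer of the shared logger now derives a named child with WithName, so log lines identify their component. A small illustration of how logr name chaining composes, using go-logr's funcr helper (the node name and messages are made up; the output format is funcr's, not the operator's):

    package main

    import (
    	"fmt"

    	"github.com/go-logr/logr/funcr"
    )

    func main() {
    	// funcr passes the accumulated logger name as the prefix argument.
    	root := funcr.New(func(prefix, args string) {
    		fmt.Println(prefix, args)
    	}, funcr.Options{})

    	// Stand-in for log.FromContext(ctx).WithName(opts.Name) in the command.
    	nodeLogger := root.WithName("node-0")

    	// raft.go then adds a component name per consumer of the shared logger.
    	nodeLogger.WithName("clusterProvider").Info("cluster created", "peer", "node-1")
    	nodeLogger.WithName("raft").Info("heartbeat", "interval", "500ms")

    	// Example output (funcr joins names with "/"):
    	// node-0/clusterProvider "level"=0 "msg"="cluster created" "peer"="node-1"
    	// node-0/raft "level"=0 "msg"="heartbeat" "interval"="500ms"
    }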

0 commit comments
