2 changes: 1 addition & 1 deletion operator/chart/values.yaml
@@ -63,7 +63,7 @@ config:
imagePullSecrets: []

# -- Log level
# Valid values (from least to most verbose) are: `warn`, `info`, `debug`, and `trace`.
# Valid values (from least to most verbose) are: `warn`, `info`, `debug`.
logLevel: "info"

# -- Role-based Access Control (RBAC) configuration for the Redpanda Operator.
18 changes: 12 additions & 6 deletions operator/cmd/multicluster/multicluster.go
@@ -209,6 +209,18 @@ func Run(
) error {
setupLog := ctrl.LoggerFrom(ctx).WithName("setup")

// Parse and configure log level
logLevel, err := parseLogLevel(opts.LogLevel)
if err != nil {
setupLog.Error(err, "failed to parse log level, using default")
}

rootLogger := ctrlzap.New(ctrlzap.Level(logLevel)).WithName("multicluster")
ctrl.SetLogger(rootLogger)

// Reinitialize logger with correct log level
setupLog = ctrl.LoggerFrom(ctx).WithName("setup")

if err := opts.validate(); err != nil {
return err
}
@@ -233,12 +245,6 @@ func Run(
return err
}

// Parse and configure log level
logLevel, err := parseLogLevel(opts.LogLevel)
if err != nil {
setupLog.Error(err, "failed to parse log level, using default")
}

// Create a logger with the specified log level
raftLogger := ctrlzap.New(ctrlzap.Level(logLevel)).WithName("raft")

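Note: the hunks above move log-level parsing ahead of root-logger construction so the `multicluster` logger is built at the requested level before anything derives from it. The body of `parseLogLevel` is not part of this diff, so the snippet below is only an assumed, illustrative mapping of the now-documented values (`warn`, `info`, `debug`) onto zapcore levels, reusing the same controller-runtime zap helpers (`ctrlzap.New`, `ctrlzap.Level`, `ctrl.SetLogger`) that appear in the hunk:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// parseLogLevel is a hypothetical stand-in for the helper called in the diff;
// it maps the documented values (warn, info, debug) onto zapcore levels.
func parseLogLevel(s string) (zapcore.Level, error) {
	switch s {
	case "warn":
		return zapcore.WarnLevel, nil
	case "info":
		return zapcore.InfoLevel, nil
	case "debug":
		return zapcore.DebugLevel, nil
	default:
		return zapcore.InfoLevel, fmt.Errorf("unknown log level %q", s)
	}
}

func main() {
	level, err := parseLogLevel("debug")
	if err != nil {
		// Fall back to the default rather than failing startup, mirroring the
		// "failed to parse log level, using default" handling above.
		level = zapcore.InfoLevel
	}
	// Build the root logger at the requested level and register it globally
	// before any named loggers are derived from it.
	ctrl.SetLogger(ctrlzap.New(ctrlzap.Level(level)).WithName("multicluster"))
}
```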
13 changes: 6 additions & 7 deletions operator/cmd/run/run.go
@@ -21,7 +21,6 @@ import (

"github.com/cockroachdb/errors"
"github.com/redpanda-data/common-go/kube"
"github.com/redpanda-data/common-go/otelutil/log"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -560,7 +559,7 @@ func Run(
// runnables for the custom resources in the vectorized group, AKA the V1
// operator.
func setupVectorizedControllers(ctx context.Context, mgr ctrl.Manager, factory internalclient.ClientFactory, cloudExpander *pkgsecrets.CloudExpander, opts *RunOptions) error {
log.Info(ctx, "Starting Vectorized (V1) Controllers")
//log.Info(ctx, "Starting Vectorized (V1) Controllers")

configurator := resources.ConfiguratorSettings{
ConfiguratorBaseImage: opts.configuratorBaseImage,
@@ -589,20 +588,20 @@ func setupVectorizedControllers(ctx context.Context, mgr ctrl.Manager, factory i
CloudSecretsExpander: cloudExpander,
Timeout: opts.rpClientTimeout,
}).WithClusterDomain(opts.clusterDomain).WithConfiguratorSettings(configurator).SetupWithManager(mgr); err != nil {
log.Error(ctx, err, "Unable to create controller", "controller", "Cluster")
//log.Error(ctx, err, "Unable to create controller", "controller", "Cluster")
return err
}

if err := vectorizedcontrollers.NewClusterMetricsController(mgr.GetClient()).SetupWithManager(mgr); err != nil {
log.Error(ctx, err, "Unable to create controller", "controller", "ClustersMetrics")
//log.Error(ctx, err, "Unable to create controller", "controller", "ClustersMetrics")
return err
}

// Setup webhooks
if opts.webhookEnabled {
log.Info(ctx, "Setup webhook")
//log.Info(ctx, "Setup webhook")
if err := (&vectorizedv1alpha1.Cluster{}).SetupWebhookWithManager(mgr); err != nil {
log.Error(ctx, err, "Unable to create webhook", "webhook", "RedpandaCluster")
//log.Error(ctx, err, "Unable to create webhook", "webhook", "RedpandaCluster")
return err
}
}
@@ -626,7 +625,7 @@ func setupVectorizedControllers(ctx context.Context, mgr ctrl.Manager, factory i
)

if err := d.SetupWithManager(mgr); err != nil {
log.Error(ctx, err, "unable to create controller", "controller", "StatefulSetDecommissioner")
//log.Error(ctx, err, "unable to create controller", "controller", "StatefulSetDecommissioner")
return err
}
}
@@ -212,7 +212,7 @@ func (r *MulticlusterReconciler) Reconcile(ctx context.Context, req mcreconcile.
// now we memoize the admin client onto the state
r.initAdminClient,
// now we ensure that we reconcile all of our decommissioning nodes
r.reconcileDecommission,
r.reconcileNodePoolDecommission,
// now reconcile cluster configuration
r.reconcileClusterConfig,
// finally reconcile all of our license information
@@ -604,10 +604,10 @@ func (r *MulticlusterReconciler) initAdminClient(ctx context.Context, state *str
return ctrl.Result{}, nil
}

func (r *MulticlusterReconciler) reconcileDecommission(ctx context.Context, state *stretchClusterReconciliationState, cluster cluster.Cluster) (_ reconcile.Result, err error) {
func (r *MulticlusterReconciler) reconcileNodePoolDecommission(ctx context.Context, state *stretchClusterReconciliationState, cluster cluster.Cluster) (_ reconcile.Result, err error) {
var health rpadmin.ClusterHealthOverview

ctx, span := trace.Start(ctx, "reconcileDecommission")
ctx, span := trace.Start(ctx, "reconcileNodePoolDecommission")
logger := log.FromContext(ctx)

defer func() {
@@ -659,7 +659,9 @@ func (r *MulticlusterReconciler) reconcileDecommission(ctx context.Context, stat

// next scale down any over-provisioned pools, patching them to use the new spec
// and decommissioning any nodes as needed
for _, set := range state.pools.ToScaleDown() {
sets := state.pools.ToScaleDown()
logger.V(log.DebugLevel).Info("pool to scale down", "pool", sets)
for _, set := range sets {
requeue, err := r.scaleDown(ctx, state.admin, state.cluster, set, brokerMap)
result := ctrl.Result{}
if requeue {
@@ -671,15 +673,18 @@ func (r *MulticlusterReconciler) reconcileDecommission(ctx context.Context, stat

// at this point any set that needs to be deleted should have 0 replicas
// so we can attempt to delete them all in one pass
for _, set := range state.pools.ToDelete() {
logger.V(log.TraceLevel).Info("deleting StatefulSet", "StatefulSet", client.ObjectKeyFromObject(set).String())
toDelete := state.pools.ToDelete()
logger.V(log.DebugLevel).Info("pool to delete", "pool", toDelete)
for _, set := range toDelete {
logger.V(log.DebugLevel).Info("deleting StatefulSet", "StatefulSet", client.ObjectKeyFromObject(set).String())
if err := cluster.GetClient().Delete(ctx, set.StatefulSet); err != nil {
return ctrl.Result{}, errors.Wrap(err, "deleting statefulset")
}
}

// finally, we make sure we roll every pod that is not in-sync with its statefulset
rollSet := state.pools.PodsToRoll()
logger.V(log.DebugLevel).Info("pods to rool", "rollSet", rollSet)
rolled := false
for _, pod := range rollSet {
shouldRoll, continueExecution := false, false
@@ -702,7 +707,7 @@ func (r *MulticlusterReconciler) reconcileDecommission(ctx context.Context, stat

if shouldRoll {
rolled = true
logger.V(log.TraceLevel).Info("rolling pod", "Pod", client.ObjectKeyFromObject(pod).String())
logger.V(log.DebugLevel).Info("rolling pod", "Pod", client.ObjectKeyFromObject(pod).String())

if err := cluster.GetClient().Delete(ctx, pod.Pod); err != nil {
return ctrl.Result{}, errors.Wrap(err, "deleting pod")
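The hunks above also lower several per-pool log statements from `log.TraceLevel` to `log.DebugLevel`, matching the removal of `trace` from the documented levels in `values.yaml`. The constants come from the repo's own log package and are not shown in this diff; as a rough illustration using plain controller-runtime/logr verbosity values (an assumption, not the repo's actual constants), a zap `debug` root logger emits `V(1)` messages but drops higher verbosities:

```go
package main

import (
	"context"

	"go.uber.org/zap/zapcore"
	ctrl "sigs.k8s.io/controller-runtime"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Root logger at zap debug level: with controller-runtime's zap adapter,
	// logr V(0) maps to info and V(1) to debug, so V(2) and above are dropped.
	ctrl.SetLogger(ctrlzap.New(ctrlzap.Level(zapcore.DebugLevel)))

	// Carry the logger through a context, the way reconcilers receive it.
	ctx := logf.IntoContext(context.Background(), ctrl.Log.WithName("reconcile"))
	logger := logf.FromContext(ctx)

	logger.V(1).Info("pools to delete", "pool", []string{"pool-a"}) // emitted at debug
	logger.V(2).Info("rolling pod", "Pod", "redpanda-0")            // suppressed at debug
}
```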