Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions internal/auth/rbac.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,16 @@ func ReconcileRBAC(k8sClient client.Client, rbacName, saNamespace, saName string
}

// ReconcileMetricsAuthRBAC reconciles the ClusterRoleBinding that binds system:auth-delegator to the service account
func ReconcileMetricsAuthRBAC(k8sClient client.Client, commonName, saNamespace, saName string) error {
name := fmt.Sprintf("%s-metrics-auth", commonName)
func ReconcileMetricsAuthRBAC(k8sClient client.Client, name, saNamespace, saName string) error {
desiredMetricsAuthRoleBinding := NewMetricsAuthClusterRoleBinding(name, saNamespace, saName)
return reconcile.ClusterRoleBinding(k8sClient, desiredMetricsAuthRoleBinding.Name, func() *rbacv1.ClusterRoleBinding { return desiredMetricsAuthRoleBinding })
}

// DeleteMetricsAuthRBAC deletes the ClusterRoleBinding for metrics authentication.
// NotFound errors are swallowed by reconcile.DeleteClusterRoleBinding, so this is
// idempotent and safe to call on every deletion reconcile.
func DeleteMetricsAuthRBAC(k8sClient client.Client, name string) error {
	return reconcile.DeleteClusterRoleBinding(k8sClient, name)
}

// NewMetricsAuthClusterRoleBinding binds the system:auth-delegator ClusterRole to the given service account.
func NewMetricsAuthClusterRoleBinding(name, saNamespace, saName string) *rbacv1.ClusterRoleBinding {
return runtime.NewClusterRoleBinding(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,9 @@ import (
internalcontext "github.com/openshift/cluster-logging-operator/internal/api/context"
internalinit "github.com/openshift/cluster-logging-operator/internal/api/initialize"
internalobs "github.com/openshift/cluster-logging-operator/internal/api/observability"
"github.com/openshift/cluster-logging-operator/internal/auth"
"github.com/openshift/cluster-logging-operator/internal/collector"
"github.com/openshift/cluster-logging-operator/internal/factory"
"github.com/openshift/cluster-logging-operator/internal/tls"
"github.com/openshift/cluster-logging-operator/internal/utils"
validations "github.com/openshift/cluster-logging-operator/internal/validations/observability"
Expand Down Expand Up @@ -75,7 +77,11 @@ func (r *ClusterLogForwarderReconciler) Reconcile(_ context.Context, req ctrl.Re
}

if cxt.Forwarder.DeletionTimestamp != nil {
// Resource is being deleted, no further reconciliation
// Resource is being deleted, clean up cluster-scoped resources
if err := cleanupClusterScopedResources(cxt); err != nil {
log.V(3).Error(err, "Failed to cleanup cluster-scoped resources")
return defaultRequeue, err
}
return defaultRequeue, nil
}

Expand Down Expand Up @@ -139,6 +145,13 @@ func RemoveStaleWorkload(k8Client client.Client, forwarder *obsv1.ClusterLogForw
return remove(k8Client, forwarder.Namespace, forwarder.Name)
}

// cleanupClusterScopedResources removes cluster-scoped resources associated with
// the forwarder that cannot be garbage collected through owner references.
// Currently this is only the metrics-auth ClusterRoleBinding.
//
// The parameter is named forwarderContext (not "context") to avoid shadowing the
// standard library context package used elsewhere in this file.
func cleanupClusterScopedResources(forwarderContext internalcontext.ForwarderContext) error {
	resourceNames := factory.ResourceNames(*forwarderContext.Forwarder)
	// NotFound errors are ignored by DeleteMetricsAuthRBAC, so repeated
	// reconciles of an already-cleaned-up forwarder are harmless.
	return auth.DeleteMetricsAuthRBAC(forwarderContext.Client, resourceNames.MetricsAuthClusterRoleBinding)
}

func MapSecrets(k8Client client.Client, namespace string, inputs internalobs.Inputs, outputs internalobs.Outputs) (secretMap map[string]*corev1.Secret, err error) {
names := set.New(inputs.SecretNames()...)
names.Insert(outputs.SecretNames()...)
Expand Down
6 changes: 6 additions & 0 deletions internal/controller/observability/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,12 @@ func ReconcileCollector(context internalcontext.ForwarderContext, pollInterval,
return
}

// Add ClusterRoleBinding to allow the collector to validate bearer tokens for metrics endpoint
if err = auth.ReconcileMetricsAuthRBAC(context.Client, resourceNames.MetricsAuthClusterRoleBinding, context.Forwarder.Namespace, context.Forwarder.Spec.ServiceAccount.Name); err != nil {
log.V(3).Error(err, "auth.ReconcileMetricsAuthRBAC")
return
}
Comment thread
qodo-code-review[bot] marked this conversation as resolved.
Comment thread
qodo-code-review[bot] marked this conversation as resolved.

// TODO: This can be the same per NS but what is the ownerref? Multiple CLFs will clash
if err = collector.ReconcileTrustedCABundleConfigMap(context.Client, context.Forwarder.Namespace, resourceNames.CaTrustBundle, ownerRef); err != nil {
log.Error(err, "collector.ReconcileTrustedCABundleConfigMap")
Expand Down
11 changes: 11 additions & 0 deletions internal/controller/observability/collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package observability_test

import (
"context"
"fmt"
"time"

obs "github.com/openshift/cluster-logging-operator/api/observability/v1"
Expand All @@ -23,6 +24,7 @@ import (
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
Expand Down Expand Up @@ -228,6 +230,15 @@ var _ = Describe("Reconciling the Collector", func() {
sm := &monitoringv1.ServiceMonitor{}
Expect(client.Get(context.TODO(), key, sm)).Should(Succeed(), "Exp. to create a ServiceMonitor for metrics")

// Verify the metrics auth ClusterRoleBinding exists and references system:auth-delegator
metricsAuthBinding := &rbacv1.ClusterRoleBinding{}
metricsAuthKey := types.NamespacedName{Name: fmt.Sprintf("cluster-logging-%s-%s-metrics-auth", namespaceName, clfName)}
Expect(client.Get(context.TODO(), metricsAuthKey, metricsAuthBinding)).Should(Succeed(), "Exp. to create a ClusterRoleBinding for metrics auth")
Expect(metricsAuthBinding.RoleRef.Name).To(Equal("system:auth-delegator"))
Expect(metricsAuthBinding.Subjects).To(HaveLen(1))
Expect(metricsAuthBinding.Subjects[0].Name).To(Equal(saName))
Expect(metricsAuthBinding.Subjects[0].Namespace).To(Equal(namespaceName))

},
Entry("when deployed as a DaemonSet", forwarder),
Entry("when deployed as a Deployment", receiverForwarder),
Expand Down
2 changes: 2 additions & 0 deletions internal/factory/resource_names.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ type ForwarderResourceNames struct {
SecretMetrics string
ConfigMap string
MetadataReaderClusterRoleBinding string
MetricsAuthClusterRoleBinding string
CaTrustBundle string
ServiceAccount string
InternalLogStoreSecret string
Expand All @@ -38,6 +39,7 @@ func ResourceNames(clf obsv1.ClusterLogForwarder) *ForwarderResourceNames {
SecretMetrics: resBaseName + "-metrics",
ConfigMap: resBaseName + "-config",
MetadataReaderClusterRoleBinding: fmt.Sprintf("cluster-logging-%s-%s-metadata-reader", clf.Namespace, resBaseName),
MetricsAuthClusterRoleBinding: fmt.Sprintf("cluster-logging-%s-%s-metrics-auth", clf.Namespace, resBaseName),
ForwarderName: clf.Name,
CaTrustBundle: resBaseName + "-trustbundle",
ServiceAccount: clf.Spec.ServiceAccount.Name,
Expand Down
29 changes: 24 additions & 5 deletions internal/generator/vector/api/sinks/prometheus_export.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,31 @@ import (
"github.com/openshift/cluster-logging-operator/internal/generator/vector/api/types/transport"
)

// PrometheusExporterAuthStrategy identifies how the prometheus exporter sink
// authorizes incoming scrape requests.
type PrometheusExporterAuthStrategy string

const (
	// PrometheusExporterAuthStrategySar authorizes scrape requests by checking the
	// caller's bearer token ("sar" = SubjectAccessReview-based authorization).
	PrometheusExporterAuthStrategySar PrometheusExporterAuthStrategy = "sar"
)

// PrometheusExporterAuth represents authentication configuration for the prometheus exporter
type PrometheusExporterAuth struct {
	// Strategy selects the authorization strategy (e.g. "sar").
	Strategy PrometheusExporterAuthStrategy `json:"strategy,omitempty" yaml:"strategy,omitempty" toml:"strategy,omitempty"`
	// Path is the non-resource URL path checked for access (e.g. "/metrics").
	Path string `json:"path,omitempty" yaml:"path,omitempty" toml:"path,omitempty"`
	// Resource names a namespaced/cluster resource to check instead of a path —
	// presumably mutually exclusive with Path; TODO confirm against vector docs.
	Resource string `json:"resource,omitempty" yaml:"resource,omitempty" toml:"resource,omitempty"`
	// Verb is the access-review verb to test (e.g. "get").
	Verb string `json:"verb,omitempty" yaml:"verb,omitempty" toml:"verb,omitempty"`
	// ResourceGroup is the API group of Resource, when Resource is used.
	ResourceGroup string `json:"resource_group,omitempty" yaml:"resource_group,omitempty" toml:"resource_group,omitempty"`
	// Namespace scopes the resource check, when Resource is namespaced.
	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty" toml:"namespace,omitempty"`
	// User optionally restricts access to a specific user — verify semantics
	// against the vector prometheus_exporter documentation.
	User string `json:"user,omitempty" yaml:"user,omitempty" toml:"user,omitempty"`
	// Groups optionally restricts access to members of the listed groups —
	// verify semantics against the vector prometheus_exporter documentation.
	Groups []string `json:"groups,omitempty" yaml:"groups,omitempty" toml:"groups,omitempty"`
}

// PrometheusExporter models the vector prometheus_exporter sink configuration,
// including optional TLS and request-authorization settings.
// (The stale pre-change field lines left over from the diff are removed here —
// duplicate field names would not compile.)
type PrometheusExporter struct {
	Type             types.SinkType          `json:"type,omitempty" yaml:"type,omitempty" toml:"type,omitempty"`
	Inputs           []string                `json:"inputs,omitempty" yaml:"inputs,omitempty" toml:"inputs,omitempty"`
	Address          string                  `json:"address,omitempty" yaml:"address,omitempty" toml:"address,omitempty"`
	DefaultNamespace string                  `json:"default_namespace,omitempty" yaml:"default_namespace,omitempty" toml:"default_namespace,omitempty"`
	TLS              *transport.TlsEnabled   `json:"tls,omitempty" yaml:"tls,omitempty" toml:"tls,omitempty"`
	// Auth, when non-nil, emits an [sinks.<name>.auth] table enabling bearer-token
	// authorization on the metrics endpoint.
	Auth *PrometheusExporterAuth `json:"auth,omitempty" yaml:"auth,omitempty" toml:"auth,omitempty"`
}

func (p PrometheusExporter) SinkType() types.SinkType {
Expand Down
5 changes: 5 additions & 0 deletions internal/generator/vector/conf/complex.toml
Original file line number Diff line number Diff line change
Expand Up @@ -849,3 +849,8 @@ min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
key_file = "/etc/collector/metrics/tls.key"
crt_file = "/etc/collector/metrics/tls.crt"

[sinks.prometheus_output.auth]
strategy = "sar"
path = "/metrics"
verb = "get"
5 changes: 5 additions & 0 deletions internal/generator/vector/conf/complex_http_receiver.toml
Original file line number Diff line number Diff line change
Expand Up @@ -904,3 +904,8 @@ min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
key_file = "/etc/collector/metrics/tls.key"
crt_file = "/etc/collector/metrics/tls.crt"

[sinks.prometheus_output.auth]
strategy = "sar"
path = "/metrics"
verb = "get"
7 changes: 6 additions & 1 deletion internal/generator/vector/conf/container.toml
Original file line number Diff line number Diff line change
Expand Up @@ -431,4 +431,9 @@ enabled = true
key_file = "/etc/collector/metrics/tls.key"
crt_file = "/etc/collector/metrics/tls.crt"
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"

[sinks.prometheus_output.auth]
strategy = "sar"
path = "/metrics"
verb = "get"
5 changes: 5 additions & 0 deletions internal/generator/vector/output/metrics/prometheus.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,11 @@ func PrometheusOutput(inputs []string, op utils.Options) (sink types.Sink) {
s.TLS.KeyFile = "/etc/collector/metrics/tls.key"
s.TLS.CRTFile = "/etc/collector/metrics/tls.crt"
tls.SetTLSProfile(&s.TLS.TLS, op)
s.Auth = &sinks.PrometheusExporterAuth{
Strategy: sinks.PrometheusExporterAuthStrategySar,
Path: "/metrics",
Verb: "get",
}
Comment thread
jcantrill marked this conversation as resolved.
}, inputs...)
}

Expand Down
5 changes: 3 additions & 2 deletions internal/metrics/logfilemetricexporter/metric_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ func Reconcile(lfmeInstance *loggingv1alpha1.LogFileMetricExporter,
CommonName: constants.LogfilesmetricexporterName,
ServiceAccount: constants.LogfilesmetricexporterName,
ServiceAccountTokenSecret: constants.LogfilesmetricexporterName + "-token",
MetadataReaderClusterRoleBinding: "cluster-logging-" + constants.LogfilesmetricexporterName + "-metadata-reader",
MetadataReaderClusterRoleBinding: fmt.Sprintf("cluster-logging-%s-%s-metadata-reader", lfmeInstance.Namespace, constants.LogfilesmetricexporterName),
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How does the rename impact already-installed clusters during an upgrade?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This functionality is new in this release, added by @Clee2691 so it will not matter

MetricsAuthClusterRoleBinding: fmt.Sprintf("cluster-logging-%s-%s-metrics-auth", lfmeInstance.Namespace, constants.LogfilesmetricexporterName),
}

if err := auth.ReconcileServiceAccount(requestClient, lfmeInstance.Namespace, resNames, owner); err != nil {
Expand All @@ -51,7 +52,7 @@ func Reconcile(lfmeInstance *loggingv1alpha1.LogFileMetricExporter,
return err
}

if err := auth.ReconcileMetricsAuthRBAC(requestClient, resNames.CommonName, lfmeInstance.Namespace, resNames.ServiceAccount); err != nil {
if err := auth.ReconcileMetricsAuthRBAC(requestClient, resNames.MetricsAuthClusterRoleBinding, lfmeInstance.Namespace, resNames.ServiceAccount); err != nil {
log.Error(err, "logfilemetricexporter.ReconcileMetricsRBAC")
return err
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,8 @@ var _ = Describe("Reconcile LogFileMetricExporter", func() {
// Metrics Auth RBAC
// Verify the metrics auth ClusterRoleBinding exists and references system:auth-delegator
metricsAuthBinding := &rbacv1.ClusterRoleBinding{}
Expect(reqClient.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s-metrics-auth", constants.LogfilesmetricexporterName)}, metricsAuthBinding)).Should(Succeed())
expectedMetricsAuthName := fmt.Sprintf("cluster-logging-%s-%s-metrics-auth", namespace.Name, constants.LogfilesmetricexporterName)
Expect(reqClient.Get(context.TODO(), types.NamespacedName{Name: expectedMetricsAuthName}, metricsAuthBinding)).Should(Succeed())
Expect(metricsAuthBinding.RoleRef.Name).To(Equal("system:auth-delegator"))
Expect(metricsAuthBinding.Subjects).To(HaveLen(1))
Expect(metricsAuthBinding.Subjects[0].Name).To(Equal(constants.LogfilesmetricexporterName))
Expand Down
9 changes: 7 additions & 2 deletions internal/reconcile/rbac.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,11 @@ func ClusterRoleBinding(k8sClient client.Client, name string, generator func() *

// DeleteClusterRoleBinding deletes the named ClusterRoleBinding.
// NotFound errors are ignored so the call is idempotent: callers may invoke it
// on every cleanup pass without checking whether the binding still exists.
func DeleteClusterRoleBinding(k8sClient client.Client, name string) error {
	object := runtime.NewClusterRoleBinding(name, rbacv1.RoleRef{})
	log.V(3).Info("Deleting ClusterRoleBinding", "name", name)
	err := k8sClient.Delete(context.TODO(), object)
	// Ignore NotFound errors - the resource was never created or is already deleted.
	if apierrors.IsNotFound(err) {
		return nil
	}
	return err
}
26 changes: 26 additions & 0 deletions internal/runtime/rbac.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,3 +68,29 @@ func NewClusterRoleBinding(name string, roleRef rbacv1.RoleRef, subjects ...rbac
Initialize(binding, "", name)
return binding
}

// NewNonResourceURLPolicyRule builds a PolicyRule that grants the given verbs
// on the given non-resource URLs (for example, "/metrics").
func NewNonResourceURLPolicyRule(nonResourceURLs, verbs []string) rbacv1.PolicyRule {
	rule := rbacv1.PolicyRule{
		Verbs:           verbs,
		NonResourceURLs: nonResourceURLs,
	}
	return rule
}

// NewClusterRoleRef builds a RoleRef pointing at the named ClusterRole in the
// rbac.authorization.k8s.io API group.
func NewClusterRoleRef(roleName string) rbacv1.RoleRef {
	ref := rbacv1.RoleRef{
		Kind:     "ClusterRole",
		Name:     roleName,
		APIGroup: rbacv1.GroupName,
	}
	return ref
}

// NewServiceAccountSubject builds a Subject referencing the service account
// with the given name in the given namespace.
func NewServiceAccountSubject(name, namespace string) rbacv1.Subject {
	subject := rbacv1.Subject{
		Namespace: namespace,
		Name:      name,
		Kind:      rbacv1.ServiceAccountKind,
	}
	return subject
}
49 changes: 46 additions & 3 deletions test/functional/metrics/metrics_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,15 @@ package metrics

import (
"fmt"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
obs "github.com/openshift/cluster-logging-operator/api/observability/v1"
"github.com/openshift/cluster-logging-operator/internal/constants"
"github.com/openshift/cluster-logging-operator/internal/runtime"
"github.com/openshift/cluster-logging-operator/test/framework/functional"
testruntime "github.com/openshift/cluster-logging-operator/test/runtime/observability"
rbacv1 "k8s.io/api/rbac/v1"
)

var _ = Describe("[Functional][Metrics]Function testing of collector metrics", func() {
Expand All @@ -19,10 +21,23 @@ var _ = Describe("[Functional][Metrics]Function testing of collector metrics", f
)

var (
framework *functional.CollectorFunctionalFramework
framework *functional.CollectorFunctionalFramework
metricsReaderRole *rbacv1.ClusterRole
metricsReaderBinding *rbacv1.ClusterRoleBinding
tokenReviewBinding *rbacv1.ClusterRoleBinding
)

AfterEach(func() {
	// Clean up the cluster-scoped RBAC objects created in BeforeEach; they are
	// not namespaced, so framework.Cleanup() alone would leak them across runs.
	// Delete errors are deliberately ignored: a binding/role may not exist if
	// BeforeEach failed part-way through.
	if tokenReviewBinding != nil {
		_ = framework.Test.Delete(tokenReviewBinding)
	}
	if metricsReaderBinding != nil {
		_ = framework.Test.Delete(metricsReaderBinding)
	}
	if metricsReaderRole != nil {
		_ = framework.Test.Delete(metricsReaderRole)
	}
	framework.Cleanup()
})

Expand All @@ -31,16 +46,44 @@ var _ = Describe("[Functional][Metrics]Function testing of collector metrics", f
testruntime.NewClusterLogForwarderBuilder(framework.Forwarder).
FromInput(obs.InputTypeApplication).
ToHttpOutput()

// Create ClusterRole to allow GET on /metrics
roleName := fmt.Sprintf("%s-metrics-reader", framework.Name)
metricsReaderRole = runtime.NewClusterRole(
roleName,
runtime.NewNonResourceURLPolicyRule([]string{"/metrics"}, []string{"get"}),
)
Expect(framework.Test.Create(metricsReaderRole)).To(Succeed())

// Create ClusterRoleBinding to bind the service account to the metrics reader role
metricsReaderBinding = runtime.NewClusterRoleBinding(
roleName,
runtime.NewClusterRoleRef(roleName),
runtime.NewServiceAccountSubject("default", framework.Namespace),
)
Expect(framework.Test.Create(metricsReaderBinding)).To(Succeed())

// Create ClusterRoleBinding to allow collector to do TokenReviews
tokenReviewBinding = runtime.NewClusterRoleBinding(
fmt.Sprintf("%s-token-reviewer", framework.Name),
runtime.NewClusterRoleRef("system:auth-delegator"),
runtime.NewServiceAccountSubject("default", framework.Namespace),
)
Expect(framework.Test.Create(tokenReviewBinding)).To(Succeed())
})
It("should return successfully when all outputs are up", func() {
Expect(framework.Deploy()).To(BeNil())
metrics, _ := framework.RunCommand(constants.CollectorName, "curl", "-ksv", fmt.Sprintf("https://%s.%s:24231/metrics", framework.Name, framework.Namespace))
metricsURL := fmt.Sprintf("https://%s.%s:24231/metrics", framework.Name, framework.Namespace)
curlCmd := fmt.Sprintf(`curl -ksv -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" %s`, metricsURL)
metrics, _ := framework.RunCommand(constants.CollectorName, "sh", "-c", curlCmd)
Expect(metrics).To(ContainSubstring(sampleMetric))
})

It("should return successfully even when the output is down", func() {
Expect(framework.DeployWithVisitor(func(builder *runtime.PodBuilder) error { return nil })).To(BeNil())
metrics, _ := framework.RunCommand(constants.CollectorName, "curl", "-ksv", fmt.Sprintf("https://%s.%s:24231/metrics", framework.Name, framework.Namespace))
metricsURL := fmt.Sprintf("https://%s.%s:24231/metrics", framework.Name, framework.Namespace)
curlCmd := fmt.Sprintf(`curl -ksv -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" %s`, metricsURL)
metrics, _ := framework.RunCommand(constants.CollectorName, "sh", "-c", curlCmd)
Expect(metrics).To(ContainSubstring(sampleMetric))
})

Expand Down