diff --git a/go.mod b/go.mod
index 70b66d2edc..9a9fc138e9 100644
--- a/go.mod
+++ b/go.mod
@@ -35,8 +35,8 @@ require (
go.opentelemetry.io/otel v1.43.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0
go.opentelemetry.io/otel/metric v1.43.0
- go.opentelemetry.io/otel/sdk v1.42.0
- go.opentelemetry.io/otel/sdk/metric v1.42.0
+ go.opentelemetry.io/otel/sdk v1.43.0
+ go.opentelemetry.io/otel/sdk/metric v1.43.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.1
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67
diff --git a/go.sum b/go.sum
index 6c1a40ba71..e3f7cb815e 100644
--- a/go.sum
+++ b/go.sum
@@ -344,10 +344,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0 h1:MdK
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0/go.mod h1:RolT8tWtfHcjajEH5wFIZ4Dgh5jpPdFXYV9pTAk/qjc=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
-go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo=
-go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts=
-go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA=
-go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc=
+go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
+go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
+go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
+go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
index bfeb73e811..694b64a318 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
@@ -37,3 +37,18 @@ var Observability = newFeature(
return "", false
},
)
+
+// PerSeriesStartTimestamps is an experimental feature flag that determines if the SDK
+// uses the new Start Timestamps specification.
+//
+// To enable this feature set the OTEL_GO_X_PER_SERIES_START_TIMESTAMPS environment variable
+// to the case-insensitive string value of "true".
+var PerSeriesStartTimestamps = newFeature(
+ []string{"PER_SERIES_START_TIMESTAMPS"},
+ func(v string) (bool, bool) {
+ if strings.EqualFold(v, "true") {
+ return true, true
+ }
+ return false, false
+ },
+)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
index c6440a1346..306e5e3cdc 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -160,12 +160,14 @@ func WithExemplarFilter(filter exemplar.Filter) Option {
})
}
-// WithCardinalityLimit sets the cardinality limit for the MeterProvider.
+// WithCardinalityLimit sets the global cardinality limit for the MeterProvider.
//
// The cardinality limit is the hard limit on the number of metric datapoints
// that can be collected for a single instrument in a single collect cycle.
//
// Setting this to a zero or negative value means no limit is applied.
+// This value applies to all instrument kinds, but can be overridden per kind by
+// the reader's cardinality limit selector (see [WithCardinalityLimitSelector]).
func WithCardinalityLimit(limit int) Option {
// For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT`
// can also be used to set this value.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index 2aeba43789..312d73c457 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -8,10 +8,12 @@ import (
"errors"
"math"
"sync"
+ "sync/atomic"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -20,11 +22,6 @@ const (
expoMinScale = -10
smallestNonZeroNormalFloat64 = 0x1p-1022
-
- // These redefine the Math constants with a type, so the compiler won't coerce
- // them into an int on 32 bit platforms.
- maxInt64 int64 = math.MaxInt64
- minInt64 int64 = math.MinInt64
)
// expoHistogramDataPoint is a single data point in an exponential histogram.
@@ -32,19 +29,19 @@ type expoHistogramDataPoint[N int64 | float64] struct {
attrs attribute.Set
res FilteredExemplarReservoir[N]
- min N
- max N
- sum N
+ minMax atomicMinMax[N]
+ sum atomicCounter[N]
maxSize int
noMinMax bool
noSum bool
- scale int32
+ scale atomic.Int32
posBuckets expoBuckets
negBuckets expoBuckets
- zeroCount uint64
+ zeroCount atomic.Uint64
+ startTime time.Time
}
func newExpoHistogramDataPoint[N int64 | float64](
@@ -53,42 +50,30 @@ func newExpoHistogramDataPoint[N int64 | float64](
maxScale int32,
noMinMax, noSum bool,
) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
- f := math.MaxFloat64
- ma := N(f) // if N is int64, max will overflow to -9223372036854775808
- mi := N(-f)
- if N(maxInt64) > N(f) {
- ma = N(maxInt64)
- mi = N(minInt64)
- }
- return &expoHistogramDataPoint[N]{
- attrs: attrs,
- min: ma,
- max: mi,
- maxSize: maxSize,
- noMinMax: noMinMax,
- noSum: noSum,
- scale: maxScale,
+ dp := &expoHistogramDataPoint[N]{
+ attrs: attrs,
+ maxSize: maxSize,
+ noMinMax: noMinMax,
+ noSum: noSum,
+ startTime: now(),
}
+ dp.scale.Store(maxScale)
+ return dp
}
// record adds a new measurement to the histogram. It will rescale the buckets if needed.
func (p *expoHistogramDataPoint[N]) record(v N) {
if !p.noMinMax {
- if v < p.min {
- p.min = v
- }
- if v > p.max {
- p.max = v
- }
+ p.minMax.Update(v)
}
if !p.noSum {
- p.sum += v
+ p.sum.add(v)
}
absV := math.Abs(float64(v))
if float64(absV) == 0.0 {
- p.zeroCount++
+ p.zeroCount.Add(1)
return
}
@@ -102,14 +87,15 @@ func (p *expoHistogramDataPoint[N]) record(v N) {
// If the new bin would make the counts larger than maxScale, we need to
// downscale current measurements.
if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
- if p.scale-scaleDelta < expoMinScale {
+ currentScale := p.scale.Load()
+ if currentScale-scaleDelta < expoMinScale {
// With a scale of -10 there is only two buckets for the whole range of float64 values.
// This can only happen if there is a max size of 1.
otel.Handle(errors.New("exponential histogram scale underflow"))
return
}
// Downscale
- p.scale -= scaleDelta
+ p.scale.Add(-scaleDelta)
p.posBuckets.downscale(scaleDelta)
p.negBuckets.downscale(scaleDelta)
@@ -124,7 +110,8 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
frac, expInt := math.Frexp(v)
// 11-bit exponential.
exp := int32(expInt) // nolint: gosec
- if p.scale <= 0 {
+ scale := p.scale.Load()
+ if scale <= 0 {
// Because of the choice of fraction is always 1 power of two higher than we want.
var correction int32 = 1
if frac == .5 {
@@ -132,9 +119,9 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
// will be one higher than we want.
correction = 2
}
- return (exp - correction) >> (-p.scale)
+ return (exp - correction) >> (-scale)
}
-	return exp << p.scale
+	return exp << scale
 }
 
@@ -201,7 +189,7 @@ func (b *expoBuckets) record(bin int32) {
 	if int(bin) >= b.startBin && int(bin) <= endBin {
-		b.counts[bin-b.startBin]++
+		b.counts[bin-b.startBin].Add(1)
 		return
 	}
// if the new bin is before the current start add spaces to the counts
@@ -223,16 +211,22 @@ func (b *expoBuckets) record(bin int32) {
shift := b.startBin - bin
if newLength > cap(b.counts) {
- b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
+ b.counts = append(b.counts, make([]atomic.Uint64, newLength-len(b.counts))...)
}
- copy(b.counts[shift:origLen+int(shift)], b.counts)
b.counts = b.counts[:newLength]
+
+ // Shift existing elements to the right. Go's copy() doesn't work for
+ // structs like atomic.Uint64.
+ for i := origLen - 1; i >= 0; i-- {
+ b.counts[i+int(shift)].Store(b.counts[i].Load())
+ }
+
for i := 1; i < int(shift); i++ {
- b.counts[i] = 0
+ b.counts[i].Store(0)
}
b.startBin = bin
- b.counts[0] = 1
+ b.counts[0].Store(1)
return
}
// if the new is after the end add spaces to the end
@@ -240,15 +234,15 @@ func (b *expoBuckets) record(bin int32) {
if int(bin-b.startBin) < cap(b.counts) {
b.counts = b.counts[:bin-b.startBin+1]
for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
- b.counts[i] = 0
+ b.counts[i].Store(0)
}
- b.counts[bin-b.startBin] = 1
+ b.counts[bin-b.startBin].Store(1)
return
}
- end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
+ end := make([]atomic.Uint64, int(bin-b.startBin)-len(b.counts)+1)
b.counts = append(b.counts, end...)
- b.counts[bin-b.startBin] = 1
+ b.counts[bin-b.startBin].Store(1)
}
}
@@ -275,10 +269,10 @@ func (b *expoBuckets) downscale(delta int32) {
for i := 1; i < len(b.counts); i++ {
idx := i + int(offset)
if idx%int(steps) == 0 {
- b.counts[idx/int(steps)] = b.counts[i]
+ b.counts[idx/int(steps)].Store(b.counts[i].Load())
continue
}
- b.counts[idx/int(steps)] += b.counts[i]
+ b.counts[idx/int(steps)].Add(b.counts[i].Load())
}
lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
@@ -288,8 +282,8 @@ func (b *expoBuckets) downscale(delta int32) {
func (b *expoBuckets) count() uint64 {
var total uint64
- for _, count := range b.counts {
- total += count
+ for i := range b.counts {
+ total += b.counts[i].Load()
}
return total
}
@@ -386,8 +380,8 @@ func (e *expoHistogram[N]) delta(
hDPts[i].StartTime = e.start
hDPts[i].Time = t
hDPts[i].Count = val.count()
- hDPts[i].Scale = val.scale
- hDPts[i].ZeroCount = val.zeroCount
+ hDPts[i].Scale = val.scale.Load()
+ hDPts[i].ZeroCount = val.zeroCount.Load()
hDPts[i].ZeroThreshold = 0.0
hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
@@ -396,7 +390,9 @@ func (e *expoHistogram[N]) delta(
len(val.posBuckets.counts),
len(val.posBuckets.counts),
)
- copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+ for j := range val.posBuckets.counts {
+ hDPts[i].PositiveBucket.Counts[j] = val.posBuckets.counts[j].Load()
+ }
hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
hDPts[i].NegativeBucket.Counts = reset(
@@ -404,14 +400,18 @@ func (e *expoHistogram[N]) delta(
len(val.negBuckets.counts),
len(val.negBuckets.counts),
)
- copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+ for j := range val.negBuckets.counts {
+ hDPts[i].NegativeBucket.Counts[j] = val.negBuckets.counts[j].Load()
+ }
if !e.noSum {
- hDPts[i].Sum = val.sum
+ hDPts[i].Sum = val.sum.load()
}
if !e.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
+ if val.minMax.set.Load() {
+ hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load())
+ hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load())
+ }
}
collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
@@ -443,14 +443,21 @@ func (e *expoHistogram[N]) cumulative(
n := len(e.values)
hDPts := reset(h.DataPoints, n, n)
+ perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled()
+
var i int
for _, val := range e.values {
hDPts[i].Attributes = val.attrs
- hDPts[i].StartTime = e.start
+
+ startTime := e.start
+ if perSeriesStartTimeEnabled {
+ startTime = val.startTime
+ }
+ hDPts[i].StartTime = startTime
hDPts[i].Time = t
hDPts[i].Count = val.count()
- hDPts[i].Scale = val.scale
- hDPts[i].ZeroCount = val.zeroCount
+ hDPts[i].Scale = val.scale.Load()
+ hDPts[i].ZeroCount = val.zeroCount.Load()
hDPts[i].ZeroThreshold = 0.0
hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
@@ -459,7 +466,9 @@ func (e *expoHistogram[N]) cumulative(
len(val.posBuckets.counts),
len(val.posBuckets.counts),
)
- copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+ for j := range val.posBuckets.counts {
+ hDPts[i].PositiveBucket.Counts[j] = val.posBuckets.counts[j].Load()
+ }
hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
hDPts[i].NegativeBucket.Counts = reset(
@@ -467,14 +476,18 @@ func (e *expoHistogram[N]) cumulative(
len(val.negBuckets.counts),
len(val.negBuckets.counts),
)
- copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+ for j := range val.negBuckets.counts {
+ hDPts[i].NegativeBucket.Counts[j] = val.negBuckets.counts[j].Load()
+ }
if !e.noSum {
- hDPts[i].Sum = val.sum
+ hDPts[i].Sum = val.sum.load()
}
if !e.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
+ if val.minMax.set.Load() {
+ hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load())
+ hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load())
+ }
}
collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
index 421325fb72..83582c670c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -11,6 +11,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -27,8 +28,9 @@ type hotColdHistogramPoint[N int64 | float64] struct {
hcwg hotColdWaitGroup
hotColdPoint [2]histogramPointCounters[N]
- attrs attribute.Set
- res FilteredExemplarReservoir[N]
+ attrs attribute.Set
+ res FilteredExemplarReservoir[N]
+ startTime time.Time
}
// histogramPointCounters contains only the atomic counter data, and is used by
@@ -298,6 +300,7 @@ func (s *cumulativeHistogram[N]) measure(
counts: make([]atomic.Uint64, len(s.bounds)+1),
},
},
+ startTime: now(),
}
return hPt
}).(*hotColdHistogramPoint[N])
@@ -339,16 +342,23 @@ func (s *cumulativeHistogram[N]) collect(
// current length for capacity.
hDPts := reset(h.DataPoints, 0, s.values.Len())
+ perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled()
+
var i int
s.values.Range(func(_, value any) bool {
val := value.(*hotColdHistogramPoint[N])
+
+ startTime := s.start
+ if perSeriesStartTimeEnabled {
+ startTime = val.startTime
+ }
// swap, observe, and clear the point
readIdx := val.hcwg.swapHotAndWait()
var bucketCounts []uint64
count := val.hotColdPoint[readIdx].loadCountsInto(&bucketCounts)
newPt := metricdata.HistogramDataPoint[N]{
Attributes: val.attrs,
- StartTime: s.start,
+ StartTime: startTime,
Time: t,
Count: count,
Bounds: bounds,
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
index 4924d732cb..4c004bc99d 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -8,14 +8,16 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// lastValuePoint is timestamped measurement data.
type lastValuePoint[N int64 | float64] struct {
- attrs attribute.Set
- value atomicN[N]
- res FilteredExemplarReservoir[N]
+ attrs attribute.Set
+ value atomicN[N]
+ res FilteredExemplarReservoir[N]
+ startTime time.Time
}
// lastValueMap summarizes a set of measurements as the last one made.
@@ -31,10 +33,13 @@ func (s *lastValueMap[N]) measure(
droppedAttr []attribute.KeyValue,
) {
lv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any {
- return &lastValuePoint[N]{
- res: s.newRes(attr),
- attrs: attr,
+ p := &lastValuePoint[N]{
+ res: s.newRes(attr),
+ attrs: attr,
+ startTime: now(),
}
+ p.value.Store(value)
+ return p
}).(*lastValuePoint[N])
lv.value.Store(value)
@@ -156,12 +161,19 @@ func (s *cumulativeLastValue[N]) collect(
// current length for capacity.
dPts := reset(gData.DataPoints, 0, s.values.Len())
+ perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled()
+
var i int
s.values.Range(func(_, value any) bool {
v := value.(*lastValuePoint[N])
+
+ startTime := s.start
+ if perSeriesStartTimeEnabled {
+ startTime = v.startTime
+ }
newPt := metricdata.DataPoint[N]{
Attributes: v.attrs,
- StartTime: s.start,
+ StartTime: startTime,
Time: t,
Value: v.value.Load(),
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
index 66cb68085f..3fe7c7cf04 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -8,13 +8,15 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
type sumValue[N int64 | float64] struct {
- n atomicCounter[N]
- res FilteredExemplarReservoir[N]
- attrs attribute.Set
+ n atomicCounter[N]
+ res FilteredExemplarReservoir[N]
+ attrs attribute.Set
+ startTime time.Time
}
type sumValueMap[N int64 | float64] struct {
@@ -30,8 +32,9 @@ func (s *sumValueMap[N]) measure(
) {
sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any {
return &sumValue[N]{
- res: s.newRes(attr),
- attrs: attr,
+ res: s.newRes(attr),
+ attrs: attr,
+ startTime: now(),
}
}).(*sumValue[N])
sv.n.add(value)
@@ -160,12 +163,19 @@ func (s *cumulativeSum[N]) collect(
// current length for capacity.
dPts := reset(sData.DataPoints, 0, s.values.Len())
+ perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled()
+
var i int
s.values.Range(func(_, value any) bool {
val := value.(*sumValue[N])
+
+ startTime := s.start
+ if perSeriesStartTimeEnabled {
+ startTime = val.startTime
+ }
newPt := metricdata.DataPoint[N]{
Attributes: val.attrs,
- StartTime: s.start,
+ StartTime: startTime,
Time: t,
Value: val.n.load(),
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
index 5b0630207b..0357afd455 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -32,8 +32,9 @@ type ManualReader struct {
isShutdown bool
externalProducers atomic.Value
- temporalitySelector TemporalitySelector
- aggregationSelector AggregationSelector
+ temporalitySelector TemporalitySelector
+ aggregationSelector AggregationSelector
+ cardinalityLimitSelector CardinalityLimitSelector
inst *observ.Instrumentation
}
@@ -45,8 +46,9 @@ var _ = map[Reader]struct{}{&ManualReader{}: {}}
func NewManualReader(opts ...ManualReaderOption) *ManualReader {
cfg := newManualReaderConfig(opts)
r := &ManualReader{
- temporalitySelector: cfg.temporalitySelector,
- aggregationSelector: cfg.aggregationSelector,
+ temporalitySelector: cfg.temporalitySelector,
+ aggregationSelector: cfg.aggregationSelector,
+ cardinalityLimitSelector: cfg.cardinalityLimitSelector,
}
r.externalProducers.Store(cfg.producers)
@@ -89,6 +91,11 @@ func (mr *ManualReader) aggregation(
return mr.aggregationSelector(kind)
}
+// cardinalityLimit returns the cardinality limit for kind.
+func (mr *ManualReader) cardinalityLimit(kind InstrumentKind) (int, bool) {
+ return mr.cardinalityLimitSelector(kind)
+}
+
// Shutdown closes any connections and frees any resources used by the reader.
//
// This method is safe to call concurrently.
@@ -179,16 +186,18 @@ func (r *ManualReader) MarshalLog() any {
// manualReaderConfig contains configuration options for a ManualReader.
type manualReaderConfig struct {
- temporalitySelector TemporalitySelector
- aggregationSelector AggregationSelector
- producers []Producer
+ temporalitySelector TemporalitySelector
+ aggregationSelector AggregationSelector
+ cardinalityLimitSelector CardinalityLimitSelector
+ producers []Producer
}
// newManualReaderConfig returns a manualReaderConfig configured with options.
func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
cfg := manualReaderConfig{
- temporalitySelector: DefaultTemporalitySelector,
- aggregationSelector: DefaultAggregationSelector,
+ temporalitySelector: DefaultTemporalitySelector,
+ aggregationSelector: DefaultAggregationSelector,
+ cardinalityLimitSelector: defaultCardinalityLimitSelector,
}
for _, opt := range opts {
cfg = opt.applyManual(cfg)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
index 32a7e19325..d1efc9f374 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -26,17 +26,19 @@ const (
// periodicReaderConfig contains configuration options for a PeriodicReader.
type periodicReaderConfig struct {
- interval time.Duration
- timeout time.Duration
- producers []Producer
+ interval time.Duration
+ timeout time.Duration
+ producers []Producer
+ cardinalityLimitSelector CardinalityLimitSelector
}
// newPeriodicReaderConfig returns a periodicReaderConfig configured with
// options.
func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
c := periodicReaderConfig{
- interval: envDuration(envInterval, defaultInterval),
- timeout: envDuration(envTimeout, defaultTimeout),
+ interval: envDuration(envInterval, defaultInterval),
+ timeout: envDuration(envTimeout, defaultTimeout),
+ cardinalityLimitSelector: defaultCardinalityLimitSelector,
}
for _, o := range options {
c = o.applyPeriodic(c)
@@ -111,12 +113,13 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri
context.Background(),
)
r := &PeriodicReader{
- interval: conf.interval,
- timeout: conf.timeout,
- exporter: exporter,
- flushCh: make(chan chan error),
- cancel: cancel,
- done: make(chan struct{}),
+ interval: conf.interval,
+ timeout: conf.timeout,
+ exporter: exporter,
+ flushCh: make(chan chan error),
+ cancel: cancel,
+ done: make(chan struct{}),
+ cardinalityLimitSelector: conf.cardinalityLimitSelector,
rmPool: sync.Pool{
New: func() any {
return &metricdata.ResourceMetrics{}
@@ -170,6 +173,8 @@ type PeriodicReader struct {
rmPool sync.Pool
+ cardinalityLimitSelector CardinalityLimitSelector
+
inst *observ.Instrumentation
}
@@ -222,6 +227,11 @@ func (r *PeriodicReader) aggregation(
return r.exporter.Aggregation(kind)
}
+// cardinalityLimit returns the cardinality limit for kind.
+func (r *PeriodicReader) cardinalityLimit(kind InstrumentKind) (int, bool) {
+ return r.cardinalityLimitSelector(kind)
+}
+
// collectAndExport gather all metric data related to the periodicReader r from
// the SDK and exports it with r's exporter.
func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
index ab269cdfd6..34300a786c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -301,7 +301,7 @@ func (i *inserter[N]) addCallback(cback func(context.Context) error) {
i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
}
-var aggIDCount uint64
+var aggIDCount atomic.Uint64
// aggVal is the cached value in an aggregators cache.
type aggVal[N int64 | float64] struct {
@@ -395,9 +395,7 @@ func (i *inserter[N]) cachedAggregator(
b.Filter = stream.AttributeFilter
// A value less than or equal to zero will disable the aggregation
// limits for the builder (an all the created aggregates).
- // cardinalityLimit will be 0 by default if unset (or
- // unrecognized input). Use that value directly.
- b.AggregationLimit = i.pipeline.cardinalityLimit
+ b.AggregationLimit = i.getCardinalityLimit(kind)
in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
if err != nil {
return aggVal[N]{0, nil, err}
@@ -413,12 +411,24 @@ func (i *inserter[N]) cachedAggregator(
unit: stream.Unit,
compAgg: out,
})
- id := atomic.AddUint64(&aggIDCount, 1)
+ id := aggIDCount.Add(1)
return aggVal[N]{id, in, err}
})
return cv.Measure, cv.ID, cv.Err
}
+// getCardinalityLimit returns the cardinality limit for the given instrument kind.
+// When the reader's selector returns fallback = true, the pipeline's global
+// limit is used, then the default if global is unset. When fallback is false,
+// the selector's limit is used (0 or less means unlimited).
+func (i *inserter[N]) getCardinalityLimit(kind InstrumentKind) int {
+ limit, fallback := i.pipeline.reader.cardinalityLimit(kind)
+ if fallback {
+ return i.pipeline.cardinalityLimit
+ }
+ return limit
+}
+
// logConflict validates if an instrument with the same case-insensitive name
// as id has already been created. If that instrument conflicts with id, a
// warning is logged.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
index 7b205c736c..99079dd278 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -59,6 +59,15 @@ type Reader interface {
// Reader methods.
aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
+ // cardinalityLimit returns the cardinality limit for an instrument kind.
+ // When fallback is true, the pipeline falls back to the provider's global limit.
+ // When fallback is false, limit is used: 0 or less means no limit (unlimited),
+ // and a positive value is the limit for that kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Reader methods.
+ cardinalityLimit(InstrumentKind) (limit int, fallback bool)
+
// Collect gathers and returns all metric data related to the Reader from
// the SDK and stores it in rm. An error is returned if this is called
// after Shutdown or if rm is nil.
@@ -192,6 +201,25 @@ func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
panic("unknown instrument kind")
}
+// CardinalityLimitSelector selects the cardinality limit to use based on the
+// InstrumentKind. The cardinality limit is the maximum number of distinct
+// attribute sets that can be recorded for a single instrument.
+//
+// The selector returns (limit, fallback). When fallback is true, the pipeline
+// falls back to the provider's global cardinality limit.
+// When fallback is false, the limit is applied: a value of 0 or less means
+// no limit, and a positive value is the limit for that kind.
+// To avoid overriding the provider's global limit, return (0, true).
+type CardinalityLimitSelector func(InstrumentKind) (limit int, fallback bool)
+
+// defaultCardinalityLimitSelector is the default CardinalityLimitSelector used
+// if WithCardinalityLimitSelector is not provided. It returns (0, true) for all
+// instrument kinds, allowing the pipeline to fall back to the provider's global
+// limit.
+func defaultCardinalityLimitSelector(_ InstrumentKind) (int, bool) {
+ return 0, true
+}
+
// ReaderOption is an option which can be applied to manual or Periodic
// readers.
type ReaderOption interface {
@@ -220,3 +248,33 @@ func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConf
c.producers = append(c.producers, o.p)
return c
}
+
+// WithCardinalityLimitSelector sets the CardinalityLimitSelector a reader will
+// use to determine the cardinality limit for an instrument based on its kind.
+// If this option is not used, the reader will use the
+// defaultCardinalityLimitSelector.
+//
+// The selector should return (limit, false) to set a positive limit,
+// (0, false) to explicitly specify unlimited, or
+// (0, true) to fall back to the provider's global limit.
+//
+// See [CardinalityLimitSelector] for more details.
+func WithCardinalityLimitSelector(selector CardinalityLimitSelector) ReaderOption {
+ return cardinalityLimitSelectorOption{selector: selector}
+}
+
+type cardinalityLimitSelectorOption struct {
+ selector CardinalityLimitSelector
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o cardinalityLimitSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
+ c.cardinalityLimitSelector = o.selector
+ return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied.
+func (o cardinalityLimitSelectorOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+ c.cardinalityLimitSelector = o.selector
+ return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
index 6ea297f079..26752be7d7 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
- return "1.42.0"
+ return "1.43.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
index 0d6e213d92..a3d647d92c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -193,3 +193,11 @@ func WithContainer() Option {
func WithContainerID() Option {
return WithDetectors(cgroupContainerIDDetector{})
}
+
+// WithService adds all the Service attributes to the configured Resource.
+func WithService() Option {
+ return WithDetectors(
+ defaultServiceInstanceIDDetector{},
+ defaultServiceNameDetector{},
+ )
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 1a72b756ae..755c082427 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -31,19 +31,19 @@ type hostIDReaderBSD struct {
readFile fileReader
}
-// read attempts to read the machine-id from /etc/hostid. If not found it will
-// execute `kenv -q smbios.system.uuid`. If neither location yields an id an
-// error will be returned.
+// read attempts to read the machine-id from /etc/hostid.
+// If not found it will execute: /bin/kenv -q smbios.system.uuid.
+// If neither location yields an id an error will be returned.
func (r *hostIDReaderBSD) read() (string, error) {
if result, err := r.readFile("/etc/hostid"); err == nil {
return strings.TrimSpace(result), nil
}
- if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
+ if result, err := r.execCommand("/bin/kenv", "-q", "smbios.system.uuid"); err == nil {
return strings.TrimSpace(result), nil
}
- return "", errors.New("host id not found in: /etc/hostid or kenv")
+ return "", errors.New("host id not found in: /etc/hostid or /bin/kenv")
}
// hostIDReaderDarwin implements hostIDReader.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
index 28e1e4f7eb..f715be53ed 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -232,6 +232,15 @@ func Empty() *Resource {
// Default returns an instance of Resource with a default
// "service.name" and OpenTelemetrySDK attributes.
func Default() *Resource {
+ return DefaultWithContext(context.Background())
+}
+
+// DefaultWithContext returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+//
+// If the default resource has already been initialized, the provided ctx
+// is ignored and the cached resource is returned.
+func DefaultWithContext(ctx context.Context) *Resource {
defaultResourceOnce.Do(func() {
var err error
defaultDetectors := []Detector{
@@ -243,7 +252,7 @@ func Default() *Resource {
defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...)
}
defaultResource, err = Detect(
- context.Background(),
+ ctx,
defaultDetectors...,
)
if err != nil {
@@ -260,8 +269,14 @@ func Default() *Resource {
// Environment returns an instance of Resource with attributes
// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
func Environment() *Resource {
+ return EnvironmentWithContext(context.Background())
+}
+
+// EnvironmentWithContext returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func EnvironmentWithContext(ctx context.Context) *Resource {
detector := &fromEnv{}
- resource, err := detector.Detect(context.Background())
+ resource, err := detector.Detect(ctx)
if err != nil {
otel.Handle(err)
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 96e0fc60f9..766731dd25 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.42.0"
+ return "1.43.0"
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 34a9763d7a..7eaa8004c4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -541,13 +541,13 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/trans
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.42.0
+# go.opentelemetry.io/otel/sdk v1.43.0
## explicit; go 1.25.0
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
-# go.opentelemetry.io/otel/sdk/metric v1.42.0
+# go.opentelemetry.io/otel/sdk/metric v1.43.0
## explicit; go 1.25.0
go.opentelemetry.io/otel/sdk/metric
go.opentelemetry.io/otel/sdk/metric/exemplar