You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Batcher refactor and performance observability (#2310)
* Remove incorrectly used sandbox context from feature flag resolve
* Batcher refactor for easier read, performance metrics
Batcher loop is now easier to navigate. Time waits replaced with ticker.
Metrics for performance monitoring are now included. Different batchers
can be run in parallel with name attribute used to distinguish.
* Use batcher naming
* Trace batch callback for speed
* Allow export of `batcher` metrics
* Fix Push/Stop race condition and stale doc comment in batcher
Guard Start, Stop, and Push with a sync.RWMutex to prevent a
send-on-closed-channel panic that could occur when Stop closed
b.ch between Push's started check and the channel send.
Also update the QueueSize doc comment to reflect that Push now
returns ErrBatcherQueueFull instead of the old (false, nil).
* Reset ticker after each flush for consistent MaxDelay window
Without reset, a size-triggered flush near a tick boundary would
give the next batch less than MaxDelay to accumulate. Now each
batch always gets a full MaxDelay window after the previous flush.
mItemsDropped = utils.Must(meter.Int64Counter("batcher.items.dropped", metric.WithDescription("Number of items dropped because the batcher queue was full"), metric.WithUnit("{item}")))
33
+
mQueueLen = utils.Must(meter.Int64Gauge("batcher.queue.length", metric.WithDescription("Current number of items waiting in the batcher queue"), metric.WithUnit("{item}")))
34
+
mFlushBatchSize = utils.Must(meter.Int64Histogram("batcher.flush.batch_size", metric.WithDescription("Number of items per flushed batch"), metric.WithUnit("{item}")))
35
+
mFlushWaitDuration = utils.Must(meter.Int64Histogram("batcher.flush.wait_duration", metric.WithDescription("Time from first item enqueued in a batch to when the batch is flushed"), metric.WithUnit("ms")))
36
+
mFlushDuration = utils.Must(meter.Int64Histogram("batcher.flush.duration", metric.WithDescription("Time spent executing BatcherFunc per flush"), metric.WithUnit("ms")))
37
+
)
38
+
22
39
// Batcher groups items in batches and calls Func on them.
23
40
//
24
41
// See also BytesBatcher.
@@ -29,20 +46,24 @@ type Batcher[T any] struct {
29
46
// Maximum batch size that will be passed to BatcherFunc.
30
47
MaxBatchSize int
31
48
32
-
// Maximum delay between Push() and BatcherFunc call.
49
+
// Maximum delay between the first item being enqueued in a batch and the
50
+
// BatcherFunc call for that batch.
33
51
MaxDelay time.Duration
34
52
35
53
// Maximum unprocessed items' queue size.
36
54
QueueSize int
37
55
38
-
// ErrorHandler is called when BatcherFunc returns an error
39
-
// If not set, errors from BatcherFunc will be silently dropped
40
-
// This allows customizing error handling behavior - e.g. logging, metrics, etc.
56
+
// ErrorHandler is called when BatcherFunc returns an error.
57
+
// If not set, errors from BatcherFunc will be silently dropped.
41
58
ErrorHandler func(error)
42
59
43
60
// Synchronization primitives.
44
-
ch chan T
45
-
doneCh chan struct{}
61
+
mu sync.RWMutex
62
+
ch chan T
63
+
doneCh chan struct{}
64
+
started bool
65
+
66
+
attrs metric.MeasurementOption
46
67
}
47
68
48
69
// BatcherFunc is called by Batcher when batch is ready to be processed.
0 commit comments