
Commit f0487d1

Enable telemetry by default with DSN-controlled priority and full event correctness tests (#349)
## Summary

This PR extends the telemetry implementation across two areas.

### 1. DSN / config changes (original scope)

- `EnableTelemetry *bool` tristate in `telemetry.Config`: `nil` = defer to server flag, `&true` = client opt-in, `&false` = client opt-out.
- Two-level enable priority: DSN `enableTelemetry=true` → always on; otherwise use the server feature flag.
- Two new DSN params: `telemetry_retry_count` and `telemetry_retry_delay`.

### 2. Telemetry gap fixes (new in this PR)

Four correctness bugs found during end-to-end testing against a real warehouse:

**EXECUTE_STATEMENT / CLOSE_STATEMENT silently lost on shutdown**
Root cause: `agg.cancel()` fired while a worker was mid-HTTP-export.
Fix: added `inFlight sync.WaitGroup`; `close()` calls `inFlight.Wait()` before `cancel()` (a sketch follows this description).

**`total_chunks_present: null` for paginated CloudFetch**
Root cause: the server reports one link per `FetchResults` call, so the grand total never appears in a single response.
Fix: pass `r.chunkCount` through `closeCallback`; `connection.go` sets `chunk_total_present` if the server never reported it.

**`operation_latency_ms: null` for CLOSE_STATEMENT**
Root cause: the `CloseOperation` RPC completes in under 1 ms, which rounds to 0, and `omitempty` drops 0.
Fix: removed `omitempty` from `OperationLatencyMs`.

**CloudFetch S3 timing fields not populated**
Root cause: per-S3-file download time was not measured.
Fix: added an `onFileDownloaded func(downloadMs int64)` callback to `cloudIPCStreamIterator`; `connection.go` aggregates initial/slowest/sum timings.

### 3. DSN parameters (full set)

| Parameter | Type | Default | Description |
|---|---|---|---|
| `enableTelemetry` | bool | unset | Overrides server flag when set |
| `telemetry_batch_size` | int | 100 | Events per batch |
| `telemetry_flush_interval` | duration | 5s | Periodic flush interval |
| `telemetry_retry_count` | int | 3 | Max retry attempts on export failure |
| `telemetry_retry_delay` | duration | 100ms | Base delay between retries (exponential backoff) |

## Key files changed

- `telemetry/aggregator.go` — `inFlight` WaitGroup; 5-step `close()` ordering
- `telemetry/interceptor.go` — `RecordOperation` takes `statementID` so CLOSE_STATEMENT carries `sql_statement_id`
- `telemetry/request.go` — removed `omitempty` from `OperationLatencyMs`
- `connection.go` — `closeCallback(latencyMs, chunkCount, iterErr, closeErr)` + `cloudFetchCallback` wiring
- `internal/rows/rows.go` — `closeCallback` passes `r.chunkCount`; `cloudFetchCallback` threaded through
- `internal/rows/arrowbased/batchloader.go` — `onFileDownloaded` callback per S3 file download

## Test plan

- [x] `go build ./...` — clean compile
- [x] `go test ./telemetry/... -count=1` — all pass
- [x] `go test ./internal/rows/... -count=1` — all pass
- [x] `go test ./... -short -count=1` — full suite passes

### New correctness tests

**`telemetry/aggregator_test.go`** (new file, 5 tests):

- `WaitsForInFlightWorkerExports` — `close()` blocks until all HTTP exports finish, even if workers picked up jobs before the drain step ran
- `DrainsPendingQueueJobsBeforeCancel` — jobs sitting in `exportQueue` are exported synchronously during drain
- `InFlightAddBeforeSend` — `inFlight.Add(1)` precedes the channel send so no job is invisible to `Wait()`
- `SafeToCallMultipleTimes` — concurrent `close()` calls do not deadlock (`sync.Once`)
- `DropWhenQueueFull` — the drop path calls `inFlight.Done()` so `Wait()` is never permanently blocked

**`telemetry/integration_test.go`** (2 new tests):

- `OperationLatencyMs_ZeroNotOmitted` — raw JSON contains `"operation_latency_ms":0`, not absent
- `ChunkTotalPresent_DerivedFromChunkCount` — `chunk_total_present` tag propagates to `ChunkDetails`

**`internal/rows/arrowbased/batchloader_test.go`** (2 new tests):

- `OnFileDownloaded` callback invoked once per file with positive `downloadMs`
- Nil callback is safe on non-telemetry paths (no panic)

**`internal/rows/rows_test.go`** (2 new tests):

- `CloseCallback_ReceivesChunkCount` — callback gets the correct total pages after multi-page iteration
- `CloseCallback_NilDoesNotPanic` — nil `closeCallback` is safe on `rows.Close()`

This pull request was AI-assisted by Isaac.
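The shutdown fix is the heart of `telemetry/aggregator.go`, which is not among the diffs shown below. Here is a condensed sketch of the drain-then-wait ordering under stated assumptions: `exportJob`, `exportQueue`, `closed`, and `export` are illustrative names, and the real `close()` has more steps than shown.

```go
package telemetry

import (
	"sync"
	"sync/atomic"
)

// exportJob and the aggregator fields below are illustrative stand-ins;
// the real telemetry/aggregator.go names and orders things differently.
type exportJob struct{ payload []byte }

type aggregator struct {
	exportQueue chan exportJob
	inFlight    sync.WaitGroup
	closeOnce   sync.Once
	closed      atomic.Bool
	cancel      func()          // stops worker goroutines
	export      func(exportJob) // performs the HTTP export
}

// enqueue: Add(1) happens BEFORE the channel send, so a job a worker picks
// up is never invisible to Wait(). The drop path must call Done(), or a
// full queue would leave Wait() blocked forever.
func (a *aggregator) enqueue(job exportJob) {
	if a.closed.Load() {
		return
	}
	a.inFlight.Add(1)
	select {
	case a.exportQueue <- job:
	default:
		a.inFlight.Done() // queue full: drop the job, keep Wait() unblockable
	}
}

// close drains, waits, then cancels, in that order. Cancelling first was
// the bug: workers mid-HTTP-export had their requests aborted.
func (a *aggregator) close() {
	a.closeOnce.Do(func() {
		a.closed.Store(true) // 1. stop accepting new jobs
	drain: // 2. export anything still queued, synchronously
		for {
			select {
			case job := <-a.exportQueue:
				a.export(job)
				a.inFlight.Done()
			default:
				break drain
			}
		}
		a.inFlight.Wait() // 3. let in-flight worker exports finish
		a.cancel()        // 4. only now stop the workers
	})
}
```

The invariant the aggregator tests above check is visible here: every `Add(1)` is matched by exactly one `Done()`, whether the job is exported by a worker, drained at close, or dropped on a full queue.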
1 parent 4faa786 commit f0487d1

27 files changed

Lines changed: 2020 additions & 1100 deletions

connection.go

Lines changed: 155 additions & 30 deletions
@@ -60,7 +60,7 @@ func (c *conn) Close() error {
 
 	// Record DELETE_SESSION regardless of error (matches JDBC), then flush and release
 	if c.telemetry != nil {
-		c.telemetry.RecordOperation(ctx, c.id, telemetry.OperationTypeDeleteSession, time.Since(closeStart).Milliseconds(), err)
+		c.telemetry.RecordOperation(ctx, c.id, "", telemetry.OperationTypeDeleteSession, time.Since(closeStart).Milliseconds(), err)
 		_ = c.telemetry.Close(ctx)
 		telemetry.ReleaseForConnection(c.cfg.Host)
 	}
@@ -130,15 +130,20 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name
 	executeStart := time.Now()
 	exStmtResp, opStatusResp, err := c.runQuery(ctx, query, args)
 	log, ctx = client.LoggerAndContext(ctx, exStmtResp)
-	stagingErr := c.execStagingOperation(exStmtResp, ctx)
 
-	// Telemetry: track statement execution
+	// Telemetry: set up metric context BEFORE staging operation so that the
+	// staging op's telemetryUpdate callback can attach tags to the metric context.
 	var statementID string
 	var closeOpErr error // Track CloseOperation errors for telemetry
 	if c.telemetry != nil && exStmtResp != nil && exStmtResp.OperationHandle != nil && exStmtResp.OperationHandle.OperationId != nil {
 		statementID = client.SprintGuid(exStmtResp.OperationHandle.OperationId.GUID)
-		// Use BeforeExecuteWithTime to set the correct start time (before execution)
 		ctx = c.telemetry.BeforeExecuteWithTime(ctx, c.id, statementID, executeStart)
+		c.telemetry.AddTag(ctx, telemetry.TagOperationType, telemetry.OperationTypeExecuteStatement)
+	}
+
+	stagingErr := c.execStagingOperation(exStmtResp, ctx)
+
+	if c.telemetry != nil && statementID != "" {
 		defer func() {
 			finalErr := err
 			if stagingErr != nil {
@@ -163,7 +168,7 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name
 			OperationHandle: exStmtResp.OperationHandle,
 		})
 		if c.telemetry != nil {
-			c.telemetry.RecordOperation(ctx, c.id, telemetry.OperationTypeCloseStatement, time.Since(closeOpStart).Milliseconds(), err1)
+			c.telemetry.RecordOperation(ctx, c.id, statementID, telemetry.OperationTypeCloseStatement, time.Since(closeOpStart).Milliseconds(), err1)
 		}
 		if err1 != nil {
 			log.Err(err1).Msg("databricks: failed to close operation after executing statement")
@@ -179,14 +184,55 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name
 
 	if stagingErr != nil {
 		log.Err(stagingErr).Msgf("databricks: failed to execute query: query %s", query)
-		return nil, dbsqlerrint.NewExecutionError(ctx, dbsqlerr.ErrQueryExecution, err, opStatusResp)
+		return nil, dbsqlerrint.NewExecutionError(ctx, dbsqlerr.ErrQueryExecution, stagingErr, opStatusResp)
 	}
 
 	res := result{AffectedRows: opStatusResp.GetNumModifiedRows()}
 
 	return &res, nil
 }
 
+// chunkTimingAccumulator aggregates per-chunk fetch latencies for telemetry.
+// It tracks the initial, slowest, and cumulative latencies, plus the number
+// of CloudFetch file downloads. All fields should be accessed under the
+// serialization provided by database/sql's closemu (see QueryContext).
+type chunkTimingAccumulator struct {
+	initialMs  int64
+	slowestMs  int64
+	sumMs      int64
+	initialSet bool
+	// cloudFetchFileCount counts individual S3 files downloaded via CloudFetch.
+	// Used to set chunk_total_present correctly for both bulk and paginated CloudFetch:
+	//   - paginated CF (1 link/FetchResults): file count == page count == correct total
+	//   - bulk CF (all links in DirectResults): file count == actual S3 downloads
+	// For inline ArrowBatch results this stays 0 and chunk_total_present falls back to chunkCount.
+	cloudFetchFileCount int
+}
+
+// record accumulates a single chunk or download latency. Returns true if
+// the latency was positive and tags should be updated; false otherwise.
+func (a *chunkTimingAccumulator) record(latencyMs int64) bool {
+	if latencyMs <= 0 {
+		return false
+	}
+	if !a.initialSet {
+		a.initialMs = latencyMs
+		a.initialSet = true
+	}
+	if latencyMs > a.slowestMs {
+		a.slowestMs = latencyMs
+	}
+	a.sumMs += latencyMs
+	return true
+}
+
+// applyTags writes the current timing state to the telemetry context.
+func (a *chunkTimingAccumulator) applyTags(ctx context.Context, interceptor *telemetry.Interceptor) {
+	interceptor.AddTag(ctx, telemetry.TagChunkInitialLatencyMs, a.initialMs)
+	interceptor.AddTag(ctx, telemetry.TagChunkSlowestLatencyMs, a.slowestMs)
+	interceptor.AddTag(ctx, telemetry.TagChunkSumLatencyMs, a.sumMs)
+}
+
 // QueryContext executes a query that may return rows, such as a
 // SELECT.
 //
@@ -206,32 +252,116 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam
 	log, ctx = client.LoggerAndContext(ctx, exStmtResp)
 	defer log.Duration(msg, start)
 
-	// Telemetry: track statement execution
+	// Telemetry: set up metric context for the statement.
+	// BeforeExecuteWithTime anchors startTime to before runQuery() ran.
 	var statementID string
 	if c.telemetry != nil && exStmtResp != nil && exStmtResp.OperationHandle != nil && exStmtResp.OperationHandle.OperationId != nil {
 		statementID = client.SprintGuid(exStmtResp.OperationHandle.OperationId.GUID)
-		// Use BeforeExecuteWithTime to set the correct start time (before execution)
 		ctx = c.telemetry.BeforeExecuteWithTime(ctx, c.id, statementID, executeStart)
-		defer func() {
-			c.telemetry.AfterExecute(ctx, err)
-			c.telemetry.CompleteStatement(ctx, statementID, err != nil)
-		}()
+		c.telemetry.AddTag(ctx, telemetry.TagOperationType, telemetry.OperationTypeExecuteStatement)
 	}
 
 	if err != nil {
+		// Error path: finalize and emit the EXECUTE_STATEMENT metric immediately —
+		// there are no rows to iterate so the metric is complete right now.
+		if c.telemetry != nil && statementID != "" {
+			c.telemetry.AfterExecute(ctx, err)
+			c.telemetry.CompleteStatement(ctx, statementID, true)
+		}
 		log.Err(err).Msg("databricks: failed to run query") // To log query we need to redact credentials
 		return nil, dbsqlerrint.NewExecutionError(ctx, dbsqlerr.ErrQueryExecution, err, opStatusResp)
 	}
 
-	// Telemetry callback for tracking row fetching metrics
-	telemetryUpdate := func(chunkCount int, bytesDownloaded int64) {
-		if c.telemetry != nil {
-			c.telemetry.AddTag(ctx, "chunk_count", chunkCount)
-			c.telemetry.AddTag(ctx, "bytes_downloaded", bytesDownloaded)
+	// Success path: freeze execute latency NOW (before row iteration inflates time.Since).
+	// AfterExecute/CompleteStatement are called from closeCallback after all chunks
+	// are fetched, so the final metric carries complete chunk timing data.
+	if c.telemetry != nil && statementID != "" {
+		c.telemetry.FinalizeLatency(ctx)
+	}
+
+	// chunkTimingAccumulator aggregates per-chunk fetch latencies across all
+	// fetchResultPage calls. These fields are safe without a mutex because they
+	// are only mutated from callbacks serialized by database/sql's closemu lock:
+	// telemetryUpdate and cloudFetchCallback run inside rows.Next() (which
+	// holds closemu.RLock), and closeCallback runs inside rows.Close() (which
+	// holds closemu.Lock). This ensures mutual exclusion even when Close() is
+	// called from database/sql's awaitDone goroutine on context cancellation.
+	var timing chunkTimingAccumulator
+
+	// Detach from caller's context so that telemetry tag writes and flushes
+	// survive context cancellation (e.g. query timeout, database/sql awaitDone).
+	// All three callbacks (telemetryUpdate, cloudFetchCallback, closeCallback)
+	// use this detached context uniformly.
+	telemetryCtx := context2.WithoutCancel(ctx)
+
+	// Telemetry callback invoked after each result page is fetched.
+	telemetryUpdate := func(chunkCount int, bytesDownloaded int64, chunkIndex int, chunkLatencyMs int64, _ int32) {
+		if c.telemetry == nil {
+			return
+		}
+		c.telemetry.AddTag(telemetryCtx, telemetry.TagChunkCount, chunkCount)
+		c.telemetry.AddTag(telemetryCtx, telemetry.TagBytesDownloaded, bytesDownloaded)
+
+		// Aggregate per-chunk fetch latencies (skip direct results where latency is 0).
+		if timing.record(chunkLatencyMs) {
+			timing.applyTags(telemetryCtx, c.telemetry)
+		}
+		// chunk_total_present is set definitively in closeCallback once all pages are known.
+	}
+
+	// cloudFetchCallback is invoked per S3 file download for CloudFetch result sets.
+	// It aggregates individual file download times into the same initial/slowest/sum vars
+	// used for inline chunk timing, matching JDBC's per-chunk HTTP GET timing model.
+	// For inline (non-CloudFetch) result sets this is never called.
+	var cloudFetchCallback func(downloadMs int64)
+	if c.telemetry != nil {
+		cloudFetchCallback = func(downloadMs int64) {
+			timing.cloudFetchFileCount++ // always count files for chunk_total_present, even sub-ms downloads
+			if timing.record(downloadMs) {
+				timing.applyTags(telemetryCtx, c.telemetry)
+			}
+		}
+	}
+
+	// closeCallback is invoked from rows.Close() after all rows have been consumed.
+	// At that point chunk timing is fully accumulated in telemetryCtx tags, so we
+	// finalize EXECUTE_STATEMENT here rather than at QueryContext return time.
+	var closeCallback func(latencyMs int64, chunkCount int, iterErr error, closeErr error)
+	if c.telemetry != nil && statementID != "" {
+		interceptor := c.telemetry
+		connID := c.id
+		stmtID := statementID
+		closeCallback = func(latencyMs int64, chunkCount int, iterErr error, closeErr error) {
+			// Set chunk_total_present to the definitive total now that all iteration is done.
+			// For CloudFetch, use cloudFetchFileCount (actual S3 downloads) — this handles
+			// both paginated CF (1 link/page, so file count == page count) and bulk CF
+			// (all links in DirectResults, so file count == total S3 files).
+			// For inline ArrowBatch, cloudFetchFileCount is 0; fall back to chunkCount.
+			if timing.cloudFetchFileCount > 0 {
+				interceptor.AddTag(telemetryCtx, telemetry.TagChunkTotalPresent, timing.cloudFetchFileCount)
+			} else if chunkCount > 0 {
+				interceptor.AddTag(telemetryCtx, telemetry.TagChunkTotalPresent, chunkCount)
+			}
+			// EXECUTE_STATEMENT uses the iteration error (row consumption failure)
+			// to correctly report whether the statement succeeded or failed.
+			interceptor.AfterExecute(telemetryCtx, iterErr)
+			interceptor.CompleteStatement(telemetryCtx, stmtID, iterErr != nil)
+			// CLOSE_STATEMENT uses the actual CloseOperation RPC error.
+			interceptor.RecordOperation(telemetryCtx, connID, stmtID, telemetry.OperationTypeCloseStatement, latencyMs, closeErr)
+		}
+	} else if c.telemetry != nil {
+		interceptor := c.telemetry
+		connID := c.id
+		closeCallback = func(latencyMs int64, _ int, _ error, closeErr error) {
+			interceptor.RecordOperation(telemetryCtx, connID, "", telemetry.OperationTypeCloseStatement, latencyMs, closeErr)
 		}
 	}
 
-	rows, err := rows.NewRows(ctx, exStmtResp.OperationHandle, c.client, c.cfg, exStmtResp.DirectResults, telemetryUpdate)
+	rows, err := rows.NewRows(ctx, exStmtResp.OperationHandle, c.client, c.cfg, exStmtResp.DirectResults, &rows.TelemetryCallbacks{
+		OnChunkFetched:   telemetryUpdate,
+		OnClose:          closeCallback,
+		OnCloudFetchFile: cloudFetchCallback,
+	})
 	return rows, err
 
 }
@@ -396,14 +526,7 @@ func (c *conn) executeStatement(ctx context.Context, query string, args []driver
 		}
 	}
 
-	executeStart := time.Now()
 	resp, err := c.client.ExecuteStatement(ctx, &req)
-	// Record the Thrift call latency as a separate operation metric.
-	// This is distinct from the statement-level metric (BeforeExecuteWithTime), which
-	// measures end-to-end latency including polling and row fetching.
-	if c.telemetry != nil {
-		c.telemetry.RecordOperation(ctx, c.id, telemetry.OperationTypeExecuteStatement, time.Since(executeStart).Milliseconds(), err)
-	}
 	var log *logger.DBSQLLogger
 	log, ctx = client.LoggerAndContext(ctx, resp)
 
@@ -668,14 +791,16 @@ func (c *conn) execStagingOperation(
 	}
 
 	if len(driverctx.StagingPathsFromContext(ctx)) != 0 {
-		// Telemetry callback for staging operation row fetching
-		telemetryUpdate := func(chunkCount int, bytesDownloaded int64) {
+		// Telemetry callback for staging operation row fetching (chunk timing not tracked for staging ops).
+		telemetryUpdate := func(chunkCount int, bytesDownloaded int64, chunkIndex int, chunkLatencyMs int64, totalChunksPresent int32) {
 			if c.telemetry != nil {
-				c.telemetry.AddTag(ctx, "chunk_count", chunkCount)
-				c.telemetry.AddTag(ctx, "bytes_downloaded", bytesDownloaded)
+				c.telemetry.AddTag(ctx, telemetry.TagChunkCount, chunkCount)
+				c.telemetry.AddTag(ctx, telemetry.TagBytesDownloaded, bytesDownloaded)
 			}
 		}
-		row, err = rows.NewRows(ctx, exStmtResp.OperationHandle, c.client, c.cfg, exStmtResp.DirectResults, telemetryUpdate)
+		row, err = rows.NewRows(ctx, exStmtResp.OperationHandle, c.client, c.cfg, exStmtResp.DirectResults, &rows.TelemetryCallbacks{
+			OnChunkFetched: telemetryUpdate,
+		})
 		if err != nil {
 			return dbsqlerrint.NewDriverError(ctx, "error reading row.", err)
 		}
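The CLOSE_STATEMENT latency recorded through `closeCallback` above lands in `OperationLatencyMs` (`telemetry/request.go`, which is not shown in this diff). A self-contained illustration of why `omitempty` had to be removed; the structs here are stand-ins, not the real request type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in structs; the real telemetry/request.go type has many more fields.
type beforeFix struct {
	OperationLatencyMs int64 `json:"operation_latency_ms,omitempty"`
}

type afterFix struct {
	OperationLatencyMs int64 `json:"operation_latency_ms"`
}

func main() {
	// A CloseOperation RPC that completes in under 1 ms rounds to 0 milliseconds.
	before, _ := json.Marshal(beforeFix{OperationLatencyMs: 0})
	after, _ := json.Marshal(afterFix{OperationLatencyMs: 0})
	fmt.Println(string(before)) // {} (the zero is dropped; the field reads as null server-side)
	fmt.Println(string(after))  // {"operation_latency_ms":0}
}
```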

connection_test.go

Lines changed: 53 additions & 0 deletions
@@ -1975,6 +1975,59 @@ func TestConn_execStagingOperation(t *testing.T) {
 	})
 }
 
+func TestChunkTimingAccumulator_Record(t *testing.T) {
+	tests := []struct {
+		name       string
+		latencies  []int64
+		wantInit   int64
+		wantSlow   int64
+		wantSum    int64
+		wantReturn []bool
+	}{
+		{"zero latency skipped", []int64{0}, 0, 0, 0, []bool{false}},
+		{"negative skipped", []int64{-5}, 0, 0, 0, []bool{false}},
+		{"single positive", []int64{10}, 10, 10, 10, []bool{true}},
+		{"initial preserved across calls", []int64{10, 20}, 10, 20, 30, []bool{true, true}},
+		{"slowest tracks max not last", []int64{30, 10, 50}, 30, 50, 90, []bool{true, true, true}},
+		{"zero interleaved skipped", []int64{10, 0, 20}, 10, 20, 30, []bool{true, false, true}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var a chunkTimingAccumulator
+			for i, lat := range tt.latencies {
+				got := a.record(lat)
+				if got != tt.wantReturn[i] {
+					t.Errorf("record(%d) = %v, want %v", lat, got, tt.wantReturn[i])
+				}
+			}
+			if a.initialMs != tt.wantInit {
+				t.Errorf("initialMs = %d, want %d", a.initialMs, tt.wantInit)
+			}
+			if a.slowestMs != tt.wantSlow {
+				t.Errorf("slowestMs = %d, want %d", a.slowestMs, tt.wantSlow)
+			}
+			if a.sumMs != tt.wantSum {
+				t.Errorf("sumMs = %d, want %d", a.sumMs, tt.wantSum)
+			}
+		})
+	}
+}
+
+func TestChunkTimingAccumulator_CloudFetchFileCount(t *testing.T) {
+	var a chunkTimingAccumulator
+	a.cloudFetchFileCount++
+	a.record(0) // sub-ms download — still counted but not timed
+	a.cloudFetchFileCount++
+	a.record(5)
+
+	if a.cloudFetchFileCount != 2 {
+		t.Errorf("cloudFetchFileCount = %d, want 2", a.cloudFetchFileCount)
+	}
+	if a.initialMs != 5 {
+		t.Errorf("initialMs = %d, want 5 (zero-latency file should not set initial)", a.initialMs)
+	}
+}
+
 func getTestSession() *cli_service.TOpenSessionResp {
 	return &cli_service.TOpenSessionResp{SessionHandle: &cli_service.TSessionHandle{
 		SessionId: &cli_service.THandleIdentifier{
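The per-file timings that feed `cloudFetchCallback` come from `internal/rows/arrowbased/batchloader.go`, which is not shown in this diff. A sketch of how a per-download callback like `onFileDownloaded` might be invoked; `downloadFile` and its signature are assumptions, only the callback shape comes from the PR description:

```go
package arrowbased

import (
	"context"
	"io"
	"net/http"
	"time"
)

// downloadFile is an illustrative stand-in for the CloudFetch download path.
func downloadFile(ctx context.Context, url string, onFileDownloaded func(downloadMs int64)) ([]byte, error) {
	start := time.Now()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// Nil-safe: non-telemetry paths pass no callback (covered by the new
	// batchloader tests listed in the PR description).
	if onFileDownloaded != nil {
		onFileDownloaded(time.Since(start).Milliseconds())
	}
	return data, nil
}
```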

connector.go

Lines changed: 11 additions & 10 deletions
@@ -81,18 +81,19 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
 	log := logger.WithContext(conn.id, driverctx.CorrelationIdFromContext(ctx), "")
 
 	// Initialize telemetry: client config overlay decides; if unset, feature flags decide
-	conn.telemetry = telemetry.InitializeForConnection(
-		ctx,
-		c.cfg.Host,
-		c.cfg.DriverVersion,
-		c.client,
-		c.cfg.EnableTelemetry,
-		c.cfg.TelemetryBatchSize,
-		c.cfg.TelemetryFlushInterval,
-	)
+	conn.telemetry = telemetry.InitializeForConnection(ctx, telemetry.TelemetryInitOptions{
+		Host:            c.cfg.Host,
+		DriverVersion:   c.cfg.DriverVersion,
+		HTTPClient:      c.client,
+		EnableTelemetry: c.cfg.EnableTelemetry,
+		BatchSize:       c.cfg.TelemetryBatchSize,
+		FlushInterval:   c.cfg.TelemetryFlushInterval,
+		RetryCount:      c.cfg.TelemetryRetryCount,
+		RetryDelay:      c.cfg.TelemetryRetryDelay,
+	})
 	if conn.telemetry != nil {
 		log.Debug().Msg("telemetry initialized for connection")
-		conn.telemetry.RecordOperation(ctx, conn.id, telemetry.OperationTypeCreateSession, sessionLatencyMs, nil)
+		conn.telemetry.RecordOperation(ctx, conn.id, "", telemetry.OperationTypeCreateSession, sessionLatencyMs, nil)
 	}
 
 	log.Info().Msgf("connect: host=%s port=%d httpPath=%s serverProtocolVersion=0x%X", c.cfg.Host, c.cfg.Port, c.cfg.HTTPPath, session.ServerProtocolVersion)
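`InitializeForConnection` above receives the tristate `EnableTelemetry` setting. A sketch of the two-level priority it implements per the summary; `telemetryEnabled` and `serverFlagEnabled` are illustrative names, not the driver's actual API:

```go
// telemetryEnabled resolves the client tristate against the server feature flag.
// nil means the DSN left enableTelemetry unset: defer to the server flag.
// &true / &false is an explicit client opt-in / opt-out and always wins.
func telemetryEnabled(clientSetting *bool, serverFlagEnabled bool) bool {
	if clientSetting != nil {
		return *clientSetting
	}
	return serverFlagEnabled
}
```

A DSN exercising the new parameters might look like the following, where the token, host, and warehouse path are placeholders:

```
token:dapi-xxxx@myhost.cloud.databricks.com:443/sql/1.0/warehouses/abcd1234?enableTelemetry=true&telemetry_retry_count=5&telemetry_retry_delay=200ms
```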
