
Commit a781837

More attempts at fixing the tests

1 parent 7b707ac

3 files changed: 22 additions & 2 deletions


tracer/src/Datadog.Trace/ClrProfiler/AutoInstrumentation/Logging/ILogger/DirectSubmission/DirectSubmissionLoggerProvider.cs
13 additions & 0 deletions

@@ -6,6 +6,7 @@
 using System;
 using System.Collections.Concurrent;
+using System.Threading.Tasks;
 using Datadog.Trace.DuckTyping;
 using Datadog.Trace.Logging.DirectSubmission;
 using Datadog.Trace.Logging.DirectSubmission.Formatting;
@@ -74,6 +75,18 @@ private DirectSubmissionLogger CreateLoggerImplementation(string name)
     [DuckReverseMethod]
     public void Dispose()
     {
+        // Flush the shared log sink on a best-effort basis so pending batches drain when
+        // the ILoggerFactory is disposed deterministically (host shutdown, test teardown)
+        // rather than waiting for the process-exit hook. The sink is a global singleton
+        // owned by TracerManager, so this does not dispose it.
+        try
+        {
+            _sink.FlushAsync().Wait(TimeSpan.FromSeconds(10));
+        }
+        catch
+        {
+            // Never throw from Dispose. Final flush still runs via the process-exit hook.
+        }
     }

     /// <summary>
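The comments above describe the pattern being applied: a best-effort, time-bounded flush inside Dispose that never throws. A minimal standalone sketch of that shape, with a hypothetical ILogSink standing in for the tracer's real sink type:

using System;
using System.Threading.Tasks;

public interface ILogSink
{
    // Hypothetical stand-in for the tracer's direct-submission sink.
    Task FlushAsync();
}

public sealed class FlushOnDisposeProvider : IDisposable
{
    private readonly ILogSink _sink;

    public FlushOnDisposeProvider(ILogSink sink) => _sink = sink;

    public void Dispose()
    {
        try
        {
            // Task.Wait(TimeSpan) blocks for at most the given time and
            // returns false on timeout, so shutdown cannot hang indefinitely.
            _sink.FlushAsync().Wait(TimeSpan.FromSeconds(10));
        }
        catch
        {
            // Swallow everything: Dispose must not throw, and a missed
            // flush is retried by the process-exit hook.
        }
    }
}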

tracer/test/Datadog.Trace.ClrProfiler.IntegrationTests/OpenTelemetrySdkTests.cs
4 additions & 2 deletions

@@ -716,9 +716,11 @@ private static async Task ClearTestAgentSession(string testAgentHost, int maxRet
     /// <summary>
     /// Polls the test-agent for data until non-empty results are returned or timeout is reached.
     /// The sample app exports data during shutdown, so there can be a brief delay
-    /// between process exit and data appearing in the test-agent.
+    /// between process exit and data appearing in the test-agent. The timeout is generous
+    /// because first-time gRPC connections (TCP+HTTP/2+TLS handshake) plus tracer shutdown
+    /// flushing can stack up on slower CI runners.
     /// </summary>
-    private static async Task<JToken> WaitForTestAgentData(string url, int timeoutSeconds = 30, int pollIntervalMs = 500)
+    private static async Task<JToken> WaitForTestAgentData(string url, int timeoutSeconds = 60, int pollIntervalMs = 500)
    {
        using var httpClient = new HttpClient { Timeout = TimeSpan.FromSeconds(10) };
        var deadline = DateTime.UtcNow.AddSeconds(timeoutSeconds);
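The doc comment above defines the polling contract: return as soon as the test-agent yields a non-empty payload, give up at the deadline. A hedged sketch of that loop, assuming the agent returns JSON; the method name and error handling here are illustrative, not the test's actual body:

using System;
using System.Net.Http;
using System.Threading.Tasks;
using Newtonsoft.Json.Linq;

static async Task<JToken> PollForData(string url, int timeoutSeconds = 60, int pollIntervalMs = 500)
{
    using var httpClient = new HttpClient { Timeout = TimeSpan.FromSeconds(10) };
    var deadline = DateTime.UtcNow.AddSeconds(timeoutSeconds);

    while (DateTime.UtcNow < deadline)
    {
        var json = JToken.Parse(await httpClient.GetStringAsync(url));
        if (json.HasValues)
        {
            return json; // non-empty: the shutdown export has landed
        }

        await Task.Delay(pollIntervalMs);
    }

    throw new TimeoutException($"No test-agent data at {url} within {timeoutSeconds}s");
}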

tracer/test/test-applications/integrations/Samples.OpenTelemetrySdk/Program.cs
5 additions & 0 deletions

@@ -145,6 +145,11 @@ public static async Task Main(string[] args)
         }

 #if OTEL_1_2
+        // Flush OTLP metric batches before Dispose. MeterProviderSdk.Dispose caps its shutdown
+        // call at 5s, but the first gRPC export (TCP+HTTP/2+TLS handshake) can exceed that and
+        // the OTel SDK's metric export timeout default is 30s. Force a flush on the critical
+        // path with the full export timeout instead of racing the 5s shutdown cap.
+        meterProvider?.ForceFlush(timeoutMilliseconds: 30_000);
         meterProvider?.Dispose();
 #endif
 #if OTEL_1_9
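For context, the flush-then-dispose ordering against the public OpenTelemetry .NET API looks roughly like this; the meter name and exporter configuration are illustrative, not the sample app's actual setup:

using System.Diagnostics.Metrics;
using OpenTelemetry;
using OpenTelemetry.Metrics;

using var meter = new Meter("Samples.Sketch");
var counter = meter.CreateCounter<long>("requests");

var meterProvider = Sdk.CreateMeterProviderBuilder()
    .AddMeter("Samples.Sketch")
    .AddOtlpExporter() // gRPC by default; the first export pays the connection handshake
    .Build();

counter.Add(1);

// Drain pending metric batches with the full export budget before disposing;
// relying on Dispose alone races its shorter internal shutdown timeout.
meterProvider.ForceFlush(timeoutMilliseconds: 30_000);
meterProvider.Dispose();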
