diff --git a/.changeset/ai-completion-id.md b/.changeset/ai-completion-id.md
new file mode 100644
index 0000000000..455e688f88
--- /dev/null
+++ b/.changeset/ai-completion-id.md
@@ -0,0 +1,5 @@
+---
+"@posthog/ai": minor
+---
+
+Add `$ai_completion_id`, `$ai_system_fingerprint`, and `$ai_request_id` properties to `$ai_generation` events for OpenAI and Azure OpenAI wrappers. These enable direct correlation between PostHog events and OpenAI's Logs dashboard (`platform.openai.com/logs/{completion_id}`).
diff --git a/packages/ai/src/openai/azure.ts b/packages/ai/src/openai/azure.ts
index d6f4022347..5d237570b5 100644
--- a/packages/ai/src/openai/azure.ts
+++ b/packages/ai/src/openai/azure.ts
@@ -107,6 +107,8 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
         const contentBlocks: FormattedContent = []
         let accumulatedContent = ''
         let modelFromResponse: string | undefined
+        let completionIdFromResponse: string | undefined
+        let systemFingerprintFromResponse: string | undefined
         let firstTokenTime: number | undefined
         let usage: {
           inputTokens?: number
@@ -129,10 +131,16 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
         >()

         for await (const chunk of stream1) {
-          // Extract model from response if not in params
+          // Extract model and completion metadata from chunk
           if (!modelFromResponse && chunk.model) {
             modelFromResponse = chunk.model
           }
+          if (!completionIdFromResponse && chunk.id) {
+            completionIdFromResponse = chunk.id
+          }
+          if (systemFingerprintFromResponse === undefined && chunk.system_fingerprint) {
+            systemFingerprintFromResponse = chunk.system_fingerprint
+          }

           const choice = chunk?.choices?.[0]

@@ -242,6 +250,8 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
             params: body,
             httpStatus: 200,
             usage,
+            completionId: completionIdFromResponse,
+            systemFingerprint: systemFingerprintFromResponse,
           })
         } catch (error: unknown) {
           const enrichedError = await sendEventWithErrorToPosthog({
@@ -288,6 +298,9 @@ export class WrappedCompletions extends AzureOpenAI.Chat.Completions {
               reasoningTokens: result.usage?.completion_tokens_details?.reasoning_tokens ?? 0,
               cacheReadInputTokens: result.usage?.prompt_tokens_details?.cached_tokens ?? 0,
             },
+            completionId: result.id,
+            systemFingerprint: result.system_fingerprint,
+            requestId: (result as any)._request_id, // x-request-id header exposed by the OpenAI SDK
           })
         }
         return result
@@ -370,6 +383,7 @@ export class WrappedResponses extends AzureOpenAI.Responses {
       try {
         let finalContent: any[] = []
         let modelFromResponse: string | undefined
+        let completionIdFromResponse: string | undefined
         let firstTokenTime: number | undefined
         let usage: {
           inputTokens?: number
@@ -388,10 +402,13 @@ export class WrappedResponses extends AzureOpenAI.Responses {
           }

           if ('response' in chunk && chunk.response) {
-            // Extract model from response if not in params (for stored prompts)
+            // Extract model and completion ID from response object in chunk
             if (!modelFromResponse && chunk.response.model) {
               modelFromResponse = chunk.response.model
             }
+            if (!completionIdFromResponse && chunk.response.id) {
+              completionIdFromResponse = chunk.response.id
+            }
           }
           if (
             chunk.type === 'response.completed' &&
@@ -426,6 +443,7 @@ export class WrappedResponses extends AzureOpenAI.Responses {
             params: body,
             httpStatus: 200,
             usage,
+            completionId: completionIdFromResponse,
           })
         } catch (error: unknown) {
           const enrichedError = await sendEventWithErrorToPosthog({
@@ -471,6 +489,8 @@ export class WrappedResponses extends AzureOpenAI.Responses {
               reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
               cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
             },
+            completionId: result.id,
+            requestId: (result as any)._request_id, // x-request-id header exposed by the OpenAI SDK
           })
         }
         return result
@@ -535,6 +555,8 @@ export class WrappedResponses extends AzureOpenAI.Responses {
             reasoningTokens: result.usage?.output_tokens_details?.reasoning_tokens ?? 0,
             cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
           },
+          completionId: result.id,
+          requestId: (result as any)._request_id,
         })
         return result
       },
diff --git a/packages/ai/src/openai/index.ts b/packages/ai/src/openai/index.ts
index f5bbe124de..8af5ded001 100644
--- a/packages/ai/src/openai/index.ts
+++ b/packages/ai/src/openai/index.ts
@@ -120,6 +120,8 @@ export class WrappedCompletions extends Completions {
         const contentBlocks: FormattedContent = []
         let accumulatedContent = ''
         let modelFromResponse: string | undefined
+        let completionIdFromResponse: string | undefined
+        let systemFingerprintFromResponse: string | undefined
         let firstTokenTime: number | undefined
         let usage: {
           inputTokens?: number
@@ -145,10 +147,16 @@ export class WrappedCompletions extends Completions {
         let rawUsageData: unknown

         for await (const chunk of stream1) {
-          // Extract model from chunk (Chat Completions chunks have model field)
+          // Extract model and completion metadata from chunk
           if (!modelFromResponse && chunk.model) {
             modelFromResponse = chunk.model
           }
+          if (!completionIdFromResponse && chunk.id) {
+            completionIdFromResponse = chunk.id
+          }
+          if (systemFingerprintFromResponse === undefined && chunk.system_fingerprint) {
+            systemFingerprintFromResponse = chunk.system_fingerprint
+          }

           const choice = chunk?.choices?.[0]

@@ -274,6 +282,8 @@ export class WrappedCompletions extends Completions {
               rawUsage: rawUsageData,
             },
             tools: availableTools,
+            completionId: completionIdFromResponse,
+            systemFingerprint: systemFingerprintFromResponse,
           })
         } catch (error: unknown) {
           const enrichedError = await sendEventWithErrorToPosthog({
@@ -325,6 +335,9 @@ export class WrappedCompletions extends Completions {
               rawUsage: result.usage,
             },
             tools: availableTools,
+            completionId: result.id,
+            systemFingerprint: result.system_fingerprint,
+            requestId: (result as any)._request_id, // x-request-id header exposed by the OpenAI SDK
           })
         }
         return result
@@ -407,6 +420,7 @@ export class WrappedResponses extends Responses {
       try {
         let finalContent: unknown[] = []
         let modelFromResponse: string | undefined
+        let completionIdFromResponse: string | undefined
         let firstTokenTime: number | undefined
         let usage: {
           inputTokens?: number
@@ -428,10 +442,13 @@ export class WrappedResponses extends Responses {
           }

           if ('response' in chunk && chunk.response) {
-            // Extract model from response object in chunk (for stored prompts)
+            // Extract model and completion ID from response object in chunk
             if (!modelFromResponse && chunk.response.model) {
               modelFromResponse = chunk.response.model
             }
+            if (!completionIdFromResponse && chunk.response.id) {
+              completionIdFromResponse = chunk.response.id
+            }

             const chunkWebSearchCount = calculateWebSearchCount(chunk.response)
             if (chunkWebSearchCount > 0 && chunkWebSearchCount > (usage.webSearchCount ?? 0)) {
@@ -486,6 +503,7 @@ export class WrappedResponses extends Responses {
               rawUsage: rawUsageData,
             },
             tools: availableTools,
+            completionId: completionIdFromResponse,
           })
         } catch (error: unknown) {
           const enrichedError = await sendEventWithErrorToPosthog({
@@ -539,6 +557,8 @@ export class WrappedResponses extends Responses {
               rawUsage: result.usage,
             },
             tools: availableTools,
+            completionId: result.id,
+            requestId: (result as any)._request_id, // x-request-id header exposed by the OpenAI SDK
           })
         }
         return result
@@ -610,6 +630,8 @@ export class WrappedResponses extends Responses {
             cacheReadInputTokens: result.usage?.input_tokens_details?.cached_tokens ?? 0,
             rawUsage: result.usage,
           },
+          completionId: result.id,
+          requestId: (result as any)._request_id,
         })
         return result
       },
diff --git a/packages/ai/src/utils.ts b/packages/ai/src/utils.ts
index a5a13e94ea..760654eaf8 100644
--- a/packages/ai/src/utils.ts
+++ b/packages/ai/src/utils.ts
@@ -584,6 +584,9 @@ export type SendEventToPosthogParams = {
   exceptionId?: string
   tools?: ChatCompletionTool[] | AnthropicTool[] | GeminiTool[] | null
   captureImmediate?: boolean
+  completionId?: string
+  systemFingerprint?: string | null
+  requestId?: string
 }

 function sanitizeValues(obj: any): any {
@@ -690,6 +693,9 @@ export const sendEventToPosthog = async ({
   exceptionId,
   tools,
   captureImmediate = false,
+  completionId,
+  systemFingerprint,
+  requestId,
 }: SendEventToPosthogParams): Promise<void> => {
   if (!client.capture) {
     return Promise.resolve()
   }
@@ -746,6 +752,9 @@ export const sendEventToPosthog = async ({
     $ai_tokens_source: getTokensSource(params.posthogProperties),
     ...(distinctId ? {} : { $process_person_profile: false }),
     ...(tools ? { $ai_tools: tools } : {}),
+    ...(completionId ? { $ai_completion_id: completionId } : {}),
+    ...(systemFingerprint ? { $ai_system_fingerprint: systemFingerprint } : {}),
+    ...(requestId ? { $ai_request_id: requestId } : {}),
     ...errorData,
     ...costOverrideData,
   }
diff --git a/packages/ai/tests/openai.test.ts b/packages/ai/tests/openai.test.ts
index ce72a56cf0..d961e5031c 100644
--- a/packages/ai/tests/openai.test.ts
+++ b/packages/ai/tests/openai.test.ts
@@ -155,6 +155,7 @@ const createMockStreamChunks = (options: {
     model: 'gpt-4',
     object: 'chat.completion.chunk',
     created: Date.now() / 1000,
+    system_fingerprint: 'fp_stream_test',
   }

   if (options.content) {
@@ -283,10 +284,12 @@ describe('PostHogOpenAI - Jest test suite', () => {

     // Default chat completion mock for non-streaming responses
     mockOpenAiChatResponse = {
-      id: 'test-response-id',
+      id: 'chatcmpl-test-response-id',
       model: 'gpt-4',
       object: 'chat.completion',
       created: Date.now() / 1000,
+      system_fingerprint: 'fp_test123',
+      _request_id: 'req_test-request-id',
       choices: [
         {
           index: 0,
@@ -448,6 +451,9 @@ describe('PostHogOpenAI - Jest test suite', () => {
       expect(properties['foo']).toBe('bar')
       expect(typeof properties['$ai_latency']).toBe('number')
       expect(properties['$ai_usage']).toBeDefined()
+      expect(properties['$ai_completion_id']).toBe('chatcmpl-test-response-id')
+      expect(properties['$ai_system_fingerprint']).toBe('fp_test123')
+      expect(properties['$ai_request_id']).toBe('req_test-request-id')
     })

     conditionalTest('groups', async () => {
@@ -629,6 +635,7 @@ describe('PostHogOpenAI - Jest test suite', () => {
       expect(properties['$ai_http_status']).toBe(200)
       expect(properties['foo']).toBe('bar')
       expect(typeof properties['$ai_latency']).toBe('number')
+      expect(properties['$ai_completion_id']).toBe('test-parsed-response-id')
     })

     conditionalTest('responses parse with instructions parameter', async () => {
@@ -759,6 +766,9 @@ describe('PostHogOpenAI - Jest test suite', () => {
       expect(properties['$ai_input_tokens']).toBe(25)
       expect(properties['$ai_output_tokens']).toBe(15)
       expect(properties['streamTest']).toBe(true)
+      // Verify completion metadata from streaming chunks
+      expect(properties['$ai_completion_id']).toBe('chatcmpl-test')
+      expect(properties['$ai_system_fingerprint']).toBe('fp_stream_test')
     })

     conditionalTest('handles streaming with tool calls', async () => {
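Reviewer note: a minimal consumer-side sketch of where the new properties surface, assuming the standard `@posthog/ai` wrapper setup (the `OpenAI` wrapper plus a `posthog-node` client). The env var names, the `user_123` distinct ID, and the model name are placeholders for illustration, not part of this PR.

```ts
import { OpenAI } from '@posthog/ai'
import { PostHog } from 'posthog-node'

// Standard wrapper setup: each chat.completions.create() call below is
// captured as an $ai_generation event on this PostHog client.
const phClient = new PostHog(process.env.POSTHOG_API_KEY!, { host: 'https://us.i.posthog.com' })

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
  posthog: phClient,
})

async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
    posthogDistinctId: 'user_123',
  })

  // With this change, the captured $ai_generation event also carries:
  //   $ai_completion_id      <- completion.id (e.g. "chatcmpl-...")
  //   $ai_system_fingerprint <- completion.system_fingerprint
  //   $ai_request_id         <- the x-request-id header the OpenAI SDK exposes as _request_id
  // The completion id links straight to OpenAI's Logs dashboard:
  console.log(`https://platform.openai.com/logs/${completion.id}`)

  await phClient.shutdown()
}

void main()
```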