Skip to content

Commit aec3a56

Browse files
committed
Some fixes
1 parent 6842a50 commit aec3a56

File tree

2 files changed

+54
-0
lines changed

2 files changed

+54
-0
lines changed

packages/instrumentation-llamaindex/src/custom-llm-instrumentation.ts

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ import {
2424
ATTR_GEN_AI_REQUEST_MODEL,
2525
ATTR_GEN_AI_REQUEST_TOP_P,
2626
ATTR_GEN_AI_RESPONSE_FINISH_REASONS,
27+
ATTR_GEN_AI_RESPONSE_ID,
2728
ATTR_GEN_AI_RESPONSE_MODEL,
2829
ATTR_GEN_AI_USAGE_INPUT_TOKENS,
2930
ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
@@ -48,6 +49,8 @@ type AsyncResponseType =
4849

4950
const classNameToProviderName: Record<string, string> = {
5051
OpenAI: GEN_AI_PROVIDER_NAME_VALUE_OPENAI,
52+
// Future providers: Anthropic: "anthropic", Gemini: "gcp.gemini", etc.
53+
// See well-known values: https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-provider-name
5154
};
5255

5356
export const openAIFinishReasonMap: Record<string, string> = {
@@ -151,6 +154,9 @@ export class CustomLLMInstrumentation {
151154

152155
try {
153156
const raw = (result as any).raw;
157+
if (raw?.id) {
158+
span.setAttribute(ATTR_GEN_AI_RESPONSE_ID, raw.id);
159+
}
154160
const finishReason: string | null =
155161
raw?.choices?.[0]?.finish_reason ?? null;
156162

@@ -219,6 +225,9 @@ export class CustomLLMInstrumentation {
219225
// response — available when stream_options: { include_usage: true }
220226
// is set on the LLM (OpenAI sends usage in the final streaming chunk).
221227
const lastRaw = lastChunk?.raw as any;
228+
if (lastRaw?.id) {
229+
span.setAttribute(ATTR_GEN_AI_RESPONSE_ID, lastRaw.id);
230+
}
222231
const finishReason: string | null =
223232
lastRaw?.choices?.[0]?.finish_reason ?? null;
224233
const usage = lastRaw?.usage ?? null;
@@ -244,6 +253,16 @@ export class CustomLLMInstrumentation {
244253
);
245254
}
246255

256+
if (!finishReason && !usage) {
257+
this.diag.debug(
258+
"LlamaIndex streaming: no finish_reason or usage in last chunk. " +
259+
"Set stream_options: { include_usage: true } on the LLM to capture token usage.",
260+
);
261+
}
262+
263+
// Note: streaming only produces text parts — LlamaIndex's streaming interface
264+
// yields text deltas only, not full content blocks. Tool calls or multi-modal
265+
// content are collapsed into a single text string by llmGeneratorWrapper.
247266
if (shouldSendPrompts(this.config())) {
248267
span.setAttribute(
249268
ATTR_GEN_AI_OUTPUT_MESSAGES,

packages/instrumentation-llamaindex/test/instrumentation.test.ts

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ import {
3131
ATTR_GEN_AI_PROVIDER_NAME,
3232
ATTR_GEN_AI_REQUEST_MODEL,
3333
ATTR_GEN_AI_RESPONSE_FINISH_REASONS,
34+
ATTR_GEN_AI_RESPONSE_ID,
3435
ATTR_GEN_AI_RESPONSE_MODEL,
3536
ATTR_GEN_AI_USAGE_INPUT_TOKENS,
3637
ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
@@ -235,6 +236,7 @@ function makeMockChat(options: {
235236
return {
236237
message: { role: "assistant", content: responseContent },
237238
raw: {
239+
id: "chatcmpl-test123",
238240
choices: [{ finish_reason: finishReason }],
239241
usage: {
240242
prompt_tokens: promptTokens,
@@ -264,6 +266,7 @@ function makeMockChatWithStreamUsage(options: {
264266
yield {
265267
delta: responseContent,
266268
raw: {
269+
id: "chatcmpl-test123",
267270
choices: [{ finish_reason: finishReason }],
268271
usage: {
269272
prompt_tokens: promptTokens,
@@ -436,6 +439,38 @@ describe("CustomLLMInstrumentation — OTel 1.40 attributes", () => {
436439
);
437440
});
438441

442+
it("sets gen_ai.response.id", async () => {
443+
const instr = makeInstrumentation();
444+
const chat = makeMockChat({});
445+
const wrapped = instr.chatWrapper({ className: "OpenAI" })(chat as any);
446+
await wrapped.call(
447+
{ metadata: mockLLMMeta },
448+
{ messages: [{ role: "user", content: "hi" }] },
449+
);
450+
451+
const span = otelExporter.getFinishedSpans()[0];
452+
assert.strictEqual(
453+
span.attributes[ATTR_GEN_AI_RESPONSE_ID],
454+
"chatcmpl-test123",
455+
);
456+
});
457+
458+
it("unknown finish_reason passes through as-is to span attribute", async () => {
459+
const instr = makeInstrumentation();
460+
const chat = makeMockChat({ finishReason: "some_future_reason" });
461+
const wrapped = instr.chatWrapper({ className: "OpenAI" })(chat as any);
462+
await wrapped.call(
463+
{ metadata: mockLLMMeta },
464+
{ messages: [{ role: "user", content: "hi" }] },
465+
);
466+
467+
const span = otelExporter.getFinishedSpans()[0];
468+
assert.deepStrictEqual(
469+
span.attributes[ATTR_GEN_AI_RESPONSE_FINISH_REASONS],
470+
["some_future_reason"],
471+
);
472+
});
473+
439474
it("sets token usage attributes", async () => {
440475
const instr = makeInstrumentation();
441476
const chat = makeMockChat({ promptTokens: 10, completionTokens: 5 });

0 commit comments

Comments
 (0)