Skip to content

Commit 5a85b6c

Browse files
committed
enableTruncation for vercel
1 parent 5a7de44 commit 5a85b6c

File tree

7 files changed

+106
-16
lines changed

7 files changed

+106
-16
lines changed
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
// Instrumentation file for the integration test: initializes the Sentry Node SDK
// with the Vercel AI integration configured to NOT truncate recorded input messages.
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  // Routes envelopes to stdout so the test runner can assert on emitted events.
  transport: loggingTransport,
  integrations: [
    Sentry.vercelAIIntegration({
      recordInputs: true,
      recordOutputs: true,
      // Under test: disable truncation so full input messages are kept on spans.
      enableTruncation: false,
    }),
  ],
});
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import * as Sentry from '@sentry/node';
2+
import { generateText } from 'ai';
3+
import { MockLanguageModelV1 } from 'ai/test';
4+
5+
async function run() {
6+
await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
7+
// Multiple messages with long content (would normally be truncated and popped to last message only)
8+
const longContent = 'A'.repeat(50_000);
9+
await generateText({
10+
experimental_telemetry: { isEnabled: true },
11+
model: new MockLanguageModelV1({
12+
doGenerate: async () => ({
13+
rawCall: { rawPrompt: null, rawSettings: {} },
14+
finishReason: 'stop',
15+
usage: { promptTokens: 10, completionTokens: 5 },
16+
text: 'Response',
17+
}),
18+
}),
19+
messages: [
20+
{ role: 'user', content: longContent },
21+
{ role: 'assistant', content: 'Some reply' },
22+
{ role: 'user', content: 'Follow-up question' },
23+
],
24+
});
25+
});
26+
}
27+
28+
run();

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -950,4 +950,37 @@ describe('Vercel AI integration', () => {
950950
.completed();
951951
});
952952
});
// Mirrors the 50_000-character message generated in scenario-no-truncation.mjs,
// so the expectation below can assert byte-for-byte equality of the payload.
const longContent = 'A'.repeat(50_000);

createEsmAndCjsTests(
  __dirname,
  'scenario-no-truncation.mjs',
  'instrument-no-truncation.mjs',
  (createRunner, test) => {
    test('does not truncate input messages when enableTruncation is false', async () => {
      await createRunner()
        .expect({
          transaction: {
            transaction: 'main',
            spans: expect.arrayContaining([
              // Multiple messages should all be preserved (no popping to last message only)
              expect.objectContaining({
                data: expect.objectContaining({
                  // Full, untruncated JSON of all three messages is expected.
                  [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
                    { role: 'user', content: longContent },
                    { role: 'assistant', content: 'Some reply' },
                    { role: 'user', content: 'Follow-up question' },
                  ]),
                  // Original message count is always recorded alongside the payload.
                  [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
                }),
              }),
            ]),
          },
        })
        .start()
        .completed();
    });
  },
);
953986
});

packages/core/src/tracing/vercel-ai/index.ts

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ function mapVercelAiOperationName(operationName: string): string {
9494
* Post-process spans emitted by the Vercel AI SDK.
9595
* This is supposed to be used in `client.on('spanStart', ...)
9696
*/
97-
function onVercelAiSpanStart(span: Span): void {
97+
function onVercelAiSpanStart(span: Span, enableTruncation: boolean): void {
9898
const { data: attributes, description: name } = spanToJSON(span);
9999

100100
if (!name) {
@@ -114,7 +114,7 @@ function onVercelAiSpanStart(span: Span): void {
114114
return;
115115
}
116116

117-
processGenerateSpan(span, name, attributes);
117+
processGenerateSpan(span, name, attributes, enableTruncation);
118118
}
119119

120120
function vercelAiEventProcessor(event: Event): Event {
@@ -396,7 +396,7 @@ function processToolCallSpan(span: Span, attributes: SpanAttributes): void {
396396
}
397397
}
398398

399-
function processGenerateSpan(span: Span, name: string, attributes: SpanAttributes): void {
399+
function processGenerateSpan(span: Span, name: string, attributes: SpanAttributes, enableTruncation: boolean): void {
400400
span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, 'auto.vercelai.otel');
401401

402402
const nameWthoutAi = name.replace('ai.', '');
@@ -408,7 +408,7 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute
408408
span.setAttribute('gen_ai.function_id', functionId);
409409
}
410410

411-
requestMessagesFromPrompt(span, attributes);
411+
requestMessagesFromPrompt(span, attributes, enableTruncation);
412412

413413
if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) {
414414
span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
@@ -444,8 +444,9 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute
444444
/**
 * Add event processors to the given client to process Vercel AI spans.
 *
 * @param client - Sentry client to attach the span-start hook and event processor to.
 * @param options - Optional settings; `enableTruncation` controls whether recorded
 *   AI input messages are truncated. Defaults to `true`, preserving the previous
 *   behavior for existing callers that pass no options.
 */
export function addVercelAiProcessors(client: Client, options?: { enableTruncation?: boolean }): void {
  const enableTruncation = options?.enableTruncation ?? true;
  client.on('spanStart', span => onVercelAiSpanStart(span, enableTruncation));
  // Note: We cannot do this on `spanEnd`, because the span cannot be mutated anymore at this point
  client.addEventProcessor(Object.assign(vercelAiEventProcessor, { id: 'VercelAiEventProcessor' }));
}

packages/core/src/tracing/vercel-ai/utils.ts

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ import {
1616
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
1717
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
1818
} from '../ai/gen-ai-attributes';
19-
import { extractSystemInstructions, getTruncatedJsonString } from '../ai/utils';
19+
import { extractSystemInstructions, getJsonString, getTruncatedJsonString } from '../ai/utils';
2020
import { toolCallSpanContextMap } from './constants';
2121
import type { TokenSummary, ToolCallSpanContext } from './types';
2222
import { AI_PROMPT_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE } from './vercel-ai-attributes';
@@ -227,7 +227,7 @@ export function convertUserInputToMessagesFormat(userInput: string): { role: str
227227
* Generate a request.messages JSON array from the prompt field in the
228228
* invoke_agent op
229229
*/
230-
export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void {
230+
export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes, enableTruncation: boolean): void {
231231
if (
232232
typeof attributes[AI_PROMPT_ATTRIBUTE] === 'string' &&
233233
!attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] &&
@@ -247,11 +247,13 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
247247
}
248248

249249
const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
250-
const truncatedMessages = getTruncatedJsonString(filteredMessages);
250+
const messagesJson = enableTruncation
251+
? getTruncatedJsonString(filteredMessages)
252+
: getJsonString(filteredMessages);
251253

252254
span.setAttributes({
253-
[AI_PROMPT_ATTRIBUTE]: truncatedMessages,
254-
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
255+
[AI_PROMPT_ATTRIBUTE]: messagesJson,
256+
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: messagesJson,
255257
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
256258
});
257259
}
@@ -268,11 +270,13 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
268270
}
269271

270272
const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
271-
const truncatedMessages = getTruncatedJsonString(filteredMessages);
273+
const messagesJson = enableTruncation
274+
? getTruncatedJsonString(filteredMessages)
275+
: getJsonString(filteredMessages);
272276

273277
span.setAttributes({
274-
[AI_PROMPT_MESSAGES_ATTRIBUTE]: truncatedMessages,
275-
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
278+
[AI_PROMPT_MESSAGES_ATTRIBUTE]: messagesJson,
279+
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: messagesJson,
276280
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
277281
});
278282
}

packages/node/src/integrations/tracing/vercelai/index.ts

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,10 +30,11 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
3030
// Note that this can only be detected if the 'Modules' integration is available, and running in CJS mode
3131
const shouldForce = options.force ?? shouldForceIntegration(client);
3232

33+
const processorOptions = { enableTruncation: options.enableTruncation };
3334
if (shouldForce) {
34-
addVercelAiProcessors(client);
35+
addVercelAiProcessors(client, processorOptions);
3536
} else {
36-
instrumentation?.callWhenPatched(() => addVercelAiProcessors(client));
37+
instrumentation?.callWhenPatched(() => addVercelAiProcessors(client, processorOptions));
3738
}
3839
},
3940
};

packages/node/src/integrations/tracing/vercelai/types.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,12 @@ export interface VercelAiOptions {
6262
* If you want to register the span processors even when the ai package usage cannot be detected, you can set `force` to `true`.
6363
*/
6464
force?: boolean;
65+
66+
/**
67+
* Enable or disable truncation of recorded input messages.
68+
* Defaults to `true`.
69+
*/
70+
enableTruncation?: boolean;
6571
}
6672

6773
export interface VercelAiIntegration extends Integration {

0 commit comments

Comments
 (0)