Skip to content

Commit ec2411a

Browse files
committed
Re-recorded AI SDK tests + added the AI SDK example
1 parent d1fbe6a commit ec2411a

28 files changed

Lines changed: 1188 additions & 1380 deletions

File tree

  • packages
    • sample-app/src
    • traceloop-sdk
      • recordings
        • Attachment-API-Integration-Tests_3751859535/Dataset-with-File-Column_1881713521/should-create-a-dataset-with-file-column-type_3148476545
        • Dataset-API-Comprehensive-Tests_1618738334
        • Test-AI-SDK-Agent-Integration-with-Recording_2039949225
          • should-preserve-original-AI-SDK-span-name-when-no-agent-metadata-is-provided_1735519430
          • should-propagate-agent-name-to-tool-call-spans_3577231859
          • should-properly-scope-agent-names-in-nested-agent-scenarios_1670012146
          • should-use-agent-name-for-generateObject-with-agent-metadata_1744675110
          • should-use-agent-name-for-streamText-with-agent-metadata_4019571713
        • Test-AI-SDK-Integration-with-Recording_156038438
          • should-capture-OpenAI-provider-spans-correctly-with-recording_3593617962
          • should-capture-and-transform-OpenAI-cache-tokens-from-providerMetadata_2332139343
          • should-set-LLM_INPUT_MESSAGES-and-LLM_OUTPUT_MESSAGES-attributes-for-chat-completions_99541399
        • Test-Agent-Decorator_2969879889
          • should-create-spans-for-agents-using-decoration-syntax_1932039671
          • should-create-spans-for-agents-using-withAgent-syntax_3895564654
          • should-propagate-agent-name-to-manual-LLM-instrumentation_2332462647
        • Test-SDK-Decorators_847855269
          • should-create-spans-for-manual-LLM-instrumentation_981493419
          • should-create-spans-for-workflows-using-decoration-syntax-method-variant_2462514347
          • should-create-spans-for-workflows-using-decoration-syntax_3330947443
          • should-create-spans-for-workflows-using-withWorkflow-syntax_3788948678
          • should-create-workflow-and-tasks-spans-with-chained-entity-names_971051426
          • should-fix-Vercel-AI-spans-to-match-OpenLLMetry-format_2061519753
          • should-not-create-spans-if-suppressed_3154458667
          • should-not-log-prompts-if-traceContent-is-disabled_2300077433
          • should-not-mix-association-properties-for-traces-that-run-in-parallel_4012223284
      • test/ai-sdk
Lines changed: 295 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,295 @@
1+
/**
2+
* Vercel AI SDK - OTel 1.40 Migration Validation Sample
3+
*
4+
* Covers all major use cases to validate span shapes after the OTel 1.40 migration:
5+
* 1. generateText - OpenAI + Anthropic (basic chat)
6+
* 2. streamText - OpenAI streaming
7+
* 3. generateObject - structured output
8+
* 4. generateText with tools + multi-step (agentic loop)
9+
* 5. Agent span naming via ai.telemetry.metadata.agent
10+
* 6. Conversation ID propagation
11+
*
12+
* Run via run.sh (sets OPENAI_API_KEY, ANTHROPIC_API_KEY, TRACELOOP_API_KEY).
13+
*
14+
* Key attributes to verify in Traceloop dashboard:
15+
* - gen_ai.provider.name → "openai" / "anthropic" (OTel 1.40 lowercase)
16+
* - gen_ai.request.model → model name
17+
* - gen_ai.operation.name → "chat" / "execute_tool"
18+
* - gen_ai.input.messages → [{role, parts:[{type:"text",content:...}]}]
19+
* - gen_ai.output.messages → [{role,finish_reason,parts:[...]}]
20+
* - gen_ai.usage.input_tokens / output_tokens
21+
* - gen_ai.response.finish_reasons → ["stop"] / ["tool_call"]
22+
* - gen_ai.tool.definitions → JSON array of tool objects (source format)
23+
* - Span name format: "chat {model}" / "execute_tool {toolName}"
24+
*/
25+
26+
import * as traceloop from "@traceloop/node-server-sdk";
27+
import { openai } from "@ai-sdk/openai";
28+
import { anthropic } from "@ai-sdk/anthropic";
29+
import {
30+
generateText,
31+
streamText,
32+
generateObject,
33+
tool,
34+
stepCountIs,
35+
} from "ai";
36+
import { z } from "zod";
37+
38+
// Traceloop must be initialized before any AI SDK call so that the
// instrumentation patches are in place when spans are created.
// disableBatch flushes each span immediately — convenient for a short sample.
const traceloopConfig = {
  appName: "sample_vercel_ai_otel140",
  disableBatch: true,
};
traceloop.initialize(traceloopConfig);
42+
43+
// ─── Shared tools ──────────────────────────────────────────────────────────
44+
45+
// Weather-lookup tool shared by the agent scenarios. Returns canned data so
// runs are deterministic; logs each invocation so tool calls can be matched
// against the emitted execute_tool spans.
const getWeather = tool({
  description: "Get current weather for a location",
  inputSchema: z.object({
    location: z.string().describe("City name"),
    units: z.enum(["celsius", "fahrenheit"]).optional().default("celsius"),
  }),
  execute: async ({ location, units }) => {
    console.log(` [tool] getWeather(${location}, ${units})`);
    const temperature = units === "celsius" ? 22 : 72;
    return { location, temperature, condition: "Sunny", humidity: 55 };
  },
});
61+
62+
// Web-search tool stub: echoes the query back inside two fake result links so
// the agentic loop can exercise a second tool without any network access.
const searchWeb = tool({
  description: "Search the web for information",
  inputSchema: z.object({
    query: z.string().describe("Search query"),
  }),
  execute: async ({ query }) => {
    console.log(` [tool] searchWeb("${query}")`);
    const results = [1, 2].map((i) => ({
      title: `Result ${i} for: ${query}`,
      url: `https://example.com/${i}`,
    }));
    return { results };
  },
});
77+
78+
// ─── 1. Basic generateText — OpenAI ────────────────────────────────────────
79+
80+
/**
 * Scenario 1: basic generateText against OpenAI, wrapped in a Traceloop
 * workflow span named "openai_basic_chat". Logs the response text and token
 * usage so the run can be eyeballed against the dashboard.
 */
async function testGenerateTextOpenAI() {
  console.log("\n▶ [1] generateText — OpenAI");

  const result = await traceloop.withWorkflow(
    { name: "openai_basic_chat" },
    async () =>
      generateText({
        model: openai("gpt-4o-mini"),
        messages: [
          { role: "system", content: "You are a concise assistant." },
          { role: "user", content: "What is the capital of France?" },
        ],
        experimental_telemetry: {
          isEnabled: true,
          metadata: { scenario: "basic_chat", provider: "openai" },
        },
      }),
  );

  console.log(` Response: ${result.text}`);
  // Fix: AI SDK v5 (this file uses v5 APIs — `inputSchema`, `stepCountIs`)
  // renamed usage fields promptTokens/completionTokens → inputTokens/
  // outputTokens; the old names are undefined and logged "undefined in / out".
  console.log(
    ` Tokens: ${result.usage.inputTokens} in / ${result.usage.outputTokens} out`,
  );
}
104+
105+
// ─── 2. Basic generateText — Anthropic ─────────────────────────────────────
106+
107+
/**
 * Scenario 2: basic generateText against Anthropic, wrapped in a Traceloop
 * workflow span named "anthropic_basic_chat". Mirrors scenario 1 so the two
 * providers' span shapes can be compared side by side.
 */
async function testGenerateTextAnthropic() {
  console.log("\n▶ [2] generateText — Anthropic");

  const result = await traceloop.withWorkflow(
    { name: "anthropic_basic_chat" },
    async () =>
      generateText({
        model: anthropic("claude-haiku-4-5"),
        messages: [
          { role: "user", content: "What is the capital of Germany?" },
        ],
        experimental_telemetry: {
          isEnabled: true,
          metadata: { scenario: "basic_chat", provider: "anthropic" },
        },
      }),
  );

  console.log(` Response: ${result.text}`);
  // Fix: AI SDK v5 usage fields are inputTokens/outputTokens; the v4 names
  // promptTokens/completionTokens used previously are undefined here.
  console.log(
    ` Tokens: ${result.usage.inputTokens} in / ${result.usage.outputTokens} out`,
  );
}
130+
131+
// ─── 3. streamText — OpenAI ─────────────────────────────────────────────────
132+
133+
/**
 * Scenario 3: streaming chat completion via streamText. Consumes the token
 * stream chunk by chunk, echoing it live to stdout, then reports the total
 * number of characters streamed.
 */
async function testStreamText() {
  console.log("\n▶ [3] streamText — OpenAI");

  const result = await traceloop.withWorkflow(
    { name: "openai_stream" },
    async () => {
      const stream = streamText({
        model: openai("gpt-4o-mini"),
        prompt: "Count from 1 to 5, one number per line.",
        experimental_telemetry: {
          isEnabled: true,
          metadata: { scenario: "streaming" },
        },
      });

      process.stdout.write(" Stream: ");
      const pieces = [];
      for await (const chunk of stream.textStream) {
        process.stdout.write(chunk);
        pieces.push(chunk);
      }
      console.log();
      return pieces.join("");
    },
  );

  console.log(` Streamed ${result.length} chars`);
}
161+
162+
// ─── 4. generateObject — structured output ──────────────────────────────────
163+
164+
/**
 * Scenario 4: structured output via generateObject, validated against a zod
 * schema describing a city fact sheet. Prints the parsed object.
 */
async function testGenerateObject() {
  console.log("\n▶ [4] generateObject — OpenAI structured output");

  // Schema hoisted to a named constant for readability.
  const cityFacts = z.object({
    city: z.string(),
    country: z.string(),
    population: z.number(),
    famousFor: z.array(z.string()).max(3),
  });

  const result = await traceloop.withWorkflow(
    { name: "openai_structured_output" },
    async () =>
      generateObject({
        model: openai("gpt-4o-mini"),
        schema: cityFacts,
        prompt: "Give me facts about Paris, France.",
        experimental_telemetry: {
          isEnabled: true,
          metadata: { scenario: "structured_output" },
        },
      }),
  );

  console.log(` Object:`, result.object);
}
188+
189+
// ─── 5. generateText with tools — multi-step agent loop ─────────────────────
190+
191+
/**
 * Scenario 5: agentic multi-step loop — the model may call getWeather and
 * searchWeb across up to four steps, all under the "travel_researcher" agent
 * span (set both via withAgent and the telemetry metadata).
 */
async function testToolsOpenAI() {
  console.log("\n▶ [5] generateText + tools (multi-step) — OpenAI");

  const runAgent = () =>
    generateText({
      model: openai("gpt-4o-mini"),
      prompt:
        "What's the weather in Tokyo right now? Also search for 'best things to do in Tokyo'.",
      tools: { getWeather, searchWeb },
      stopWhen: stepCountIs(4),
      experimental_telemetry: {
        isEnabled: true,
        metadata: {
          agent: "travel_researcher",
          scenario: "multi_step_tools",
        },
      },
    });

  await traceloop.withAgent({ name: "travel_researcher" }, runAgent);

  console.log(" Agent completed tool-use loop");
}
213+
214+
// ─── 6. generateText with tools — Anthropic ─────────────────────────────────
215+
216+
/**
 * Scenario 6: single-tool agent loop on Anthropic (up to three steps) under
 * the "weather_agent_anthropic" agent span.
 */
async function testToolsAnthropic() {
  console.log("\n▶ [6] generateText + tools — Anthropic");

  const runAgent = () =>
    generateText({
      model: anthropic("claude-haiku-4-5"),
      prompt: "What is the weather like in London?",
      tools: { getWeather },
      stopWhen: stepCountIs(3),
      experimental_telemetry: {
        isEnabled: true,
        metadata: {
          agent: "weather_agent_anthropic",
          scenario: "tool_call_anthropic",
        },
      },
    });

  await traceloop.withAgent({ name: "weather_agent_anthropic" }, runAgent);

  console.log(" Anthropic agent completed");
}
237+
238+
// ─── 7. Conversation ID propagation ─────────────────────────────────────────
239+
240+
/**
 * Scenario 7: two-turn conversation where both generateText calls carry the
 * same conversationId in telemetry metadata, so the spans can be joined into
 * one conversation in the dashboard.
 */
async function testConversationId() {
  console.log("\n▶ [7] Conversation ID — OpenAI multi-turn");

  const conversationId = `conv-${Date.now()}`;

  await traceloop.withWorkflow({ name: "multi_turn_chat" }, async () => {
    // Both turns share the conversationId; only the turn label differs.
    const telemetryFor = (turn) => ({
      isEnabled: true,
      metadata: { conversationId, turn },
    });

    const history = [{ role: "user", content: "My name is Alice." }];

    const firstTurn = await generateText({
      model: openai("gpt-4o-mini"),
      messages: history,
      experimental_telemetry: telemetryFor("1"),
    });
    console.log(` Turn 1: ${firstTurn.text}`);

    // Extend the transcript with the assistant reply and the follow-up.
    history.push(
      { role: "assistant", content: firstTurn.text },
      { role: "user", content: "What is my name?" },
    );

    const secondTurn = await generateText({
      model: openai("gpt-4o-mini"),
      messages: history,
      experimental_telemetry: telemetryFor("2"),
    });
    console.log(` Turn 2: ${secondTurn.text}`);
  });
}
271+
272+
// ─── main ────────────────────────────────────────────────────────────────────
273+
274+
/**
 * Entry point: runs every scenario one after another so the resulting traces
 * stay cleanly separated in the Traceloop dashboard.
 */
async function main() {
  const rule = "=".repeat(60);
  console.log(rule);
  console.log(" Vercel AI SDK — OTel 1.40 Migration Validation");
  console.log(rule);

  const scenarios = [
    testGenerateTextOpenAI,
    testGenerateTextAnthropic,
    testStreamText,
    testGenerateObject,
    testToolsOpenAI,
    testToolsAnthropic,
    testConversationId,
  ];
  // Deliberately sequential — parallel runs would interleave console output
  // and spans from different scenarios.
  for (const scenario of scenarios) {
    await scenario();
  }

  console.log("\n" + rule);
  console.log(" All scenarios complete — check Traceloop dashboard");
  console.log(rule);
}
291+
292+
// Top-level runner: surface any failure and exit non-zero so CI notices.
main().catch((error) => {
  console.error("Fatal error:", error);
  process.exit(1);
});

0 commit comments

Comments
 (0)