79 changes: 79 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/scenario-manual-conversation-id.mjs
@@ -0,0 +1,79 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

function startMockServer() {
  const app = express();
  app.use(express.json());

  // Chat completions endpoint
  app.post('/openai/chat/completions', (req, res) => {
    const { model } = req.body;

    res.send({
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: model,
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Mock response from OpenAI',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 15,
        total_tokens: 25,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

async function run() {
  const server = await startMockServer();

  // Test: Multiple chat completions in the same conversation with manual conversation ID
  await Sentry.startSpan({ op: 'function', name: 'chat-with-manual-conversation-id' }, async () => {
    const client = new OpenAI({
      baseURL: `http://localhost:${server.address().port}/openai`,
      apiKey: 'mock-api-key',
    });

    // Set conversation ID manually using Sentry API
    Sentry.setConversationId('user_chat_session_abc123');

    // First message in the conversation
    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'What is the capital of France?' }],
    });

    // Second message in the same conversation
    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Tell me more about it' }],
    });

    // Third message in the same conversation
    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'What is its population?' }],
    });
  });

  server.close();
  await Sentry.flush(2000);
}

run();
74 changes: 74 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/scenario-separate-scope-1.mjs
@@ -0,0 +1,74 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

function startMockServer() {
  const app = express();
  app.use(express.json());

  // Chat completions endpoint
  app.post('/openai/chat/completions', (req, res) => {
    const { model } = req.body;

    res.send({
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: model,
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Mock response from OpenAI',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 15,
        total_tokens: 25,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

async function run() {
  const server = await startMockServer();
  const client = new OpenAI({
    baseURL: `http://localhost:${server.address().port}/openai`,
    apiKey: 'mock-api-key',
  });

  // First request/conversation scope
  await Sentry.withScope(async scope => {
    // Set conversation ID for this request scope BEFORE starting the span
    scope.setConversationId('conv_user1_session_abc');

    await Sentry.startSpan({ op: 'http.server', name: 'GET /chat/conversation-1' }, async () => {
      // First message in conversation 1
      await client.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Hello from conversation 1' }],
      });

      // Second message in conversation 1
      await client.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Follow-up in conversation 1' }],
      });
    });
  });

  server.close();
  await Sentry.flush(2000);
}

run();
74 changes: 74 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/scenario-separate-scope-2.mjs
@@ -0,0 +1,74 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

function startMockServer() {
  const app = express();
  app.use(express.json());

  // Chat completions endpoint
  app.post('/openai/chat/completions', (req, res) => {
    const { model } = req.body;

    res.send({
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: model,
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Mock response from OpenAI',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 15,
        total_tokens: 25,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

async function run() {
  const server = await startMockServer();
  const client = new OpenAI({
    baseURL: `http://localhost:${server.address().port}/openai`,
    apiKey: 'mock-api-key',
  });

  // Second request/conversation scope (completely separate)
  await Sentry.withScope(async scope => {
    // Set different conversation ID for this request scope BEFORE starting the span
    scope.setConversationId('conv_user2_session_xyz');

    await Sentry.startSpan({ op: 'http.server', name: 'GET /chat/conversation-2' }, async () => {
      // First message in conversation 2
      await client.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Hello from conversation 2' }],
      });

      // Second message in conversation 2
      await client.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Follow-up in conversation 2' }],
      });
    });
  });

  server.close();
  await Sentry.flush(2000);
}

run();
140 changes: 140 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/test.ts
@@ -720,4 +720,144 @@ describe('OpenAI integration', () => {
        .completed();
    });
  });

  // Test for manual conversation ID setting using setConversationId()
  const EXPECTED_TRANSACTION_MANUAL_CONVERSATION_ID = {
    transaction: 'chat-with-manual-conversation-id',
    spans: expect.arrayContaining([
      // All three chat completion spans should have the same manually-set conversation ID
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'user_chat_session_abc123',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'user_chat_session_abc123',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'user_chat_session_abc123',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
    ]),
  };

  createEsmAndCjsTests(__dirname, 'scenario-manual-conversation-id.mjs', 'instrument.mjs', (createRunner, test) => {
    test('attaches manual conversation ID set via setConversationId() to all chat spans', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_MANUAL_CONVERSATION_ID })
        .start()
        .completed();
    });
  });

  // Test for scope isolation - different scopes have different conversation IDs
  const EXPECTED_TRANSACTION_CONVERSATION_1 = {
    transaction: 'GET /chat/conversation-1',
    spans: expect.arrayContaining([
      // Both chat completion spans in conversation 1 should have conv_user1_session_abc
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'conv_user1_session_abc',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'conv_user1_session_abc',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
    ]),
  };

  const EXPECTED_TRANSACTION_CONVERSATION_2 = {
    transaction: 'GET /chat/conversation-2',
    spans: expect.arrayContaining([
      // Both chat completion spans in conversation 2 should have conv_user2_session_xyz
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'conv_user2_session_xyz',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.conversation.id': 'conv_user2_session_xyz',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-4',
          'sentry.op': 'gen_ai.chat',
        }),
        description: 'chat gpt-4',
        op: 'gen_ai.chat',
        origin: 'auto.ai.openai',
        status: 'ok',
      }),
    ]),
  };

  createEsmAndCjsTests(__dirname, 'scenario-separate-scope-1.mjs', 'instrument.mjs', (createRunner, test) => {
    test('isolates conversation IDs across separate scopes - conversation 1', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_CONVERSATION_1 })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario-separate-scope-2.mjs', 'instrument.mjs', (createRunner, test) => {
    test('isolates conversation IDs across separate scopes - conversation 2', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_CONVERSATION_2 })
        .start()
        .completed();
    });
  });
});
1 change: 1 addition & 0 deletions packages/astro/src/index.server.ts
@@ -114,6 +114,7 @@ export {
  SEMANTIC_ATTRIBUTE_SENTRY_SAMPLE_RATE,
  SEMANTIC_ATTRIBUTE_SENTRY_SOURCE,
  setContext,
  setConversationId,
  setCurrentClient,
  setExtra,
  setExtras,
1 change: 1 addition & 0 deletions packages/aws-serverless/src/index.ts
@@ -25,6 +25,7 @@ export {
  Scope,
  SDK_VERSION,
  setContext,
  setConversationId,
  setExtra,
  setExtras,
  setTag,
1 change: 1 addition & 0 deletions packages/bun/src/index.ts
@@ -48,6 +48,7 @@ export {
  Scope,
  SDK_VERSION,
  setContext,
  setConversationId,
  setExtra,
  setExtras,
  setTag,
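For orientation, here is a minimal sketch of how the re-exported setConversationId might be used outside the test harness above, for example in an Express handler instrumented with @sentry/node. It is an assumption-laden illustration, not part of this change: the /chat route, the conversationId request field, and the environment-variable API key are hypothetical; only Sentry.withScope, scope.setConversationId, and the OpenAI chat completions call mirror the code in the diff.

import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

const app = express();
app.use(express.json());

// Hypothetical production-style client; the test scenarios above point this at a local mock server instead.
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

app.post('/chat', (req, res) => {
  // Isolate each request in its own scope so concurrent users' conversation IDs never mix,
  // mirroring the separate-scope scenarios above.
  Sentry.withScope(async scope => {
    // `conversationId` is a hypothetical field the caller sends with every turn of the conversation.
    scope.setConversationId(req.body.conversationId);

    const completion = await client.chat.completions.create({
      model: 'gpt-4',
      messages: req.body.messages,
    });

    res.json(completion.choices[0].message);
  });
});

app.listen(3000);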