-
-
Notifications
You must be signed in to change notification settings - Fork 1.8k
Expand file tree
/
Copy pathscenario-conversation.mjs
More file actions
95 lines (82 loc) · 2.46 KB
/
scenario-conversation.mjs
File metadata and controls
95 lines (82 loc) · 2.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';
/**
 * Starts a throwaway Express server that stubs the two OpenAI endpoints
 * this scenario exercises: Conversations (create) and Responses (create).
 *
 * @returns {Promise<import('http').Server>} the listening HTTP server,
 *   bound to an ephemeral port (read it via `server.address().port`).
 */
function startMockServer() {
  const app = express();
  app.use(express.json());

  // Conversations API stub: always replies with the same canned conversation.
  app.post('/openai/conversations', (_req, res) => {
    const cannedConversation = {
      id: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
      object: 'conversation',
      created_at: 1704067200,
      metadata: {},
    };
    res.send(cannedConversation);
  });

  // Responses API stub: echoes back the conversation / previous_response_id
  // the client sent, so the caller can verify they were threaded through.
  app.post('/openai/responses', (req, res) => {
    const { model, conversation, previous_response_id } = req.body;
    const outputMessage = {
      type: 'message',
      id: 'msg_mock_output_1',
      status: 'completed',
      role: 'assistant',
      content: [
        {
          type: 'output_text',
          text: `Response with conversation: ${conversation || 'none'}, previous_response_id: ${previous_response_id || 'none'}`,
          annotations: [],
        },
      ],
    };
    res.send({
      id: 'resp_mock_conv_123',
      object: 'response',
      created_at: 1704067210,
      model,
      output: [outputMessage],
      output_text: `Response with conversation: ${conversation || 'none'}`,
      status: 'completed',
      usage: {
        input_tokens: 10,
        output_tokens: 15,
        total_tokens: 25,
      },
    });
  });

  // Adapt the callback-style listen() to a Promise; port 0 picks a free port.
  return new Promise((resolve) => {
    const server = app.listen(0, () => resolve(server));
  });
}
/**
 * Runs the conversation scenario against the local mock server inside a
 * Sentry span: creates a conversation, references it in a response, then
 * chains two responses via previous_response_id.
 *
 * Fix: the server is now closed in a `finally` block — previously a
 * rejection from any awaited client call skipped `server.close()`,
 * leaking the listening socket and keeping the process alive.
 */
async function run() {
  const server = await startMockServer();
  try {
    await Sentry.startSpan({ op: 'function', name: 'conversation-test' }, async () => {
      const client = new OpenAI({
        baseURL: `http://localhost:${server.address().port}/openai`,
        apiKey: 'mock-api-key',
      });
      // Test 1: Create a conversation
      const conversation = await client.conversations.create();
      // Test 2: Use conversation ID in responses.create
      await client.responses.create({
        model: 'gpt-4',
        input: 'Hello, this is a conversation test',
        conversation: conversation.id,
      });
      // Test 3: Use previous_response_id for chaining (without formal conversation)
      const firstResponse = await client.responses.create({
        model: 'gpt-4',
        input: 'Tell me a joke',
      });
      await client.responses.create({
        model: 'gpt-4',
        input: 'Explain why that is funny',
        previous_response_id: firstResponse.id,
      });
    });
  } finally {
    // Release the ephemeral port even if the scenario above throws.
    server.close();
  }
}
// Surface scenario failures instead of leaving a floating promise: an
// unhandled rejection here would otherwise terminate the process with an
// opaque warning/crash. Log the error and signal failure via exit code.
run().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});