Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
import { OpenAI } from 'openai';

import { OpenAIAgentRunner } from '../src/OpenAIAgentRunner';

// Replace the real OpenAI client with a stub: every constructed instance
// exposes a jest.fn() at chat.completions.create that tests can script.
jest.mock('openai', () => {
  const makeStubClient = () => ({
    chat: {
      completions: {
        create: jest.fn(),
      },
    },
  });
  return { OpenAI: jest.fn().mockImplementation(makeStubClient) };
});

// Unit tests for OpenAIAgentRunner's agentic loop: plain completions,
// tool execution across iterations, unknown tools, and API failures.
describe('OpenAIAgentRunner', () => {
  let mockOpenAI: jest.Mocked<OpenAI>;

  beforeEach(() => {
    // Fresh mocked client per test; the jest.mock factory above makes
    // chat.completions.create a jest.fn() on each new instance.
    mockOpenAI = new OpenAI() as jest.Mocked<OpenAI>;
  });

  it('returns content with no toolCalls when the model does not invoke tools', async () => {
    (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue({
      choices: [{ message: { content: 'Done', tool_calls: [] } }],
      usage: { prompt_tokens: 8, completion_tokens: 4, total_tokens: 12 },
    } as any);

    const runner = new OpenAIAgentRunner(mockOpenAI, 'gpt-4o', {}, '', [], {});
    const result = await runner.run([{ role: 'user', content: 'Say done' }]);

    expect(result.content).toBe('Done');
    expect(result.metrics.success).toBe(true);
    // No tools were invoked, so toolCalls is expected to be absent (not []).
    expect(result.metrics.toolCalls).toBeUndefined();
    expect(result.metrics.usage).toEqual({ total: 12, input: 8, output: 4 });
  });

  it('executes tools, populates toolCalls, and aggregates token usage across iterations', async () => {
    const create = mockOpenAI.chat.completions.create as jest.Mock;
    // Script two model turns: the first requests the 'lookup' tool, the
    // second (after the tool result is fed back) returns the final answer.
    create
      .mockResolvedValueOnce({
        choices: [
          {
            message: {
              content: null,
              tool_calls: [
                {
                  id: 'call_1',
                  function: { name: 'lookup', arguments: '{"id":42}' },
                },
              ],
            },
          },
        ],
        usage: { prompt_tokens: 10, completion_tokens: 4, total_tokens: 14 },
      } as any)
      .mockResolvedValueOnce({
        choices: [{ message: { content: 'The answer is 42.', tool_calls: [] } }],
        usage: { prompt_tokens: 6, completion_tokens: 8, total_tokens: 14 },
      } as any);

    const lookup = jest.fn().mockResolvedValue({ value: 42 });
    const toolDefinitions = [
      {
        type: 'function',
        function: { name: 'lookup', parameters: { type: 'object' } },
      },
    ];
    const runner = new OpenAIAgentRunner(
      mockOpenAI,
      'gpt-4o',
      {},
      'You are an expert.',
      toolDefinitions,
      { lookup },
    );

    const result = await runner.run([{ role: 'user', content: 'Look up 42' }]);

    // Tool arguments should be JSON-parsed before invocation.
    expect(lookup).toHaveBeenCalledWith({ id: 42 });
    expect(create).toHaveBeenCalledTimes(2);
    // Tool definitions are passed through to the API unchanged (same reference).
    expect(create.mock.calls[0][0].tools).toBe(toolDefinitions);
    // Instructions are prepended to the conversation as a system message.
    expect(create.mock.calls[0][0].messages[0]).toEqual({
      role: 'system',
      content: 'You are an expert.',
    });
    expect(result.content).toBe('The answer is 42.');
    expect(result.metrics.toolCalls).toEqual(['lookup']);
    // Usage is summed across both iterations: 14 + 14 total, 10 + 6 input, 4 + 8 output.
    expect(result.metrics.usage).toEqual({ total: 28, input: 16, output: 12 });
  });

  it('records the tool call and continues when a tool is missing from the registry', async () => {
    const create = mockOpenAI.chat.completions.create as jest.Mock;
    // The model asks for a tool that was never registered; the runner should
    // not throw, and should still complete on the next model turn.
    create
      .mockResolvedValueOnce({
        choices: [
          {
            message: {
              content: null,
              tool_calls: [{ id: 'call_x', function: { name: 'missing', arguments: '{}' } }],
            },
          },
        ],
        usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
      } as any)
      .mockResolvedValueOnce({
        choices: [{ message: { content: 'fallback', tool_calls: [] } }],
        usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
      } as any);

    const runner = new OpenAIAgentRunner(mockOpenAI, 'gpt-4o', {}, '', [], {});
    const result = await runner.run([{ role: 'user', content: 'go' }]);

    expect(result.content).toBe('fallback');
    // Even unresolved tool calls are recorded in the metrics.
    expect(result.metrics.toolCalls).toEqual(['missing']);
  });

  it('returns an unsuccessful RunnerResult when the API call throws', async () => {
    (mockOpenAI.chat.completions.create as jest.Mock).mockRejectedValue(new Error('boom'));

    const runner = new OpenAIAgentRunner(mockOpenAI, 'gpt-4o', {}, '', [], {});
    const result = await runner.run([{ role: 'user', content: 'Hi' }]);

    // API errors are swallowed into an empty, unsuccessful result rather than thrown.
    expect(result.content).toBe('');
    expect(result.metrics.success).toBe(false);
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
import { OpenAI } from 'openai';

import { OpenAIModelRunner } from '../src/OpenAIModelRunner';

// Auto-mock the 'openai' package: each constructed client carries a
// programmable jest.fn() at chat.completions.create.
jest.mock('openai', () => ({
  OpenAI: jest
    .fn()
    .mockImplementation(() => ({ chat: { completions: { create: jest.fn() } } })),
}));

// Unit tests for OpenAIModelRunner: plain chat-completion results,
// structured (JSON-schema) output parsing, error handling, and client access.
describe('OpenAIModelRunner', () => {
  let mockOpenAI: jest.Mocked<OpenAI>;
  let runner: OpenAIModelRunner;

  beforeEach(() => {
    // Fresh mocked client and runner per test; the jest.mock factory above
    // makes chat.completions.create a jest.fn().
    mockOpenAI = new OpenAI() as jest.Mocked<OpenAI>;
    runner = new OpenAIModelRunner(mockOpenAI, 'gpt-3.5-turbo', {});
  });

  describe('run (chat completion)', () => {
    it('returns a RunnerResult with content, metrics, and raw response', async () => {
      const mockResponse = {
        choices: [{ message: { content: 'Hello there!' } }],
        usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
      };
      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);

      const result = await runner.run([{ role: 'user', content: 'Hi' }]);

      // The configured model and the caller's messages are forwarded verbatim.
      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: 'Hi' }],
      });
      expect(result.content).toBe('Hello there!');
      expect(result.metrics).toEqual({
        success: true,
        usage: { total: 15, input: 10, output: 5 },
      });
      // raw is the untouched API response object (same reference).
      expect(result.raw).toBe(mockResponse);
      // parsed is only populated for structured-output runs.
      expect(result.parsed).toBeUndefined();
    });

    it('marks the result unsuccessful when response has no content', async () => {
      const mockResponse = { choices: [{ message: {} }] };
      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);

      const result = await runner.run([{ role: 'user', content: 'Hi' }]);

      expect(result.content).toBe('');
      expect(result.metrics.success).toBe(false);
    });

    it('returns an unsuccessful RunnerResult when the API call throws', async () => {
      (mockOpenAI.chat.completions.create as jest.Mock).mockRejectedValue(new Error('boom'));

      const result = await runner.run([{ role: 'user', content: 'Hi' }]);

      // Errors are swallowed into an empty, unsuccessful result rather than thrown.
      expect(result.content).toBe('');
      expect(result.metrics.success).toBe(false);
      expect(result.raw).toBeUndefined();
    });
  });

  describe('run (structured output)', () => {
    it('parses structured output and exposes it via parsed', async () => {
      const mockResponse = {
        choices: [{ message: { content: '{"name":"Ada","age":36}' } }],
        usage: { prompt_tokens: 20, completion_tokens: 10, total_tokens: 30 },
      };
      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);

      const schema = {
        type: 'object',
        properties: { name: { type: 'string' }, age: { type: 'number' } },
        required: ['name', 'age'],
      };
      const result = await runner.run(
        [{ role: 'user', content: 'Tell me about a person' }],
        schema,
      );

      // Supplying a schema should wrap it in OpenAI's json_schema
      // response_format with strict mode enabled.
      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: 'Tell me about a person' }],
        response_format: {
          type: 'json_schema',
          json_schema: {
            name: 'structured_output',
            schema,
            strict: true,
          },
        },
      });
      expect(result.content).toBe('{"name":"Ada","age":36}');
      expect(result.parsed).toEqual({ name: 'Ada', age: 36 });
      expect(result.metrics.success).toBe(true);
    });

    it('marks the result unsuccessful when structured output is not valid JSON', async () => {
      const mockResponse = {
        choices: [{ message: { content: 'not json' } }],
        usage: { prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 },
      };
      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);

      const result = await runner.run([{ role: 'user', content: 'Hi' }], { type: 'object' });

      // The raw text is still surfaced, but parsing failure flips success to false.
      expect(result.content).toBe('not json');
      expect(result.parsed).toBeUndefined();
      expect(result.metrics.success).toBe(false);
    });
  });

  describe('getClient', () => {
    it('returns the underlying OpenAI client', () => {
      expect(runner.getClient()).toBe(mockOpenAI);
    });
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
import { OpenAI } from 'openai';

import type { LDAIAgentConfig, LDAICompletionConfig } from '@launchdarkly/server-sdk-ai';

import { OpenAIAgentRunner } from '../src/OpenAIAgentRunner';
import { OpenAIModelRunner } from '../src/OpenAIModelRunner';
import { OpenAIRunnerFactory } from '../src/OpenAIRunnerFactory';

// Stub the 'openai' package so no real client is constructed; the
// chat.completions.create endpoint becomes a jest.fn().
jest.mock('openai', () => {
  const stub = () => ({ chat: { completions: { create: jest.fn() } } });
  return { OpenAI: jest.fn().mockImplementation(stub) };
});

// Tests for OpenAIRunnerFactory: model/agent runner construction from
// LaunchDarkly AI configs, plus access to the shared OpenAI client.
describe('OpenAIRunnerFactory', () => {
  let client: jest.Mocked<OpenAI>;
  let factory: OpenAIRunnerFactory;

  beforeEach(() => {
    client = new OpenAI() as jest.Mocked<OpenAI>;
    factory = new OpenAIRunnerFactory(client);
  });

  describe('createModel', () => {
    it('builds an OpenAIModelRunner that shares the factory client', () => {
      const completionConfig: LDAICompletionConfig = {
        key: 'completion',
        enabled: true,
        model: { name: 'gpt-4o', parameters: { temperature: 0.5 } },
      };

      const modelRunner = factory.createModel(completionConfig);

      expect(modelRunner).toBeInstanceOf(OpenAIModelRunner);
      // The runner must reuse the factory's client, not construct its own.
      expect(modelRunner.getClient()).toBe(client);
    });

    it('builds a model runner from a minimal config', () => {
      expect(factory.createModel({ key: 'completion', enabled: true })).toBeInstanceOf(
        OpenAIModelRunner,
      );
    });
  });

  describe('createAgent', () => {
    it('builds an OpenAIAgentRunner without tools when none are configured', () => {
      const agentConfig: LDAIAgentConfig = {
        key: 'agent',
        enabled: true,
        model: { name: 'gpt-4o' },
        instructions: 'be helpful',
      };

      expect(factory.createAgent(agentConfig)).toBeInstanceOf(OpenAIAgentRunner);
    });

    it('extracts tool definitions from model.parameters.tools', () => {
      const toolDefs = [{ type: 'function', function: { name: 'lookup' } }];
      const agentConfig: LDAIAgentConfig = {
        key: 'agent',
        enabled: true,
        model: { name: 'gpt-4o', parameters: { tools: toolDefs, temperature: 0.7 } },
        instructions: 'be helpful',
      };

      const agentRunner = factory.createAgent(agentConfig, { lookup: () => 'ok' });

      expect(agentRunner).toBeInstanceOf(OpenAIAgentRunner);
    });
  });

  describe('getClient', () => {
    it('returns the underlying OpenAI client', () => {
      expect(factory.getClient()).toBe(client);
    });
  });

  describe('create', () => {
    it('creates an OpenAIRunnerFactory instance', async () => {
      const created = await OpenAIRunnerFactory.create();
      expect(created).toBeInstanceOf(OpenAIRunnerFactory);
      expect(created.getClient()).toBeDefined();
    });
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import {
convertMessagesToOpenAI,
getAIMetricsFromResponse,
getAIUsageFromResponse,
} from '../src/openaiHelper';

// convertMessagesToOpenAI should be a shape-preserving translation of
// LDMessages into OpenAI chat-message dicts.
describe('convertMessagesToOpenAI', () => {
  it('converts LDMessages to OpenAI message dicts preserving role and content', () => {
    const input: Parameters<typeof convertMessagesToOpenAI>[0] = [
      { role: 'system', content: 'You are X' },
      { role: 'user', content: 'Hi' },
      { role: 'assistant', content: 'Hello' },
    ];

    // Conversion must leave every role/content pair untouched, in order.
    expect(convertMessagesToOpenAI(input)).toEqual([
      { role: 'system', content: 'You are X' },
      { role: 'user', content: 'Hi' },
      { role: 'assistant', content: 'Hello' },
    ]);
  });
});

// getAIUsageFromResponse translates OpenAI usage fields into LDTokenUsage.
describe('getAIUsageFromResponse', () => {
  it('returns undefined when usage is missing', () => {
    const result = getAIUsageFromResponse({});
    expect(result).toBeUndefined();
  });

  it('maps OpenAI prompt/completion/total token fields to LDTokenUsage', () => {
    const response = {
      usage: { prompt_tokens: 5, completion_tokens: 10, total_tokens: 15 },
    };

    // prompt -> input, completion -> output, total -> total.
    expect(getAIUsageFromResponse(response)).toEqual({ total: 15, input: 5, output: 10 });
  });
});

// getAIMetricsFromResponse wraps the extracted usage in a success-flagged
// metrics object.
describe('getAIMetricsFromResponse', () => {
  it('returns success=true with usage extracted from the response', () => {
    const response = {
      usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 },
    };

    const metrics = getAIMetricsFromResponse(response);

    expect(metrics).toEqual({
      success: true,
      usage: { total: 3, input: 1, output: 2 },
    });
  });
});
Loading
Loading