Skip to content

Commit 4f67ecf

Browse files
committed
Harden adapters and expand Ollama live coverage
1 parent 0531a19 commit 4f67ecf

12 files changed

Lines changed: 946 additions & 46 deletions

File tree

README.md

Lines changed: 32 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -27,19 +27,19 @@ cross-provider message normalization, and an optional stateful agent runtime.
2727
```bash
2828
git clone git@github.com:constructive-io/agentic-kit.git
2929
cd agentic-kit
30-
yarn install
31-
yarn build
32-
yarn test
30+
pnpm install
31+
pnpm build
32+
pnpm test
3333
```
3434

3535
## Usage
3636

3737
```typescript
38-
import { complete, getModel } from 'agentic-kit';
38+
import { complete, getModel } from "agentic-kit";
3939

40-
const model = getModel('openai', 'gpt-4o-mini');
40+
const model = getModel("openai", "gpt-4o-mini");
4141
const message = await complete(model!, {
42-
messages: [{ role: 'user', content: 'Hello', timestamp: Date.now() }],
42+
messages: [{ role: "user", content: "Hello", timestamp: Date.now() }],
4343
});
4444

4545
console.log(message.content);
@@ -48,3 +48,29 @@ console.log(message.content);
4848
## Contributing
4949

5050
See the individual package READMEs for documentation and local development instructions.
51+
52+
## Testing
53+
54+
The default test suite is deterministic and runs entirely locally:
55+
56+
```bash
57+
pnpm test
58+
```
59+
60+
There is also a local-only Ollama live lane that does not hit hosted
61+
providers. The default root command runs the fast smoke tier:
62+
63+
```bash
64+
OLLAMA_LIVE_MODEL=qwen3.5:4b pnpm test:live:ollama
65+
```
66+
67+
Run the broader lane explicitly when you want slower behavioral coverage:
68+
69+
```bash
70+
OLLAMA_LIVE_MODEL=qwen3.5:4b pnpm test:live:ollama:extended
71+
```
72+
73+
The Ollama live script performs a preflight check against `OLLAMA_BASE_URL` and exits
74+
cleanly if the local server or requested model is unavailable. If
75+
`nomic-embed-text:latest` is installed, the lane also exercises local embedding
76+
generation.

package.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,9 @@
1818
"clean": "pnpm -r run clean",
1919
"build": "pnpm -r run build",
2020
"build:dev": "pnpm -r run build:dev",
21+
"test": "pnpm -r run test",
22+
"test:live:ollama": "pnpm --filter @agentic-kit/ollama run test:live:smoke",
23+
"test:live:ollama:extended": "pnpm --filter @agentic-kit/ollama run test:live:extended",
2124
"lint": "pnpm -r run lint",
2225
"internal:deps": "makage update-workspace",
2326
"deps": "pnpm up -r -i -L"

packages/agent/__tests__/agent.test.ts

Lines changed: 182 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,9 @@
1-
import { createAssistantMessageEventStream, type Context, type ModelDescriptor } from 'agentic-kit';
1+
import {
2+
createAssistantMessageEventStream,
3+
type AssistantMessage,
4+
type Context,
5+
type ModelDescriptor,
6+
} from 'agentic-kit';
27

38
import { Agent } from '../src';
49

@@ -147,4 +152,180 @@ describe('@agentic-kit/agent', () => {
147152
content: [{ type: 'text', text: 'done' }],
148153
});
149154
});
155+
156+
it('turns tool argument validation failures into error tool results and continues', async () => {
157+
const responses = [
158+
createAssistantResponse({
159+
stopReason: 'toolUse',
160+
content: [{ type: 'toolCall', id: 'tool_1', name: 'echo', arguments: {} }],
161+
}),
162+
createAssistantResponse({
163+
stopReason: 'stop',
164+
content: [{ type: 'text', text: 'recovered' }],
165+
}),
166+
];
167+
168+
let callIndex = 0;
169+
const agent = new Agent({
170+
initialState: { model: createModel() },
171+
streamFn: () => streamMessage(responses[callIndex++]),
172+
});
173+
174+
const execute = jest.fn(async () => ({
175+
content: [{ type: 'text' as const, text: 'should not run' }],
176+
}));
177+
178+
agent.setTools([
179+
{
180+
name: 'echo',
181+
label: 'Echo',
182+
description: 'Echo text',
183+
parameters: {
184+
type: 'object',
185+
properties: {
186+
text: { type: 'string' },
187+
},
188+
required: ['text'],
189+
},
190+
execute,
191+
},
192+
]);
193+
194+
await agent.prompt('hello');
195+
196+
expect(execute).not.toHaveBeenCalled();
197+
expect(agent.state.messages[2]).toMatchObject({
198+
role: 'toolResult',
199+
toolName: 'echo',
200+
isError: true,
201+
});
202+
expect(agent.state.messages[2].content[0]).toMatchObject({
203+
type: 'text',
204+
text: expect.stringContaining('Tool argument validation failed'),
205+
});
206+
expect(agent.state.messages[3]).toMatchObject({
207+
role: 'assistant',
208+
content: [{ type: 'text', text: 'recovered' }],
209+
});
210+
});
211+
212+
it('records aborted assistant turns when the active stream is cancelled', async () => {
213+
const agent = new Agent({
214+
initialState: { model: createModel() },
215+
streamFn: (_model: ModelDescriptor, _context: Context, options) => {
216+
const stream = createAssistantMessageEventStream();
217+
const partial = createAssistantResponse({
218+
stopReason: 'stop',
219+
content: [{ type: 'text', text: '' }],
220+
});
221+
222+
queueMicrotask(() => {
223+
stream.push({ type: 'start', partial });
224+
225+
options?.signal?.addEventListener(
226+
'abort',
227+
() => {
228+
const aborted = createAssistantResponse({
229+
stopReason: 'aborted',
230+
errorMessage: 'aborted by test',
231+
content: [],
232+
});
233+
stream.push({ type: 'error', reason: 'aborted', error: aborted });
234+
stream.end(aborted);
235+
},
236+
{ once: true }
237+
);
238+
});
239+
240+
return stream;
241+
},
242+
});
243+
244+
const pending = agent.prompt('slow');
245+
setTimeout(() => agent.abort(), 0);
246+
await pending;
247+
248+
expect(agent.state.error).toBe('aborted by test');
249+
expect(agent.state.messages.at(-1)).toMatchObject({
250+
role: 'assistant',
251+
stopReason: 'aborted',
252+
errorMessage: 'aborted by test',
253+
});
254+
expect(agent.state.isStreaming).toBe(false);
255+
expect(agent.state.streamMessage).toBeNull();
256+
});
150257
});
258+
259+
function createAssistantResponse(overrides: Partial<AssistantMessage>): AssistantMessage {
260+
return {
261+
...createAssistantResponseBase(),
262+
...overrides,
263+
};
264+
}
265+
266+
function createAssistantResponseBase(): AssistantMessage {
267+
return {
268+
role: 'assistant' as const,
269+
api: 'fake',
270+
provider: 'fake',
271+
model: 'demo',
272+
usage: {
273+
input: 1,
274+
output: 1,
275+
cacheRead: 0,
276+
cacheWrite: 0,
277+
totalTokens: 2,
278+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
279+
},
280+
stopReason: 'stop' as const,
281+
timestamp: Date.now(),
282+
content: [] as AssistantMessage['content'],
283+
};
284+
}
285+
286+
function streamMessage(message: AssistantMessage) {
287+
const stream = createAssistantMessageEventStream();
288+
289+
queueMicrotask(() => {
290+
stream.push({ type: 'start', partial: message });
291+
if (message.content[0]?.type === 'toolCall') {
292+
stream.push({
293+
type: 'toolcall_start',
294+
contentIndex: 0,
295+
partial: message,
296+
});
297+
stream.push({
298+
type: 'toolcall_end',
299+
contentIndex: 0,
300+
toolCall: message.content[0],
301+
partial: message,
302+
});
303+
} else {
304+
stream.push({
305+
type: 'text_start',
306+
contentIndex: 0,
307+
partial: message,
308+
});
309+
stream.push({
310+
type: 'text_delta',
311+
contentIndex: 0,
312+
delta: message.content[0]?.type === 'text' ? message.content[0].text : '',
313+
partial: message,
314+
});
315+
stream.push({
316+
type: 'text_end',
317+
contentIndex: 0,
318+
content: message.content[0]?.type === 'text' ? message.content[0].text : '',
319+
partial: message,
320+
});
321+
}
322+
stream.push({
323+
type: 'done',
324+
reason: message.stopReason === 'toolUse' ? 'toolUse' : 'stop',
325+
message,
326+
});
327+
stream.end(message);
328+
});
329+
330+
return stream;
331+
}

packages/agentic-kit/__tests__/adapter.test.ts

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,85 @@ describe('agentic-kit core', () => {
7171
});
7272
});
7373

74+
it('drops aborted assistant turns and rewrites tool result ids for stricter providers', () => {
75+
const sourceModel = createFakeModel();
76+
const targetModel: ModelDescriptor = {
77+
...sourceModel,
78+
provider: 'anthropic',
79+
api: 'anthropic-messages',
80+
id: 'claude-demo',
81+
};
82+
83+
const transformed = transformMessages(
84+
[
85+
{
86+
role: 'assistant',
87+
api: sourceModel.api,
88+
provider: sourceModel.provider,
89+
model: sourceModel.id,
90+
usage: {
91+
input: 0,
92+
output: 0,
93+
cacheRead: 0,
94+
cacheWrite: 0,
95+
totalTokens: 0,
96+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
97+
},
98+
stopReason: 'toolUse',
99+
timestamp: Date.now(),
100+
content: [
101+
{ type: 'toolCall', id: 'call|needs-normalizing', name: 'lookup', arguments: { city: 'Paris' } },
102+
],
103+
},
104+
{
105+
role: 'toolResult',
106+
toolCallId: 'call|needs-normalizing',
107+
toolName: 'lookup',
108+
content: [{ type: 'text', text: 'ok' }],
109+
isError: false,
110+
timestamp: Date.now(),
111+
},
112+
{
113+
role: 'assistant',
114+
api: sourceModel.api,
115+
provider: sourceModel.provider,
116+
model: sourceModel.id,
117+
usage: {
118+
input: 0,
119+
output: 0,
120+
cacheRead: 0,
121+
cacheWrite: 0,
122+
totalTokens: 0,
123+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
124+
},
125+
stopReason: 'aborted',
126+
errorMessage: 'cancelled',
127+
timestamp: Date.now(),
128+
content: [{ type: 'text', text: 'partial' }],
129+
},
130+
],
131+
targetModel
132+
);
133+
134+
expect(transformed).toHaveLength(2);
135+
expect(transformed[0]).toMatchObject({
136+
role: 'assistant',
137+
content: [
138+
{
139+
type: 'toolCall',
140+
id: 'call_needs-normalizing',
141+
name: 'lookup',
142+
},
143+
],
144+
});
145+
expect(transformed[1]).toMatchObject({
146+
role: 'toolResult',
147+
toolCallId: 'call_needs-normalizing',
148+
toolName: 'lookup',
149+
isError: false,
150+
});
151+
});
152+
74153
it('keeps the legacy AgentKit generate API working through structured streams', async () => {
75154
const provider: ProviderAdapter & { name: string } = {
76155
api: 'fake-api',
@@ -134,4 +213,31 @@ describe('agentic-kit core', () => {
134213
expect(chunks).toEqual(['hello world']);
135214
await expect(kit.generate({ model: 'demo', prompt: 'hi' })).resolves.toBe('hello world');
136215
});
216+
217+
it('extracts assistant text from mixed content blocks', () => {
218+
const text = getMessageText({
219+
role: 'assistant',
220+
api: 'fake-api',
221+
provider: 'fake',
222+
model: 'demo',
223+
usage: {
224+
input: 0,
225+
output: 0,
226+
cacheRead: 0,
227+
cacheWrite: 0,
228+
totalTokens: 0,
229+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
230+
},
231+
stopReason: 'stop',
232+
timestamp: Date.now(),
233+
content: [
234+
{ type: 'thinking', thinking: 'ignore me' },
235+
{ type: 'text', text: 'hello ' },
236+
{ type: 'toolCall', id: 'tool_1', name: 'lookup', arguments: { city: 'Paris' } },
237+
{ type: 'text', text: 'world' },
238+
],
239+
});
240+
241+
expect(text).toBe('hello world');
242+
});
137243
});

0 commit comments

Comments
 (0)