Skip to content

Commit 8c89caf

Browse files
CHORE: Added more tests for caching and the openai provider
1 parent c0db3e1 commit 8c89caf

3 files changed

Lines changed: 143 additions & 3 deletions

File tree

src/index.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -227,7 +227,9 @@ export class PromptOpsKit {
227227
}
228228

229229
const adapter = getAdapter(options.provider);
230-
const validation = adapter.validate(resolved);
230+
const validation = adapter.validate(resolved, {
231+
openaiResponses: options.openaiResponses,
232+
});
231233

232234
if (!validation.valid) {
233235
throw new Error(

tests/cache.test.ts

Lines changed: 49 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
2-
import { mkdtemp, rm, writeFile } from 'node:fs/promises';
2+
import { mkdtemp, rm, unlink, utimes, writeFile } from 'node:fs/promises';
33
import { join } from 'node:path';
44
import { tmpdir } from 'node:os';
55
import { PromptCache } from '../src/cache.js';
@@ -36,4 +36,51 @@ describe('PromptCache', () => {
3636
expect(cache.get(fileB)).toBeUndefined();
3737
expect(cache.get(fileC)).toBe('asset-c');
3838
});
39-
});
39+
40+
it('invalidates an entry when the file mtime changes', async () => {
41+
const file = join(tmpDir, 'changed.md');
42+
await writeFile(file, 'original');
43+
44+
const cache = new PromptCache<string>(2);
45+
cache.set(file, 'asset');
46+
47+
expect(cache.get(file)).toBe('asset');
48+
49+
const future = new Date(Date.now() + 10_000);
50+
await utimes(file, future, future);
51+
52+
expect(cache.get(file)).toBeUndefined();
53+
expect(cache.size).toBe(0);
54+
});
55+
56+
it('invalidates an entry when the file is deleted', async () => {
57+
const file = join(tmpDir, 'deleted.md');
58+
await writeFile(file, 'content');
59+
60+
const cache = new PromptCache<string>(2);
61+
cache.set(file, 'asset');
62+
63+
await unlink(file);
64+
65+
expect(cache.get(file)).toBeUndefined();
66+
expect(cache.size).toBe(0);
67+
});
68+
69+
it('clears all entries', async () => {
70+
const fileA = join(tmpDir, 'clear-a.md');
71+
const fileB = join(tmpDir, 'clear-b.md');
72+
73+
await writeFile(fileA, 'A');
74+
await writeFile(fileB, 'B');
75+
76+
const cache = new PromptCache<string>(2);
77+
cache.set(fileA, 'asset-a');
78+
cache.set(fileB, 'asset-b');
79+
80+
cache.clear();
81+
82+
expect(cache.size).toBe(0);
83+
expect(cache.get(fileA)).toBeUndefined();
84+
expect(cache.get(fileB)).toBeUndefined();
85+
});
86+
});

tests/providers.test.ts

Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -479,6 +479,97 @@ describe('OpenAI Responses adapter', () => {
479479
);
480480
});
481481

482+
it('reports Responses-specific validation warnings and model errors', () => {
483+
const validation = openaiResponsesAdapter.validate({
484+
...baseAsset,
485+
model: undefined,
486+
reasoning: { budget_tokens: 1000 },
487+
response: {
488+
schema: {
489+
type: 'object',
490+
properties: {
491+
answer: { type: 'string' },
492+
},
493+
},
494+
},
495+
});
496+
497+
expect(validation.valid).toBe(false);
498+
expect(validation.errors).toContain('OpenAI Responses adapter requires a model to be specified.');
499+
expect(validation.warnings).toContain(
500+
'OpenAI Responses uses reasoning.effort, not budget_tokens. budget_tokens will be ignored.',
501+
);
502+
expect(validation.warnings).toContain(
503+
'OpenAI Responses response.schema requires response.format: json. schema will still be applied as JSON schema output.',
504+
);
505+
});
506+
507+
it('renders default schema names, non-strict schemas, unknown tool stubs, and conversation ids', () => {
508+
const result = openaiResponsesAdapter.render(
509+
{
510+
...baseAsset,
511+
response: {
512+
format: 'json',
513+
schema_strict: false,
514+
schema: {
515+
type: 'object',
516+
properties: {
517+
answer: { type: 'string' },
518+
},
519+
},
520+
},
521+
tools: ['lookup_customer'],
522+
},
523+
{
524+
variables: { name: 'World' },
525+
openaiResponses: {
526+
conversation: 'conv_456',
527+
},
528+
},
529+
);
530+
531+
expect(result.body.conversation).toBe('conv_456');
532+
expect(result.body.text).toEqual({
533+
format: {
534+
type: 'json_schema',
535+
name: 'test_response',
536+
schema: {
537+
type: 'object',
538+
properties: {
539+
answer: { type: 'string' },
540+
},
541+
},
542+
strict: false,
543+
},
544+
});
545+
expect(result.body.tools).toEqual([{ type: 'function', name: 'lookup_customer' }]);
546+
});
547+
548+
it('rejects invalid Responses runtime options through PromptOpsKit.renderPrompt', async () => {
549+
const kit = createPromptOpsKit({ sourceDir: '.', cache: false });
550+
551+
await expect(
552+
kit.renderPrompt({
553+
provider: 'openai-responses',
554+
source: [
555+
'---',
556+
'id: inline-responses',
557+
'provider: openai-responses',
558+
'model: gpt-5.4',
559+
'---',
560+
'',
561+
'# Prompt template',
562+
'',
563+
'Hello.',
564+
].join('\n'),
565+
openaiResponses: {
566+
previous_response_id: 'resp_123',
567+
conversation: 'conv_456',
568+
},
569+
}),
570+
).rejects.toThrow('OpenAI Responses options "conversation" and "previous_response_id" cannot both be set.');
571+
});
572+
482573
it('includes history messages as input items', () => {
483574
const result = openaiResponsesAdapter.render(baseAsset, {
484575
variables: { name: 'World' },

0 commit comments

Comments (0)