Skip to content

Commit 0079a41

Browse files
committed
feat: drop Genkit dependency
Switches fully from Genkit to AI SDK which has proved to be easier to integrate and maintain.
1 parent 9d5d0a2 commit 0079a41

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

41 files changed

+291
-3334
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ You can customize the `web-codegen-scorer eval` script with the following flags:
8888
- Example: `web-codegen-scorer eval --model=gemini-2.5-flash --autorater-model=gemini-2.5-flash --env=<config path>`
8989

9090
- `--runner=<name>`: Specifies the runner to use to execute the eval. Supported runners are
91-
`genkit` (default), `ai-sdk`, `gemini-cli`, `claude-code` or `codex`.
91+
`ai-sdk` (default), `gemini-cli`, `claude-code` or `codex`.
9292

9393
- `--local`: Runs the script in local mode for the initial code generation request. Instead of
9494
calling the LLM, it will attempt to read the initial code from a corresponding file in the

docs/model-setup.md

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,8 @@
33
If you want to test out a model that isn't yet available in the runner, you can add
44
support for it by following these steps:
55

6-
1. Ensure that the provider of the model is supported by [Genkit](https://genkit.dev/).
7-
2. Find the provider for the model in `runner/codegen/genkit/providers`. If the provider hasn't been
8-
implemented yet, do so by creating a new `GenkitModelProvider` and adding it to the
9-
`MODEL_PROVIDERS` in `runner/genkit/models.ts`.
10-
3. Add your model to the `GenkitModelProvider` configs.
6+
1. Ensure that the provider of the model is supported by [AI SDK](https://ai-sdk.dev/).
7+
2. Find the provider for the model in `runner/codegen/ai-sdk`. If the provider doesn't exist,
8+
implement it by following the pattern from the existing providers.
9+
3. Add your model to the `SUPPORTED_MODELS` array.
1110
4. Done! 🎉 You can now run your model by passing `--model=<your model ID>`.

package.json

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -53,13 +53,12 @@
5353
"dependencies": {
5454
"@ai-sdk/anthropic": "3.0.12",
5555
"@ai-sdk/google": "3.0.7",
56+
"@ai-sdk/mcp": "1.0.10",
5657
"@ai-sdk/openai": "3.0.9",
5758
"@ai-sdk/provider": "3.0.2",
59+
"@ai-sdk/xai": "^3.0.26",
5860
"@anthropic-ai/sdk": "^0.68.0",
5961
"@axe-core/puppeteer": "^4.11.0",
60-
"@genkit-ai/compat-oai": "1.23.0",
61-
"@genkit-ai/googleai": "1.22.0",
62-
"@genkit-ai/mcp": "1.22.0",
6362
"@google/genai": "1.29.1",
6463
"@inquirer/prompts": "^8.2.0",
6564
"@safety-web/runner": "0.4.0-alpha.14",
@@ -73,8 +72,6 @@
7372
"chalk": "^5.6.2",
7473
"cli-progress": "^3.12.0",
7574
"file-type": "^21.3.0",
76-
"genkit": "^1.27.0",
77-
"genkitx-anthropic": "0.25.0",
7875
"handlebars": "^4.7.8",
7976
"lighthouse": "^13.0.1",
8077
"limiter": "^3.0.0",

pnpm-lock.yaml

Lines changed: 128 additions & 2288 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

report-app/angular.json

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,8 @@
2020
"@firebase/app",
2121
"@firebase/firestore",
2222
"tiktoken",
23-
"genkit",
24-
"@genkit-ai/compat-oai",
25-
"@genkit-ai/googleai",
26-
"@genkit-ai/mcp",
2723
"@google/genai",
2824
"@google/generative-ai",
29-
"genkitx-anthropic",
3025
"node-fetch"
3126
],
3227
"allowedCommonJsDependencies": [

report-app/public/ai-sdk.png

2.74 KB
Loading

report-app/report-server.ts

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -14,14 +14,13 @@ import {
1414
AiChatRequest,
1515
AIConfigState,
1616
AssessmentResultFromReportServer,
17-
IndividualAssessmentState,
1817
RunInfo,
1918
RunInfoFromReportServer,
2019
} from '../runner/shared-interfaces';
2120

2221
// This will result in a lot of loading and would slow down the serving,
2322
// so it's loaded lazily below.
24-
import {type GenkitRunner} from '../runner/codegen/genkit/genkit-runner';
23+
import {type AiSdkRunner} from '../runner/codegen/ai-sdk/ai-sdk-runner';
2524

2625
const app = express();
2726
const reportsLoader = await getReportLoader();
@@ -89,11 +88,11 @@ app.get('/api/reports/:id', async (req, res) => {
8988
res.json(result ?? []);
9089
});
9190

92-
let llm: Promise<GenkitRunner> | null = null;
91+
let llm: Promise<AiSdkRunner> | null = null;
9392

94-
/** Lazily initializes and returns the genkit runner. */
95-
async function getOrCreateGenkitLlmRunner() {
96-
const llm = new (await import('../runner/codegen/genkit/genkit-runner')).GenkitRunner();
93+
/** Lazily initializes and returns the LLM runner. */
94+
async function getOrCreateRunner() {
95+
const llm = new (await import('../runner/codegen/ai-sdk/ai-sdk-runner')).AiSdkRunner();
9796
// Gracefully shut down the runner on exit.
9897
process.on('SIGINT', () => llm!.dispose());
9998
process.on('SIGTERM', () => llm!.dispose());
@@ -116,7 +115,7 @@ app.post('/api/reports/:id/chat', async (req, res) => {
116115

117116
const abortController = new AbortController();
118117
const summary = await chatWithReportAI(
119-
await (llm ?? getOrCreateGenkitLlmRunner()),
118+
await (llm ?? getOrCreateRunner()),
120119
prompt,
121120
abortController.signal,
122121
allAssessments,
@@ -138,9 +137,9 @@ app.post('/api/reports/:id/chat', async (req, res) => {
138137

139138
app.get('/api/ai-config-state', async (req, res) => {
140139
try {
141-
const llm = await getOrCreateGenkitLlmRunner();
140+
const llm = await getOrCreateRunner();
142141
return res.json({
143-
configuredModels: llm.getSupportedModelsWithAPIKey(),
142+
configuredModels: llm.getSupportedModels(),
144143
} satisfies AIConfigState);
145144
} catch (e) {
146145
console.error('Could not instantiate LLM instance. Error:', e);

report-app/src/app/shared/provider-label.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ const exactMatches: Record<string, string> = {
99
'gemini-cli': 'gemini.webp',
1010
genkit: 'genkit.png',
1111
codex: 'open-ai.png',
12+
'ai-sdk': 'ai-sdk.png',
1213
};
1314

1415
@Component({

runner/codegen/ai-sdk/ai-sdk-model-options.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import {AnthropicProviderOptions} from '@ai-sdk/anthropic';
22
import {GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
3+
import {XaiProviderOptions} from '@ai-sdk/xai';
34
import {OpenAIResponsesProviderOptions} from '@ai-sdk/openai';
45
import {LanguageModelV3, SharedV3ProviderOptions} from '@ai-sdk/provider';
56

@@ -9,6 +10,7 @@ export type AiSdkModelOptions = {
910
| {anthropic: AnthropicProviderOptions}
1011
| {google: GoogleGenerativeAIProviderOptions}
1112
| {openai: OpenAIResponsesProviderOptions}
13+
| {xai: XaiProviderOptions}
1214
// This supports extensions of `AISdkRunner` for custom model providers.
1315
| SharedV3ProviderOptions;
1416
};

runner/codegen/ai-sdk/ai-sdk-runner.ts

Lines changed: 70 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,14 @@
1-
import {AnthropicProviderOptions} from '@ai-sdk/anthropic';
2-
import {GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
3-
import {OpenAIResponsesProviderOptions} from '@ai-sdk/openai';
41
import {
52
FilePart,
63
generateText,
7-
LanguageModel,
84
ModelMessage,
95
Output,
106
SystemModelMessage,
117
TextPart,
8+
ToolSet,
129
} from 'ai';
10+
import {createMCPClient, MCPClient} from '@ai-sdk/mcp';
11+
import {Experimental_StdioMCPTransport as StdioClientTransport} from '@ai-sdk/mcp/mcp-stdio';
1312
import z from 'zod';
1413
import {combineAbortSignals} from '../../utils/abort-signal.js';
1514
import {callWithTimeout} from '../../utils/timeout.js';
@@ -21,24 +20,33 @@ import {
2120
LocalLlmGenerateFilesResponse,
2221
LocalLlmGenerateTextRequestOptions,
2322
LocalLlmGenerateTextResponse,
23+
McpServerDetails,
24+
McpServerOptions,
2425
PromptDataMessage,
2526
} from '../llm-runner.js';
2627
import {ANTHROPIC_MODELS, getAiSdkModelOptionsForAnthropic} from './anthropic.js';
2728
import {getAiSdkModelOptionsForGoogle, GOOGLE_MODELS} from './google.js';
2829
import {getAiSdkModelOptionsForOpenAI, OPENAI_MODELS} from './openai.js';
2930
import {AiSdkModelOptions} from './ai-sdk-model-options.js';
31+
import {getAiSdkModelOptionsForXai, XAI_MODELS} from './xai.js';
3032

31-
const SUPPORTED_MODELS = [...GOOGLE_MODELS, ...ANTHROPIC_MODELS, ...OPENAI_MODELS] as const;
33+
const SUPPORTED_MODELS = [
34+
...GOOGLE_MODELS,
35+
...ANTHROPIC_MODELS,
36+
...OPENAI_MODELS,
37+
...XAI_MODELS,
38+
] as const;
3239

3340
// Increased to a very high value as we rely on an actual timeout
3441
// that aborts stuck LLM requests. WCS is targeting stability here;
3542
// even if it involves many exponential backoff-waiting.
3643
const DEFAULT_MAX_RETRIES = 100000;
3744

3845
export class AiSdkRunner implements LlmRunner {
39-
displayName = 'AI SDK';
40-
id = 'ai-sdk';
41-
hasBuiltInRepairLoop = true;
46+
readonly displayName = 'AI SDK';
47+
readonly id = 'ai-sdk';
48+
readonly hasBuiltInRepairLoop = true;
49+
private mcpClients: MCPClient[] | null = null;
4250

4351
async generateText(
4452
options: LocalLlmGenerateTextRequestOptions,
@@ -49,6 +57,7 @@ export class AiSdkRunner implements LlmRunner {
4957
abortSignal: abortSignal,
5058
messages: this.convertRequestToMessagesList(options),
5159
maxRetries: DEFAULT_MAX_RETRIES,
60+
tools: await this.getTools(),
5261
}),
5362
);
5463

@@ -75,6 +84,7 @@ export class AiSdkRunner implements LlmRunner {
7584
output: Output.object<z.infer<T>>({schema: options.schema}),
7685
abortSignal: abortSignal,
7786
maxRetries: DEFAULT_MAX_RETRIES,
87+
tools: await this.getTools(),
7888
}),
7989
);
8090

@@ -120,7 +130,42 @@ export class AiSdkRunner implements LlmRunner {
120130
return [...SUPPORTED_MODELS];
121131
}
122132

123-
async dispose(): Promise<void> {}
133+
async dispose(): Promise<void> {
134+
if (this.mcpClients) {
135+
for (const client of this.mcpClients) {
136+
try {
137+
await client.close();
138+
} catch (error) {
139+
console.error(`Failed to close MCP client`, error);
140+
}
141+
}
142+
}
143+
}
144+
145+
async startMcpServerHost(
146+
_hostName: string,
147+
servers: McpServerOptions[],
148+
): Promise<McpServerDetails> {
149+
const details: McpServerDetails = {resources: [], tools: []};
150+
151+
for (const server of servers) {
152+
const client = await createMCPClient({
153+
transport: new StdioClientTransport({
154+
command: server.command,
155+
args: server.args,
156+
env: server.env,
157+
}),
158+
});
159+
160+
const [resources, tools] = await Promise.all([client.listResources(), client.tools()]);
161+
resources.resources.forEach(r => details.resources.push(r.name));
162+
details.tools.push(...Object.keys(tools));
163+
this.mcpClients ??= [];
164+
this.mcpClients.push(client);
165+
}
166+
167+
return details;
168+
}
124169

125170
private async _wrapRequestWithTimeoutAndRateLimiting<T>(
126171
request: LocalLlmGenerateTextRequestOptions | LocalLlmConstrainedOutputGenerateRequestOptions,
@@ -145,7 +190,8 @@ export class AiSdkRunner implements LlmRunner {
145190
const result =
146191
(await getAiSdkModelOptionsForGoogle(request.model)) ??
147192
(await getAiSdkModelOptionsForAnthropic(request.model)) ??
148-
(await getAiSdkModelOptionsForOpenAI(request.model));
193+
(await getAiSdkModelOptionsForOpenAI(request.model)) ??
194+
(await getAiSdkModelOptionsForXai(request.model));
149195
if (result === null) {
150196
throw new Error(`Unexpected unsupported model: ${request.model}`);
151197
}
@@ -198,4 +244,18 @@ export class AiSdkRunner implements LlmRunner {
198244
}
199245
return result;
200246
}
247+
248+
private async getTools(): Promise<ToolSet | undefined> {
249+
let tools: ToolSet | undefined;
250+
251+
if (this.mcpClients) {
252+
for (const client of this.mcpClients) {
253+
const clientTools = (await client.tools()) as ToolSet;
254+
tools ??= {};
255+
Object.keys(clientTools).forEach(name => (tools![name] = clientTools[name]));
256+
}
257+
}
258+
259+
return tools;
260+
}
201261
}

0 commit comments

Comments
 (0)