Skip to content

Commit a2f684b

Browse files
Claude and hotlong authored
Add LLM provider auto-detection to AIServicePlugin
- Add detectAdapter() private method to auto-detect LLM providers from env vars - Support AI_GATEWAY_MODEL, OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_GENERATIVE_AI_API_KEY - Add comprehensive logging of selected adapter and warnings for missing SDKs - Handle dynamic import failures as soft errors with automatic fallback - Remove redundant detection logic from CLI serve.ts - Add unit tests for auto-detection behavior - Update CHANGELOG.md with new feature details Agent-Logs-Url: https://github.com/objectstack-ai/framework/sessions/7436aca6-b645-4fd8-9459-bef40b5867ec Co-authored-by: hotlong <50353452+hotlong@users.noreply.github.com>
1 parent 435b2e6 commit a2f684b

4 files changed

Lines changed: 233 additions & 45 deletions

File tree

CHANGELOG.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
88
## [Unreleased]
99

1010
### Added
11+
- **AIServicePlugin Auto-Detection** — AIServicePlugin now automatically detects and initializes
12+
LLM providers based on environment variables, eliminating the need for manual adapter configuration
13+
in each deployment:
14+
- Auto-detection priority: `AI_GATEWAY_MODEL` → `OPENAI_API_KEY` → `ANTHROPIC_API_KEY` → `GOOGLE_GENERATIVE_AI_API_KEY`
15+
- Graceful fallback to MemoryLLMAdapter when no provider is configured
16+
- Comprehensive logging of selected provider and warnings for missing SDKs
17+
- Supports custom model selection via `AI_MODEL` environment variable
18+
- Consistent behavior across CLI, Vercel, Docker, and custom deployments
19+
- Dynamic import failures are handled as soft errors with automatic fallback
20+
([#1067](https://github.com/objectstack-ai/framework/issues/1067))
21+
1122
- **Metadata Versioning & History** — Comprehensive version history tracking and rollback capabilities
1223
for metadata items. Key features include:
1324
- `MetadataHistoryRecordSchema` defining structure for historical snapshots

packages/cli/src/commands/serve.ts

Lines changed: 5 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -344,51 +344,12 @@ export default class Serve extends Command {
344344
if (!hasAIPlugin) {
345345
try {
346346
const aiPkg = '@objectstack/service-ai';
347-
const { AIServicePlugin, VercelLLMAdapter } = await import(/* webpackIgnore: true */ aiPkg);
348-
349-
// Auto-detect LLM provider from environment variables.
350-
// Priority: 1) Vercel AI Gateway 2) Direct provider SDKs 3) MemoryLLMAdapter (echo)
351-
let adapter: any = undefined;
352-
353-
// 1. Vercel AI Gateway — works with any provider via gateway('provider/model')
354-
// Uses OIDC on Vercel, VERCEL_API_KEY locally.
355-
const gatewayModel = process.env.AI_GATEWAY_MODEL; // e.g. 'anthropic/claude-sonnet-4.6'
356-
if (gatewayModel) {
357-
try {
358-
const gatewayPkg = '@ai-sdk/gateway';
359-
const { gateway } = await import(/* webpackIgnore: true */ gatewayPkg);
360-
adapter = new VercelLLMAdapter({ model: gateway(gatewayModel) });
361-
} catch {
362-
// @ai-sdk/gateway not installed
363-
}
364-
}
365-
366-
// 2. Direct provider SDKs
367-
if (!adapter) {
368-
const providerConfigs: Array<{ envKey: string; pkg: string; factory: string; defaultModel: string }> = [
369-
{ envKey: 'OPENAI_API_KEY', pkg: '@ai-sdk/openai', factory: 'openai', defaultModel: 'gpt-4o' },
370-
{ envKey: 'ANTHROPIC_API_KEY', pkg: '@ai-sdk/anthropic', factory: 'anthropic', defaultModel: 'claude-sonnet-4-20250514' },
371-
{ envKey: 'GOOGLE_GENERATIVE_AI_API_KEY', pkg: '@ai-sdk/google', factory: 'google', defaultModel: 'gemini-2.0-flash' },
372-
];
373-
374-
for (const { envKey, pkg, factory, defaultModel } of providerConfigs) {
375-
if (process.env[envKey]) {
376-
try {
377-
const mod = await import(/* webpackIgnore: true */ pkg);
378-
const createModel = mod[factory] ?? mod.default;
379-
if (typeof createModel === 'function') {
380-
const modelId = process.env.AI_MODEL ?? defaultModel;
381-
adapter = new VercelLLMAdapter({ model: createModel(modelId) });
382-
break;
383-
}
384-
} catch {
385-
// Provider SDK not installed — skip
386-
}
387-
}
388-
}
389-
}
347+
const { AIServicePlugin } = await import(/* webpackIgnore: true */ aiPkg);
390348

391-
await kernel.use(new AIServicePlugin(adapter ? { adapter } : undefined));
349+
// AIServicePlugin will auto-detect LLM provider from environment variables
350+
// (AI_GATEWAY_MODEL, OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_GENERATIVE_AI_API_KEY)
351+
// No need to manually construct the adapter here.
352+
await kernel.use(new AIServicePlugin());
392353
trackPlugin('AIService');
393354
} catch {
394355
// @objectstack/service-ai not installed — AI features unavailable

packages/services/service-ai/src/__tests__/ai-service.test.ts

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -837,4 +837,116 @@ describe('AIServicePlugin', () => {
837837

838838
expect(ctx.hook).toHaveBeenCalledWith('ai:beforeChat', expect.any(Function));
839839
});
840+
841+
// ── LLM Provider Auto-Detection ─────────────────────────────────
842+
843+
it('should use MemoryLLMAdapter when no env vars are set', async () => {
844+
const plugin = new AIServicePlugin();
845+
const ctx = createMockContext();
846+
847+
// Ensure no LLM provider env vars are set
848+
const oldEnv = { ...process.env };
849+
delete process.env.AI_GATEWAY_MODEL;
850+
delete process.env.OPENAI_API_KEY;
851+
delete process.env.ANTHROPIC_API_KEY;
852+
delete process.env.GOOGLE_GENERATIVE_AI_API_KEY;
853+
854+
try {
855+
await plugin.init(ctx);
856+
857+
const service = ctx.getService<AIService>('ai');
858+
expect(service.adapterName).toBe('memory');
859+
860+
// Verify warning was logged
861+
expect(silentLogger.warn).toHaveBeenCalledWith(
862+
expect.stringContaining('No LLM provider configured')
863+
);
864+
} finally {
865+
// Restore environment
866+
process.env = oldEnv;
867+
}
868+
});
869+
870+
it('should fallback to MemoryLLMAdapter when provider SDK is not installed', async () => {
871+
const plugin = new AIServicePlugin();
872+
const ctx = createMockContext();
873+
874+
const oldEnv = { ...process.env };
875+
// Set env var, but the SDK won't be available in test environment
876+
process.env.OPENAI_API_KEY = 'fake-openai-key';
877+
delete process.env.AI_GATEWAY_MODEL;
878+
delete process.env.ANTHROPIC_API_KEY;
879+
delete process.env.GOOGLE_GENERATIVE_AI_API_KEY;
880+
881+
try {
882+
await plugin.init(ctx);
883+
884+
const service = ctx.getService<AIService>('ai');
885+
// Should fall back to memory because @ai-sdk/openai is not installed
886+
expect(service.adapterName).toBe('memory');
887+
888+
// Verify warning was logged about SDK load failure
889+
expect(silentLogger.warn).toHaveBeenCalledWith(
890+
expect.stringContaining('Failed to load @ai-sdk/openai'),
891+
expect.objectContaining({ error: expect.any(String) })
892+
);
893+
894+
// Verify warning was logged about final fallback
895+
expect(silentLogger.warn).toHaveBeenCalledWith(
896+
expect.stringContaining('No LLM provider configured')
897+
);
898+
} finally {
899+
process.env = oldEnv;
900+
}
901+
});
902+
903+
it('should prefer explicit adapter over auto-detection', async () => {
904+
const customAdapter: LLMAdapter = {
905+
name: 'custom-explicit',
906+
chat: async () => ({ content: 'explicit' }),
907+
complete: async () => ({ content: '' }),
908+
};
909+
910+
const plugin = new AIServicePlugin({ adapter: customAdapter });
911+
const ctx = createMockContext();
912+
913+
const oldEnv = { ...process.env };
914+
process.env.OPENAI_API_KEY = 'fake-openai-key';
915+
916+
try {
917+
await plugin.init(ctx);
918+
919+
const service = ctx.getService<AIService>('ai');
920+
expect(service.adapterName).toBe('custom-explicit');
921+
922+
// Verify it logged as explicitly configured
923+
expect(silentLogger.info).toHaveBeenCalledWith(
924+
expect.stringContaining('explicitly configured')
925+
);
926+
} finally {
927+
process.env = oldEnv;
928+
}
929+
});
930+
931+
it('should log adapter selection', async () => {
932+
const plugin = new AIServicePlugin();
933+
const ctx = createMockContext();
934+
935+
const oldEnv = { ...process.env };
936+
delete process.env.AI_GATEWAY_MODEL;
937+
delete process.env.OPENAI_API_KEY;
938+
delete process.env.ANTHROPIC_API_KEY;
939+
delete process.env.GOOGLE_GENERATIVE_AI_API_KEY;
940+
941+
try {
942+
await plugin.init(ctx);
943+
944+
// Verify adapter selection was logged
945+
expect(silentLogger.info).toHaveBeenCalledWith(
946+
expect.stringContaining('Using LLM adapter')
947+
);
948+
} finally {
949+
process.env = oldEnv;
950+
}
951+
});
840952
});

packages/services/service-ai/src/plugin.ts

Lines changed: 105 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@ import { registerDataTools } from './tools/data-tools.js';
1212
import { registerMetadataTools } from './tools/metadata-tools.js';
1313
import { AgentRuntime } from './agent-runtime.js';
1414
import { DATA_CHAT_AGENT, METADATA_ASSISTANT_AGENT } from './agents/index.js';
15+
import { VercelLLMAdapter } from './adapters/vercel-adapter.js';
16+
import { MemoryLLMAdapter } from './adapters/memory-adapter.js';
1517

1618
/**
1719
* Configuration options for the AIServicePlugin.
@@ -61,6 +63,90 @@ export class AIServicePlugin implements Plugin {
6163
this.options = options;
6264
}
6365

66+
/**
67+
* Auto-detect LLM provider from environment variables.
68+
*
69+
* Priority order:
70+
* 1. AI_GATEWAY_MODEL → Vercel AI Gateway
71+
* 2. OPENAI_API_KEY → OpenAI
72+
* 3. ANTHROPIC_API_KEY → Anthropic
73+
* 4. GOOGLE_GENERATIVE_AI_API_KEY → Google
74+
* 5. Fallback → MemoryLLMAdapter
75+
*
76+
* Returns the adapter and a description for logging.
77+
*/
78+
private async detectAdapter(ctx: PluginContext): Promise<{ adapter: LLMAdapter; description: string }> {
79+
// 1. Vercel AI Gateway — works with any provider via gateway('provider/model')
80+
const gatewayModel = process.env.AI_GATEWAY_MODEL;
81+
if (gatewayModel) {
82+
try {
83+
const gatewayPkg = '@ai-sdk/gateway';
84+
const { gateway } = await import(/* webpackIgnore: true */ gatewayPkg);
85+
const adapter = new VercelLLMAdapter({ model: gateway(gatewayModel) });
86+
return { adapter, description: `Vercel AI Gateway (model: ${gatewayModel})` };
87+
} catch (err) {
88+
ctx.logger.warn(
89+
`[AI] Failed to load @ai-sdk/gateway for AI_GATEWAY_MODEL=${gatewayModel}, trying next provider`,
90+
err instanceof Error ? { error: err.message } : undefined
91+
);
92+
}
93+
}
94+
95+
// 2. Direct provider SDKs
96+
const providerConfigs: Array<{
97+
envKey: string;
98+
pkg: string;
99+
factory: string;
100+
defaultModel: string;
101+
displayName: string;
102+
}> = [
103+
{
104+
envKey: 'OPENAI_API_KEY',
105+
pkg: '@ai-sdk/openai',
106+
factory: 'openai',
107+
defaultModel: 'gpt-4o',
108+
displayName: 'OpenAI'
109+
},
110+
{
111+
envKey: 'ANTHROPIC_API_KEY',
112+
pkg: '@ai-sdk/anthropic',
113+
factory: 'anthropic',
114+
defaultModel: 'claude-sonnet-4-20250514',
115+
displayName: 'Anthropic'
116+
},
117+
{
118+
envKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
119+
pkg: '@ai-sdk/google',
120+
factory: 'google',
121+
defaultModel: 'gemini-2.0-flash',
122+
displayName: 'Google'
123+
},
124+
];
125+
126+
for (const { envKey, pkg, factory, defaultModel, displayName } of providerConfigs) {
127+
if (process.env[envKey]) {
128+
try {
129+
const mod = await import(/* webpackIgnore: true */ pkg);
130+
const createModel = mod[factory] ?? mod.default;
131+
if (typeof createModel === 'function') {
132+
const modelId = process.env.AI_MODEL ?? defaultModel;
133+
const adapter = new VercelLLMAdapter({ model: createModel(modelId) });
134+
return { adapter, description: `${displayName} (model: ${modelId})` };
135+
}
136+
} catch (err) {
137+
ctx.logger.warn(
138+
`[AI] Failed to load ${pkg} for ${envKey}, trying next provider`,
139+
err instanceof Error ? { error: err.message } : undefined
140+
);
141+
}
142+
}
143+
}
144+
145+
// 3. Fallback to MemoryLLMAdapter
146+
ctx.logger.warn('[AI] No LLM provider configured via environment variables. Falling back to MemoryLLMAdapter (echo mode). Set AI_GATEWAY_MODEL, OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_GENERATIVE_AI_API_KEY to use a real LLM.');
147+
return { adapter: new MemoryLLMAdapter(), description: 'MemoryLLMAdapter (echo mode - for testing only)' };
148+
}
149+
64150
async init(ctx: PluginContext): Promise<void> {
65151
// Check if there is an existing AI service (e.g. from dev-plugin)
66152
let hasExisting = false;
@@ -88,8 +174,26 @@ export class AIServicePlugin implements Plugin {
88174
}
89175
}
90176

177+
// Determine LLM adapter: explicit > auto-detect from env > MemoryLLMAdapter fallback
178+
let adapter: LLMAdapter;
179+
let adapterDescription: string;
180+
181+
if (this.options.adapter) {
182+
// User provided an explicit adapter
183+
adapter = this.options.adapter;
184+
adapterDescription = `${adapter.name} (explicitly configured)`;
185+
} else {
186+
// Auto-detect from environment variables
187+
const detected = await this.detectAdapter(ctx);
188+
adapter = detected.adapter;
189+
adapterDescription = detected.description;
190+
}
191+
192+
// Log the selected adapter
193+
ctx.logger.info(`[AI] Using LLM adapter: ${adapterDescription}`);
194+
91195
const config: AIServiceConfig = {
92-
adapter: this.options.adapter,
196+
adapter,
93197
logger: ctx.logger,
94198
conversationService,
95199
};

0 commit comments

Comments
 (0)