Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/fix-google-runtime-race.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"@iqai/adk": minor
---

Add `GoogleLlmConfig` and `AiSdkLlmOptions` for explicit, request-scoped Google client configuration. This eliminates process.env race conditions when multiple GoogleLlm or AiSdkLlm instances with different backends run concurrently in a multi-tenant server. Env-based fallback is preserved when no config is provided; however, if a config object is provided but incomplete (no `apiKey`, no complete Vertex AI triple, and no pre-built `client`), client construction now fails fast with an error rather than silently falling back to environment variables.
39 changes: 26 additions & 13 deletions packages/adk/src/models/ai-sdk.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import { Logger } from "@adk/logger";
import type { Content, Part } from "@google/genai";
import { type Content, GoogleGenAI, type Part } from "@google/genai";
import {
AssistantContent,
generateText,
Expand Down Expand Up @@ -52,14 +52,23 @@ interface AiSdkRequestParams {
};
}

/**
* Options for AiSdkLlm.
*/
export interface AiSdkLlmOptions {
/** Pre-built Google GenAI client for context caching (avoids env race conditions) */
googleGenaiClient?: GoogleGenAI;
}

/**
* AI SDK integration that accepts a pre-configured LanguageModel.
* Enables ADK-TS to work with any provider supported by Vercel's AI SDK.
*/
export class AiSdkLlm extends BaseLlm {
private modelInstance: LanguageModel;
#modelInstance: LanguageModel;
protected logger = new Logger({ name: "AiSdkLlm" });
private cacheManager: ContextCacheManager | null = null;
#cacheManager: ContextCacheManager | null = null;
#options?: AiSdkLlmOptions;

/**
* Model provider patterns for detection
Expand All @@ -72,15 +81,17 @@ export class AiSdkLlm extends BaseLlm {

/**
* Constructor accepts a pre-configured LanguageModel instance
* @param model - Pre-configured LanguageModel from provider(modelName)
* @param modelInstance - Pre-configured LanguageModel from provider(modelName)
* @param options - Optional configuration (e.g. googleGenaiClient for caching)
*/
constructor(modelInstance: LanguageModel) {
constructor(modelInstance: LanguageModel, options?: AiSdkLlmOptions) {
let modelId = "ai-sdk-model";
if (typeof modelInstance !== "string") {
modelId = modelInstance.modelId;
}
super(modelId);
this.modelInstance = modelInstance;
this.#modelInstance = modelInstance;
this.#options = options;
}

/**
Expand Down Expand Up @@ -132,11 +143,13 @@ export class AiSdkLlm extends BaseLlm {

/**
* Initializes the cache manager for Google models
* The manager lazily initializes its Google GenAI client on first use
*/
private initializeCacheManager(): void {
if (!this.cacheManager) {
this.cacheManager = new GeminiContextCacheManager(this.logger);
if (!this.#cacheManager) {
this.#cacheManager = new GeminiContextCacheManager(
this.logger,
this.#options?.googleGenaiClient,
);
}
}

Expand All @@ -152,14 +165,14 @@ export class AiSdkLlm extends BaseLlm {
this.initializeCacheManager();

// Normalize model ID for Google API compatibility
const modelId = this.getModelId(this.modelInstance);
const modelId = this.getModelId(this.#modelInstance);
llmRequest.model = this.normalizeGoogleModelId(modelId);

this.logger.debug(`Using model for caching: ${llmRequest.model}`);

// Handle caching through the manager
const cacheMetadata =
await this.cacheManager!.handleContextCaching(llmRequest);
await this.#cacheManager!.handleContextCaching(llmRequest);

if (cacheMetadata?.cacheName) {
this.logger.debug(`Using cache: ${cacheMetadata.cacheName}`);
Expand All @@ -182,7 +195,7 @@ export class AiSdkLlm extends BaseLlm {
cacheMetadata: CacheMetadata | null,
): AiSdkRequestParams {
const params: AiSdkRequestParams = {
model: this.modelInstance,
model: this.#modelInstance,
messages,
maxTokens: llmRequest.config?.maxOutputTokens,
temperature: llmRequest.config?.temperature,
Expand Down Expand Up @@ -251,7 +264,7 @@ export class AiSdkLlm extends BaseLlm {
stream = false,
): AsyncGenerator<LlmResponse, void, unknown> {
try {
const provider = this.detectModelProvider(this.modelInstance);
const provider = this.detectModelProvider(this.#modelInstance);
const messages = this.convertToAiSdkMessages(llmRequest);
const systemMessage = llmRequest.getSystemInstructionText();
const tools = this.convertToAiSdkTools(llmRequest);
Expand Down
157 changes: 102 additions & 55 deletions packages/adk/src/models/google-llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -107,20 +107,35 @@ class StreamingResponseAggregator {
}
}

/**
* Explicit configuration for GoogleLlm — avoids process.env race conditions
* in multi-tenant servers.
*/
export interface GoogleLlmConfig {
apiKey?: string;
vertexai?: boolean;
project?: string;
location?: string;
/** Pre-built client — bypasses all other config / env vars */
client?: GoogleGenAI;
}

/**
* Integration for Gemini models.
*/
export class GoogleLlm extends BaseLlm {
private _apiClient?: GoogleGenAI;
private _liveApiClient?: GoogleGenAI;
private _apiBackend?: GoogleLLMVariant;
private _trackingHeaders?: Record<string, string>;
#apiClient?: GoogleGenAI;
#liveApiClient?: GoogleGenAI;
#apiBackend?: GoogleLLMVariant;
#trackingHeaders?: Record<string, string>;
#config?: GoogleLlmConfig;

/**
* Constructor for Gemini
*/
constructor(model = "gemini-2.5-flash") {
constructor(model = "gemini-2.5-flash", config?: GoogleLlmConfig) {
super(model);
this.#config = config;
}

/**
Expand Down Expand Up @@ -282,63 +297,50 @@ export class GoogleLlm extends BaseLlm {
* Provides the api client.
*/
get apiClient(): GoogleGenAI {
if (!this._apiClient) {
const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
const apiKey = process.env.GOOGLE_API_KEY;
const project = process.env.GOOGLE_CLOUD_PROJECT;
const location = process.env.GOOGLE_CLOUD_LOCATION;

if (useVertexAI && project && location) {
this._apiClient = new GoogleGenAI({
vertexai: true,
project,
location,
});
} else if (apiKey) {
this._apiClient = new GoogleGenAI({
apiKey,
});
} else {
throw new Error(
"Google API Key or Vertex AI configuration is required. " +
"Set GOOGLE_API_KEY or GOOGLE_GENAI_USE_VERTEXAI=true with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION.",
);
}
if (!this.#apiClient) {
this.#apiClient = this.#buildClient();
}
return this._apiClient;
return this.#apiClient;
}

/**
* Gets the API backend type.
*/
get apiBackend(): GoogleLLMVariant {
if (!this._apiBackend) {
const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
this._apiBackend = useVertexAI
? GoogleLLMVariant.VERTEX_AI
: GoogleLLMVariant.GEMINI_API;
if (!this.#apiBackend) {
const cfg = this.#config;
if (cfg?.vertexai && cfg.project && cfg.location) {
this.#apiBackend = GoogleLLMVariant.VERTEX_AI;
} else if (cfg) {
this.#apiBackend = GoogleLLMVariant.GEMINI_API;
} else {
const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
this.#apiBackend = useVertexAI
? GoogleLLMVariant.VERTEX_AI
: GoogleLLMVariant.GEMINI_API;
}
}
return this._apiBackend;
return this.#apiBackend;
}

/**
* Gets the tracking headers.
*/
get trackingHeaders(): Record<string, string> {
if (!this._trackingHeaders) {
if (!this.#trackingHeaders) {
let frameworkLabel = "google-adk/1.0.0"; // Replace with actual version
if (process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
}
const languageLabel = `gl-node/${process.version}`;
const versionHeaderValue = `${frameworkLabel} ${languageLabel}`;

this._trackingHeaders = {
this.#trackingHeaders = {
"x-goog-api-client": versionHeaderValue,
"user-agent": versionHeaderValue,
};
}
return this._trackingHeaders;
return this.#trackingHeaders;
}

/**
Expand All @@ -354,28 +356,73 @@ export class GoogleLlm extends BaseLlm {
* Gets the live API client.
*/
get liveApiClient(): GoogleGenAI {
if (!this._liveApiClient) {
const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
const apiKey = process.env.GOOGLE_API_KEY;
const project = process.env.GOOGLE_CLOUD_PROJECT;
const location = process.env.GOOGLE_CLOUD_LOCATION;

if (useVertexAI && project && location) {
this._liveApiClient = new GoogleGenAI({
if (!this.#liveApiClient) {
this.#liveApiClient = this.#buildClient({
apiVersion: this.liveApiVersion,
});
}
return this.#liveApiClient;
}

/**
* Builds a GoogleGenAI client from explicit config (if provided) or env vars.
*/
#buildClient(overrides?: { apiVersion?: string }): GoogleGenAI {
const cfg = this.#config;

// 1. Pre-built client injection — ignore overrides (caller should pre-configure)
if (cfg?.client) {
return cfg.client;
}

// 2. Explicit config fields
if (cfg) {
if (cfg.vertexai && cfg.project && cfg.location) {
return new GoogleGenAI({
vertexai: true,
project,
location,
apiVersion: this.liveApiVersion,
project: cfg.project,
location: cfg.location,
...overrides,
});
} else if (apiKey) {
this._liveApiClient = new GoogleGenAI({
apiKey,
apiVersion: this.liveApiVersion,
}
if (cfg.apiKey) {
return new GoogleGenAI({
apiKey: cfg.apiKey,
...overrides,
});
} else {
throw new Error("API configuration required for live client");
}
// Config was provided but is incomplete — fail fast rather than
// silently falling through to process.env (which would reintroduce
// the race condition this config is meant to prevent).
throw new Error(
"Incomplete GoogleLlmConfig: provide apiKey, vertexai + project + location, or a pre-built client.",
);
}
return this._liveApiClient;

// 3. Env fallback (only when no config was provided)
const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
const apiKey = process.env.GOOGLE_API_KEY;
const project = process.env.GOOGLE_CLOUD_PROJECT;
const location = process.env.GOOGLE_CLOUD_LOCATION;

if (useVertexAI && project && location) {
return new GoogleGenAI({
vertexai: true,
project,
location,
...overrides,
});
}
if (apiKey) {
return new GoogleGenAI({
apiKey,
...overrides,
});
}

throw new Error(
"Google API Key or Vertex AI configuration is required. " +
"Set GOOGLE_API_KEY or GOOGLE_GENAI_USE_VERTEXAI=true with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION.",
);
}
}
Loading