Skip to content

Commit 7f1efcc

Browse files
waleedlatif1 and claude authored
fix(blocks): resolve Ollama models incorrectly requiring API key in Docker (#3976)
* fix(blocks): resolve Ollama models incorrectly requiring API key in Docker Server-side validation failed for Ollama models like mistral:latest because the Zustand providers store is empty on the server and getProviderFromModel misidentified them via regex pattern matching (e.g. mistral:latest matched Mistral AI's /^mistral/ pattern). Replace the hardcoded CLOUD_PROVIDER_PREFIXES list with existing data sources: - Provider store (definitive on client, checks all provider buckets) - getBaseModelProviders() from PROVIDER_DEFINITIONS (server-side static cloud model lookup) - Slash convention for dynamic cloud providers (fireworks/, openrouter/, etc.) - isOllamaConfigured feature flag using existing OLLAMA_URL env var Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * refactor: remove getProviderFromModel regex fallback from API key validation The fallback was the last piece of regex-based matching in the function and only ran for self-hosted without OLLAMA_URL on the server — a path where Ollama models cannot appear in the dropdown anyway. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * lint * fix: handle vLLM models in store provider check vLLM is a local model server like Ollama and should not require an API key. Add vllm to the store provider check as a safety net for models that may not have the vllm/ prefix. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
1 parent a680cec commit 7f1efcc

File tree

3 files changed

+297
-32
lines changed

3 files changed

+297
-32
lines changed

apps/sim/blocks/utils.test.ts

Lines changed: 267 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,267 @@
1+
/**
2+
* @vitest-environment node
3+
*/
4+
import { beforeEach, describe, expect, it, vi } from 'vitest'
5+
6+
const { mockIsHosted, mockIsAzureConfigured, mockIsOllamaConfigured } = vi.hoisted(() => ({
7+
mockIsHosted: { value: false },
8+
mockIsAzureConfigured: { value: false },
9+
mockIsOllamaConfigured: { value: false },
10+
}))
11+
12+
const {
13+
mockGetHostedModels,
14+
mockGetProviderModels,
15+
mockGetProviderIcon,
16+
mockGetBaseModelProviders,
17+
} = vi.hoisted(() => ({
18+
mockGetHostedModels: vi.fn(() => []),
19+
mockGetProviderModels: vi.fn(() => []),
20+
mockGetProviderIcon: vi.fn(() => null),
21+
mockGetBaseModelProviders: vi.fn(() => ({})),
22+
}))
23+
24+
const { mockProviders } = vi.hoisted(() => ({
25+
mockProviders: {
26+
value: {
27+
base: { models: [] as string[], isLoading: false },
28+
ollama: { models: [] as string[], isLoading: false },
29+
vllm: { models: [] as string[], isLoading: false },
30+
openrouter: { models: [] as string[], isLoading: false },
31+
fireworks: { models: [] as string[], isLoading: false },
32+
},
33+
},
34+
}))
35+
36+
vi.mock('@/lib/core/config/feature-flags', () => ({
37+
get isHosted() {
38+
return mockIsHosted.value
39+
},
40+
get isAzureConfigured() {
41+
return mockIsAzureConfigured.value
42+
},
43+
get isOllamaConfigured() {
44+
return mockIsOllamaConfigured.value
45+
},
46+
}))
47+
48+
vi.mock('@/providers/models', () => ({
49+
getHostedModels: mockGetHostedModels,
50+
getProviderModels: mockGetProviderModels,
51+
getProviderIcon: mockGetProviderIcon,
52+
getBaseModelProviders: mockGetBaseModelProviders,
53+
}))
54+
55+
vi.mock('@/stores/providers/store', () => ({
56+
useProvidersStore: {
57+
getState: () => ({
58+
get providers() {
59+
return mockProviders.value
60+
},
61+
}),
62+
},
63+
}))
64+
65+
vi.mock('@/lib/oauth/utils', () => ({
66+
getScopesForService: vi.fn(() => []),
67+
}))
68+
69+
import { getApiKeyCondition } from '@/blocks/utils'
70+
71+
const BASE_CLOUD_MODELS: Record<string, string> = {
72+
'gpt-4o': 'openai',
73+
'claude-sonnet-4-5': 'anthropic',
74+
'gemini-2.5-pro': 'google',
75+
'mistral-large-latest': 'mistral',
76+
}
77+
78+
describe('getApiKeyCondition / shouldRequireApiKeyForModel', () => {
79+
const evaluateCondition = (model: string): boolean => {
80+
const conditionFn = getApiKeyCondition()
81+
const condition = conditionFn({ model })
82+
if ('not' in condition && condition.not) return false
83+
if (condition.value === '__no_model_selected__') return false
84+
return true
85+
}
86+
87+
beforeEach(() => {
88+
vi.clearAllMocks()
89+
mockIsHosted.value = false
90+
mockIsAzureConfigured.value = false
91+
mockIsOllamaConfigured.value = false
92+
mockProviders.value = {
93+
base: { models: [], isLoading: false },
94+
ollama: { models: [], isLoading: false },
95+
vllm: { models: [], isLoading: false },
96+
openrouter: { models: [], isLoading: false },
97+
fireworks: { models: [], isLoading: false },
98+
}
99+
mockGetHostedModels.mockReturnValue([])
100+
mockGetProviderModels.mockReturnValue([])
101+
mockGetBaseModelProviders.mockReturnValue({})
102+
})
103+
104+
describe('empty or missing model', () => {
105+
it('does not require API key when model is empty', () => {
106+
expect(evaluateCondition('')).toBe(false)
107+
})
108+
109+
it('does not require API key when model is whitespace', () => {
110+
expect(evaluateCondition(' ')).toBe(false)
111+
})
112+
})
113+
114+
describe('hosted models', () => {
115+
it('does not require API key for hosted models on hosted platform', () => {
116+
mockIsHosted.value = true
117+
mockGetHostedModels.mockReturnValue(['gpt-4o', 'claude-sonnet-4-5'])
118+
expect(evaluateCondition('gpt-4o')).toBe(false)
119+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(false)
120+
})
121+
122+
it('requires API key for non-hosted models on hosted platform', () => {
123+
mockIsHosted.value = true
124+
mockGetHostedModels.mockReturnValue(['gpt-4o'])
125+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
126+
})
127+
})
128+
129+
describe('Vertex AI models', () => {
130+
it('does not require API key for vertex/ prefixed models', () => {
131+
expect(evaluateCondition('vertex/gemini-2.5-pro')).toBe(false)
132+
})
133+
})
134+
135+
describe('Bedrock models', () => {
136+
it('does not require API key for bedrock/ prefixed models', () => {
137+
expect(evaluateCondition('bedrock/anthropic.claude-v2')).toBe(false)
138+
})
139+
})
140+
141+
describe('Azure models', () => {
142+
it('does not require API key for azure/ models when Azure is configured', () => {
143+
mockIsAzureConfigured.value = true
144+
expect(evaluateCondition('azure/gpt-4o')).toBe(false)
145+
expect(evaluateCondition('azure-openai/gpt-4o')).toBe(false)
146+
expect(evaluateCondition('azure-anthropic/claude-sonnet-4-5')).toBe(false)
147+
})
148+
149+
it('requires API key for azure/ models when Azure is not configured', () => {
150+
mockIsAzureConfigured.value = false
151+
expect(evaluateCondition('azure/gpt-4o')).toBe(true)
152+
})
153+
})
154+
155+
describe('vLLM models', () => {
156+
it('does not require API key for vllm/ prefixed models', () => {
157+
expect(evaluateCondition('vllm/my-model')).toBe(false)
158+
expect(evaluateCondition('vllm/llama-3-70b')).toBe(false)
159+
})
160+
})
161+
162+
describe('provider store lookup (client-side)', () => {
163+
it('does not require API key when model is in the Ollama store bucket', () => {
164+
mockProviders.value.ollama.models = ['llama3:latest', 'mistral:latest']
165+
expect(evaluateCondition('llama3:latest')).toBe(false)
166+
expect(evaluateCondition('mistral:latest')).toBe(false)
167+
})
168+
169+
it('requires API key when model is in the base store bucket', () => {
170+
mockProviders.value.base.models = ['gpt-4o', 'claude-sonnet-4-5']
171+
expect(evaluateCondition('gpt-4o')).toBe(true)
172+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
173+
})
174+
175+
it('does not require API key when model is in the vLLM store bucket', () => {
176+
mockProviders.value.vllm.models = ['my-custom-model']
177+
expect(evaluateCondition('my-custom-model')).toBe(false)
178+
})
179+
180+
it('requires API key when model is in the fireworks store bucket', () => {
181+
mockProviders.value.fireworks.models = ['fireworks/llama-3']
182+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
183+
})
184+
185+
it('requires API key when model is in the openrouter store bucket', () => {
186+
mockProviders.value.openrouter.models = ['openrouter/anthropic/claude']
187+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
188+
})
189+
190+
it('is case-insensitive for store lookup', () => {
191+
mockProviders.value.ollama.models = ['Llama3:Latest']
192+
expect(evaluateCondition('llama3:latest')).toBe(false)
193+
})
194+
})
195+
196+
describe('Ollama — OLLAMA_URL env var (server-safe)', () => {
197+
it('does not require API key for unknown models when OLLAMA_URL is set', () => {
198+
mockIsOllamaConfigured.value = true
199+
expect(evaluateCondition('llama3:latest')).toBe(false)
200+
expect(evaluateCondition('phi3:latest')).toBe(false)
201+
expect(evaluateCondition('gemma2:latest')).toBe(false)
202+
expect(evaluateCondition('deepseek-coder:latest')).toBe(false)
203+
})
204+
205+
it('does not require API key for Ollama models that match cloud provider regex patterns', () => {
206+
mockIsOllamaConfigured.value = true
207+
expect(evaluateCondition('mistral:latest')).toBe(false)
208+
expect(evaluateCondition('mistral')).toBe(false)
209+
expect(evaluateCondition('mistral-nemo')).toBe(false)
210+
expect(evaluateCondition('gpt2')).toBe(false)
211+
})
212+
213+
it('requires API key for known cloud models even when OLLAMA_URL is set', () => {
214+
mockIsOllamaConfigured.value = true
215+
mockGetBaseModelProviders.mockReturnValue(BASE_CLOUD_MODELS)
216+
expect(evaluateCondition('gpt-4o')).toBe(true)
217+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
218+
expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
219+
expect(evaluateCondition('mistral-large-latest')).toBe(true)
220+
})
221+
222+
it('requires API key for slash-prefixed cloud models when OLLAMA_URL is set', () => {
223+
mockIsOllamaConfigured.value = true
224+
expect(evaluateCondition('azure/gpt-4o')).toBe(true)
225+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
226+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
227+
expect(evaluateCondition('groq/llama-3')).toBe(true)
228+
})
229+
})
230+
231+
describe('cloud provider models that need API key', () => {
232+
it('requires API key for standard cloud models on hosted platform', () => {
233+
mockIsHosted.value = true
234+
mockGetHostedModels.mockReturnValue([])
235+
expect(evaluateCondition('gpt-4o')).toBe(true)
236+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
237+
expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
238+
expect(evaluateCondition('mistral-large-latest')).toBe(true)
239+
})
240+
241+
it('requires API key for prefixed cloud models on hosted platform', () => {
242+
mockIsHosted.value = true
243+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
244+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
245+
expect(evaluateCondition('groq/llama-3')).toBe(true)
246+
expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
247+
})
248+
249+
it('requires API key for prefixed cloud models on self-hosted', () => {
250+
mockIsHosted.value = false
251+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
252+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
253+
expect(evaluateCondition('groq/llama-3')).toBe(true)
254+
expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
255+
})
256+
})
257+
258+
describe('self-hosted without OLLAMA_URL', () => {
259+
it('requires API key for any model (Ollama models cannot appear without OLLAMA_URL)', () => {
260+
mockIsHosted.value = false
261+
mockIsOllamaConfigured.value = false
262+
expect(evaluateCondition('llama3:latest')).toBe(true)
263+
expect(evaluateCondition('mistral:latest')).toBe(true)
264+
expect(evaluateCondition('gpt-4o')).toBe(true)
265+
})
266+
})
267+
})

apps/sim/blocks/utils.ts

Lines changed: 22 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
1-
import { isAzureConfigured, isHosted } from '@/lib/core/config/feature-flags'
1+
import { isAzureConfigured, isHosted, isOllamaConfigured } from '@/lib/core/config/feature-flags'
22
import { getScopesForService } from '@/lib/oauth/utils'
33
import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types'
44
import {
5+
getBaseModelProviders,
56
getHostedModels,
6-
getProviderFromModel,
77
getProviderIcon,
88
getProviderModels,
99
} from '@/providers/models'
@@ -100,11 +100,15 @@ export function resolveOutputType(
100100
return resolvedOutputs
101101
}
102102

103-
/**
104-
* Helper to get current Ollama models from store
105-
*/
106-
const getCurrentOllamaModels = () => {
107-
return useProvidersStore.getState().providers.ollama.models
103+
function getProviderFromStore(model: string): string | null {
104+
const { providers } = useProvidersStore.getState()
105+
const normalized = model.toLowerCase()
106+
for (const [key, state] of Object.entries(providers)) {
107+
if (state.models.some((m: string) => m.toLowerCase() === normalized)) {
108+
return key
109+
}
110+
}
111+
return null
108112
}
109113

110114
function buildModelVisibilityCondition(model: string, shouldShow: boolean) {
@@ -119,16 +123,14 @@ function shouldRequireApiKeyForModel(model: string): boolean {
119123
const normalizedModel = model.trim().toLowerCase()
120124
if (!normalizedModel) return false
121125

122-
const hostedModels = getHostedModels()
123-
const isHostedModel = hostedModels.some(
124-
(hostedModel) => hostedModel.toLowerCase() === normalizedModel
125-
)
126-
if (isHosted && isHostedModel) return false
126+
if (isHosted) {
127+
const hostedModels = getHostedModels()
128+
if (hostedModels.some((m) => m.toLowerCase() === normalizedModel)) return false
129+
}
127130

128131
if (normalizedModel.startsWith('vertex/') || normalizedModel.startsWith('bedrock/')) {
129132
return false
130133
}
131-
132134
if (
133135
isAzureConfigured &&
134136
(normalizedModel.startsWith('azure/') ||
@@ -138,30 +140,18 @@ function shouldRequireApiKeyForModel(model: string): boolean {
138140
) {
139141
return false
140142
}
141-
142143
if (normalizedModel.startsWith('vllm/')) {
143144
return false
144145
}
145146

146-
const currentOllamaModels = getCurrentOllamaModels()
147-
if (currentOllamaModels.some((ollamaModel) => ollamaModel.toLowerCase() === normalizedModel)) {
148-
return false
149-
}
147+
const storeProvider = getProviderFromStore(normalizedModel)
148+
if (storeProvider === 'ollama' || storeProvider === 'vllm') return false
149+
if (storeProvider) return true
150150

151-
if (!isHosted) {
152-
try {
153-
const providerId = getProviderFromModel(model)
154-
if (
155-
providerId === 'ollama' ||
156-
providerId === 'vllm' ||
157-
providerId === 'vertex' ||
158-
providerId === 'bedrock'
159-
) {
160-
return false
161-
}
162-
} catch {
163-
// If model resolution fails, fall through and require an API key.
164-
}
151+
if (isOllamaConfigured) {
152+
if (normalizedModel.includes('/')) return true
153+
if (normalizedModel in getBaseModelProviders()) return true
154+
return false
165155
}
166156

167157
return true

apps/sim/lib/core/config/feature-flags.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,14 @@ export const isInboxEnabled = isTruthy(env.INBOX_ENABLED)
122122
*/
123123
export const isE2bEnabled = isTruthy(env.E2B_ENABLED)
124124

125+
/**
126+
* Whether Ollama is configured (OLLAMA_URL is set).
127+
* When true, models that are not in the static cloud model list and have no
128+
* slash-prefixed provider namespace are assumed to be Ollama models
129+
* and do not require an API key.
130+
*/
131+
export const isOllamaConfigured = Boolean(env.OLLAMA_URL)
132+
125133
/**
126134
* Whether Azure OpenAI / Azure Anthropic credentials are pre-configured at the server level
127135
* (via AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, AZURE_ANTHROPIC_ENDPOINT, etc.).

0 commit comments

Comments (0)