Skip to content

Commit 3f855a1

Browse files
waleedlatif1 authored and claude committed
fix(blocks): resolve Ollama models incorrectly requiring API key in Docker
Server-side validation failed for Ollama models like mistral:latest because the Zustand providers store is empty on the server and getProviderFromModel misidentified them via regex pattern matching (e.g. mistral:latest matched Mistral AI's /^mistral/ pattern). Replace the hardcoded CLOUD_PROVIDER_PREFIXES list with existing data sources:

- Provider store (definitive on client, checks all provider buckets)
- getBaseModelProviders() from PROVIDER_DEFINITIONS (server-side static cloud model lookup)
- Slash convention for dynamic cloud providers (fireworks/, openrouter/, etc.)
- isOllamaConfigured feature flag using existing OLLAMA_URL env var

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 235f074 commit 3f855a1

File tree

3 files changed

+335
-26
lines changed

3 files changed

+335
-26
lines changed

apps/sim/blocks/utils.test.ts

Lines changed: 303 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,303 @@
1+
/**
2+
* @vitest-environment node
3+
*/
4+
import { beforeEach, describe, expect, it, vi } from 'vitest'
5+
6+
const { mockIsHosted, mockIsAzureConfigured, mockIsOllamaConfigured } = vi.hoisted(() => ({
7+
mockIsHosted: { value: false },
8+
mockIsAzureConfigured: { value: false },
9+
mockIsOllamaConfigured: { value: false },
10+
}))
11+
12+
const {
13+
mockGetHostedModels,
14+
mockGetProviderModels,
15+
mockGetProviderIcon,
16+
mockGetProviderFromModel,
17+
mockGetBaseModelProviders,
18+
} = vi.hoisted(() => ({
19+
mockGetHostedModels: vi.fn(() => []),
20+
mockGetProviderModels: vi.fn(() => []),
21+
mockGetProviderIcon: vi.fn(() => null),
22+
mockGetProviderFromModel: vi.fn(() => 'ollama'),
23+
mockGetBaseModelProviders: vi.fn(() => ({})),
24+
}))
25+
26+
const { mockProviders } = vi.hoisted(() => ({
27+
mockProviders: {
28+
value: {
29+
base: { models: [] as string[], isLoading: false },
30+
ollama: { models: [] as string[], isLoading: false },
31+
vllm: { models: [] as string[], isLoading: false },
32+
openrouter: { models: [] as string[], isLoading: false },
33+
fireworks: { models: [] as string[], isLoading: false },
34+
},
35+
},
36+
}))
37+
38+
vi.mock('@/lib/core/config/feature-flags', () => ({
39+
get isHosted() {
40+
return mockIsHosted.value
41+
},
42+
get isAzureConfigured() {
43+
return mockIsAzureConfigured.value
44+
},
45+
get isOllamaConfigured() {
46+
return mockIsOllamaConfigured.value
47+
},
48+
}))
49+
50+
vi.mock('@/providers/models', () => ({
51+
getHostedModels: mockGetHostedModels,
52+
getProviderModels: mockGetProviderModels,
53+
getProviderIcon: mockGetProviderIcon,
54+
getProviderFromModel: mockGetProviderFromModel,
55+
getBaseModelProviders: mockGetBaseModelProviders,
56+
}))
57+
58+
vi.mock('@/stores/providers/store', () => ({
59+
useProvidersStore: {
60+
getState: () => ({
61+
get providers() {
62+
return mockProviders.value
63+
},
64+
}),
65+
},
66+
}))
67+
68+
vi.mock('@/lib/oauth/utils', () => ({
69+
getScopesForService: vi.fn(() => []),
70+
}))
71+
72+
import { getApiKeyCondition } from '@/blocks/utils'
73+
74+
/**
75+
* Simulates getProviderFromModel behavior: checks known prefix patterns,
76+
* defaults to 'ollama' for unrecognized models (matching real implementation).
77+
*/
78+
function simulateGetProviderFromModel(model: string): string {
79+
const m = model.toLowerCase()
80+
if (m.startsWith('fireworks/')) return 'fireworks'
81+
if (m.startsWith('openrouter/')) return 'openrouter'
82+
if (m.startsWith('vllm/')) return 'vllm'
83+
if (m.startsWith('vertex/')) return 'vertex'
84+
if (m.startsWith('bedrock/')) return 'bedrock'
85+
if (m.startsWith('azure/')) return 'azure-openai'
86+
if (m.startsWith('azure-openai/')) return 'azure-openai'
87+
if (m.startsWith('azure-anthropic/')) return 'azure-anthropic'
88+
if (m.startsWith('groq/')) return 'groq'
89+
if (m.startsWith('cerebras/')) return 'cerebras'
90+
if (/^gpt/.test(m) || /^o\d/.test(m)) return 'openai'
91+
if (/^claude/.test(m)) return 'anthropic'
92+
if (/^gemini/.test(m)) return 'google'
93+
if (/^grok/.test(m)) return 'xai'
94+
if (/^mistral/.test(m) || /^magistral/.test(m)) return 'mistral'
95+
return 'ollama'
96+
}
97+
98+
const BASE_CLOUD_MODELS: Record<string, string> = {
99+
'gpt-4o': 'openai',
100+
'claude-sonnet-4-5': 'anthropic',
101+
'gemini-2.5-pro': 'google',
102+
'mistral-large-latest': 'mistral',
103+
}
104+
105+
describe('getApiKeyCondition / shouldRequireApiKeyForModel', () => {
106+
const evaluateCondition = (model: string): boolean => {
107+
const conditionFn = getApiKeyCondition()
108+
const condition = conditionFn({ model })
109+
if ('not' in condition && condition.not) return false
110+
if (condition.value === '__no_model_selected__') return false
111+
return true
112+
}
113+
114+
beforeEach(() => {
115+
vi.clearAllMocks()
116+
mockIsHosted.value = false
117+
mockIsAzureConfigured.value = false
118+
mockIsOllamaConfigured.value = false
119+
mockProviders.value = {
120+
base: { models: [], isLoading: false },
121+
ollama: { models: [], isLoading: false },
122+
vllm: { models: [], isLoading: false },
123+
openrouter: { models: [], isLoading: false },
124+
fireworks: { models: [], isLoading: false },
125+
}
126+
mockGetHostedModels.mockReturnValue([])
127+
mockGetProviderModels.mockReturnValue([])
128+
mockGetProviderFromModel.mockImplementation(simulateGetProviderFromModel)
129+
mockGetBaseModelProviders.mockReturnValue({})
130+
})
131+
132+
describe('empty or missing model', () => {
133+
it('does not require API key when model is empty', () => {
134+
expect(evaluateCondition('')).toBe(false)
135+
})
136+
137+
it('does not require API key when model is whitespace', () => {
138+
expect(evaluateCondition(' ')).toBe(false)
139+
})
140+
})
141+
142+
describe('hosted models', () => {
143+
it('does not require API key for hosted models on hosted platform', () => {
144+
mockIsHosted.value = true
145+
mockGetHostedModels.mockReturnValue(['gpt-4o', 'claude-sonnet-4-5'])
146+
expect(evaluateCondition('gpt-4o')).toBe(false)
147+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(false)
148+
})
149+
150+
it('requires API key for non-hosted models on hosted platform', () => {
151+
mockIsHosted.value = true
152+
mockGetHostedModels.mockReturnValue(['gpt-4o'])
153+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
154+
})
155+
})
156+
157+
describe('Vertex AI models', () => {
158+
it('does not require API key for vertex/ prefixed models', () => {
159+
expect(evaluateCondition('vertex/gemini-2.5-pro')).toBe(false)
160+
})
161+
})
162+
163+
describe('Bedrock models', () => {
164+
it('does not require API key for bedrock/ prefixed models', () => {
165+
expect(evaluateCondition('bedrock/anthropic.claude-v2')).toBe(false)
166+
})
167+
})
168+
169+
describe('Azure models', () => {
170+
it('does not require API key for azure/ models when Azure is configured', () => {
171+
mockIsAzureConfigured.value = true
172+
expect(evaluateCondition('azure/gpt-4o')).toBe(false)
173+
expect(evaluateCondition('azure-openai/gpt-4o')).toBe(false)
174+
expect(evaluateCondition('azure-anthropic/claude-sonnet-4-5')).toBe(false)
175+
})
176+
177+
it('requires API key for azure/ models when Azure is not configured', () => {
178+
mockIsAzureConfigured.value = false
179+
expect(evaluateCondition('azure/gpt-4o')).toBe(true)
180+
})
181+
})
182+
183+
describe('vLLM models', () => {
184+
it('does not require API key for vllm/ prefixed models', () => {
185+
expect(evaluateCondition('vllm/my-model')).toBe(false)
186+
expect(evaluateCondition('vllm/llama-3-70b')).toBe(false)
187+
})
188+
})
189+
190+
describe('provider store lookup (client-side)', () => {
191+
it('does not require API key when model is in the Ollama store bucket', () => {
192+
mockProviders.value.ollama.models = ['llama3:latest', 'mistral:latest']
193+
expect(evaluateCondition('llama3:latest')).toBe(false)
194+
expect(evaluateCondition('mistral:latest')).toBe(false)
195+
})
196+
197+
it('requires API key when model is in the base store bucket', () => {
198+
mockProviders.value.base.models = ['gpt-4o', 'claude-sonnet-4-5']
199+
expect(evaluateCondition('gpt-4o')).toBe(true)
200+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
201+
})
202+
203+
it('requires API key when model is in the fireworks store bucket', () => {
204+
mockProviders.value.fireworks.models = ['fireworks/llama-3']
205+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
206+
})
207+
208+
it('requires API key when model is in the openrouter store bucket', () => {
209+
mockProviders.value.openrouter.models = ['openrouter/anthropic/claude']
210+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
211+
})
212+
213+
it('is case-insensitive for store lookup', () => {
214+
mockProviders.value.ollama.models = ['Llama3:Latest']
215+
expect(evaluateCondition('llama3:latest')).toBe(false)
216+
})
217+
})
218+
219+
describe('Ollama — OLLAMA_URL env var (server-safe)', () => {
220+
it('does not require API key for unknown models when OLLAMA_URL is set', () => {
221+
mockIsOllamaConfigured.value = true
222+
expect(evaluateCondition('llama3:latest')).toBe(false)
223+
expect(evaluateCondition('phi3:latest')).toBe(false)
224+
expect(evaluateCondition('gemma2:latest')).toBe(false)
225+
expect(evaluateCondition('deepseek-coder:latest')).toBe(false)
226+
})
227+
228+
it('does not require API key for Ollama models that match cloud provider regex patterns', () => {
229+
mockIsOllamaConfigured.value = true
230+
expect(evaluateCondition('mistral:latest')).toBe(false)
231+
expect(evaluateCondition('mistral')).toBe(false)
232+
expect(evaluateCondition('mistral-nemo')).toBe(false)
233+
expect(evaluateCondition('gpt2')).toBe(false)
234+
})
235+
236+
it('requires API key for known cloud models even when OLLAMA_URL is set', () => {
237+
mockIsOllamaConfigured.value = true
238+
mockGetBaseModelProviders.mockReturnValue(BASE_CLOUD_MODELS)
239+
expect(evaluateCondition('gpt-4o')).toBe(true)
240+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
241+
expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
242+
expect(evaluateCondition('mistral-large-latest')).toBe(true)
243+
})
244+
245+
it('requires API key for slash-prefixed cloud models when OLLAMA_URL is set', () => {
246+
mockIsOllamaConfigured.value = true
247+
expect(evaluateCondition('azure/gpt-4o')).toBe(true)
248+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
249+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
250+
expect(evaluateCondition('groq/llama-3')).toBe(true)
251+
})
252+
})
253+
254+
describe('cloud provider models that need API key', () => {
255+
it('requires API key for standard cloud models on hosted platform', () => {
256+
mockIsHosted.value = true
257+
mockGetHostedModels.mockReturnValue([])
258+
expect(evaluateCondition('gpt-4o')).toBe(true)
259+
expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
260+
expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
261+
expect(evaluateCondition('mistral-large-latest')).toBe(true)
262+
})
263+
264+
it('requires API key for prefixed cloud models on hosted platform', () => {
265+
mockIsHosted.value = true
266+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
267+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
268+
expect(evaluateCondition('groq/llama-3')).toBe(true)
269+
expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
270+
})
271+
272+
it('requires API key for prefixed cloud models on self-hosted', () => {
273+
mockIsHosted.value = false
274+
expect(evaluateCondition('fireworks/llama-3')).toBe(true)
275+
expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
276+
expect(evaluateCondition('groq/llama-3')).toBe(true)
277+
expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
278+
})
279+
})
280+
281+
describe('self-hosted getProviderFromModel fallback', () => {
282+
it('does not require API key when getProviderFromModel defaults to ollama', () => {
283+
mockIsHosted.value = false
284+
mockIsOllamaConfigured.value = false
285+
expect(evaluateCondition('llama3:latest')).toBe(false)
286+
expect(evaluateCondition('phi3:latest')).toBe(false)
287+
})
288+
289+
it('requires API key when getProviderFromModel returns a cloud provider', () => {
290+
mockIsHosted.value = false
291+
mockIsOllamaConfigured.value = false
292+
expect(evaluateCondition('mistral:latest')).toBe(true)
293+
expect(evaluateCondition('gpt2')).toBe(true)
294+
})
295+
296+
it('does not run getProviderFromModel fallback on hosted platform', () => {
297+
mockIsHosted.value = true
298+
mockGetHostedModels.mockReturnValue([])
299+
expect(evaluateCondition('llama3:latest')).toBe(true)
300+
expect(mockGetProviderFromModel).not.toHaveBeenCalled()
301+
})
302+
})
303+
})

apps/sim/blocks/utils.ts

Lines changed: 24 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
1-
import { isAzureConfigured, isHosted } from '@/lib/core/config/feature-flags'
1+
import { isAzureConfigured, isHosted, isOllamaConfigured } from '@/lib/core/config/feature-flags'
22
import { getScopesForService } from '@/lib/oauth/utils'
33
import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types'
44
import {
5+
getBaseModelProviders,
56
getHostedModels,
67
getProviderFromModel,
78
getProviderIcon,
@@ -100,11 +101,15 @@ export function resolveOutputType(
100101
return resolvedOutputs
101102
}
102103

103-
/**
104-
* Helper to get current Ollama models from store
105-
*/
106-
const getCurrentOllamaModels = () => {
107-
return useProvidersStore.getState().providers.ollama.models
104+
function getProviderFromStore(model: string): string | null {
105+
const { providers } = useProvidersStore.getState()
106+
const normalized = model.toLowerCase()
107+
for (const [key, state] of Object.entries(providers)) {
108+
if (state.models.some((m: string) => m.toLowerCase() === normalized)) {
109+
return key
110+
}
111+
}
112+
return null
108113
}
109114

110115
function buildModelVisibilityCondition(model: string, shouldShow: boolean) {
@@ -119,16 +124,14 @@ function shouldRequireApiKeyForModel(model: string): boolean {
119124
const normalizedModel = model.trim().toLowerCase()
120125
if (!normalizedModel) return false
121126

122-
const hostedModels = getHostedModels()
123-
const isHostedModel = hostedModels.some(
124-
(hostedModel) => hostedModel.toLowerCase() === normalizedModel
125-
)
126-
if (isHosted && isHostedModel) return false
127+
if (isHosted) {
128+
const hostedModels = getHostedModels()
129+
if (hostedModels.some((m) => m.toLowerCase() === normalizedModel)) return false
130+
}
127131

128132
if (normalizedModel.startsWith('vertex/') || normalizedModel.startsWith('bedrock/')) {
129133
return false
130134
}
131-
132135
if (
133136
isAzureConfigured &&
134137
(normalizedModel.startsWith('azure/') ||
@@ -138,30 +141,25 @@ function shouldRequireApiKeyForModel(model: string): boolean {
138141
) {
139142
return false
140143
}
141-
142144
if (normalizedModel.startsWith('vllm/')) {
143145
return false
144146
}
145147

146-
const currentOllamaModels = getCurrentOllamaModels()
147-
if (currentOllamaModels.some((ollamaModel) => ollamaModel.toLowerCase() === normalizedModel)) {
148+
const storeProvider = getProviderFromStore(normalizedModel)
149+
if (storeProvider === 'ollama') return false
150+
if (storeProvider) return true
151+
152+
if (isOllamaConfigured) {
153+
if (normalizedModel.includes('/')) return true
154+
if (normalizedModel in getBaseModelProviders()) return true
148155
return false
149156
}
150157

151158
if (!isHosted) {
152159
try {
153160
const providerId = getProviderFromModel(model)
154-
if (
155-
providerId === 'ollama' ||
156-
providerId === 'vllm' ||
157-
providerId === 'vertex' ||
158-
providerId === 'bedrock'
159-
) {
160-
return false
161-
}
162-
} catch {
163-
// If model resolution fails, fall through and require an API key.
164-
}
161+
if (['ollama', 'vllm', 'vertex', 'bedrock'].includes(providerId)) return false
162+
} catch {}
165163
}
166164

167165
return true

0 commit comments

Comments
 (0)