|
| 1 | +/** |
| 2 | + * @vitest-environment node |
| 3 | + */ |
| 4 | +import { beforeEach, describe, expect, it, vi } from 'vitest' |
| 5 | + |
| 6 | +const { mockIsHosted, mockIsAzureConfigured, mockIsOllamaConfigured } = vi.hoisted(() => ({ |
| 7 | + mockIsHosted: { value: false }, |
| 8 | + mockIsAzureConfigured: { value: false }, |
| 9 | + mockIsOllamaConfigured: { value: false }, |
| 10 | +})) |
| 11 | + |
| 12 | +const { |
| 13 | + mockGetHostedModels, |
| 14 | + mockGetProviderModels, |
| 15 | + mockGetProviderIcon, |
| 16 | + mockGetProviderFromModel, |
| 17 | + mockGetBaseModelProviders, |
| 18 | +} = vi.hoisted(() => ({ |
| 19 | + mockGetHostedModels: vi.fn(() => []), |
| 20 | + mockGetProviderModels: vi.fn(() => []), |
| 21 | + mockGetProviderIcon: vi.fn(() => null), |
| 22 | + mockGetProviderFromModel: vi.fn(() => 'ollama'), |
| 23 | + mockGetBaseModelProviders: vi.fn(() => ({})), |
| 24 | +})) |
| 25 | + |
| 26 | +const { mockProviders } = vi.hoisted(() => ({ |
| 27 | + mockProviders: { |
| 28 | + value: { |
| 29 | + base: { models: [] as string[], isLoading: false }, |
| 30 | + ollama: { models: [] as string[], isLoading: false }, |
| 31 | + vllm: { models: [] as string[], isLoading: false }, |
| 32 | + openrouter: { models: [] as string[], isLoading: false }, |
| 33 | + fireworks: { models: [] as string[], isLoading: false }, |
| 34 | + }, |
| 35 | + }, |
| 36 | +})) |
| 37 | + |
// Mock the feature-flag module with live getters: each property re-reads the
// hoisted `.value` cell on access, so flipping a flag inside a test is
// observed immediately by the code under test (a plain object literal would
// freeze the values at mock time).
vi.mock('@/lib/core/config/feature-flags', () => ({
  get isHosted() {
    return mockIsHosted.value
  },
  get isAzureConfigured() {
    return mockIsAzureConfigured.value
  },
  get isOllamaConfigured() {
    return mockIsOllamaConfigured.value
  },
}))
| 49 | + |
// Route the provider-model helper module to the hoisted vi.fn() spies so
// tests can stub return values (mockReturnValue / mockImplementation) and
// assert call patterns.
vi.mock('@/providers/models', () => ({
  getHostedModels: mockGetHostedModels,
  getProviderModels: mockGetProviderModels,
  getProviderIcon: mockGetProviderIcon,
  getProviderFromModel: mockGetProviderFromModel,
  getBaseModelProviders: mockGetBaseModelProviders,
}))
| 57 | + |
// Mock the Zustand-style providers store: getState() exposes a live getter
// over mockProviders, so per-test mutations of the bucket contents are
// visible to the code under test without re-mocking.
vi.mock('@/stores/providers/store', () => ({
  useProvidersStore: {
    getState: () => ({
      get providers() {
        return mockProviders.value
      },
    }),
  },
}))
| 67 | + |
// Stub the OAuth scope helper to return no scopes. NOTE(review): presumably
// mocked to keep the '@/blocks/utils' import side-effect free in a node
// environment — these tests never exercise OAuth; confirm against that module.
vi.mock('@/lib/oauth/utils', () => ({
  getScopesForService: vi.fn(() => []),
}))
| 71 | + |
| 72 | +import { getApiKeyCondition } from '@/blocks/utils' |
| 73 | + |
| 74 | +/** |
| 75 | + * Simulates getProviderFromModel behavior: checks known prefix patterns, |
| 76 | + * defaults to 'ollama' for unrecognized models (matching real implementation). |
| 77 | + */ |
| 78 | +function simulateGetProviderFromModel(model: string): string { |
| 79 | + const m = model.toLowerCase() |
| 80 | + if (m.startsWith('fireworks/')) return 'fireworks' |
| 81 | + if (m.startsWith('openrouter/')) return 'openrouter' |
| 82 | + if (m.startsWith('vllm/')) return 'vllm' |
| 83 | + if (m.startsWith('vertex/')) return 'vertex' |
| 84 | + if (m.startsWith('bedrock/')) return 'bedrock' |
| 85 | + if (m.startsWith('azure/')) return 'azure-openai' |
| 86 | + if (m.startsWith('azure-openai/')) return 'azure-openai' |
| 87 | + if (m.startsWith('azure-anthropic/')) return 'azure-anthropic' |
| 88 | + if (m.startsWith('groq/')) return 'groq' |
| 89 | + if (m.startsWith('cerebras/')) return 'cerebras' |
| 90 | + if (/^gpt/.test(m) || /^o\d/.test(m)) return 'openai' |
| 91 | + if (/^claude/.test(m)) return 'anthropic' |
| 92 | + if (/^gemini/.test(m)) return 'google' |
| 93 | + if (/^grok/.test(m)) return 'xai' |
| 94 | + if (/^mistral/.test(m) || /^magistral/.test(m)) return 'mistral' |
| 95 | + return 'ollama' |
| 96 | +} |
| 97 | + |
| 98 | +const BASE_CLOUD_MODELS: Record<string, string> = { |
| 99 | + 'gpt-4o': 'openai', |
| 100 | + 'claude-sonnet-4-5': 'anthropic', |
| 101 | + 'gemini-2.5-pro': 'google', |
| 102 | + 'mistral-large-latest': 'mistral', |
| 103 | +} |
| 104 | + |
describe('getApiKeyCondition / shouldRequireApiKeyForModel', () => {
  // Collapses the condition object returned by getApiKeyCondition() into a
  // boolean: "does this model require an API key?".
  // NOTE(review): the decoding below (a truthy `not` or the
  // '__no_model_selected__' sentinel meaning "key not required") mirrors how
  // the field renderer is assumed to consume the condition — confirm against
  // the implementation in '@/blocks/utils'.
  const evaluateCondition = (model: string): boolean => {
    const conditionFn = getApiKeyCondition()
    const condition = conditionFn({ model })
    if ('not' in condition && condition.not) return false
    if (condition.value === '__no_model_selected__') return false
    return true
  }

  // Reset to the self-hosted, nothing-configured baseline before every test:
  // all flags off, all store buckets empty, spies cleared. Note clearAllMocks
  // clears call history but not implementations, so the explicit
  // mockReturnValue/mockImplementation calls below re-establish defaults.
  beforeEach(() => {
    vi.clearAllMocks()
    mockIsHosted.value = false
    mockIsAzureConfigured.value = false
    mockIsOllamaConfigured.value = false
    mockProviders.value = {
      base: { models: [], isLoading: false },
      ollama: { models: [], isLoading: false },
      vllm: { models: [], isLoading: false },
      openrouter: { models: [], isLoading: false },
      fireworks: { models: [], isLoading: false },
    }
    mockGetHostedModels.mockReturnValue([])
    mockGetProviderModels.mockReturnValue([])
    // Use the realistic simulation instead of the hoisted stub's fixed 'ollama'.
    mockGetProviderFromModel.mockImplementation(simulateGetProviderFromModel)
    mockGetBaseModelProviders.mockReturnValue({})
  })

  describe('empty or missing model', () => {
    it('does not require API key when model is empty', () => {
      expect(evaluateCondition('')).toBe(false)
    })

    it('does not require API key when model is whitespace', () => {
      expect(evaluateCondition(' ')).toBe(false)
    })
  })

  // On the hosted platform, models served by the platform itself need no
  // user-supplied key; everything else still does.
  describe('hosted models', () => {
    it('does not require API key for hosted models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue(['gpt-4o', 'claude-sonnet-4-5'])
      expect(evaluateCondition('gpt-4o')).toBe(false)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(false)
    })

    it('requires API key for non-hosted models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue(['gpt-4o'])
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
    })
  })

  // Vertex and Bedrock authenticate via cloud credentials, not API keys.
  describe('Vertex AI models', () => {
    it('does not require API key for vertex/ prefixed models', () => {
      expect(evaluateCondition('vertex/gemini-2.5-pro')).toBe(false)
    })
  })

  describe('Bedrock models', () => {
    it('does not require API key for bedrock/ prefixed models', () => {
      expect(evaluateCondition('bedrock/anthropic.claude-v2')).toBe(false)
    })
  })

  // Azure models piggyback on server-side Azure configuration when present.
  describe('Azure models', () => {
    it('does not require API key for azure/ models when Azure is configured', () => {
      mockIsAzureConfigured.value = true
      expect(evaluateCondition('azure/gpt-4o')).toBe(false)
      expect(evaluateCondition('azure-openai/gpt-4o')).toBe(false)
      expect(evaluateCondition('azure-anthropic/claude-sonnet-4-5')).toBe(false)
    })

    it('requires API key for azure/ models when Azure is not configured', () => {
      mockIsAzureConfigured.value = false
      expect(evaluateCondition('azure/gpt-4o')).toBe(true)
    })
  })

  // vLLM endpoints are self-hosted and keyless by convention here.
  describe('vLLM models', () => {
    it('does not require API key for vllm/ prefixed models', () => {
      expect(evaluateCondition('vllm/my-model')).toBe(false)
      expect(evaluateCondition('vllm/llama-3-70b')).toBe(false)
    })
  })

  // The client-side providers store tells us which bucket a discovered model
  // belongs to; only the local-ollama bucket is keyless.
  describe('provider store lookup (client-side)', () => {
    it('does not require API key when model is in the Ollama store bucket', () => {
      mockProviders.value.ollama.models = ['llama3:latest', 'mistral:latest']
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('mistral:latest')).toBe(false)
    })

    it('requires API key when model is in the base store bucket', () => {
      mockProviders.value.base.models = ['gpt-4o', 'claude-sonnet-4-5']
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
    })

    it('requires API key when model is in the fireworks store bucket', () => {
      mockProviders.value.fireworks.models = ['fireworks/llama-3']
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
    })

    it('requires API key when model is in the openrouter store bucket', () => {
      mockProviders.value.openrouter.models = ['openrouter/anthropic/claude']
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
    })

    it('is case-insensitive for store lookup', () => {
      mockProviders.value.ollama.models = ['Llama3:Latest']
      expect(evaluateCondition('llama3:latest')).toBe(false)
    })
  })

  // When OLLAMA_URL is configured server-side, unknown models are assumed to
  // be local Ollama models — but known cloud models still demand a key.
  describe('Ollama — OLLAMA_URL env var (server-safe)', () => {
    it('does not require API key for unknown models when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('phi3:latest')).toBe(false)
      expect(evaluateCondition('gemma2:latest')).toBe(false)
      expect(evaluateCondition('deepseek-coder:latest')).toBe(false)
    })

    it('does not require API key for Ollama models that match cloud provider regex patterns', () => {
      // e.g. a local 'mistral' pull must not be mistaken for Mistral's cloud API.
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('mistral:latest')).toBe(false)
      expect(evaluateCondition('mistral')).toBe(false)
      expect(evaluateCondition('mistral-nemo')).toBe(false)
      expect(evaluateCondition('gpt2')).toBe(false)
    })

    it('requires API key for known cloud models even when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      mockGetBaseModelProviders.mockReturnValue(BASE_CLOUD_MODELS)
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
      expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
      expect(evaluateCondition('mistral-large-latest')).toBe(true)
    })

    it('requires API key for slash-prefixed cloud models when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('azure/gpt-4o')).toBe(true)
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
    })
  })

  describe('cloud provider models that need API key', () => {
    it('requires API key for standard cloud models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue([])
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
      expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
      expect(evaluateCondition('mistral-large-latest')).toBe(true)
    })

    it('requires API key for prefixed cloud models on hosted platform', () => {
      mockIsHosted.value = true
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
      expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
    })

    it('requires API key for prefixed cloud models on self-hosted', () => {
      mockIsHosted.value = false
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
      expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
    })
  })

  // Self-hosted with no Ollama configured: classification falls back to
  // getProviderFromModel; 'ollama' results are keyless, cloud results are not.
  describe('self-hosted getProviderFromModel fallback', () => {
    it('does not require API key when getProviderFromModel defaults to ollama', () => {
      mockIsHosted.value = false
      mockIsOllamaConfigured.value = false
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('phi3:latest')).toBe(false)
    })

    it('requires API key when getProviderFromModel returns a cloud provider', () => {
      mockIsHosted.value = false
      mockIsOllamaConfigured.value = false
      expect(evaluateCondition('mistral:latest')).toBe(true)
      expect(evaluateCondition('gpt2')).toBe(true)
    })

    it('does not run getProviderFromModel fallback on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue([])
      expect(evaluateCondition('llama3:latest')).toBe(true)
      expect(mockGetProviderFromModel).not.toHaveBeenCalled()
    })
  })
})
0 commit comments