|
| 1 | +/** |
| 2 | + * @vitest-environment node |
| 3 | + */ |
| 4 | +import { beforeEach, describe, expect, it, vi } from 'vitest' |
| 5 | + |
| 6 | +const { mockIsHosted, mockIsAzureConfigured, mockIsOllamaConfigured } = vi.hoisted(() => ({ |
| 7 | + mockIsHosted: { value: false }, |
| 8 | + mockIsAzureConfigured: { value: false }, |
| 9 | + mockIsOllamaConfigured: { value: false }, |
| 10 | +})) |
| 11 | + |
| 12 | +const { |
| 13 | + mockGetHostedModels, |
| 14 | + mockGetProviderModels, |
| 15 | + mockGetProviderIcon, |
| 16 | + mockGetBaseModelProviders, |
| 17 | +} = vi.hoisted(() => ({ |
| 18 | + mockGetHostedModels: vi.fn(() => []), |
| 19 | + mockGetProviderModels: vi.fn(() => []), |
| 20 | + mockGetProviderIcon: vi.fn(() => null), |
| 21 | + mockGetBaseModelProviders: vi.fn(() => ({})), |
| 22 | +})) |
| 23 | + |
| 24 | +const { mockProviders } = vi.hoisted(() => ({ |
| 25 | + mockProviders: { |
| 26 | + value: { |
| 27 | + base: { models: [] as string[], isLoading: false }, |
| 28 | + ollama: { models: [] as string[], isLoading: false }, |
| 29 | + vllm: { models: [] as string[], isLoading: false }, |
| 30 | + openrouter: { models: [] as string[], isLoading: false }, |
| 31 | + fireworks: { models: [] as string[], isLoading: false }, |
| 32 | + }, |
| 33 | + }, |
| 34 | +})) |
| 35 | + |
| 36 | +vi.mock('@/lib/core/config/feature-flags', () => ({ |
| 37 | + get isHosted() { |
| 38 | + return mockIsHosted.value |
| 39 | + }, |
| 40 | + get isAzureConfigured() { |
| 41 | + return mockIsAzureConfigured.value |
| 42 | + }, |
| 43 | + get isOllamaConfigured() { |
| 44 | + return mockIsOllamaConfigured.value |
| 45 | + }, |
| 46 | +})) |
| 47 | + |
| 48 | +vi.mock('@/providers/models', () => ({ |
| 49 | + getHostedModels: mockGetHostedModels, |
| 50 | + getProviderModels: mockGetProviderModels, |
| 51 | + getProviderIcon: mockGetProviderIcon, |
| 52 | + getBaseModelProviders: mockGetBaseModelProviders, |
| 53 | +})) |
| 54 | + |
| 55 | +vi.mock('@/stores/providers/store', () => ({ |
| 56 | + useProvidersStore: { |
| 57 | + getState: () => ({ |
| 58 | + get providers() { |
| 59 | + return mockProviders.value |
| 60 | + }, |
| 61 | + }), |
| 62 | + }, |
| 63 | +})) |
| 64 | + |
| 65 | +vi.mock('@/lib/oauth/utils', () => ({ |
| 66 | + getScopesForService: vi.fn(() => []), |
| 67 | +})) |
| 68 | + |
| 69 | +import { getApiKeyCondition } from '@/blocks/utils' |
| 70 | + |
// Canonical cloud models mapped to their provider ids. Fed to
// mockGetBaseModelProviders by tests that need "known cloud model" behavior
// (e.g. models that must still require an API key even when Ollama is configured).
const BASE_CLOUD_MODELS: Record<string, string> = {
  'gpt-4o': 'openai',
  'claude-sonnet-4-5': 'anthropic',
  'gemini-2.5-pro': 'google',
  'mistral-large-latest': 'mistral',
}
| 77 | + |
describe('getApiKeyCondition / shouldRequireApiKeyForModel', () => {
  /**
   * Reduces the SubBlock condition produced by getApiKeyCondition() to a
   * boolean: "does this model require an API key field?".
   *
   * NOTE(review): this decoding assumes the condition object has the shape
   * `{ value: string, not?: boolean, ... }`, where `not: true` or the
   * sentinel value '__no_model_selected__' both mean "API key not required"
   * — confirm against the implementation in '@/blocks/utils'.
   */
  const evaluateCondition = (model: string): boolean => {
    const conditionFn = getApiKeyCondition()
    const condition = conditionFn({ model })
    if ('not' in condition && condition.not) return false
    if (condition.value === '__no_model_selected__') return false
    return true
  }

  // Reset every mock and fixture so each test starts from a self-hosted,
  // nothing-configured, empty-store baseline.
  beforeEach(() => {
    vi.clearAllMocks()
    mockIsHosted.value = false
    mockIsAzureConfigured.value = false
    mockIsOllamaConfigured.value = false
    mockProviders.value = {
      base: { models: [], isLoading: false },
      ollama: { models: [], isLoading: false },
      vllm: { models: [], isLoading: false },
      openrouter: { models: [], isLoading: false },
      fireworks: { models: [], isLoading: false },
    }
    mockGetHostedModels.mockReturnValue([])
    mockGetProviderModels.mockReturnValue([])
    mockGetBaseModelProviders.mockReturnValue({})
  })

  // No model selected: the API key field should never be shown.
  describe('empty or missing model', () => {
    it('does not require API key when model is empty', () => {
      expect(evaluateCondition('')).toBe(false)
    })

    it('does not require API key when model is whitespace', () => {
      expect(evaluateCondition(' ')).toBe(false)
    })
  })

  // On the hosted platform, models in the hosted list are billed by the
  // platform and need no user key; anything else still does.
  describe('hosted models', () => {
    it('does not require API key for hosted models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue(['gpt-4o', 'claude-sonnet-4-5'])
      expect(evaluateCondition('gpt-4o')).toBe(false)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(false)
    })

    it('requires API key for non-hosted models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue(['gpt-4o'])
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
    })
  })

  // Vertex AI authenticates via service credentials, not a per-model API key.
  describe('Vertex AI models', () => {
    it('does not require API key for vertex/ prefixed models', () => {
      expect(evaluateCondition('vertex/gemini-2.5-pro')).toBe(false)
    })
  })

  // Bedrock authenticates via AWS credentials, not a per-model API key.
  describe('Bedrock models', () => {
    it('does not require API key for bedrock/ prefixed models', () => {
      expect(evaluateCondition('bedrock/anthropic.claude-v2')).toBe(false)
    })
  })

  // Azure-prefixed models only skip the key when Azure is configured
  // server-side (isAzureConfigured flag).
  describe('Azure models', () => {
    it('does not require API key for azure/ models when Azure is configured', () => {
      mockIsAzureConfigured.value = true
      expect(evaluateCondition('azure/gpt-4o')).toBe(false)
      expect(evaluateCondition('azure-openai/gpt-4o')).toBe(false)
      expect(evaluateCondition('azure-anthropic/claude-sonnet-4-5')).toBe(false)
    })

    it('requires API key for azure/ models when Azure is not configured', () => {
      mockIsAzureConfigured.value = false
      expect(evaluateCondition('azure/gpt-4o')).toBe(true)
    })
  })

  // vLLM endpoints are local/self-managed: never prompt for a key.
  describe('vLLM models', () => {
    it('does not require API key for vllm/ prefixed models', () => {
      expect(evaluateCondition('vllm/my-model')).toBe(false)
      expect(evaluateCondition('vllm/llama-3-70b')).toBe(false)
    })
  })

  // Which store bucket a model appears in determines key requirements:
  // local buckets (ollama, vllm) skip the key; cloud buckets (base,
  // fireworks, openrouter) still require one.
  describe('provider store lookup (client-side)', () => {
    it('does not require API key when model is in the Ollama store bucket', () => {
      mockProviders.value.ollama.models = ['llama3:latest', 'mistral:latest']
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('mistral:latest')).toBe(false)
    })

    it('requires API key when model is in the base store bucket', () => {
      mockProviders.value.base.models = ['gpt-4o', 'claude-sonnet-4-5']
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
    })

    it('does not require API key when model is in the vLLM store bucket', () => {
      mockProviders.value.vllm.models = ['my-custom-model']
      expect(evaluateCondition('my-custom-model')).toBe(false)
    })

    it('requires API key when model is in the fireworks store bucket', () => {
      mockProviders.value.fireworks.models = ['fireworks/llama-3']
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
    })

    it('requires API key when model is in the openrouter store bucket', () => {
      mockProviders.value.openrouter.models = ['openrouter/anthropic/claude']
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
    })

    it('is case-insensitive for store lookup', () => {
      mockProviders.value.ollama.models = ['Llama3:Latest']
      expect(evaluateCondition('llama3:latest')).toBe(false)
    })
  })

  // With OLLAMA_URL configured, unknown model ids are presumed to be local
  // Ollama models (no key) — but models recognized as cloud models still
  // require a key.
  describe('Ollama — OLLAMA_URL env var (server-safe)', () => {
    it('does not require API key for unknown models when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('phi3:latest')).toBe(false)
      expect(evaluateCondition('gemma2:latest')).toBe(false)
      expect(evaluateCondition('deepseek-coder:latest')).toBe(false)
    })

    it('does not require API key for Ollama models that match cloud provider regex patterns', () => {
      mockIsOllamaConfigured.value = true
      // Names like 'mistral' or 'gpt2' resemble cloud model names but are
      // not in the known cloud-model map, so they fall through to Ollama.
      expect(evaluateCondition('mistral:latest')).toBe(false)
      expect(evaluateCondition('mistral')).toBe(false)
      expect(evaluateCondition('mistral-nemo')).toBe(false)
      expect(evaluateCondition('gpt2')).toBe(false)
    })

    it('requires API key for known cloud models even when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      mockGetBaseModelProviders.mockReturnValue(BASE_CLOUD_MODELS)
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
      expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
      expect(evaluateCondition('mistral-large-latest')).toBe(true)
    })

    it('requires API key for slash-prefixed cloud models when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('azure/gpt-4o')).toBe(true)
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
    })
  })

  // Cloud provider models always need a key when not covered by the hosted list.
  describe('cloud provider models that need API key', () => {
    it('requires API key for standard cloud models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue([])
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
      expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
      expect(evaluateCondition('mistral-large-latest')).toBe(true)
    })

    it('requires API key for prefixed cloud models on hosted platform', () => {
      mockIsHosted.value = true
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
      expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
    })

    it('requires API key for prefixed cloud models on self-hosted', () => {
      mockIsHosted.value = false
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
      expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
    })
  })

  // Without OLLAMA_URL there is no local fallback, so every model needs a key.
  describe('self-hosted without OLLAMA_URL', () => {
    it('requires API key for any model (Ollama models cannot appear without OLLAMA_URL)', () => {
      mockIsHosted.value = false
      mockIsOllamaConfigured.value = false
      expect(evaluateCondition('llama3:latest')).toBe(true)
      expect(evaluateCondition('mistral:latest')).toBe(true)
      expect(evaluateCondition('gpt-4o')).toBe(true)
    })
  })
})
0 commit comments