diff --git a/src/main/presenter/configPresenter/index.ts b/src/main/presenter/configPresenter/index.ts
index 5aafd5df5..021b9d668 100644
--- a/src/main/presenter/configPresenter/index.ts
+++ b/src/main/presenter/configPresenter/index.ts
@@ -21,7 +21,12 @@ import {
} from '@shared/presenter'
import { ProviderBatchUpdate } from '@shared/provider-operations'
import { SearchEngineTemplate } from '@shared/chat'
-import { ModelType } from '@shared/model'
+import {
+ ModelType,
+ isNewApiEndpointType,
+ resolveNewApiCapabilityProviderId,
+ type NewApiEndpointType
+} from '@shared/model'
import {
DEFAULT_MODEL_CAPABILITY_FALLBACKS,
resolveModelContextLength,
@@ -531,19 +536,70 @@ export class ConfigPresenter implements IConfigPresenter {
return providerDbLoader.refreshIfNeeded(force)
}
+ private resolveNewApiCapabilityEndpointType(modelId: string): NewApiEndpointType {
+ const modelConfig = this.getModelConfig(modelId, 'new-api')
+ if (isNewApiEndpointType(modelConfig.endpointType)) {
+ return modelConfig.endpointType
+ }
+
+ const storedModel =
+ this.getProviderModels('new-api').find((model) => model.id === modelId) ??
+ this.getCustomModels('new-api').find((model) => model.id === modelId)
+
+ if (storedModel) {
+ if (isNewApiEndpointType(storedModel.endpointType)) {
+ return storedModel.endpointType
+ }
+
+ const supportedEndpointTypes =
+ storedModel.supportedEndpointTypes?.filter(isNewApiEndpointType) ?? []
+ if (
+ storedModel.type === ModelType.ImageGeneration &&
+ supportedEndpointTypes.includes('image-generation')
+ ) {
+ return 'image-generation'
+ }
+ if (supportedEndpointTypes.length > 0) {
+ return supportedEndpointTypes[0]
+ }
+ if (storedModel.type === ModelType.ImageGeneration) {
+ return 'image-generation'
+ }
+ }
+
+ return 'openai'
+ }
+
+ private resolveCapabilityProviderId(providerId: string, modelId: string): string {
+ if (providerId.trim().toLowerCase() !== 'new-api') {
+ return providerId
+ }
+
+ return resolveNewApiCapabilityProviderId(this.resolveNewApiCapabilityEndpointType(modelId))
+ }
+
supportsReasoningCapability(providerId: string, modelId: string): boolean {
- return modelCapabilities.supportsReasoning(providerId, modelId)
+ return modelCapabilities.supportsReasoning(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
getReasoningPortrait(providerId: string, modelId: string): ReasoningPortrait | null {
- return modelCapabilities.getReasoningPortrait(providerId, modelId)
+ return modelCapabilities.getReasoningPortrait(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
getThinkingBudgetRange(
providerId: string,
modelId: string
): { min?: number; max?: number; default?: number } {
- return modelCapabilities.getThinkingBudgetRange(providerId, modelId)
+ return modelCapabilities.getThinkingBudgetRange(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
supportsSearchCapability(providerId: string, modelId: string): boolean {
@@ -558,22 +614,34 @@ export class ConfigPresenter implements IConfigPresenter {
}
supportsReasoningEffortCapability(providerId: string, modelId: string): boolean {
- return modelCapabilities.supportsReasoningEffort(providerId, modelId)
+ return modelCapabilities.supportsReasoningEffort(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
getReasoningEffortDefault(
providerId: string,
modelId: string
): 'minimal' | 'low' | 'medium' | 'high' | undefined {
- return modelCapabilities.getReasoningEffortDefault(providerId, modelId)
+ return modelCapabilities.getReasoningEffortDefault(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
supportsVerbosityCapability(providerId: string, modelId: string): boolean {
- return modelCapabilities.supportsVerbosity(providerId, modelId)
+ return modelCapabilities.supportsVerbosity(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
getVerbosityDefault(providerId: string, modelId: string): 'low' | 'medium' | 'high' | undefined {
- return modelCapabilities.getVerbosityDefault(providerId, modelId)
+ return modelCapabilities.getVerbosityDefault(
+ this.resolveCapabilityProviderId(providerId, modelId),
+ modelId
+ )
}
private migrateConfigData(oldVersion: string | undefined): void {
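Reviewer note: the fallback chain in `resolveNewApiCapabilityEndpointType` is easier to audit in isolation. A condensed, standalone sketch of the same precedence (local types only; the `'imageGeneration'` string stands in for `ModelType.ImageGeneration`, whose runtime value is an assumption here):

```ts
type NewApiEndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation'

interface StoredModel {
  type?: string
  endpointType?: NewApiEndpointType
  supportedEndpointTypes?: NewApiEndpointType[]
}

// Same precedence as above: explicit config > stored metadata > image heuristic >
// first supported endpoint > plain OpenAI chat.
function resolveEndpointType(
  configured: NewApiEndpointType | undefined,
  stored: StoredModel | undefined
): NewApiEndpointType {
  if (configured) return configured
  if (stored?.endpointType) return stored.endpointType
  const supported = stored?.supportedEndpointTypes ?? []
  if (stored?.type === 'imageGeneration' && supported.includes('image-generation')) {
    return 'image-generation'
  }
  if (supported.length > 0) return supported[0]
  return stored?.type === 'imageGeneration' ? 'image-generation' : 'openai'
}
```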
diff --git a/src/main/presenter/configPresenter/modelConfig.ts b/src/main/presenter/configPresenter/modelConfig.ts
index d1c7ac4f9..ddd0c38e3 100644
--- a/src/main/presenter/configPresenter/modelConfig.ts
+++ b/src/main/presenter/configPresenter/modelConfig.ts
@@ -1,4 +1,4 @@
-import { ApiEndpointType, ModelType } from '@shared/model'
+import { ApiEndpointType, ModelType, isNewApiEndpointType } from '@shared/model'
import { IModelConfig, ModelConfig, ModelConfigSource } from '@shared/presenter'
import {
DEFAULT_MODEL_CAPABILITY_FALLBACKS,
@@ -453,6 +453,7 @@ export class ModelConfigHelper {
temperature: 0.6,
type: ModelType.Chat,
apiEndpoint: ApiEndpointType.Chat,
+ endpointType: undefined,
thinkingBudget: undefined,
forceInterleavedThinkingCompat: undefined,
reasoningEffort: undefined,
@@ -476,6 +477,9 @@ export class ModelConfigHelper {
maxCompletionTokens: storedConfig.maxCompletionTokens ?? finalConfig.maxCompletionTokens,
conversationId: storedConfig.conversationId ?? finalConfig.conversationId,
apiEndpoint: storedConfig.apiEndpoint ?? finalConfig.apiEndpoint,
+ endpointType: isNewApiEndpointType(storedConfig.endpointType)
+ ? storedConfig.endpointType
+ : finalConfig.endpointType,
enableSearch: storedConfig.enableSearch ?? finalConfig.enableSearch,
forcedSearch: storedConfig.forcedSearch ?? finalConfig.forcedSearch,
searchStrategy: storedConfig.searchStrategy ?? finalConfig.searchStrategy,
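The merge above only honors a stored `endpointType` that passes the runtime guard; anything else falls back to the computed default. A minimal sketch of that rule, assuming `isNewApiEndpointType` in `@shared/model` is a plain membership check over `NEW_API_ENDPOINT_TYPES` (both names appear elsewhere in this diff; their exact definitions do not):

```ts
const NEW_API_ENDPOINT_TYPES = [
  'openai',
  'openai-response',
  'anthropic',
  'gemini',
  'image-generation'
] as const
type NewApiEndpointType = (typeof NEW_API_ENDPOINT_TYPES)[number]

// Assumed shape of the shared guard: reject anything outside the known list.
const isNewApiEndpointType = (value: unknown): value is NewApiEndpointType =>
  typeof value === 'string' && (NEW_API_ENDPOINT_TYPES as readonly string[]).includes(value)

const mergeEndpointType = (
  stored: unknown,
  fallback?: NewApiEndpointType
): NewApiEndpointType | undefined => (isNewApiEndpointType(stored) ? stored : fallback)

console.log(mergeEndpointType('anthropic')) // 'anthropic'
console.log(mergeEndpointType('bogus', 'openai')) // 'openai' (invalid stored value ignored)
```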
diff --git a/src/main/presenter/configPresenter/providerModelHelper.ts b/src/main/presenter/configPresenter/providerModelHelper.ts
index 322d0e2ed..6c03b6aa6 100644
--- a/src/main/presenter/configPresenter/providerModelHelper.ts
+++ b/src/main/presenter/configPresenter/providerModelHelper.ts
@@ -93,6 +93,7 @@ export class ProviderModelHelper {
model.reasoning =
model.reasoning !== undefined ? model.reasoning : config.reasoning || false
model.type = model.type !== undefined ? model.type : config.type || ModelType.Chat
+ model.endpointType = config.endpointType ?? model.endpointType
} else {
model.vision = model.vision || false
model.functionCall = model.functionCall || false
@@ -153,10 +154,12 @@ export class ProviderModelHelper {
const store = this.getProviderModelStore(providerId)
const customModels = (store.get('custom_models') || []) as MODEL_META[]
return customModels.map((model) => {
+ const config = this.getModelConfig(model.id, providerId)
model.vision = model.vision !== undefined ? model.vision : false
model.functionCall = model.functionCall !== undefined ? model.functionCall : false
model.reasoning = model.reasoning !== undefined ? model.reasoning : false
model.type = model.type || ModelType.Chat
+ model.endpointType = config?.endpointType ?? model.endpointType
return model
})
}
diff --git a/src/main/presenter/configPresenter/providers.ts b/src/main/presenter/configPresenter/providers.ts
index dc1b89c74..b45c89811 100644
--- a/src/main/presenter/configPresenter/providers.ts
+++ b/src/main/presenter/configPresenter/providers.ts
@@ -202,6 +202,21 @@ export const DEFAULT_PROVIDERS: LLM_PROVIDER_BASE[] = [
defaultBaseUrl: 'https://open.cherryin.ai/v1'
}
},
+ {
+ id: 'new-api',
+ name: 'New API',
+ apiType: 'new-api',
+ apiKey: '',
+ baseUrl: 'https://www.newapi.ai',
+ enable: false,
+ websites: {
+ official: 'https://www.newapi.ai/',
+ apiKey: 'https://www.newapi.ai/token',
+ docs: 'https://www.newapi.ai/zh/docs/api',
+ models: 'https://www.newapi.ai/zh/docs/api',
+ defaultBaseUrl: 'https://www.newapi.ai'
+ }
+ },
{
id: 'openai',
name: 'OpenAI',
diff --git a/src/main/presenter/llmProviderPresenter/baseProvider.ts b/src/main/presenter/llmProviderPresenter/baseProvider.ts
index 1b4e3180e..137cc453e 100644
--- a/src/main/presenter/llmProviderPresenter/baseProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/baseProvider.ts
@@ -77,6 +77,10 @@ export abstract class BaseLLMProvider {
return BaseLLMProvider.DEFAULT_MODEL_FETCH_TIMEOUT
}
+ protected getCapabilityProviderId(): string {
+ return this.provider.capabilityProviderId || this.provider.id
+ }
+
/**
* Load cached model data from configuration
* Called in constructor to avoid needing to re-fetch model lists every time
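`getCapabilityProviderId` is the hook the New API delegates rely on: capability lookups use the override when it is set and fall back to the provider's own id otherwise. A minimal illustration (the `'anthropic'` value is hypothetical; the real ids come from `resolveNewApiCapabilityProviderId`):

```ts
interface ProviderLike {
  id: string
  capabilityProviderId?: string
}

// Mirrors the fallback above: the override wins only when set and non-empty.
const capabilityId = (provider: ProviderLike): string =>
  provider.capabilityProviderId || provider.id

console.log(capabilityId({ id: 'new-api' })) // 'new-api'
console.log(capabilityId({ id: 'new-api', capabilityProviderId: 'anthropic' })) // 'anthropic'
```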
diff --git a/src/main/presenter/llmProviderPresenter/managers/modelManager.ts b/src/main/presenter/llmProviderPresenter/managers/modelManager.ts
index c0b521356..d96bcd827 100644
--- a/src/main/presenter/llmProviderPresenter/managers/modelManager.ts
+++ b/src/main/presenter/llmProviderPresenter/managers/modelManager.ts
@@ -42,12 +42,14 @@ export class ModelManager {
model.functionCall = config.functionCall
model.reasoning = config.reasoning
model.type = config.type
+ model.endpointType = config.endpointType ?? model.endpointType
} else {
model.vision = model.vision !== undefined ? model.vision : config.vision
model.functionCall =
model.functionCall !== undefined ? model.functionCall : config.functionCall
model.reasoning = model.reasoning !== undefined ? model.reasoning : config.reasoning
model.type = model.type || config.type
+ model.endpointType = model.endpointType ?? config.endpointType
}
return model
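Note the deliberate precedence flip between the two branches above: when a user config exists, `config.endpointType` wins; for provider-managed entries, the model's own metadata wins. In miniature:

```ts
// Toy version of the two merge orders used above.
const userManaged = (cfg?: string, meta?: string) => cfg ?? meta // user config wins
const providerManaged = (cfg?: string, meta?: string) => meta ?? cfg // model metadata wins

console.log(userManaged('anthropic', 'openai')) // 'anthropic'
console.log(providerManaged(undefined, 'gemini')) // 'gemini'
```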
diff --git a/src/main/presenter/llmProviderPresenter/managers/providerInstanceManager.ts b/src/main/presenter/llmProviderPresenter/managers/providerInstanceManager.ts
index 22020d458..84380e734 100644
--- a/src/main/presenter/llmProviderPresenter/managers/providerInstanceManager.ts
+++ b/src/main/presenter/llmProviderPresenter/managers/providerInstanceManager.ts
@@ -34,6 +34,7 @@ import { JiekouProvider } from '../providers/jiekouProvider'
import { ZenmuxProvider } from '../providers/zenmuxProvider'
import { O3fanProvider } from '../providers/o3fanProvider'
import { VoiceAIProvider } from '../providers/voiceAIProvider'
+import { NewApiProvider } from '../providers/newApiProvider'
import { RateLimitManager } from './rateLimitManager'
import { StreamState } from '../types'
import { AcpSessionPersistence } from '../acp'
@@ -90,6 +91,7 @@ export class ProviderInstanceManager {
['voiceai', VoiceAIProvider],
['openai-responses', OpenAIResponsesProvider],
['cherryin', CherryInProvider],
+ ['new-api', NewApiProvider],
['lmstudio', LMStudioProvider],
['together', TogetherProvider],
['groq', GroqProvider],
@@ -124,6 +126,7 @@ export class ProviderInstanceManager {
['voiceai', VoiceAIProvider],
['openai-compatible', OpenAICompatibleProvider],
['openai-responses', OpenAIResponsesProvider],
+ ['new-api', NewApiProvider],
['lmstudio', LMStudioProvider],
['together', TogetherProvider],
['groq', GroqProvider],
diff --git a/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts b/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts
index 259d650e0..c4f0680d8 100644
--- a/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/geminiProvider.ts
@@ -329,7 +329,10 @@ export class GeminiProvider extends BaseLLMProvider {
// 判断模型是否支持 thinkingBudget
private supportsThinkingBudget(modelId: string): boolean {
const normalized = modelId.replace(/^models\//i, '')
- const range = modelCapabilities.getThinkingBudgetRange(this.provider.id, normalized)
+ const range = modelCapabilities.getThinkingBudgetRange(
+ this.getCapabilityProviderId(),
+ normalized
+ )
return (
typeof range.default === 'number' ||
typeof range.min === 'number' ||
diff --git a/src/main/presenter/llmProviderPresenter/providers/newApiProvider.ts b/src/main/presenter/llmProviderPresenter/providers/newApiProvider.ts
new file mode 100644
index 000000000..f214dab68
--- /dev/null
+++ b/src/main/presenter/llmProviderPresenter/providers/newApiProvider.ts
@@ -0,0 +1,672 @@
+import Anthropic from '@anthropic-ai/sdk'
+import {
+ ChatMessage,
+ IConfigPresenter,
+ KeyStatus,
+ LLMCoreStreamEvent,
+ LLM_EMBEDDING_ATTRS,
+ LLM_PROVIDER,
+ LLMResponse,
+ MCPToolDefinition,
+ MODEL_META,
+ ModelConfig
+} from '@shared/presenter'
+import {
+ ApiEndpointType,
+ ModelType,
+ isNewApiEndpointType,
+ resolveNewApiCapabilityProviderId,
+ type NewApiEndpointType
+} from '@shared/model'
+import { ProxyAgent } from 'undici'
+import { BaseLLMProvider } from '../baseProvider'
+import { proxyConfig } from '../../proxyConfig'
+import { AnthropicProvider } from './anthropicProvider'
+import { GeminiProvider } from './geminiProvider'
+import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import { OpenAIResponsesProvider } from './openAIResponsesProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'
+
+type NewApiModelRecord = {
+ id?: unknown
+ name?: unknown
+ owned_by?: unknown
+ description?: unknown
+ type?: unknown
+ supported_endpoint_types?: unknown
+ context_length?: unknown
+ contextLength?: unknown
+ input_token_limit?: unknown
+ max_input_tokens?: unknown
+ max_tokens?: unknown
+ max_output_tokens?: unknown
+ output_token_limit?: unknown
+}
+
+type NewApiModelsResponse = {
+ data?: NewApiModelRecord[]
+}
+
+const DEFAULT_NEW_API_BASE_URL = 'https://www.newapi.ai'
+
+class NewApiOpenAIChatDelegate extends OpenAICompatibleProvider {
+ protected override async init() {
+ this.isInitialized = true
+ }
+}
+
+class NewApiOpenAIResponsesDelegate extends OpenAIResponsesProvider {
+ protected override async init() {
+ this.isInitialized = true
+ }
+}
+
+class NewApiGeminiDelegate extends GeminiProvider {
+ protected override async init() {
+ this.isInitialized = true
+ }
+}
+
+class NewApiAnthropicDelegate extends AnthropicProvider {
+ private clientInitialized = false
+
+ protected override async init() {}
+
+  public async ensureClientInitialized(): Promise<void> {
+ const apiKey = this.provider.apiKey || process.env.ANTHROPIC_API_KEY || null
+ if (!apiKey) {
+ this.clientInitialized = false
+ this.isInitialized = false
+ return
+ }
+
+ const proxyUrl = proxyConfig.getProxyUrl()
+ const fetchOptions: { dispatcher?: ProxyAgent } = {}
+
+ if (proxyUrl) {
+ fetchOptions.dispatcher = new ProxyAgent(proxyUrl)
+ }
+
+ const self = this as unknown as { anthropic?: Anthropic }
+ self.anthropic = new Anthropic({
+ apiKey,
+ baseURL: this.provider.baseUrl || DEFAULT_NEW_API_BASE_URL,
+ defaultHeaders: this.defaultHeaders,
+ fetchOptions
+ })
+
+ this.clientInitialized = true
+ this.isInitialized = true
+ }
+
+ public isClientInitialized(): boolean {
+ return this.clientInitialized
+ }
+
+ public override onProxyResolved(): void {
+ void this.ensureClientInitialized()
+ }
+}
+
+export class NewApiProvider extends BaseLLMProvider {
+ private readonly openaiChatDelegate: NewApiOpenAIChatDelegate
+ private readonly openaiResponsesDelegate: NewApiOpenAIResponsesDelegate
+ private readonly anthropicDelegate: NewApiAnthropicDelegate
+ private readonly geminiDelegate: NewApiGeminiDelegate
+
+ constructor(
+ provider: LLM_PROVIDER,
+ configPresenter: IConfigPresenter,
+ mcpRuntime?: ProviderMcpRuntimePort
+ ) {
+ super(provider, configPresenter, mcpRuntime)
+
+ const host = this.getNormalizedBaseHost()
+
+ this.openaiChatDelegate = new NewApiOpenAIChatDelegate(
+ this.buildDelegateProvider({
+ apiType: 'openai-completions',
+ baseUrl: `${host}/v1`,
+ capabilityProviderId: resolveNewApiCapabilityProviderId('openai')
+ }),
+ configPresenter,
+ mcpRuntime
+ )
+
+ this.openaiResponsesDelegate = new NewApiOpenAIResponsesDelegate(
+ this.buildDelegateProvider({
+ apiType: 'openai-responses',
+ baseUrl: `${host}/v1`,
+ capabilityProviderId: resolveNewApiCapabilityProviderId('openai-response')
+ }),
+ configPresenter,
+ mcpRuntime
+ )
+
+ this.anthropicDelegate = new NewApiAnthropicDelegate(
+ this.buildDelegateProvider({
+ apiType: 'anthropic',
+ baseUrl: host,
+ capabilityProviderId: resolveNewApiCapabilityProviderId('anthropic')
+ }),
+ configPresenter,
+ mcpRuntime
+ )
+
+ this.geminiDelegate = new NewApiGeminiDelegate(
+ this.buildDelegateProvider({
+ apiType: 'gemini',
+ baseUrl: host,
+ capabilityProviderId: resolveNewApiCapabilityProviderId('gemini')
+ }),
+ configPresenter,
+ mcpRuntime
+ )
+
+ this.init()
+ }
+
+ private getNormalizedBaseHost(): string {
+ const rawBaseUrl = (this.provider.baseUrl || DEFAULT_NEW_API_BASE_URL).trim()
+ const normalizedBaseUrl = rawBaseUrl.replace(/\/+$/, '')
+ return normalizedBaseUrl.replace(/\/(v1|v1beta(?:\d+)?)$/i, '') || DEFAULT_NEW_API_BASE_URL
+ }
+
+ private getStoredModelMeta(modelId: string): MODEL_META | undefined {
+ return [...this.models, ...this.customModels].find((model) => model.id === modelId)
+ }
+
+  private buildDelegateProvider(overrides: Partial<LLM_PROVIDER>): LLM_PROVIDER {
+ return {
+ ...this.provider,
+ ...overrides
+ }
+ }
+
+  private getDefaultEndpointType(model: Pick<MODEL_META, 'supportedEndpointTypes' | 'type'>) {
+ const supportedEndpointTypes = model.supportedEndpointTypes ?? []
+ if (supportedEndpointTypes.length === 0) {
+ return model.type === ModelType.ImageGeneration ? 'image-generation' : undefined
+ }
+
+ if (
+ model.type === ModelType.ImageGeneration &&
+ supportedEndpointTypes.includes('image-generation')
+ ) {
+ return 'image-generation'
+ }
+
+ return supportedEndpointTypes[0]
+ }
+
+ private resolveEndpointType(modelId: string): NewApiEndpointType {
+ const modelConfig = this.configPresenter.getModelConfig(modelId, this.provider.id)
+ if (isNewApiEndpointType(modelConfig.endpointType)) {
+ return modelConfig.endpointType
+ }
+
+ const storedModel = this.getStoredModelMeta(modelId)
+ if (storedModel && isNewApiEndpointType(storedModel.endpointType)) {
+ return storedModel.endpointType
+ }
+
+ const defaultEndpointType = storedModel ? this.getDefaultEndpointType(storedModel) : undefined
+ return defaultEndpointType ?? 'openai'
+ }
+
+ private buildImageModelConfig(modelId: string, modelConfig?: ModelConfig): ModelConfig {
+ const baseConfig = modelConfig ?? this.configPresenter.getModelConfig(modelId, this.provider.id)
+ return {
+ ...baseConfig,
+ apiEndpoint: ApiEndpointType.Image,
+ type: ModelType.ImageGeneration,
+ endpointType: 'image-generation'
+ }
+ }
+
+ private buildFallbackSummaryTitle(messages: ChatMessage[]): string {
+ const latestUserMessage = [...messages].reverse().find((message) => message.role === 'user')
+ const rawContent = latestUserMessage?.content
+
+ const textContent =
+ typeof rawContent === 'string'
+ ? rawContent
+ : Array.isArray(rawContent)
+ ? rawContent
+ .filter((part) => part.type === 'text' && typeof part.text === 'string')
+ .map((part) => part.text)
+ .join(' ')
+ : ''
+
+ const normalizedTitle = textContent.replace(/\s+/g, ' ').trim()
+ if (!normalizedTitle) {
+ return 'New Conversation'
+ }
+
+ return normalizedTitle.slice(0, 60)
+ }
+
+ private inferModelType(rawModel: NewApiModelRecord, supported: NewApiEndpointType[]) {
+ const normalizedRawType =
+ typeof rawModel.type === 'string' ? rawModel.type.trim().toLowerCase() : ''
+ const normalizedModelId = typeof rawModel.id === 'string' ? rawModel.id.toLowerCase() : ''
+
+ if (
+ normalizedRawType === 'imagegeneration' ||
+ normalizedRawType === 'image-generation' ||
+ normalizedRawType === 'image' ||
+ supported.includes('image-generation')
+ ) {
+ return ModelType.ImageGeneration
+ }
+
+ if (
+ normalizedRawType === 'embedding' ||
+ normalizedRawType === 'embeddings' ||
+ normalizedModelId.includes('embedding')
+ ) {
+ return ModelType.Embedding
+ }
+
+ if (normalizedRawType === 'rerank' || normalizedModelId.includes('rerank')) {
+ return ModelType.Rerank
+ }
+
+ return undefined
+ }
+
+ private toGeminiMessages(messages: ChatMessage[]): Array<{
+ role: 'system' | 'user' | 'assistant'
+ content: string
+ }> {
+ return messages
+ .filter((message): message is ChatMessage & { role: 'system' | 'user' | 'assistant' } => {
+ return message.role === 'system' || message.role === 'user' || message.role === 'assistant'
+ })
+ .map((message) => ({
+ role: message.role,
+ content:
+ typeof message.content === 'string'
+ ? message.content
+ : Array.isArray(message.content)
+ ? message.content
+ .filter((part) => part.type === 'text' && typeof part.text === 'string')
+ .map((part) => part.text)
+ .join('\n')
+ : ''
+ }))
+ }
+
+ private resolveContextLength(rawModel: NewApiModelRecord): number | undefined {
+ const candidates = [
+ rawModel.context_length,
+ rawModel.contextLength,
+ rawModel.input_token_limit,
+ rawModel.max_input_tokens
+ ]
+
+ const firstNumber = candidates.find(
+ (candidate): candidate is number =>
+ typeof candidate === 'number' && Number.isFinite(candidate)
+ )
+ return firstNumber
+ }
+
+ private resolveMaxTokens(rawModel: NewApiModelRecord): number | undefined {
+ const candidates = [
+ rawModel.max_tokens,
+ rawModel.max_output_tokens,
+ rawModel.output_token_limit
+ ]
+
+ const firstNumber = candidates.find(
+ (candidate): candidate is number =>
+ typeof candidate === 'number' && Number.isFinite(candidate)
+ )
+ return firstNumber
+ }
+
+  private async ensureAnthropicDelegateReady(): Promise<NewApiAnthropicDelegate> {
+ await this.anthropicDelegate.ensureClientInitialized()
+
+ if (!this.anthropicDelegate.isClientInitialized()) {
+ throw new Error('Anthropic SDK not initialized')
+ }
+
+ return this.anthropicDelegate
+ }
+
+ private async collectImageCompletion(
+ messages: ChatMessage[],
+ modelId: string,
+ temperature?: number,
+ maxTokens?: number
+  ): Promise<LLMResponse> {
+ const response: LLMResponse = {
+ content: ''
+ }
+
+ const modelConfig = this.buildImageModelConfig(modelId)
+
+ for await (const event of this.openaiChatDelegate.coreStream(
+ messages,
+ modelId,
+ modelConfig,
+ temperature ?? modelConfig.temperature ?? 0.7,
+ maxTokens ?? modelConfig.maxTokens ?? 1024,
+ []
+ )) {
+ switch (event.type) {
+ case 'text':
+ response.content += event.content
+ break
+ case 'reasoning':
+ response.reasoning_content = `${response.reasoning_content ?? ''}${event.reasoning_content}`
+ break
+ case 'image_data':
+ if (!response.content) {
+ response.content = event.image_data.data
+ }
+ break
+ case 'usage':
+ response.totalUsage = event.usage
+ break
+ case 'error':
+ throw new Error(event.error_message)
+ }
+ }
+
+ return response
+ }
+
+  private async syncProviderManagedEndpointType(models: MODEL_META[]): Promise<void> {
+ for (const model of models) {
+ if (this.configPresenter.hasUserModelConfig(model.id, this.provider.id)) {
+ continue
+ }
+
+ const existingConfig = this.configPresenter.getModelConfig(model.id, this.provider.id)
+ const defaultEndpointType = this.getDefaultEndpointType(model)
+ const nextApiEndpoint =
+ defaultEndpointType === 'image-generation' ? ApiEndpointType.Image : ApiEndpointType.Chat
+
+ this.configPresenter.setModelConfig(
+ model.id,
+ this.provider.id,
+ {
+ ...existingConfig,
+ type: model.type ?? existingConfig.type,
+ apiEndpoint: nextApiEndpoint,
+ endpointType: defaultEndpointType ?? existingConfig.endpointType
+ },
+ { source: 'provider' }
+ )
+ }
+ }
+
+  protected async fetchProviderModels(): Promise<MODEL_META[]> {
+ const controller = new AbortController()
+ const timeout = setTimeout(() => controller.abort(), this.getModelFetchTimeout())
+
+ try {
+ const proxyUrl = proxyConfig.getProxyUrl()
+ const dispatcher = proxyUrl ? new ProxyAgent(proxyUrl) : undefined
+ const response = await fetch(`${this.getNormalizedBaseHost()}/v1/models`, {
+ method: 'GET',
+ headers: {
+ Authorization: `Bearer ${this.provider.apiKey}`,
+ 'Content-Type': 'application/json',
+ ...this.defaultHeaders
+ },
+ signal: controller.signal,
+        ...(dispatcher ? ({ dispatcher } as Record<string, unknown>) : {})
+ })
+
+ if (!response.ok) {
+ const responseText = await response.text()
+ throw new Error(responseText || `Failed to fetch models: ${response.status}`)
+ }
+
+ const payload = (await response.json()) as NewApiModelsResponse
+ const rawModels = Array.isArray(payload.data) ? payload.data : []
+
+ const models = rawModels
+ .filter((rawModel): rawModel is NewApiModelRecord & { id: string } => {
+ return typeof rawModel.id === 'string' && rawModel.id.trim().length > 0
+ })
+ .map((rawModel) => {
+ const supportedEndpointTypes = Array.isArray(rawModel.supported_endpoint_types)
+ ? rawModel.supported_endpoint_types.filter(isNewApiEndpointType)
+ : []
+ const type = this.inferModelType(rawModel, supportedEndpointTypes)
+ const contextLength = this.resolveContextLength(rawModel)
+ const maxTokens = this.resolveMaxTokens(rawModel)
+ const model: MODEL_META = {
+ id: rawModel.id,
+ name: typeof rawModel.name === 'string' ? rawModel.name : rawModel.id,
+ group: typeof rawModel.owned_by === 'string' ? rawModel.owned_by : 'default',
+ providerId: this.provider.id,
+ isCustom: false,
+ supportedEndpointTypes,
+ endpointType: this.getDefaultEndpointType({
+ supportedEndpointTypes,
+ type
+ }),
+ ...(typeof rawModel.description === 'string'
+ ? { description: rawModel.description }
+ : {}),
+ ...(type ? { type } : {}),
+ ...(contextLength !== undefined ? { contextLength } : {}),
+ ...(maxTokens !== undefined ? { maxTokens } : {})
+ }
+ return model
+ })
+
+ await this.syncProviderManagedEndpointType(models)
+ return models
+ } finally {
+ clearTimeout(timeout)
+ }
+ }
+
+ public override onProxyResolved(): void {
+ this.openaiChatDelegate.onProxyResolved()
+ this.openaiResponsesDelegate.onProxyResolved()
+ this.geminiDelegate.onProxyResolved()
+ this.anthropicDelegate.onProxyResolved()
+ }
+
+ public async check(): Promise<{ isOk: boolean; errorMsg: string | null }> {
+ try {
+ await this.fetchProviderModels()
+ return { isOk: true, errorMsg: null }
+ } catch (error) {
+ return {
+ isOk: false,
+ errorMsg: error instanceof Error ? error.message : String(error)
+ }
+ }
+ }
+
+  public async summaryTitles(messages: ChatMessage[], modelId: string): Promise<string> {
+ const endpointType = this.resolveEndpointType(modelId)
+
+ switch (endpointType) {
+ case 'anthropic': {
+ const delegate = await this.ensureAnthropicDelegateReady()
+ return delegate.summaryTitles(messages, modelId)
+ }
+ case 'gemini':
+ return this.geminiDelegate.summaryTitles(this.toGeminiMessages(messages), modelId)
+ case 'openai-response':
+ return this.openaiResponsesDelegate.summaryTitles(messages, modelId)
+ case 'image-generation':
+ return this.buildFallbackSummaryTitle(messages)
+ case 'openai':
+ default:
+ return this.openaiChatDelegate.summaryTitles(messages, modelId)
+ }
+ }
+
+ public async completions(
+ messages: ChatMessage[],
+ modelId: string,
+ temperature?: number,
+ maxTokens?: number
+  ): Promise<LLMResponse> {
+ const endpointType = this.resolveEndpointType(modelId)
+
+ switch (endpointType) {
+ case 'anthropic': {
+ const delegate = await this.ensureAnthropicDelegateReady()
+ return delegate.completions(messages, modelId, temperature, maxTokens)
+ }
+ case 'gemini':
+ return this.geminiDelegate.completions(
+ this.toGeminiMessages(messages),
+ modelId,
+ temperature,
+ maxTokens
+ )
+ case 'openai-response':
+ return this.openaiResponsesDelegate.completions(messages, modelId, temperature, maxTokens)
+ case 'image-generation':
+ return this.collectImageCompletion(messages, modelId, temperature, maxTokens)
+ case 'openai':
+ default:
+ return this.openaiChatDelegate.completions(messages, modelId, temperature, maxTokens)
+ }
+ }
+
+ public async summaries(
+ text: string,
+ modelId: string,
+ temperature?: number,
+ maxTokens?: number
+  ): Promise<LLMResponse> {
+ const endpointType = this.resolveEndpointType(modelId)
+
+ switch (endpointType) {
+ case 'anthropic': {
+ const delegate = await this.ensureAnthropicDelegateReady()
+ return delegate.summaries(text, modelId, temperature, maxTokens)
+ }
+ case 'gemini':
+ return this.geminiDelegate.summaries(text, modelId, temperature, maxTokens)
+ case 'openai-response':
+ return this.openaiResponsesDelegate.summaries(text, modelId, temperature, maxTokens)
+ case 'image-generation':
+ return this.collectImageCompletion(
+ [{ role: 'user', content: text }],
+ modelId,
+ temperature,
+ maxTokens
+ )
+ case 'openai':
+ default:
+ return this.openaiChatDelegate.summaries(text, modelId, temperature, maxTokens)
+ }
+ }
+
+ public async generateText(
+ prompt: string,
+ modelId: string,
+ temperature?: number,
+ maxTokens?: number
+  ): Promise<LLMResponse> {
+ const endpointType = this.resolveEndpointType(modelId)
+
+ switch (endpointType) {
+ case 'anthropic': {
+ const delegate = await this.ensureAnthropicDelegateReady()
+ return delegate.generateText(prompt, modelId, temperature, maxTokens)
+ }
+ case 'gemini':
+ return this.geminiDelegate.generateText(prompt, modelId, temperature, maxTokens)
+ case 'openai-response':
+ return this.openaiResponsesDelegate.generateText(prompt, modelId, temperature, maxTokens)
+ case 'image-generation':
+ return this.collectImageCompletion(
+ [{ role: 'user', content: prompt }],
+ modelId,
+ temperature,
+ maxTokens
+ )
+ case 'openai':
+ default:
+ return this.openaiChatDelegate.generateText(prompt, modelId, temperature, maxTokens)
+ }
+ }
+
+ public async *coreStream(
+ messages: ChatMessage[],
+ modelId: string,
+ modelConfig: ModelConfig,
+ temperature: number,
+ maxTokens: number,
+ tools: MCPToolDefinition[]
+  ): AsyncGenerator<LLMCoreStreamEvent> {
+ const endpointType = this.resolveEndpointType(modelId)
+
+ switch (endpointType) {
+ case 'anthropic': {
+ const delegate = await this.ensureAnthropicDelegateReady()
+ yield* delegate.coreStream(messages, modelId, modelConfig, temperature, maxTokens, tools)
+ return
+ }
+ case 'gemini':
+ yield* this.geminiDelegate.coreStream(
+ messages,
+ modelId,
+ modelConfig,
+ temperature,
+ maxTokens,
+ tools
+ )
+ return
+ case 'openai-response':
+ yield* this.openaiResponsesDelegate.coreStream(
+ messages,
+ modelId,
+ modelConfig,
+ temperature,
+ maxTokens,
+ tools
+ )
+ return
+ case 'image-generation':
+ yield* this.openaiChatDelegate.coreStream(
+ messages,
+ modelId,
+ this.buildImageModelConfig(modelId, modelConfig),
+ temperature,
+ maxTokens,
+ tools
+ )
+ return
+ case 'openai':
+ default:
+ yield* this.openaiChatDelegate.coreStream(
+ messages,
+ modelId,
+ modelConfig,
+ temperature,
+ maxTokens,
+ tools
+ )
+ return
+ }
+ }
+
+  public async getEmbeddings(modelId: string, texts: string[]): Promise<number[][]> {
+ return this.openaiChatDelegate.getEmbeddings(modelId, texts)
+ }
+
+  public async getDimensions(modelId: string): Promise<LLM_EMBEDDING_ATTRS> {
+ return this.openaiChatDelegate.getDimensions(modelId)
+ }
+
+  public async getKeyStatus(): Promise<KeyStatus> {
+ return this.openaiChatDelegate.getKeyStatus()
+ }
+}
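One detail worth calling out: the constructor hands the OpenAI-style delegates `${host}/v1` but gives the Anthropic and Gemini delegates the bare host, so `getNormalizedBaseHost` must strip any version suffix the user typed. A sketch of the intended behavior of the regexes above (example URLs hypothetical):

```ts
// Mirrors the normalization in getNormalizedBaseHost.
const DEFAULT_NEW_API_BASE_URL = 'https://www.newapi.ai'

const normalizeBaseHost = (raw: string): string =>
  raw.trim().replace(/\/+$/, '').replace(/\/(v1|v1beta(?:\d+)?)$/i, '') || DEFAULT_NEW_API_BASE_URL

console.log(normalizeBaseHost('https://relay.example.com/')) // https://relay.example.com
console.log(normalizeBaseHost('https://relay.example.com/v1')) // https://relay.example.com
console.log(normalizeBaseHost('https://relay.example.com/v1beta2')) // https://relay.example.com
```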
diff --git a/src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts b/src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts
index 8d6c1640c..039e19397 100644
--- a/src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider.ts
@@ -137,11 +137,11 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
}
private supportsEffortParameter(modelId: string): boolean {
- return modelCapabilities.supportsReasoningEffort(this.provider.id, modelId)
+ return modelCapabilities.supportsReasoningEffort(this.getCapabilityProviderId(), modelId)
}
private supportsVerbosityParameter(modelId: string): boolean {
- return modelCapabilities.supportsVerbosity(this.provider.id, modelId)
+ return modelCapabilities.supportsVerbosity(this.getCapabilityProviderId(), modelId)
}
private resolveTraceAuthToken(): string {
diff --git a/src/main/presenter/llmProviderPresenter/providers/openAIResponsesProvider.ts b/src/main/presenter/llmProviderPresenter/providers/openAIResponsesProvider.ts
index 67a477f4b..d10ebed0a 100644
--- a/src/main/presenter/llmProviderPresenter/providers/openAIResponsesProvider.ts
+++ b/src/main/presenter/llmProviderPresenter/providers/openAIResponsesProvider.ts
@@ -114,11 +114,11 @@ export class OpenAIResponsesProvider extends BaseLLMProvider {
}
private supportsEffortParameter(modelId: string): boolean {
- return modelCapabilities.supportsReasoningEffort(this.provider.id, modelId)
+ return modelCapabilities.supportsReasoningEffort(this.getCapabilityProviderId(), modelId)
}
private supportsVerbosityParameter(modelId: string): boolean {
- return modelCapabilities.supportsVerbosity(this.provider.id, modelId)
+ return modelCapabilities.supportsVerbosity(this.getCapabilityProviderId(), modelId)
}
private resolveTraceAuthToken(): string {
diff --git a/src/renderer/settings/components/ProviderApiConfig.vue b/src/renderer/settings/components/ProviderApiConfig.vue
index 83a9d2f7e..1440e377c 100644
--- a/src/renderer/settings/components/ProviderApiConfig.vue
+++ b/src/renderer/settings/components/ProviderApiConfig.vue
@@ -171,9 +171,7 @@
{{ t('settings.provider.howToGet') }}: {{ t('settings.provider.getKeyTip') }}
-          <a :href="providerWebsites?.apiKey" target="_blank">{{
-            provider.name
-          }}</a>
+          <a :href="providerApiKeyUrl" target="_blank">{{ provider.name }}</a>
{{ t('settings.provider.getKeyTipEnd') }}
@@ -216,6 +214,7 @@ const { toast } = useToast()
const EDITABLE_BASE_URL_PROVIDER_IDS = new Set([
'openai',
'openai-responses',
+ 'new-api',
'anthropic',
'gemini',
'ollama',
@@ -253,6 +252,23 @@ const showLockedBaseUrl = computed(
() => !isBaseUrlEditableByDefault.value && !baseUrlUnlocked.value
)
const shouldRefreshProviderDbFirst = computed(() => isProviderDbBackedProvider(props.provider.id))
+const providerApiKeyUrl = computed(() => {
+ if (props.provider.id !== 'new-api') {
+ return props.providerWebsites?.apiKey || ''
+ }
+
+ const normalizedHost = apiHost.value.trim() || defaultBaseUrl.value
+ if (!normalizedHost) {
+ return props.providerWebsites?.apiKey || ''
+ }
+
+ try {
+ const parsedUrl = new URL(normalizedHost)
+ return `${parsedUrl.origin}/console/token`
+ } catch {
+ return props.providerWebsites?.apiKey || ''
+ }
+})
watch(
() => props.provider,
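The new `providerApiKeyUrl` computed points the "get key" link at the user's own relay rather than the hosted newapi.ai console. The derivation in isolation (sample origin hypothetical):

```ts
// Derives the token-console URL from whatever base URL the user entered;
// falls back to the static website link when the input does not parse.
const deriveApiKeyUrl = (host: string, fallback: string): string => {
  try {
    return `${new URL(host).origin}/console/token`
  } catch {
    return fallback
  }
}

console.log(deriveApiKeyUrl('https://relay.example.com/v1', 'https://www.newapi.ai/token'))
// -> 'https://relay.example.com/console/token'
```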
diff --git a/src/renderer/src/assets/llm-icons/newapi.svg b/src/renderer/src/assets/llm-icons/newapi.svg
new file mode 100644
index 000000000..02cfb4a9c
--- /dev/null
+++ b/src/renderer/src/assets/llm-icons/newapi.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/src/renderer/src/components/chat/ChatStatusBar.vue b/src/renderer/src/components/chat/ChatStatusBar.vue
index bcd1228a4..af2cf4195 100644
--- a/src/renderer/src/components/chat/ChatStatusBar.vue
+++ b/src/renderer/src/components/chat/ChatStatusBar.vue
@@ -741,6 +741,7 @@ import type {
SessionGenerationSettings
} from '@shared/types/agent-interface'
import { normalizeDeepChatSubagentConfig } from '@shared/lib/deepchatSubagents'
+import { isChatSelectableModelType } from '@shared/model'
import type { ReasoningPortrait } from '@shared/types/model-db'
import {
normalizeLegacyThinkingBudgetValue,
@@ -1002,11 +1003,15 @@ const providerNameMap = computed(() => {
return map
})
+const getChatSelectableModels = (models: RENDERER_MODEL_META[]): RENDERER_MODEL_META[] =>
+ models.filter((model) => isChatSelectableModelType(model.type))
+
const modelGroups = computed(() => {
const groupsById = new Map(
modelStore.enabledModels
.filter((group) => group.providerId !== 'acp')
- .map((group) => [group.providerId, group.models] as const)
+ .map((group) => [group.providerId, getChatSelectableModels(group.models)] as const)
+ .filter(([, models]) => models.length > 0)
)
const result: GroupedModelList[] = []
@@ -1390,7 +1395,10 @@ const getAcpOptionDisplayValue = (option: AcpConfigOption): string => {
const findEnabledModelMeta = (providerId: string, modelId: string): RENDERER_MODEL_META | null => {
const group = modelStore.enabledModels.find((item) => item.providerId === providerId)
- return group?.models.find((model) => model.id === modelId) ?? null
+ return (
+ group?.models.find((model) => model.id === modelId && isChatSelectableModelType(model.type)) ??
+ null
+ )
}
const getReasoningEffortOptions = (
@@ -1499,13 +1507,13 @@ const findEnabledModel = (providerId: string, modelId: string): ModelSelection |
const pickFirstEnabledModel = (): ModelSelection | null => {
for (const group of modelStore.enabledModels) {
if (group.providerId === 'acp') continue
- const firstModel = group.models[0]
+ const firstModel = group.models.find((model) => isChatSelectableModelType(model.type))
if (firstModel) {
return { providerId: group.providerId, modelId: firstModel.id }
}
}
for (const group of modelStore.enabledModels) {
- const firstModel = group.models[0]
+ const firstModel = group.models.find((model) => isChatSelectableModelType(model.type))
if (firstModel) {
return { providerId: group.providerId, modelId: firstModel.id }
}
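The pattern above, filter each group and then drop empty groups, keeps providers whose only models are non-chat types out of the selector entirely. Condensed sketch (`selectable` stands in for `isChatSelectableModelType`; its exact rules live in `@shared/model`, so the embedding/rerank check here is an assumption for illustration):

```ts
type Model = { id: string; type?: string }
type Group = { providerId: string; models: Model[] }

// Assumed stand-in for isChatSelectableModelType.
const selectable = (model: Model): boolean =>
  model.type !== 'embedding' && model.type !== 'rerank'

const visibleGroups = (groups: Group[]): Group[] =>
  groups
    .map((group) => ({ ...group, models: group.models.filter(selectable) }))
    .filter((group) => group.models.length > 0)
```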
diff --git a/src/renderer/src/components/icons/ModelIcon.vue b/src/renderer/src/components/icons/ModelIcon.vue
index e0ee3e5f1..424d9d925 100644
--- a/src/renderer/src/components/icons/ModelIcon.vue
+++ b/src/renderer/src/components/icons/ModelIcon.vue
@@ -4,6 +4,7 @@ import { useProviderStore } from '@/stores/providerStore'
import { useAgentStore } from '@/stores/ui/agent'
import AcpAgentIcon from './AcpAgentIcon.vue'
import cherryinColorIcon from '@/assets/llm-icons/cherryin-color.png?url'
+import newApiColorIcon from '@/assets/llm-icons/newapi.svg?url'
import adobeColorIcon from '@/assets/llm-icons/adobe-color.svg?url'
import zeaburColorIcon from '@/assets/llm-icons/zeabur-color.svg?url'
import zhipuColorIcon from '@/assets/llm-icons/zhipu-color.svg?url'
@@ -85,6 +86,7 @@ const icons = {
'dimcode-acp': dimcodeColorIcon,
o3fan: o3fanColorIcon,
cherryin: cherryinColorIcon,
+ 'new-api': newApiColorIcon,
modelscope: modelscopeColorIcon,
'302ai': _302aiIcon,
aihubmix: aihubmixColorIcon,
diff --git a/src/renderer/src/components/settings/ModelConfigDialog.vue b/src/renderer/src/components/settings/ModelConfigDialog.vue
index f77cfafeb..85907b610 100644
--- a/src/renderer/src/components/settings/ModelConfigDialog.vue
+++ b/src/renderer/src/components/settings/ModelConfigDialog.vue
@@ -146,6 +146,34 @@
+        <div v-if="showEndpointTypeSelector" class="space-y-2">
+          <Label for="endpointType" class="text-sm font-medium">
+            {{ t('settings.model.modelConfig.endpointType.label') }}
+          </Label>
+          <Select v-model="config.endpointType">
+            <SelectTrigger id="endpointType">
+              <SelectValue
+                :placeholder="t('settings.model.modelConfig.endpointType.placeholder')"
+              />
+            </SelectTrigger>
+            <SelectContent>
+              <SelectItem
+                v-for="endpointTypeOption in availableEndpointTypes"
+                :key="endpointTypeOption"
+                :value="endpointTypeOption"
+              >
+                {{ t(`settings.model.modelConfig.endpointType.options.${endpointTypeOption}`) }}
+              </SelectItem>
+            </SelectContent>
+          </Select>
+          <p class="text-xs text-muted-foreground">
+            {{ t('settings.model.modelConfig.endpointType.description') }}
+          </p>
+          <p v-if="errors.endpointType" class="text-xs text-destructive">
+            {{ errors.endpointType }}
+          </p>
+        </div>
@@ -380,7 +408,13 @@
import { ref, computed, watch, onMounted } from 'vue'
import { storeToRefs } from 'pinia'
import { useI18n } from 'vue-i18n'
-import { ApiEndpointType, ModelType } from '@shared/model'
+import {
+ ApiEndpointType,
+ ModelType,
+ NEW_API_ENDPOINT_TYPES,
+ isNewApiEndpointType,
+ type NewApiEndpointType
+} from '@shared/model'
import type { ModelConfig } from '@shared/presenter'
import type { ReasoningPortrait } from '@shared/types/model-db'
import {
@@ -475,9 +509,18 @@ const isResponsesProvider = computed(() => {
return apiType === 'openai' || apiType === 'openai-responses'
})
+const isNewApiProvider = computed(() => {
+ if (providerIdLower.value === 'new-api') {
+ return true
+ }
+
+ return currentProvider.value?.apiType?.toLowerCase() === 'new-api'
+})
+
const showApiEndpointSelector = computed(
- () => !isResponsesProvider.value && isOpenAICompatibleProvider.value
+ () => !isNewApiProvider.value && !isResponsesProvider.value && isOpenAICompatibleProvider.value
)
+const showEndpointTypeSelector = computed(() => isNewApiProvider.value)
const createDefaultConfig = (): ModelConfig => ({
maxTokens: DEFAULT_MODEL_MAX_TOKENS,
@@ -489,6 +532,7 @@ const createDefaultConfig = (): ModelConfig => ({
forceInterleavedThinkingCompat: undefined,
type: ModelType.Chat,
apiEndpoint: ApiEndpointType.Chat,
+ endpointType: undefined,
reasoningEffort: 'medium',
verbosity: 'medium'
})
@@ -669,6 +713,54 @@ const isThinkingBudgetSentinel = (
return sentinelValues.has(roundedValue)
}
+const capabilitySupportsReasoning = ref<boolean | null>(null)
+const capabilityBudgetRange = ref<{ min?: number; max?: number; default?: number } | null>(null)
+const capabilitySupportsEffort = ref<boolean | null>(null)
+const capabilityEffortDefault = ref<'minimal' | 'low' | 'medium' | 'high' | undefined>(undefined)
+const capabilitySupportsVerbosity = ref<boolean | null>(null)
+const capabilityVerbosityDefault = ref<'low' | 'medium' | 'high' | undefined>(undefined)
+
+const fetchCapabilities = async () => {
+ if (!props.providerId || !props.modelId) {
+ capabilityReasoningPortrait.value = null
+ capabilitySupportsReasoning.value = null
+ capabilityBudgetRange.value = null
+ capabilitySupportsEffort.value = null
+ capabilityEffortDefault.value = undefined
+ capabilitySupportsVerbosity.value = null
+ capabilityVerbosityDefault.value = undefined
+ return
+ }
+ try {
+ const portrait =
+ (await configPresenter.getReasoningPortrait?.(props.providerId, props.modelId)) ?? null
+ capabilityReasoningPortrait.value = portrait
+ capabilitySupportsReasoning.value =
+ typeof portrait?.supported === 'boolean' ? portrait.supported : null
+ capabilityBudgetRange.value = portrait?.budget
+ ? {
+ ...(typeof portrait.budget.min === 'number' ? { min: portrait.budget.min } : {}),
+ ...(typeof portrait.budget.max === 'number' ? { max: portrait.budget.max } : {}),
+ ...(typeof portrait.budget.default === 'number'
+ ? { default: portrait.budget.default }
+ : {})
+ }
+ : null
+ capabilitySupportsEffort.value = hasReasoningEffortSupport(portrait)
+ capabilityEffortDefault.value = normalizeReasoningEffortValue(portrait, portrait?.effort)
+ capabilitySupportsVerbosity.value = hasVerbositySupport(portrait)
+ capabilityVerbosityDefault.value = normalizeVerbosityValue(portrait, portrait?.verbosity)
+ } catch {
+ capabilityReasoningPortrait.value = null
+ capabilitySupportsReasoning.value = null
+ capabilityBudgetRange.value = null
+ capabilitySupportsEffort.value = null
+ capabilityEffortDefault.value = undefined
+ capabilitySupportsVerbosity.value = null
+ capabilityVerbosityDefault.value = undefined
+ }
+}
+
const providerCustomModelList = computed(() => {
if (!props.providerId) return []
return customModels.value.find((entry) => entry.providerId === props.providerId)?.models ?? []
@@ -681,6 +773,33 @@ const providerStandardModelList = computed(() => {
)
})
+const currentModelLookupId = computed(() =>
+ (isCreateMode.value ? modelIdField.value : props.modelId || modelIdField.value).trim()
+)
+
+const providerModelMeta = computed(() => {
+ const targetModelId = currentModelLookupId.value
+ if (!targetModelId) return null
+
+ return (
+ providerStandardModelList.value.find((model) => model.id === targetModelId) ??
+ providerCustomModelList.value.find((model) => model.id === targetModelId) ??
+ null
+ )
+})
+
+const availableEndpointTypes = computed(() => {
+ const supportedEndpointTypes = providerModelMeta.value?.supportedEndpointTypes
+ if (Array.isArray(supportedEndpointTypes) && supportedEndpointTypes.length > 0) {
+ const normalizedEndpointTypes = supportedEndpointTypes.filter(isNewApiEndpointType)
+ if (normalizedEndpointTypes.length > 0) {
+ return normalizedEndpointTypes
+ }
+ }
+
+ return [...NEW_API_ENDPOINT_TYPES]
+})
+
const currentCustomModel = computed(() => {
if (!props.providerId || !props.modelId) return null
return providerCustomModelList.value.find((model) => model.id === props.modelId) ?? null
@@ -709,9 +828,39 @@ const buildCustomModelPayload = (id: string, name: string, enabled?: boolean) =>
vision: config.value.vision ?? DEFAULT_MODEL_VISION,
functionCall: config.value.functionCall ?? DEFAULT_MODEL_FUNCTION_CALL,
reasoning: config.value.reasoning ?? false,
- type: config.value.type ?? ModelType.Chat
+ type: config.value.type ?? ModelType.Chat,
+ endpointType: config.value.endpointType
})
+const syncNewApiDerivedFields = () => {
+ if (!showEndpointTypeSelector.value) {
+ return
+ }
+
+ if (!isNewApiEndpointType(config.value.endpointType)) {
+ config.value.endpointType =
+ providerModelMeta.value?.endpointType ??
+ providerModelMeta.value?.supportedEndpointTypes?.[0] ??
+ availableEndpointTypes.value[0]
+ }
+
+ if (config.value.endpointType === 'image-generation') {
+ config.value.apiEndpoint = ApiEndpointType.Image
+ config.value.type = ModelType.ImageGeneration
+ return
+ }
+
+ config.value.apiEndpoint = ApiEndpointType.Chat
+
+ if (config.value.type === ModelType.ImageGeneration) {
+ const providerModelType = providerModelMeta.value?.type
+ config.value.type =
+ providerModelType && providerModelType !== ModelType.ImageGeneration
+ ? providerModelType
+ : ModelType.Chat
+ }
+}
+
const initializeIdentityFields = () => {
if (isCreateMode.value) {
modelNameField.value = ''
@@ -733,6 +882,7 @@ const loadConfig = async () => {
if (isCreateMode.value) {
config.value = createDefaultConfig()
+ syncNewApiDerivedFields()
await fetchCapabilities()
return
}
@@ -743,6 +893,13 @@ const loadConfig = async () => {
const modelConfig = await modelConfigStore.getModelConfig(props.modelId, props.providerId)
config.value = { ...modelConfig }
+ if (showEndpointTypeSelector.value && !isNewApiEndpointType(config.value.endpointType)) {
+ config.value.endpointType =
+ providerModelMeta.value?.endpointType ??
+ providerModelMeta.value?.supportedEndpointTypes?.[0] ??
+ availableEndpointTypes.value[0]
+ }
+
if (showApiEndpointSelector.value && !config.value.apiEndpoint) {
config.value.apiEndpoint = ApiEndpointType.Chat
}
@@ -791,6 +948,8 @@ const loadConfig = async () => {
capabilityReasoningPortrait.value?.budget?.max
)
}
+
+ syncNewApiDerivedFields()
}
// 验证表单
@@ -840,6 +999,10 @@ const validateForm = () => {
errors.value.temperature = t('settings.model.modelConfig.validation.temperatureMax')
}
}
+
+ if (showEndpointTypeSelector.value && !isNewApiEndpointType(config.value.endpointType)) {
+ errors.value.endpointType = t('settings.model.modelConfig.endpointType.required')
+ }
}
// 表单是否有效
@@ -891,7 +1054,8 @@ const handleSave = async () => {
vision: config.value.vision,
functionCall: config.value.functionCall,
reasoning: config.value.reasoning,
- type: config.value.type ?? ModelType.Chat
+ type: config.value.type ?? ModelType.Chat,
+ endpointType: config.value.endpointType
})
}
@@ -944,6 +1108,13 @@ watch(
{ immediate: true }
)
+watch(
+ () => [config.value.endpointType, config.value.type, showEndpointTypeSelector.value],
+ () => {
+ syncNewApiDerivedFields()
+ }
+)
+
const supportsVerbosity = computed(() => capabilitySupportsVerbosity.value === true)
const isDeepSeekV31Model = computed(() => {
@@ -986,54 +1157,6 @@ const showInterleavedThinking = computed(() => {
)
})
-const capabilitySupportsReasoning = ref<boolean | null>(null)
-const capabilityBudgetRange = ref<{ min?: number; max?: number; default?: number } | null>(null)
-const capabilitySupportsEffort = ref<boolean | null>(null)
-const capabilityEffortDefault = ref<'minimal' | 'low' | 'medium' | 'high' | undefined>(undefined)
-const capabilitySupportsVerbosity = ref<boolean | null>(null)
-const capabilityVerbosityDefault = ref<'low' | 'medium' | 'high' | undefined>(undefined)
-
-const fetchCapabilities = async () => {
- if (!props.providerId || !props.modelId) {
- capabilityReasoningPortrait.value = null
- capabilitySupportsReasoning.value = null
- capabilityBudgetRange.value = null
- capabilitySupportsEffort.value = null
- capabilityEffortDefault.value = undefined
- capabilitySupportsVerbosity.value = null
- capabilityVerbosityDefault.value = undefined
- return
- }
- try {
- const portrait =
- (await configPresenter.getReasoningPortrait?.(props.providerId, props.modelId)) ?? null
- capabilityReasoningPortrait.value = portrait
- capabilitySupportsReasoning.value =
- typeof portrait?.supported === 'boolean' ? portrait.supported : null
- capabilityBudgetRange.value = portrait?.budget
- ? {
- ...(typeof portrait.budget.min === 'number' ? { min: portrait.budget.min } : {}),
- ...(typeof portrait.budget.max === 'number' ? { max: portrait.budget.max } : {}),
- ...(typeof portrait.budget.default === 'number'
- ? { default: portrait.budget.default }
- : {})
- }
- : null
- capabilitySupportsEffort.value = hasReasoningEffortSupport(portrait)
- capabilityEffortDefault.value = normalizeReasoningEffortValue(portrait, portrait?.effort)
- capabilitySupportsVerbosity.value = hasVerbositySupport(portrait)
- capabilityVerbosityDefault.value = normalizeVerbosityValue(portrait, portrait?.verbosity)
- } catch {
- capabilityReasoningPortrait.value = null
- capabilitySupportsReasoning.value = null
- capabilityBudgetRange.value = null
- capabilitySupportsEffort.value = null
- capabilityEffortDefault.value = undefined
- capabilitySupportsVerbosity.value = null
- capabilityVerbosityDefault.value = undefined
- }
-}
-
watch(
() => [props.providerId, props.modelId, props.open],
async () => {
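The invariant `syncNewApiDerivedFields` enforces is that the image endpoint and the image model type always travel together, in both directions. A condensed sketch (the `'image'` / `'chat'` / `'imageGeneration'` strings are assumed stand-ins for the real `ApiEndpointType` / `ModelType` enum values in `@shared/model`):

```ts
const deriveFromEndpointType = (endpointType: string, currentType: string) =>
  endpointType === 'image-generation'
    ? { apiEndpoint: 'image', type: 'imageGeneration' } // image endpoint forces image config
    : {
        apiEndpoint: 'chat',
        // leaving the image endpoint resets a stale image type back to chat
        type: currentType === 'imageGeneration' ? 'chat' : currentType
      }
```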
diff --git a/src/renderer/src/i18n/da-DK/settings.json b/src/renderer/src/i18n/da-DK/settings.json
index 4d8aad592..e051a7eea 100644
--- a/src/renderer/src/i18n/da-DK/settings.json
+++ b/src/renderer/src/i18n/da-DK/settings.json
@@ -398,6 +398,19 @@
"chat": "tekstgenerering",
"image": "Billedgenerering"
}
+ },
+ "endpointType": {
+ "label": "Endpunkttype",
+ "description": "Vælg hvilken upstream-protokol New API skal bruge til denne model.",
+ "placeholder": "Vælg endpunkttype",
+ "required": "Endpunkttype er påkrævet",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "Billedgenerering"
+ }
}
}
},
@@ -520,7 +533,6 @@
"description": "Det anbefales ikke at ændre basis-URL'en for denne udbyder. Et forkert endpoint kan medføre fejl i forespørgsler eller anden uventet adfærd. Du kan stadig fortsætte, hvis du forstår risikoen.",
"confirm": "Fortsæt"
},
-
"deleteProvider": {
"title": "Bekræft sletning af udbyder",
"content": "Er du sikker på, at du vil slette udbyderen \"{name}\"? Handlingen kan ikke fortrydes.",
diff --git a/src/renderer/src/i18n/en-US/settings.json b/src/renderer/src/i18n/en-US/settings.json
index b8febe199..749b49b27 100644
--- a/src/renderer/src/i18n/en-US/settings.json
+++ b/src/renderer/src/i18n/en-US/settings.json
@@ -514,6 +514,19 @@
"chat": "Text Generation",
"image": "Image Generation"
}
+ },
+ "endpointType": {
+ "label": "Endpoint Type",
+ "description": "Select which upstream protocol New API should use for this model.",
+ "placeholder": "Select endpoint type",
+ "required": "Endpoint type is required",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "Image Generation"
+ }
}
}
},
@@ -616,7 +629,6 @@
"description": "Changing the Base URL for this provider is not recommended. An incorrect endpoint may cause request failures or other unexpected behavior. You can still continue if you understand the risk.",
"confirm": "Continue"
},
-
"deleteProvider": {
"title": "Confirm Delete Provider",
"content": "Are you sure you want to delete provider \"{name}\"? This action cannot be undone.",
diff --git a/src/renderer/src/i18n/fa-IR/settings.json b/src/renderer/src/i18n/fa-IR/settings.json
index 793c781e0..23dd6c68d 100644
--- a/src/renderer/src/i18n/fa-IR/settings.json
+++ b/src/renderer/src/i18n/fa-IR/settings.json
@@ -452,6 +452,19 @@
"chat": "تولید متن",
"image": "تولید تصویر"
}
+ },
+ "endpointType": {
+ "label": "نوع نقطهپایان",
+ "description": "پروتکل بالادستی که New API باید برای این مدل استفاده کند را انتخاب کنید.",
+ "placeholder": "نوع نقطهپایان را انتخاب کنید",
+ "required": "نوع نقطهپایان الزامی است",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "تولید تصویر"
+ }
}
}
},
@@ -586,7 +599,6 @@
"description": "تغییر آدرس پایه برای این ارائهدهنده توصیه نمیشود. یک نقطهپایان نادرست ممکن است باعث شکست درخواستها یا رفتارهای غیرمنتظره دیگر شود. اگر از این ریسک آگاه هستید، میتوانید ادامه دهید.",
"confirm": "ادامه"
},
-
"deleteProvider": {
"title": "پذیرش پاک کردن فراهمکننده",
"content": "آیا مطمئن هستید که میخواهید فراهمکننده \"{name}\" را پاک کنید؟ این کنش بازگشتپذیر نیست.",
diff --git a/src/renderer/src/i18n/fr-FR/settings.json b/src/renderer/src/i18n/fr-FR/settings.json
index 822627b48..6a87de86f 100644
--- a/src/renderer/src/i18n/fr-FR/settings.json
+++ b/src/renderer/src/i18n/fr-FR/settings.json
@@ -452,6 +452,19 @@
"chat": "Génération de texte",
"image": "Génération d'images"
}
+ },
+ "endpointType": {
+ "label": "Type de point de terminaison",
+ "description": "Sélectionnez le protocole amont que New API doit utiliser pour ce modèle.",
+ "placeholder": "Sélectionner le type de point de terminaison",
+ "required": "Le type de point de terminaison est requis",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "Génération d'images"
+ }
}
}
},
@@ -586,7 +599,6 @@
"description": "Il n’est pas recommandé de modifier l’URL de base de ce fournisseur. Un point de terminaison incorrect peut provoquer des échecs de requête ou d’autres comportements inattendus. Vous pouvez tout de même continuer si vous comprenez le risque.",
"confirm": "Continuer"
},
-
"deleteProvider": {
"title": "Confirmer la suppression du fournisseur",
"content": "Êtes-vous sûr de vouloir supprimer le fournisseur \"{name}\" ? Cette action ne peut pas être annulée.",
diff --git a/src/renderer/src/i18n/he-IL/settings.json b/src/renderer/src/i18n/he-IL/settings.json
index a628f77bc..22e6c28a2 100644
--- a/src/renderer/src/i18n/he-IL/settings.json
+++ b/src/renderer/src/i18n/he-IL/settings.json
@@ -452,6 +452,19 @@
"chat": "יצירת טקסט",
"image": "יצירת תמונות"
}
+ },
+ "endpointType": {
+ "label": "סוג נקודת קצה",
+ "description": "בחר באיזה פרוטוקול upstream ישתמש New API עבור מודל זה.",
+ "placeholder": "בחר סוג נקודת קצה",
+ "required": "סוג נקודת קצה נדרש",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "יצירת תמונות"
+ }
}
}
},
@@ -586,7 +599,6 @@
"description": "לא מומלץ לשנות את כתובת הבסיס של הספק הזה. נקודת קצה שגויה עלולה לגרום לכשלי בקשות או להתנהגות בלתי צפויה אחרת. אפשר עדיין להמשיך אם מבינים את הסיכון.",
"confirm": "המשך"
},
-
"deleteProvider": {
"title": "אשר מחיקת ספק",
"content": "האם אתה בטוח שברצונך למחוק את הספק \"{name}\"? לא ניתן לבטל פעולה זו.",
diff --git a/src/renderer/src/i18n/ja-JP/settings.json b/src/renderer/src/i18n/ja-JP/settings.json
index 45b0e4781..7f4157a6a 100644
--- a/src/renderer/src/i18n/ja-JP/settings.json
+++ b/src/renderer/src/i18n/ja-JP/settings.json
@@ -452,6 +452,19 @@
"chat": "テキスト生成",
"image": "画像生成"
}
+ },
+ "endpointType": {
+ "label": "エンドポイントタイプ",
+ "description": "このモデルに対してNew APIが使用するアップストリームプロトコルを選択してください。",
+ "placeholder": "エンドポイントタイプを選択",
+ "required": "エンドポイントタイプは必須です",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "画像生成"
+ }
}
}
},
@@ -586,7 +599,6 @@
"description": "このプロバイダーのベースURLを変更することは推奨されません。誤ったエンドポイントを設定すると、リクエストの失敗やその他の予期しない動作を引き起こす可能性があります。リスクを理解している場合は、そのまま続行できます。",
"confirm": "続行"
},
-
"deleteProvider": {
"title": "プロバイダーの削除確認",
"content": "プロバイダー \"{name}\" を削除してもよろしいですか?この操作は元に戻せません。",
diff --git a/src/renderer/src/i18n/ko-KR/settings.json b/src/renderer/src/i18n/ko-KR/settings.json
index b80f50ab2..4faa7f2af 100644
--- a/src/renderer/src/i18n/ko-KR/settings.json
+++ b/src/renderer/src/i18n/ko-KR/settings.json
@@ -452,6 +452,19 @@
"chat": "텍스트 생성",
"image": "이미지 생성"
}
+ },
+ "endpointType": {
+ "label": "엔드포인트 유형",
+ "description": "이 모델에 대해 New API가 사용할 업스트림 프로토콜을 선택하세요.",
+ "placeholder": "엔드포인트 유형 선택",
+ "required": "엔드포인트 유형은 필수입니다",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "이미지 생성"
+ }
}
}
},
@@ -580,7 +593,6 @@
"description": "이 제공자의 기본 URL을 변경하는 것은 권장되지 않습니다. 잘못된 엔드포인트를 입력하면 요청 실패나 기타 예상치 못한 동작이 발생할 수 있습니다. 위험을 이해한다면 계속 진행할 수 있습니다.",
"confirm": "계속"
},
-
"deleteProvider": {
"title": "제공자 삭제",
"content": "제공자 \"{name}\"을(를) 삭제하시겠습니까? 이 작업은 취소할 수 없습니다.",
diff --git a/src/renderer/src/i18n/pt-BR/settings.json b/src/renderer/src/i18n/pt-BR/settings.json
index e31a330c9..89e8d19e4 100644
--- a/src/renderer/src/i18n/pt-BR/settings.json
+++ b/src/renderer/src/i18n/pt-BR/settings.json
@@ -452,6 +452,19 @@
"chat": "Geração de texto",
"image": "Geração de Imagens"
}
+ },
+ "endpointType": {
+ "label": "Tipo de endpoint",
+ "description": "Selecione qual protocolo upstream o New API deve usar para este modelo.",
+ "placeholder": "Selecione o tipo de endpoint",
+ "required": "O tipo de endpoint é obrigatório",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "Geração de Imagens"
+ }
}
}
},
@@ -586,7 +599,6 @@
"description": "Não é recomendado alterar a URL base deste provedor. Um endpoint incorreto pode causar falhas nas requisições ou outros comportamentos inesperados. Você ainda pode continuar se entender o risco.",
"confirm": "Continuar"
},
-
"deleteProvider": {
"title": "Confirmar Exclusão do Provedor",
"content": "Tem certeza de que deseja excluir o provedor \"{name}\"? Esta ação não pode ser desfeita.",
diff --git a/src/renderer/src/i18n/ru-RU/settings.json b/src/renderer/src/i18n/ru-RU/settings.json
index 9f62864ba..0ccfd17cc 100644
--- a/src/renderer/src/i18n/ru-RU/settings.json
+++ b/src/renderer/src/i18n/ru-RU/settings.json
@@ -452,6 +452,19 @@
"chat": "генерация текста",
"image": "Генерация изображений"
}
+ },
+ "endpointType": {
+ "label": "Тип конечной точки",
+ "description": "Выберите, какой upstream-протокол New API должен использовать для этой модели.",
+ "placeholder": "Выберите тип конечной точки",
+ "required": "Тип конечной точки обязателен",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "Генерация изображений"
+ }
}
}
},
@@ -580,7 +593,6 @@
"description": "Изменять базовый URL для этого провайдера не рекомендуется. Неверный адрес конечной точки может привести к ошибкам запросов или другому непредвиденному поведению. Если вы понимаете риск, всё равно можно продолжить.",
"confirm": "Продолжить"
},
-
"deleteProvider": {
"title": "Подтверждение удаления провайдера",
"content": "Вы уверены, что хотите удалить провайдера \"{name}\"? Это действие нельзя отменить.",
diff --git a/src/renderer/src/i18n/zh-CN/settings.json b/src/renderer/src/i18n/zh-CN/settings.json
index 35d5488bc..dbe735be4 100644
--- a/src/renderer/src/i18n/zh-CN/settings.json
+++ b/src/renderer/src/i18n/zh-CN/settings.json
@@ -514,6 +514,19 @@
"temperatureRequired": "温度不能为空",
"temperatureMin": "温度必须大于等于0",
"temperatureMax": "温度必须小于等于2"
+ },
+ "endpointType": {
+ "label": "协议类型",
+ "description": "选择 New API 为当前模型使用的上游协议。",
+ "placeholder": "选择协议类型",
+ "required": "协议类型为必填项",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "Image Generation"
+ }
}
}
},
@@ -721,7 +734,6 @@
"description": "此类 provider 不建议修改 Base URL,错误的地址可能导致请求失败或行为异常。确认后仍可继续手动修改。",
"confirm": "继续修改"
},
-
"deleteProvider": {
"title": "确认删除服务商",
"content": "是否确认删除服务商 \"{name}\"?此操作不可恢复。",
diff --git a/src/renderer/src/i18n/zh-HK/settings.json b/src/renderer/src/i18n/zh-HK/settings.json
index 671f1f277..9936cb84f 100644
--- a/src/renderer/src/i18n/zh-HK/settings.json
+++ b/src/renderer/src/i18n/zh-HK/settings.json
@@ -452,6 +452,19 @@
"chat": "文本生成",
"image": "圖片生成"
}
+ },
+ "endpointType": {
+ "label": "協議類型",
+ "description": "選擇 New API 為當前模型使用的上游協議。",
+ "placeholder": "選擇協議類型",
+ "required": "協議類型為必填項",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "圖片生成"
+ }
}
}
},
@@ -581,7 +594,6 @@
"description": "此類 provider 不建議修改 Base URL,錯誤的位址可能導致請求失敗或行為異常。確認後仍可繼續手動修改。",
"confirm": "繼續修改"
},
-
"deleteProvider": {
"title": "確認刪除服務商",
"content": "是否確認刪除服務商 \"{name}\"?此操作不可恢復。",
diff --git a/src/renderer/src/i18n/zh-TW/settings.json b/src/renderer/src/i18n/zh-TW/settings.json
index 34175887e..9b479ab6b 100644
--- a/src/renderer/src/i18n/zh-TW/settings.json
+++ b/src/renderer/src/i18n/zh-TW/settings.json
@@ -452,6 +452,19 @@
"chat": "文字生成",
"image": "圖片生成"
}
+ },
+ "endpointType": {
+ "label": "協議類型",
+ "description": "選擇 New API 為當前模型使用的上游協議。",
+ "placeholder": "選擇協議類型",
+ "required": "協議類型為必填項",
+ "options": {
+ "openai": "OpenAI Chat",
+ "openai-response": "OpenAI Responses",
+ "anthropic": "Anthropic Messages",
+ "gemini": "Gemini Native",
+ "image-generation": "圖片生成"
+ }
}
}
},
@@ -586,7 +599,6 @@
"description": "此類 provider 不建議修改 Base URL,錯誤的位址可能導致請求失敗或行為異常。確認後仍可繼續手動修改。",
"confirm": "繼續修改"
},
-
"deleteProvider": {
"title": "確認刪除服務提供者",
"content": "是否確認刪除服務提供者「{name}」?此操作無法復原。",
diff --git a/src/renderer/src/pages/NewThreadPage.vue b/src/renderer/src/pages/NewThreadPage.vue
index 47329e395..c724ed3d2 100644
--- a/src/renderer/src/pages/NewThreadPage.vue
+++ b/src/renderer/src/pages/NewThreadPage.vue
@@ -110,6 +110,7 @@ import type {
SessionGenerationSettings
} from '@shared/types/agent-interface'
import { normalizeDeepChatSubagentConfig } from '@shared/lib/deepchatSubagents'
+import { isChatSelectableModelType, type ModelType } from '@shared/model'
const projectStore = useProjectStore()
const sessionStore = useSessionStore()
@@ -177,13 +178,17 @@ const isAcpWorkdirMissing = computed(() => {
return !projectStore.selectedProject?.path?.trim()
})
+const isChatSelectableModel = (model: { type?: ModelType }) => isChatSelectableModelType(model.type)
+
const getEnabledModel = (
providerId?: string,
modelId?: string
): { providerId: string; modelId: string } | null => {
if (!providerId || !modelId) return null
const matched = modelStore.enabledModels.some(
- (group) => group.providerId === providerId && group.models.some((model) => model.id === modelId)
+ (group) =>
+ group.providerId === providerId &&
+ group.models.some((model) => model.id === modelId && isChatSelectableModel(model))
)
return matched ? { providerId, modelId } : null
}
@@ -218,8 +223,9 @@ async function resolveModel(): Promise<{ providerId: string; modelId: string } |
// 3. First available enabled model
for (const group of modelStore.enabledModels) {
- if (group.models.length > 0) {
- return { providerId: group.providerId, modelId: group.models[0].id }
+ const firstChatSelectableModel = group.models.find(isChatSelectableModel)
+ if (firstChatSelectableModel) {
+ return { providerId: group.providerId, modelId: firstChatSelectableModel.id }
}
}
@@ -245,14 +251,18 @@ const resolveStartModelSelection = (
}
for (const group of modelStore.enabledModels) {
- const matched = group.models.find((model) => model.id.toLowerCase() === normalizedModelId)
+ const matched = group.models.find(
+ (model) => model.id.toLowerCase() === normalizedModelId && isChatSelectableModel(model)
+ )
if (matched) {
return { providerId: group.providerId, modelId: matched.id }
}
}
for (const group of modelStore.enabledModels) {
- const matched = group.models.find((model) => model.id.toLowerCase().includes(normalizedModelId))
+ const matched = group.models.find(
+ (model) => model.id.toLowerCase().includes(normalizedModelId) && isChatSelectableModel(model)
+ )
if (matched) {
return { providerId: group.providerId, modelId: matched.id }
}
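
A minimal sketch (not part of the patch) of the fallback these hunks implement: models whose type fails isChatSelectableModelType are skipped, so the first chat-selectable model wins. ModelType.Embedding is an assumption here, suggested by the 'embedding' string type in the ChatStatusBar test below.

import { ModelType, isChatSelectableModelType } from '@shared/model'

type EnabledGroup = { providerId: string; models: Array<{ id: string; type?: ModelType }> }

// Hypothetical store contents: an embedding-only group followed by a chat group.
const enabledModels: EnabledGroup[] = [
  { providerId: 'new-api', models: [{ id: 'text-embedding-3-large', type: ModelType.Embedding }] },
  { providerId: 'openai', models: [{ id: 'gpt-4.1', type: ModelType.Chat }] }
]

for (const group of enabledModels) {
  const first = group.models.find((model) => isChatSelectableModelType(model.type))
  if (first) {
    console.log(group.providerId, first.id) // -> openai gpt-4.1
    break
  }
}
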
diff --git a/src/renderer/src/stores/modelStore.ts b/src/renderer/src/stores/modelStore.ts
index 1646e828e..c6b97f89e 100644
--- a/src/renderer/src/stores/modelStore.ts
+++ b/src/renderer/src/stores/modelStore.ts
@@ -118,7 +118,9 @@ export const useModelStore = defineStore('model', () => {
functionCall: resolveModelFunctionCall(model.functionCall),
reasoning: model.reasoning ?? false,
enableSearch: (model as RENDERER_MODEL_META).enableSearch ?? false,
- type: (model.type ?? ModelType.Chat) as ModelType
+ type: (model.type ?? ModelType.Chat) as ModelType,
+ supportedEndpointTypes: model.supportedEndpointTypes,
+ endpointType: model.endpointType
})
const createQueryHandle = (
@@ -215,7 +217,8 @@ export const useModelStore = defineStore('model', () => {
vision: resolveModelVision(config.vision ?? normalized.vision),
functionCall: resolveModelFunctionCall(config.functionCall ?? normalized.functionCall),
reasoning: config.reasoning ?? normalized.reasoning ?? false,
- type: config.type ?? normalized.type ?? ModelType.Chat
+ type: config.type ?? normalized.type ?? ModelType.Chat,
+ endpointType: config.endpointType ?? normalized.endpointType
}
}
} catch (error) {
@@ -355,7 +358,10 @@ export const useModelStore = defineStore('model', () => {
(model as RENDERER_MODEL_META).enableSearch ??
(fallback as RENDERER_MODEL_META | undefined)?.enableSearch ??
false,
- type: (model.type ?? fallback?.type ?? ModelType.Chat) as ModelType
+ type: (model.type ?? fallback?.type ?? ModelType.Chat) as ModelType,
+ supportedEndpointTypes:
+ model.supportedEndpointTypes ?? fallback?.supportedEndpointTypes,
+ endpointType: model.endpointType ?? fallback?.endpointType
}
}
@@ -409,7 +415,9 @@ export const useModelStore = defineStore('model', () => {
vision: meta.vision || false,
functionCall: meta.functionCall || false,
reasoning: meta.reasoning || false,
- type: (meta.type || ModelType.Chat) as ModelType
+ type: (meta.type || ModelType.Chat) as ModelType,
+ supportedEndpointTypes: meta.supportedEndpointTypes,
+ endpointType: meta.endpointType
}))
}
} catch (error) {
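
A small sketch (not part of the patch) of the merge order these store changes rely on: a per-model config value wins over normalized metadata via nullish coalescing. The shapes below are assumptions, not the store's real types.

import type { NewApiEndpointType } from '@shared/model'

type HasEndpoint = { endpointType?: NewApiEndpointType }

// config.endpointType, when present, overrides whatever normalization produced.
const mergeEndpointType = (config: HasEndpoint, normalized: HasEndpoint) =>
  config.endpointType ?? normalized.endpointType

console.log(mergeEndpointType({ endpointType: 'anthropic' }, { endpointType: 'openai' })) // 'anthropic'
console.log(mergeEndpointType({}, { endpointType: 'openai' })) // 'openai'
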
diff --git a/src/shared/model.ts b/src/shared/model.ts
index 2a53fc9e5..97d93cc40 100644
--- a/src/shared/model.ts
+++ b/src/shared/model.ts
@@ -13,3 +13,37 @@ export enum ApiEndpointType {
Image = 'image',
Video = 'video'
}
+
+export const NEW_API_ENDPOINT_TYPES = [
+ 'openai',
+ 'openai-response',
+ 'anthropic',
+ 'gemini',
+ 'image-generation'
+] as const
+
+export type NewApiEndpointType = (typeof NEW_API_ENDPOINT_TYPES)[number]
+
+export type NewApiCapabilityProviderId = 'openai' | 'anthropic' | 'gemini'
+
+export const isNewApiEndpointType = (value: unknown): value is NewApiEndpointType =>
+ typeof value === 'string' && NEW_API_ENDPOINT_TYPES.includes(value as NewApiEndpointType)
+
+export const resolveNewApiCapabilityProviderId = (
+ endpointType: NewApiEndpointType
+): NewApiCapabilityProviderId => {
+ switch (endpointType) {
+ case 'anthropic':
+ return 'anthropic'
+ case 'gemini':
+ return 'gemini'
+ case 'openai':
+ case 'openai-response':
+ case 'image-generation':
+ default:
+ return 'openai'
+ }
+}
+
+export const isChatSelectableModelType = (type: ModelType | undefined): boolean =>
+ type === undefined || type === ModelType.Chat || type === ModelType.ImageGeneration
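
A short sketch (not part of the patch) of how the new helpers compose, using only the exports added above.

import { isNewApiEndpointType, resolveNewApiCapabilityProviderId } from '@shared/model'

// Untrusted config values are narrowed first; 'openai-response' and
// 'image-generation' both resolve to openai capability semantics.
const raw: unknown = 'openai-response'
if (isNewApiEndpointType(raw)) {
  console.log(resolveNewApiCapabilityProviderId(raw)) // 'openai'
}
console.log(resolveNewApiCapabilityProviderId('gemini')) // 'gemini'
console.log(resolveNewApiCapabilityProviderId('image-generation')) // 'openai'
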
diff --git a/src/shared/types/presenters/legacy.presenters.d.ts b/src/shared/types/presenters/legacy.presenters.d.ts
index 6778d9b58..c62592531 100644
--- a/src/shared/types/presenters/legacy.presenters.d.ts
+++ b/src/shared/types/presenters/legacy.presenters.d.ts
@@ -3,6 +3,7 @@ import { BrowserWindow } from 'electron'
import { MessageFile } from './chat'
import { ShowResponse } from 'ollama'
import { ShortcutKeySetting } from '@/presenter/configPresenter/shortcutKeySettings'
+import type { NewApiEndpointType } from '@shared/model'
import { ApiEndpointType, ModelType } from '@shared/model'
import type {
HookEventName,
@@ -171,6 +172,7 @@ export interface ModelConfig {
maxCompletionTokens?: number // GPT-5 series uses this parameter to replace maxTokens
conversationId?: string
apiEndpoint?: ApiEndpointType
+ endpointType?: NewApiEndpointType
// Search-related parameters
enableSearch?: boolean
forcedSearch?: boolean
@@ -774,6 +776,8 @@ export type RENDERER_MODEL_META = {
contextLength?: number
maxTokens?: number
description?: string
+ supportedEndpointTypes?: NewApiEndpointType[]
+ endpointType?: NewApiEndpointType
}
export type MODEL_META = {
id: string
@@ -789,9 +793,12 @@ export type MODEL_META = {
contextLength?: number
maxTokens?: number
description?: string
+ supportedEndpointTypes?: NewApiEndpointType[]
+ endpointType?: NewApiEndpointType
}
export type LLM_PROVIDER = {
id: string
+ capabilityProviderId?: string
name: string
apiType: string
apiKey: string
diff --git a/src/shared/types/presenters/llmprovider.presenter.d.ts b/src/shared/types/presenters/llmprovider.presenter.d.ts
index 3a629dd71..6e77a2b54 100644
--- a/src/shared/types/presenters/llmprovider.presenter.d.ts
+++ b/src/shared/types/presenters/llmprovider.presenter.d.ts
@@ -1,6 +1,7 @@
import { ShowResponse } from 'ollama'
import type { ChatMessage } from '../core/chat-message'
import { ModelType } from '../core/model'
+import type { NewApiEndpointType } from '@shared/model'
import type { AcpDebugRequest, AcpDebugRunResult, AcpWorkdirInfo } from './legacy.presenters'
/**
@@ -23,6 +24,8 @@ export type RENDERER_MODEL_META = {
contextLength?: number
maxTokens?: number
description?: string
+ supportedEndpointTypes?: NewApiEndpointType[]
+ endpointType?: NewApiEndpointType
}
export type MODEL_META = {
@@ -40,10 +43,13 @@ export type MODEL_META = {
contextLength?: number
maxTokens?: number
description?: string
+ supportedEndpointTypes?: NewApiEndpointType[]
+ endpointType?: NewApiEndpointType
}
export type LLM_PROVIDER = {
id: string
+ capabilityProviderId?: string
name: string
apiType: string
apiKey: string
diff --git a/test/main/presenter/llmProviderPresenter/newApiProvider.test.ts b/test/main/presenter/llmProviderPresenter/newApiProvider.test.ts
new file mode 100644
index 000000000..b27612daa
--- /dev/null
+++ b/test/main/presenter/llmProviderPresenter/newApiProvider.test.ts
@@ -0,0 +1,288 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import type {
+ ChatMessage,
+ IConfigPresenter,
+ LLMCoreStreamEvent,
+ LLM_PROVIDER,
+ ModelConfig
+} from '../../../../src/shared/presenter'
+import { ApiEndpointType, ModelType } from '../../../../src/shared/model'
+import { NewApiProvider } from '../../../../src/main/presenter/llmProviderPresenter/providers/newApiProvider'
+
+const {
+ mockOpenAIChatCreate,
+ mockOpenAIResponsesCreate,
+ mockOpenAIModelsList,
+ mockAnthropicModelsList,
+ mockAnthropicMessagesCreate,
+ mockGetProxyUrl
+} = vi.hoisted(() => ({
+ mockOpenAIChatCreate: vi.fn(),
+ mockOpenAIResponsesCreate: vi.fn(),
+ mockOpenAIModelsList: vi.fn().mockResolvedValue({ data: [] }),
+ mockAnthropicModelsList: vi.fn().mockResolvedValue({ data: [] }),
+ mockAnthropicMessagesCreate: vi.fn().mockResolvedValue({}),
+ mockGetProxyUrl: vi.fn().mockReturnValue(null)
+}))
+
+vi.mock('electron', () => ({
+ app: {
+ getName: vi.fn(() => 'DeepChat'),
+ getVersion: vi.fn(() => '0.0.0-test'),
+ getPath: vi.fn(() => '/mock/path'),
+ isReady: vi.fn(() => true),
+ on: vi.fn()
+ },
+ session: {},
+ ipcMain: {
+ on: vi.fn(),
+ handle: vi.fn(),
+ removeHandler: vi.fn()
+ },
+ BrowserWindow: vi.fn(() => ({
+ loadURL: vi.fn(),
+ loadFile: vi.fn(),
+ on: vi.fn(),
+ webContents: { send: vi.fn(), on: vi.fn(), isDestroyed: vi.fn(() => false) },
+ isDestroyed: vi.fn(() => false),
+ close: vi.fn(),
+ show: vi.fn(),
+ hide: vi.fn()
+ })),
+ dialog: {
+ showOpenDialog: vi.fn()
+ },
+ shell: {
+ openExternal: vi.fn()
+ }
+}))
+
+vi.mock('openai', () => {
+ class MockOpenAI {
+ chat = {
+ completions: {
+ create: mockOpenAIChatCreate
+ }
+ }
+ responses = {
+ create: mockOpenAIResponsesCreate
+ }
+ models = {
+ list: mockOpenAIModelsList
+ }
+ }
+
+ return {
+ default: MockOpenAI,
+ AzureOpenAI: MockOpenAI
+ }
+})
+
+vi.mock('@anthropic-ai/sdk', () => {
+ class MockAnthropic {
+ models = {
+ list: mockAnthropicModelsList
+ }
+ messages = {
+ create: mockAnthropicMessagesCreate
+ }
+
+ constructor(_: Record<string, unknown>) {}
+ }
+
+ return {
+ default: MockAnthropic
+ }
+})
+
+vi.mock('@google/genai', () => ({
+ Content: class {},
+ GoogleGenAI: class MockGoogleGenAI {
+ models = {
+ list: vi.fn().mockResolvedValue([]),
+ generateContent: vi.fn().mockResolvedValue({ text: 'ok' })
+ }
+
+ constructor(_: Record<string, unknown>) {}
+ },
+ FunctionCallingConfigMode: {
+ ANY: 'ANY',
+ AUTO: 'AUTO',
+ NONE: 'NONE'
+ },
+ GenerateContentParameters: class {},
+ GenerateContentResponseUsageMetadata: class {},
+ GenerateContentConfig: class {},
+ HarmBlockThreshold: {
+ BLOCK_NONE: 'BLOCK_NONE',
+ BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',
+ BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',
+ BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'
+ },
+ HarmCategory: {
+ HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',
+ HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',
+ HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+ HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'
+ },
+ Modality: {
+ TEXT: 'TEXT',
+ IMAGE: 'IMAGE'
+ },
+ Part: class {},
+ SafetySetting: class {},
+ Tool: class {}
+}))
+
+vi.mock('@/presenter', () => ({
+ presenter: {
+ devicePresenter: {
+ cacheImage: vi.fn()
+ }
+ }
+}))
+
+vi.mock('@/eventbus', () => ({
+ eventBus: {
+ on: vi.fn(),
+ sendToRenderer: vi.fn(),
+ sendToMain: vi.fn(),
+ emit: vi.fn(),
+ send: vi.fn()
+ },
+ SendTarget: {
+ ALL_WINDOWS: 'ALL_WINDOWS'
+ }
+}))
+
+vi.mock('@/events', () => ({
+ CONFIG_EVENTS: {
+ PROXY_RESOLVED: 'PROXY_RESOLVED',
+ PROVIDER_ATOMIC_UPDATE: 'PROVIDER_ATOMIC_UPDATE',
+ PROVIDER_BATCH_UPDATE: 'PROVIDER_BATCH_UPDATE',
+ MODEL_LIST_CHANGED: 'MODEL_LIST_CHANGED'
+ },
+ NOTIFICATION_EVENTS: {
+ SHOW_ERROR: 'SHOW_ERROR'
+ }
+}))
+
+vi.mock('../../../../src/main/presenter/proxyConfig', () => ({
+ proxyConfig: {
+ getProxyUrl: mockGetProxyUrl
+ }
+}))
+
+vi.mock('../../../../src/main/presenter/configPresenter/modelCapabilities', () => ({
+ modelCapabilities: {
+ supportsReasoningEffort: vi.fn().mockReturnValue(false),
+ supportsVerbosity: vi.fn().mockReturnValue(false),
+ supportsReasoning: vi.fn().mockReturnValue(false),
+ supportsVision: vi.fn().mockReturnValue(false),
+ supportsToolCall: vi.fn().mockReturnValue(false),
+ supportsImageOutput: vi.fn().mockReturnValue(false),
+ getThinkingBudgetRange: vi.fn().mockReturnValue({}),
+ resolveProviderId: vi.fn((providerId: string) => providerId)
+ }
+}))
+
+const createProvider = (overrides?: Partial<LLM_PROVIDER>): LLM_PROVIDER => ({
+ id: 'new-api',
+ name: 'New API',
+ apiType: 'new-api',
+ apiKey: 'test-key',
+ baseUrl: 'https://www.newapi.ai',
+ enable: false,
+ models: [],
+ customModels: [],
+ enabledModels: [],
+ disabledModels: [],
+ ...overrides
+})
+
+const createConfigPresenter = (
+ modelConfigById: Record<string, Partial<ModelConfig>> = {}
+): IConfigPresenter =>
+ ({
+ getProviders: vi.fn().mockReturnValue([]),
+ getProviderModels: vi.fn().mockReturnValue([]),
+ getCustomModels: vi.fn().mockReturnValue([]),
+ getDbProviderModels: vi.fn().mockReturnValue([]),
+ getModelConfig: vi.fn((modelId: string) => ({
+ type: ModelType.Chat,
+ apiEndpoint: ApiEndpointType.Chat,
+ ...modelConfigById[modelId]
+ })),
+ getSetting: vi.fn().mockReturnValue(undefined),
+ getModelStatus: vi.fn().mockReturnValue(false),
+ setProviderModels: vi.fn(),
+ hasUserModelConfig: vi.fn().mockReturnValue(false),
+ setModelConfig: vi.fn()
+ }) as unknown as IConfigPresenter
+
+describe('NewApiProvider capability routing', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ })
+
+ it('maps openai-response delegates to openai capability semantics', () => {
+ const provider = new NewApiProvider(createProvider(), createConfigPresenter())
+ const delegateProvider = (provider as any).openaiResponsesDelegate.provider as LLM_PROVIDER
+
+ expect(delegateProvider.id).toBe('new-api')
+ expect(delegateProvider.capabilityProviderId).toBe('openai')
+ expect(delegateProvider.apiType).toBe('openai-responses')
+ })
+
+ it('maps gemini delegates to gemini capability semantics', () => {
+ const provider = new NewApiProvider(createProvider(), createConfigPresenter())
+ const delegateProvider = (provider as any).geminiDelegate.provider as LLM_PROVIDER
+
+ expect(delegateProvider.id).toBe('new-api')
+ expect(delegateProvider.capabilityProviderId).toBe('gemini')
+ expect(delegateProvider.apiType).toBe('gemini')
+ })
+
+ it('maps anthropic delegates to anthropic capability semantics', () => {
+ const provider = new NewApiProvider(createProvider(), createConfigPresenter())
+ const delegateProvider = (provider as any).anthropicDelegate.provider as LLM_PROVIDER
+
+ expect(delegateProvider.id).toBe('new-api')
+ expect(delegateProvider.capabilityProviderId).toBe('anthropic')
+ expect(delegateProvider.apiType).toBe('anthropic')
+ })
+
+ it('keeps image-generation on the image runtime route while using openai capabilities', async () => {
+ const configPresenter = createConfigPresenter({
+ 'gpt-image-1': {
+ endpointType: 'image-generation',
+ apiEndpoint: ApiEndpointType.Chat,
+ type: ModelType.Chat
+ }
+ })
+ const provider = new NewApiProvider(createProvider(), configPresenter)
+ const openaiChatDelegate = (provider as any).openaiChatDelegate
+ const coreStreamSpy = vi
+ .spyOn(openaiChatDelegate, 'coreStream')
+ .mockImplementation(async function* (
+ _messages: ChatMessage[],
+ _modelId: string,
+ modelConfig: ModelConfig
+ ): AsyncIterable<LLMCoreStreamEvent> {
+ expect(modelConfig.apiEndpoint).toBe(ApiEndpointType.Image)
+ expect(modelConfig.type).toBe(ModelType.ImageGeneration)
+ expect(modelConfig.endpointType).toBe('image-generation')
+ yield { type: 'text', content: 'generated-image' } as LLMCoreStreamEvent
+ })
+
+ const result = await provider.completions(
+ [{ role: 'user', content: 'Draw a cat' }],
+ 'gpt-image-1'
+ )
+
+ expect(openaiChatDelegate.provider.capabilityProviderId).toBe('openai')
+ expect(coreStreamSpy).toHaveBeenCalledOnce()
+ expect(result.content).toBe('generated-image')
+ })
+})
diff --git a/test/renderer/components/ChatStatusBar.test.ts b/test/renderer/components/ChatStatusBar.test.ts
index 481657b22..e54ba234c 100644
--- a/test/renderer/components/ChatStatusBar.test.ts
+++ b/test/renderer/components/ChatStatusBar.test.ts
@@ -20,7 +20,11 @@ type TestGenerationSettings = {
type ExtraModelGroup = {
providerId: string
providerName: string
- models: Array<{ id: string; name: string }>
+ models: Array<{
+ id: string
+ name: string
+ type?: 'chat' | 'embedding' | 'rerank' | 'imageGeneration'
+ }>
}
type SetupOptions = {
@@ -656,6 +660,51 @@ describe('ChatStatusBar model and session panels', () => {
expect((wrapper.vm as any).isModelSettingsExpanded).toBe(true)
})
+ it('filters embedding and rerank models out of the chat model list', async () => {
+ const { wrapper } = await setup({
+ extraModelGroups: [
+ {
+ providerId: 'new-api',
+ providerName: 'New API',
+ models: [
+ { id: 'text-embedding-3-large', name: 'Embedding', type: 'embedding' },
+ { id: 'bge-rerank-v2', name: 'Rerank', type: 'rerank' },
+ { id: 'gpt-4.1', name: 'GPT-4.1', type: 'chat' },
+ { id: 'gpt-image-1', name: 'GPT Image 1', type: 'imageGeneration' }
+ ]
+ }
+ ]
+ })
+
+ const filteredGroups = (wrapper.vm as any).filteredModelGroups as Array<{
+ providerId: string
+ models: Array<{ id: string }>
+ }>
+ const newApiGroup = filteredGroups.find((group) => group.providerId === 'new-api')
+
+ expect(newApiGroup?.models.map((model) => model.id)).toEqual(['gpt-4.1', 'gpt-image-1'])
+ expect(wrapper.text()).not.toContain('text-embedding-3-large')
+ expect(wrapper.text()).not.toContain('bge-rerank-v2')
+ })
+
+ it('skips non-chat defaults and falls back to the first chat-selectable model', async () => {
+ const { wrapper, draftStore } = await setup({
+ extraModelGroups: [
+ {
+ providerId: 'new-api',
+ providerName: 'New API',
+ models: [{ id: 'text-embedding-3-large', name: 'Embedding', type: 'embedding' }]
+ }
+ ],
+ defaultModel: { providerId: 'new-api', modelId: 'text-embedding-3-large' },
+ preferredModel: undefined
+ })
+
+ expect(draftStore.providerId).toBe('openai')
+ expect(draftStore.modelId).toBe('gpt-4')
+ expect((wrapper.vm as any).displayModelText).toBe('gpt-4')
+ })
+
it('shows reasoning effort controls only when model capability supports it', async () => {
const enabled = await setup({
hasActiveSession: true,
diff --git a/test/renderer/components/ModelConfigDialog.test.ts b/test/renderer/components/ModelConfigDialog.test.ts
index a678ad37b..fc5a6915f 100644
--- a/test/renderer/components/ModelConfigDialog.test.ts
+++ b/test/renderer/components/ModelConfigDialog.test.ts
@@ -1,7 +1,8 @@
import { describe, expect, it, vi } from 'vitest'
-import { defineComponent, reactive } from 'vue'
+import { defineComponent, nextTick, reactive, ref } from 'vue'
import { flushPromises, mount } from '@vue/test-utils'
import type { ReasoningPortrait } from '../../../src/shared/types/model-db'
+import { ApiEndpointType, ModelType } from '../../../src/shared/model'
const passthrough = (name: string) =>
defineComponent({
@@ -16,6 +17,10 @@ type SetupOptions = {
providerApiType?: string
modelConfig?: Record<string, unknown>
reasoningPortrait?: ReasoningPortrait | null
+ mode?: 'create' | 'edit'
+ isCustomModel?: boolean
+ providerModels?: Array<Record<string, unknown>>
+ customModels?: Array<Record<string, unknown>>
}
const setup = async (options: SetupOptions) => {
@@ -39,11 +44,16 @@ const setup = async (options: SetupOptions) => {
}
const modelStore = reactive({
- customModels: [],
+ customModels: [
+ {
+ providerId: options.providerId,
+ models: options.customModels ?? []
+ }
+ ],
allProviderModels: [
{
providerId: options.providerId,
- models: [{ id: options.modelId, name: options.modelName }]
+ models: options.providerModels ?? [{ id: options.modelId, name: options.modelName }]
}
],
addCustomModel: vi.fn().mockResolvedValue(undefined),
@@ -66,6 +76,12 @@ const setup = async (options: SetupOptions) => {
vi.doMock('@/stores/modelStore', () => ({
useModelStore: () => modelStore
}))
+ vi.doMock('pinia', () => ({
+ storeToRefs: () => ({
+ customModels: ref(modelStore.customModels),
+ allProviderModels: ref(modelStore.allProviderModels)
+ })
+ }))
vi.doMock('@/stores/providerStore', () => ({
useProviderStore: () => providerStore
}))
@@ -84,7 +100,9 @@ const setup = async (options: SetupOptions) => {
open: true,
modelId: options.modelId,
modelName: options.modelName,
- providerId: options.providerId
+ providerId: options.providerId,
+ mode: options.mode ?? 'edit',
+ isCustomModel: options.isCustomModel ?? false
},
global: {
stubs: {
@@ -116,7 +134,7 @@ const setup = async (options: SetupOptions) => {
await flushPromises()
- return { wrapper }
+ return { wrapper, modelConfigStore }
}
describe('ModelConfigDialog reasoning portraits', () => {
@@ -217,3 +235,92 @@ describe('ModelConfigDialog reasoning portraits', () => {
expect(wrapper.text()).not.toContain('settings.model.modelConfig.thinkingBudget.label')
})
})
+
+describe('ModelConfigDialog new-api endpoint normalization', () => {
+ it('restores chat routing and provider model type when switching away from image-generation', async () => {
+ const { wrapper, modelConfigStore } = await setup({
+ providerId: 'new-api',
+ modelId: 'gpt-4.1',
+ modelName: 'GPT-4.1',
+ providerApiType: 'new-api',
+ providerModels: [
+ {
+ id: 'gpt-4.1',
+ name: 'GPT-4.1',
+ type: ModelType.Chat,
+ supportedEndpointTypes: ['openai', 'image-generation'],
+ endpointType: 'openai'
+ }
+ ],
+ modelConfig: {
+ type: ModelType.ImageGeneration,
+ apiEndpoint: ApiEndpointType.Image,
+ endpointType: 'image-generation'
+ }
+ })
+
+ expect((wrapper.vm as any).config.apiEndpoint).toBe(ApiEndpointType.Image)
+ expect((wrapper.vm as any).config.type).toBe(ModelType.ImageGeneration)
+
+ ;(wrapper.vm as any).config.endpointType = 'openai'
+ await nextTick()
+ await flushPromises()
+
+ expect((wrapper.vm as any).config.apiEndpoint).toBe(ApiEndpointType.Chat)
+ expect((wrapper.vm as any).config.type).toBe(ModelType.Chat)
+
+ await (wrapper.vm as any).handleSave()
+
+ expect(modelConfigStore.setModelConfig).toHaveBeenCalledWith(
+ 'gpt-4.1',
+ 'new-api',
+ expect.objectContaining({
+ endpointType: 'openai',
+ apiEndpoint: ApiEndpointType.Chat,
+ type: ModelType.Chat
+ })
+ )
+ })
+
+ it('forces image endpoint for image-generation and falls back to chat type for custom models', async () => {
+ const { wrapper, modelConfigStore } = await setup({
+ providerId: 'new-api',
+ modelId: '',
+ modelName: '',
+ providerApiType: 'new-api',
+ mode: 'create',
+ modelConfig: {
+ type: ModelType.Chat,
+ apiEndpoint: ApiEndpointType.Chat
+ }
+ })
+
+ ;(wrapper.vm as any).config.endpointType = 'image-generation'
+ await nextTick()
+ await flushPromises()
+
+ expect((wrapper.vm as any).config.apiEndpoint).toBe(ApiEndpointType.Image)
+ expect((wrapper.vm as any).config.type).toBe(ModelType.ImageGeneration)
+
+ ;(wrapper.vm as any).config.endpointType = 'openai'
+ await nextTick()
+ await flushPromises()
+
+ expect((wrapper.vm as any).config.apiEndpoint).toBe(ApiEndpointType.Chat)
+ expect((wrapper.vm as any).config.type).toBe(ModelType.Chat)
+
+ ;(wrapper.vm as any).modelIdField = 'custom-image-model'
+ ;(wrapper.vm as any).modelNameField = 'Custom Image Model'
+ await (wrapper.vm as any).handleSave()
+
+ expect(modelConfigStore.setModelConfig).toHaveBeenCalledWith(
+ 'custom-image-model',
+ 'new-api',
+ expect.objectContaining({
+ endpointType: 'openai',
+ apiEndpoint: ApiEndpointType.Chat,
+ type: ModelType.Chat
+ })
+ )
+ })
+})