6 changes: 4 additions & 2 deletions src/main/presenter/deepchatAgentPresenter/dispatch.ts
@@ -396,7 +396,8 @@ export async function executeTools(
   toolOutputGuard: ToolOutputGuard,
   contextLength: number,
   maxTokens: number,
-  hooks?: ProcessHooks
+  hooks?: ProcessHooks,
+  providerId?: string
 ): Promise<{
   executed: number
   pendingInteractions: PendingToolInteraction[]
@@ -445,7 +446,8 @@ export async function executeTools(
       type: 'function',
       function: { name: tc.name, arguments: tc.arguments },
       server: toolDef?.server,
-      conversationId: io.sessionId
+      conversationId: io.sessionId,
+      providerId: providerId?.trim() || undefined
     }

     const toolContext = {
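The dispatch-level change is a pass-through: executeTools gains an optional providerId and stamps a normalized copy onto each tool-call payload. A minimal sketch of that normalization, assuming a payload shape inferred from the fields visible in the diff (the ToolCallPayload and normalizeProviderId names are hypothetical, not the project's):

// Hypothetical payload shape, reconstructed from the fields in the diff above.
interface ToolCallPayload {
  type: 'function'
  function: { name: string; arguments: string }
  server?: string
  conversationId: string
  providerId?: string
}

// Blank or whitespace-only ids collapse to undefined instead of an empty
// string, so downstream consumers can rely on a simple truthiness check.
function normalizeProviderId(providerId?: string): string | undefined {
  return providerId?.trim() || undefined
}

normalizeProviderId('  openai  ') // 'openai'
normalizeProviderId('   ')        // undefined
normalizeProviderId(undefined)    // undefined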
4 changes: 3 additions & 1 deletion src/main/presenter/deepchatAgentPresenter/index.ts
@@ -2351,6 +2351,7 @@ export class DeepChatAgentPresenter implements IAgentImplementation {
     }

     const projectDir = this.resolveProjectDir(sessionId)
+    const sessionState = await this.getSessionState(sessionId)
     const toolDefinitions = await this.loadToolDefinitionsForSession(sessionId, projectDir)

     const toolDefinition = toolDefinitions.find((definition) => {
@@ -2386,7 +2387,8 @@ export class DeepChatAgentPresenter implements IAgentImplementation {
         arguments: toolCall.params || '{}'
       },
       server: toolDefinition?.server,
-      conversationId: sessionId
+      conversationId: sessionId,
+      providerId: sessionState?.providerId?.trim() || undefined
     }

     try {
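In this direct tool-call path the providerId comes from session state rather than a function parameter: the session is fetched once, up front, and its id is normalized the same way. A sketch under the assumption that getSessionState returns an object with an optional providerId (SessionState and buildToolCallMeta are illustrative names only):

interface SessionState {
  providerId?: string
}

async function buildToolCallMeta(
  sessionId: string,
  getSessionState: (id: string) => Promise<SessionState | null>
): Promise<{ conversationId: string; providerId?: string }> {
  // Fetch the session once so the providerId is available when the
  // tool-call payload is assembled further down.
  const sessionState = await getSessionState(sessionId)
  return {
    conversationId: sessionId,
    providerId: sessionState?.providerId?.trim() || undefined
  }
}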
3 changes: 2 additions & 1 deletion src/main/presenter/deepchatAgentPresenter/process.ts
@@ -151,7 +151,8 @@ export async function processStream(params: ProcessParams): Promise<ProcessResul
       params.toolOutputGuard,
       modelConfig.contextLength > 0 ? modelConfig.contextLength : UNKNOWN_CONTEXT_LIMIT,
       maxTokens,
-      hooks
+      hooks,
+      providerId
     )
     toolCallCount += executed.executed
     echo.flush()
5 changes: 0 additions & 5 deletions src/main/presenter/index.ts
@@ -4,7 +4,6 @@ import { BrowserWindow, ipcMain, IpcMainInvokeEvent, app } from 'electron'
 import { WindowPresenter } from './windowPresenter'
 import { ShortcutPresenter } from './shortcutPresenter'
 import {
-  CONVERSATION,
   CONVERSATION_SETTINGS,
   IConfigPresenter,
   IDeeplinkPresenter,
@@ -361,10 +360,6 @@ export class Presenter implements IPresenter {
     return this.legacySessionManager?.getSessionSync(conversationId) ?? null
   }

-  async getLegacyConversation(conversationId: string): Promise<CONVERSATION | null> {
-    return await this.getLegacySessionPresenter().getConversation(conversationId)
-  }
-
   async updateLegacyConversationSettings(
     conversationId: string,
     settings: Partial<CONVERSATION_SETTINGS>
@@ -7,6 +7,7 @@ import {
   IConfigPresenter
 } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 // Define interface for 302AI API balance response
 interface _302AIBalanceResponse {
@@ -38,8 +39,12 @@ interface _302AIModelResponse {
 }

 export class _302AIProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   async completions(
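Every provider diff from here on repeats the same mechanical change: import ProviderMcpRuntimePort, widen the constructor with an optional mcpRuntime, and forward it to the base class. A generic sketch of the pattern (ExampleProvider is hypothetical; the assumption, not shown in this diff, is that OpenAICompatibleProvider's third constructor parameter is likewise optional):

import { LLM_PROVIDER, IConfigPresenter } from '@shared/presenter'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import type { ProviderMcpRuntimePort } from '../runtimePorts'

export class ExampleProvider extends OpenAICompatibleProvider {
  constructor(
    provider: LLM_PROVIDER,
    configPresenter: IConfigPresenter,
    // Optional, so call sites that don't wire an MCP runtime keep compiling.
    mcpRuntime?: ProviderMcpRuntimePort
  ) {
    super(provider, configPresenter, mcpRuntime)
  }
}

Because the parameter is optional and merely forwarded, each subclass stays behaviorally identical when no runtime is supplied.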
@@ -3,10 +3,15 @@ import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { proxyConfig } from '@/presenter/proxyConfig'
 import { ProxyAgent } from 'undici'
 import OpenAI from 'openai'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class AihubmixProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   protected createOpenAIClient(): void {
@@ -1,13 +1,18 @@
 import { LLM_PROVIDER, MODEL_META, IConfigPresenter, KeyStatus } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 interface CherryInUsageResponse {
   total_usage: number
 }

 export class CherryInProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   private getBaseUrl(): string {
@@ -11,10 +11,15 @@ import {
 import { DEFAULT_MODEL_CONTEXT_LENGTH, DEFAULT_MODEL_MAX_TOKENS } from '@shared/modelConfigDefaults'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { modelCapabilities } from '../../configPresenter/modelCapabilities'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class DashscopeProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   private supportsEnableThinking(modelId: string): boolean {
@@ -7,6 +7,7 @@ import {
 } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { SUMMARY_TITLES_PROMPT } from '../baseProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 // Define interface for DeepSeek API key response
 interface DeepSeekBalanceResponse {
@@ -20,8 +21,12 @@ interface DeepSeekBalanceResponse {
 }

 export class DeepseekProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   async completions(
@@ -17,6 +17,7 @@ import {
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { providerDbLoader } from '../../configPresenter/providerDbLoader'
 import { modelCapabilities } from '../../configPresenter/modelCapabilities'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class DoubaoProvider extends OpenAICompatibleProvider {
   // List of models that support thinking parameter
@@ -30,9 +31,13 @@ export class DoubaoProvider extends OpenAICompatibleProvider {
     'doubao-1-5-thinking-pro-m-250428'
   ]

-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
     // Initialize Doubao model configuration
-    super(provider, configPresenter)
+    super(provider, configPresenter, mcpRuntime)
   }

   private supportsThinking(modelId: string): boolean {
@@ -8,10 +8,15 @@ import {
 import { DEFAULT_MODEL_CONTEXT_LENGTH, DEFAULT_MODEL_MAX_TOKENS } from '@shared/modelConfigDefaults'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { ModelsPage } from 'openai/resources'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class GithubProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }
   protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {
     const response = (await this.openai.models.list(options)) as ModelsPage & {
@@ -1,6 +1,7 @@
 import { LLM_PROVIDER, LLMResponse, ChatMessage, IConfigPresenter } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { ModelConfig, MCPToolDefinition, LLMCoreStreamEvent } from '@shared/presenter'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class GrokProvider extends OpenAICompatibleProvider {
   // Image generation model ID
@@ -13,8 +14,12 @@ export class GrokProvider extends OpenAICompatibleProvider {
   // Models that support reasoning_effort parameter (grok-4 does not)
   private static readonly REASONING_EFFORT_MODELS: string[] = ['grok-3-mini', 'grok-3-mini-fast']

-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   // Check if it's an image model
@@ -6,6 +6,7 @@ import {
   IConfigPresenter
 } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 // Define interface for Groq model response (following PPIO naming convention)
 interface GroqModelResponse {
@@ -28,8 +29,12 @@ interface GroqModelResponse {
 }

 export class GroqProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   async completions(
@@ -1,9 +1,14 @@
 import { LLM_PROVIDER, MODEL_META, IConfigPresenter } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class JiekouProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {
@@ -1,7 +1,12 @@
 import { IConfigPresenter, LLM_PROVIDER } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'
 export class LMStudioProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }
 }
@@ -8,6 +8,7 @@ import {
   ModelScopeMcpSyncOptions
 } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 // Define interface for ModelScope MCP API response
 export interface ModelScopeMcpServerResponse {
@@ -46,8 +47,12 @@ export interface ModelScopeMcpServer {
 }

 export class ModelscopeProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   async completions(
@@ -14,10 +14,15 @@ import {
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
 import { providerDbLoader } from '../../configPresenter/providerDbLoader'
 import { modelCapabilities } from '../../configPresenter/modelCapabilities'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 export class O3fanProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   protected async fetchOpenAIModels(): Promise<MODEL_META[]> {
@@ -1,8 +1,13 @@
 import { IConfigPresenter, LLM_PROVIDER, LLMResponse } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'
 export class OpenAIProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   async completions(
@@ -7,6 +7,7 @@ import {
   IConfigPresenter
 } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 // Define interface for OpenRouter API key response
 interface OpenRouterKeyResponse {
@@ -57,8 +58,12 @@ interface OpenRouterModelResponse {
 }

 export class OpenRouterProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   async completions(
@@ -1,5 +1,6 @@
 import { LLM_PROVIDER, MODEL_META, IConfigPresenter } from '@shared/presenter'
 import { OpenAICompatibleProvider } from './openAICompatibleProvider'
+import type { ProviderMcpRuntimePort } from '../runtimePorts'

 /**
  * PoeProvider integrates Poe's OpenAI-compatible API surface with the shared
@@ -11,8 +12,12 @@ import { OpenAICompatibleProvider } from './openAICompatibleProvider'
  * tweak metadata so the renderer can present a clearer group name.
  */
 export class PoeProvider extends OpenAICompatibleProvider {
-  constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) {
-    super(provider, configPresenter)
+  constructor(
+    provider: LLM_PROVIDER,
+    configPresenter: IConfigPresenter,
+    mcpRuntime?: ProviderMcpRuntimePort
+  ) {
+    super(provider, configPresenter, mcpRuntime)
   }

   protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> {