Skip to content

Commit 3ec164f

Browse files
committed
chore: update koog to 0.8.0
1 parent ef35576 commit 3ec164f

15 files changed

Lines changed: 90 additions & 58 deletions

File tree

build.gradle.kts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,14 @@ allprojects {
2121
google()
2222
mavenCentral()
2323
}
24+
25+
configurations.configureEach {
26+
resolutionStrategy.eachDependency {
27+
if (requested.group == "org.jetbrains.kotlin" && requested.name.startsWith("kotlin-stdlib")) {
28+
useVersion(libs.versions.kotlin.get())
29+
}
30+
}
31+
}
2432
}
2533

2634
// Convenience task to publish mpp-core and mpp-ui to mavenLocal for mpp-idea composite build

gradle/libs.versions.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[versions]
22
# Kotlin & Compose
3-
kotlin = "2.2.0"
3+
kotlin = "2.3.10"
44
compose = "1.9.2"
55

66
# Gradle Plugins
@@ -30,7 +30,7 @@ logback = "1.5.19"
3030
kotlinLogging = "7.0.13"
3131

3232
# AI/LLM
33-
koog = "0.5.2"
33+
koog = "0.8.0"
3434
jtokkit = "1.1.0"
3535
mcpKotlinSdk = "0.7.2"
3636

mpp-core/src/commonMain/kotlin/cc/unitmesh/agent/executor/CodeReviewAgentExecutor.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ class CodeReviewAgentExecutor(
4848
): CodeReviewResult {
4949
resetExecution()
5050
conversationManager = ConversationManager(llmService, systemPrompt)
51-
logger.info(Json.encodeToString(linterSummary))
51+
logger.info { Json.encodeToString(linterSummary) }
5252
val initialUserMessage = buildInitialUserMessage(task, linterSummary)
5353

5454
while (shouldContinue()) {

mpp-core/src/commonMain/kotlin/cc/unitmesh/agent/mcp/McpClientManager.kt

Lines changed: 8 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
package cc.unitmesh.agent.mcp
22

33
import cc.unitmesh.agent.logging.getLogger
4-
import io.modelcontextprotocol.kotlin.sdk.Implementation
54
import io.modelcontextprotocol.kotlin.sdk.client.Client
65
import io.modelcontextprotocol.kotlin.sdk.client.SseClientTransport
76
import io.modelcontextprotocol.kotlin.sdk.shared.Transport
7+
import io.modelcontextprotocol.kotlin.sdk.types.ContentBlock
8+
import io.modelcontextprotocol.kotlin.sdk.types.Implementation
9+
import io.modelcontextprotocol.kotlin.sdk.types.ToolSchema
810
import kotlinx.coroutines.Dispatchers
911
import kotlinx.coroutines.withContext
1012
import kotlinx.serialization.json.Json
@@ -106,15 +108,8 @@ class McpClientManager(
106108
name = tool.name,
107109
description = tool.description ?: "",
108110
serverName = serverName,
109-
inputSchema = tool.inputSchema?.let {
110-
// inputSchema is Tool.Input, need to convert to JsonObject first
111-
val schemaJson = Json.decodeFromString<JsonObject>(
112-
Json.encodeToString(
113-
io.modelcontextprotocol.kotlin.sdk.Tool.Input.serializer(),
114-
it
115-
)
116-
)
117-
json.encodeToString(JsonObject.serializer(), schemaJson)
111+
inputSchema = tool.inputSchema?.let {
112+
json.encodeToString(ToolSchema.serializer(), it)
118113
},
119114
enabled = false
120115
)
@@ -238,12 +233,12 @@ class McpClientManager(
238233
}
239234

240235
// Call tool
241-
val result = client.callTool(toolName, arguments = args, compatibility = true, options = null)
236+
val result = client.callTool(toolName, arguments = args, options = null)
242237

243238
// Convert result to JSON string
244-
if (result?.content?.isNotEmpty() == true) {
239+
if (result.content.isNotEmpty()) {
245240
json.encodeToString(
246-
io.modelcontextprotocol.kotlin.sdk.PromptMessageContent.serializer(),
241+
ContentBlock.serializer(),
247242
result.content.first()
248243
)
249244
} else {

mpp-core/src/commonMain/kotlin/cc/unitmesh/llm/ExecutorFactory.kt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import ai.koog.prompt.executor.llms.all.*
66
import cc.unitmesh.llm.clients.CustomOpenAILLMClient
77
import cc.unitmesh.llm.provider.LLMClientProvider
88
import cc.unitmesh.llm.provider.LLMClientRegistry
9+
import kotlin.time.ExperimentalTime
910

1011
/**
1112
* Try to auto-register GitHub Copilot provider.
@@ -42,6 +43,7 @@ internal expect fun createExecutorBlocking(
4243
* LLMClientRegistry.register(GithubCopilotClientProvider())
4344
* ```
4445
*/
46+
@OptIn(ExperimentalTime::class)
4547
object ExecutorFactory {
4648

4749
/**

mpp-core/src/commonMain/kotlin/cc/unitmesh/llm/LLMService.kt

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ import kotlinx.coroutines.flow.Flow
1717
import kotlinx.coroutines.flow.cancellable
1818
import kotlinx.coroutines.flow.flow
1919
import kotlinx.datetime.Clock
20+
import kotlin.time.ExperimentalTime
2021

2122
/**
2223
* LLM 服务
@@ -26,6 +27,7 @@ import kotlinx.datetime.Clock
2627
* @param compilerService 可选的编译器服务,用于编译 DevIns 命令
2728
* 如果不提供,将使用 DevInsCompilerService.getInstance()
2829
*/
30+
@OptIn(ExperimentalTime::class)
2931
class LLMService(
3032
private val config: ModelConfig,
3133
private val compressionConfig: CompressionConfig = CompressionConfig(),
@@ -83,7 +85,7 @@ class LLMService(
8385
.cancellable()
8486
.collect { frame ->
8587
when (frame) {
86-
is StreamFrame.Append -> {
88+
is StreamFrame.TextDelta -> {
8789
chunkCount++
8890
totalChars += frame.text.length
8991
if (chunkCount == 1) {
@@ -97,6 +99,11 @@ class LLMService(
9799
}
98100
emit(frame.text)
99101
}
102+
is StreamFrame.TextComplete -> {
103+
chunkCount++
104+
totalChars += frame.text.length
105+
emit(frame.text)
106+
}
100107
is StreamFrame.End -> {
101108
val elapsed = Clock.System.now().toEpochMilliseconds() - startTime
102109
logger.info { "✅ [LLM] Stream completed - chunks: $chunkCount, chars: $totalChars, time: ${elapsed}ms" }
@@ -121,7 +128,10 @@ class LLMService(
121128

122129
messagesSinceLastCompression++
123130
}
124-
is StreamFrame.ToolCall -> { /* Tool calls (可以后续扩展) */ }
131+
is StreamFrame.ReasoningDelta,
132+
is StreamFrame.ReasoningComplete,
133+
is StreamFrame.ToolCallDelta,
134+
is StreamFrame.ToolCallComplete -> { /* Tool calls and reasoning frames can be handled later. */ }
125135
}
126136
}
127137
} catch (e: Exception) {

mpp-core/src/commonMain/kotlin/cc/unitmesh/llm/PromptEnhancer.kt

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ class PromptEnhancer(
2929
*/
3030
suspend fun enhance(userInput: String, language: String = "zh"): String {
3131
if (userInput.isBlank()) {
32-
logger.warn("Empty user input provided for enhancement")
32+
logger.warn { "Empty user input provided for enhancement" }
3333
return userInput
3434
}
3535

@@ -43,7 +43,7 @@ class PromptEnhancer(
4343
// Render the enhancement prompt
4444
val enhancementPrompt = templateEngine.render(template, mapOf("context" to context))
4545

46-
logger.info("Sending enhancement request to LLM")
46+
logger.info { "Sending enhancement request to LLM" }
4747

4848
// Call LLM for enhancement
4949
val result = StringBuilder()
@@ -58,12 +58,12 @@ class PromptEnhancer(
5858
// Extract enhanced content from LLM response
5959
val enhancedContent = extractEnhancedContent(result.toString())
6060

61-
logger.info("Successfully enhanced prompt: ${userInput.take(50)}... -> ${enhancedContent.take(50)}...")
61+
logger.info { "Successfully enhanced prompt: ${userInput.take(50)}... -> ${enhancedContent.take(50)}..." }
6262

6363
return enhancedContent.ifEmpty { userInput }
6464

6565
} catch (e: Exception) {
66-
logger.error("Failed to enhance prompt: ${e.message}", e)
66+
logger.error(e) { "Failed to enhance prompt: ${e.message}" }
6767
return userInput // Return original input on error
6868
}
6969
}
@@ -90,7 +90,7 @@ class PromptEnhancer(
9090
return try {
9191
domainDictService?.loadContent() ?: ""
9292
} catch (e: Exception) {
93-
logger.warn("Failed to load domain dictionary: ${e.message}")
93+
logger.warn { "Failed to load domain dictionary: ${e.message}" }
9494
""
9595
}
9696
}
@@ -102,7 +102,7 @@ class PromptEnhancer(
102102
return try {
103103
findAndReadReadme()
104104
} catch (e: Exception) {
105-
logger.warn("Failed to load README: ${e.message}")
105+
logger.warn { "Failed to load README: ${e.message}" }
106106
""
107107
}
108108
}
@@ -163,7 +163,7 @@ class PromptEnhancer(
163163
return llmResponse.trim()
164164

165165
} catch (e: Exception) {
166-
logger.warn("Failed to parse LLM response, returning as-is: ${e.message}")
166+
logger.warn { "Failed to parse LLM response, returning as-is: ${e.message}" }
167167
return llmResponse.trim()
168168
}
169169
}

mpp-core/src/commonMain/kotlin/cc/unitmesh/llm/clients/CustomOpenAILLMClient.kt

Lines changed: 31 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -5,19 +5,24 @@ import ai.koog.prompt.dsl.Prompt
55
import ai.koog.prompt.executor.clients.ConnectionTimeoutConfig
66
import ai.koog.prompt.executor.clients.LLMClient
77
import ai.koog.prompt.executor.clients.openai.base.AbstractOpenAILLMClient
8-
import ai.koog.prompt.executor.clients.openai.base.OpenAIBasedSettings
8+
import ai.koog.prompt.executor.clients.openai.base.OpenAIBaseSettings
9+
import ai.koog.prompt.executor.clients.openai.base.OpenAICompatibleToolDescriptorSchemaGenerator
910
import ai.koog.prompt.executor.clients.openai.base.models.OpenAIMessage
1011
import ai.koog.prompt.executor.clients.openai.base.models.OpenAITool
1112
import ai.koog.prompt.executor.clients.openai.base.models.OpenAIToolChoice
12-
import ai.koog.prompt.executor.model.LLMChoice
1313
import ai.koog.prompt.llm.LLMProvider
1414
import ai.koog.prompt.llm.LLModel
15+
import ai.koog.prompt.message.Message
1516
import ai.koog.prompt.params.LLMParams
16-
import ai.koog.prompt.streaming.StreamFrameFlowBuilder
17+
import ai.koog.prompt.streaming.StreamFrame
1718
import io.github.oshai.kotlinlogging.KotlinLogging
1819
import io.ktor.client.*
19-
import kotlinx.datetime.Clock
20+
import kotlinx.coroutines.flow.Flow
21+
import kotlinx.coroutines.flow.collect
22+
import kotlinx.coroutines.flow.flow
2023
import kotlinx.serialization.Serializable
24+
import kotlin.time.Clock
25+
import kotlin.time.ExperimentalTime
2126

2227
/**
2328
* Configuration settings for custom OpenAI-compatible APIs (like GLM, custom endpoints, etc.)
@@ -30,7 +35,7 @@ class CustomOpenAIClientSettings(
3035
baseUrl: String,
3136
chatCompletionsPath: String = "chat/completions",
3237
timeoutConfig: ConnectionTimeoutConfig = ConnectionTimeoutConfig()
33-
) : OpenAIBasedSettings(baseUrl, chatCompletionsPath, timeoutConfig)
38+
) : OpenAIBaseSettings(baseUrl, chatCompletionsPath, timeoutConfig)
3439

3540
/**
3641
* Request model for custom OpenAI-compatible chat completion
@@ -122,6 +127,7 @@ data class CustomOpenAIChatCompletionStreamResponse(
122127
* @param baseClient Optional custom HTTP client
123128
* @param clock Clock instance for tracking timestamps
124129
*/
130+
@OptIn(ExperimentalTime::class)
125131
class CustomOpenAILLMClient(
126132
apiKey: String,
127133
baseUrl: String,
@@ -143,18 +149,15 @@ class CustomOpenAILLMClient(
143149
}
144150
}
145151
},
152+
"custom-openai",
146153
clock,
147-
staticLogger
154+
staticLogger,
155+
OpenAICompatibleToolDescriptorSchemaGenerator()
148156
) {
149157

150158
private companion object {
151159
private val staticLogger = KotlinLogging.logger { }
152160

153-
init {
154-
// Register custom OpenAI JSON schema generators for structured output
155-
// Use OpenAI provider since custom providers are OpenAI-compatible
156-
registerOpenAIJsonSchemaGenerators(LLMProvider.OpenAI)
157-
}
158161
}
159162

160163
override fun llmProvider(): LLMProvider = LLMProvider.OpenAI // OpenAI-compatible provider
@@ -194,7 +197,7 @@ class CustomOpenAILLMClient(
194197
return json.encodeToString(request)
195198
}
196199

197-
override fun processProviderChatResponse(response: CustomOpenAIChatCompletionResponse): List<LLMChoice> {
200+
override fun processProviderChatResponse(response: CustomOpenAIChatCompletionResponse): List<List<Message.Response>> {
198201
require(response.choices.isNotEmpty()) { "Empty choices in response" }
199202
return response.choices.map {
200203
it.message.toMessageResponses(
@@ -210,13 +213,23 @@ class CustomOpenAILLMClient(
210213
override fun decodeResponse(data: String): CustomOpenAIChatCompletionResponse =
211214
json.decodeFromString(data)
212215

213-
override suspend fun StreamFrameFlowBuilder.processStreamingChunk(chunk: CustomOpenAIChatCompletionStreamResponse) {
214-
chunk.choices.firstOrNull()?.let { choice ->
215-
choice.delta.content?.let { emitAppend(it) }
216-
choice.delta.toolCalls?.forEach { toolCall ->
217-
upsertToolCall(0, toolCall.id, toolCall.function.name, toolCall.function.arguments)
216+
override fun processStreamingResponse(
217+
response: Flow<CustomOpenAIChatCompletionStreamResponse>
218+
): Flow<StreamFrame> = flow {
219+
response.collect { chunk ->
220+
chunk.choices.firstOrNull()?.let { choice ->
221+
choice.delta.content?.let { emit(StreamFrame.TextDelta(it)) }
222+
choice.delta.toolCalls?.forEach { toolCall ->
223+
emit(
224+
StreamFrame.ToolCallDelta(
225+
id = toolCall.id,
226+
name = toolCall.function.name,
227+
content = toolCall.function.arguments
228+
)
229+
)
230+
}
231+
choice.finishReason?.let { emit(StreamFrame.End(it, createMetaInfo(chunk.usage))) }
218232
}
219-
choice.finishReason?.let { emitEnd(it, createMetaInfo(chunk.usage)) }
220233
}
221234
}
222235

@@ -225,4 +238,3 @@ class CustomOpenAILLMClient(
225238
throw UnsupportedOperationException("Moderation is not supported by custom OpenAI-compatible APIs.")
226239
}
227240
}
228-

mpp-core/src/commonTest/kotlin/cc/unitmesh/llm/CustomOpenAILLMClientTest.kt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,12 @@ import kotlin.test.Test
66
import kotlin.test.assertEquals
77
import kotlin.test.assertNotNull
88
import kotlin.test.assertTrue
9+
import kotlin.time.ExperimentalTime
910

1011
/**
1112
* 测试 CustomOpenAILLMClient 的基本功能
1213
*/
14+
@OptIn(ExperimentalTime::class)
1315
class CustomOpenAILLMClientTest {
1416

1517
@Test
@@ -107,4 +109,3 @@ class CustomOpenAILLMClientTest {
107109
}
108110
}
109111
}
110-

mpp-core/src/commonTest/kotlin/cc/unitmesh/llm/MiniMaxModelTest.kt

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,9 @@ class MiniMaxModelTest {
3838
assertEquals("MiniMax-M2.1", model.id)
3939
assertEquals(1_000_000L, model.contextLength)
4040
assertEquals(128_000, model.maxOutputTokens)
41-
assertTrue(model.capabilities.contains(LLMCapability.Completion))
42-
assertTrue(model.capabilities.contains(LLMCapability.Tools))
41+
val capabilities = model.capabilities.orEmpty()
42+
assertTrue(capabilities.contains(LLMCapability.Completion))
43+
assertTrue(capabilities.contains(LLMCapability.Tools))
4344
}
4445

4546
@Test
@@ -64,8 +65,9 @@ class MiniMaxModelTest {
6465
fun `test minimax vision model capabilities`() {
6566
val model = ModelRegistry.createModel(LLMProviderType.MINIMAX, "MiniMax-Text-01V")
6667
assertNotNull(model, "MiniMax-Text-01V model should be created")
67-
assertTrue(model.capabilities.contains(LLMCapability.Vision.Image))
68-
assertTrue(model.capabilities.contains(LLMCapability.Document))
68+
val capabilities = model.capabilities.orEmpty()
69+
assertTrue(capabilities.contains(LLMCapability.Vision.Image))
70+
assertTrue(capabilities.contains(LLMCapability.Document))
6971
}
7072

7173
@Test
@@ -100,4 +102,3 @@ class MiniMaxModelTest {
100102
assertEquals(128_000L, model.contextLength)
101103
}
102104
}
103-

0 commit comments

Comments (0)