Skip to content

Commit 3427bb7

Browse files
committed
refactor(session): consume native LLM events
1 parent 6b78662 commit 3427bb7

8 files changed

Lines changed: 478 additions & 335 deletions

File tree

bun.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

packages/opencode/package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,7 @@
105105
"@octokit/graphql": "9.0.2",
106106
"@octokit/rest": "catalog:",
107107
"@openauthjs/openauth": "catalog:",
108+
"@opencode-ai/llm": "workspace:*",
108109
"@opencode-ai/plugin": "workspace:*",
109110
"@opencode-ai/script": "workspace:*",
110111
"@opencode-ai/sdk": "workspace:*",
Lines changed: 223 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,223 @@
1+
import { ContentBlockID, FinishReason, LLMEvent, ProviderMetadata, ToolCallID, ToolResultValue, Usage } from "@opencode-ai/llm"
2+
import { Effect, Schema } from "effect"
3+
import { type streamText } from "ai"
4+
import { errorMessage } from "@/util/error"
5+
6+
// Resolved result of the AI SDK's streamText call, and the element type of
// its `fullStream` async iterable — the event union this adapter consumes.
type Result = Awaited<ReturnType<typeof streamText>>
type AISDKEvent = Result["fullStream"] extends AsyncIterable<infer T> ? T : never
8+
9+
export function adapterState() {
10+
return {
11+
step: 0,
12+
text: 0,
13+
reasoning: 0,
14+
currentTextID: undefined as ContentBlockID | undefined,
15+
currentReasoningID: undefined as ContentBlockID | undefined,
16+
toolNames: {} as Record<string, string>,
17+
}
18+
}
19+
20+
const contentBlockID = (value: string) => ContentBlockID.make(value)
21+
const toolCallID = (value: string) => ToolCallID.make(value)
22+
23+
function finishReason(value: string | undefined): FinishReason {
24+
return Schema.is(FinishReason)(value) ? value : "unknown"
25+
}
26+
27+
function providerMetadata(value: unknown): ProviderMetadata | undefined {
28+
return Schema.is(ProviderMetadata)(value) ? value : undefined
29+
}
30+
31+
function usage(value: unknown): Usage | undefined {
32+
if (!value || typeof value !== "object") return undefined
33+
const item = value as {
34+
inputTokens?: number
35+
outputTokens?: number
36+
totalTokens?: number
37+
reasoningTokens?: number
38+
cachedInputTokens?: number
39+
inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number }
40+
outputTokenDetails?: { reasoningTokens?: number }
41+
}
42+
const result = Object.fromEntries(
43+
Object.entries({
44+
inputTokens: item.inputTokens,
45+
outputTokens: item.outputTokens,
46+
totalTokens: item.totalTokens,
47+
reasoningTokens: item.outputTokenDetails?.reasoningTokens ?? item.reasoningTokens,
48+
cacheReadInputTokens: item.inputTokenDetails?.cacheReadTokens ?? item.cachedInputTokens,
49+
cacheWriteInputTokens: item.inputTokenDetails?.cacheWriteTokens,
50+
}).filter((entry) => entry[1] !== undefined),
51+
)
52+
return new Usage(result)
53+
}
54+
55+
export function toLLMEvents(
56+
state: ReturnType<typeof adapterState>,
57+
event: AISDKEvent,
58+
): Effect.Effect<ReadonlyArray<LLMEvent>, unknown> {
59+
switch (event.type) {
60+
case "start":
61+
return Effect.succeed([])
62+
63+
case "start-step":
64+
return Effect.succeed([LLMEvent.stepStart({ index: state.step })])
65+
66+
case "finish-step":
67+
return Effect.sync(() => [
68+
LLMEvent.stepFinish({
69+
index: state.step++,
70+
reason: finishReason(event.finishReason),
71+
usage: usage(event.usage),
72+
providerMetadata: providerMetadata(event.providerMetadata),
73+
}),
74+
])
75+
76+
case "finish":
77+
return Effect.sync(() => {
78+
state.toolNames = {}
79+
return [
80+
LLMEvent.requestFinish({
81+
reason: finishReason(event.finishReason),
82+
usage: usage(event.totalUsage),
83+
}),
84+
]
85+
})
86+
87+
case "text-start":
88+
return Effect.sync(() => {
89+
state.currentTextID = contentBlockID(event.id ?? `text-${state.text++}`)
90+
return [
91+
LLMEvent.textStart({
92+
id: state.currentTextID,
93+
providerMetadata: providerMetadata(event.providerMetadata),
94+
}),
95+
]
96+
})
97+
98+
case "text-delta":
99+
return Effect.succeed([
100+
LLMEvent.textDelta({
101+
id: event.id ? contentBlockID(event.id) : (state.currentTextID ?? contentBlockID(`text-${state.text++}`)),
102+
text: event.text,
103+
}),
104+
])
105+
106+
case "text-end":
107+
return Effect.succeed([
108+
LLMEvent.textEnd({
109+
id: event.id ? contentBlockID(event.id) : (state.currentTextID ?? contentBlockID(`text-${state.text++}`)),
110+
providerMetadata: providerMetadata(event.providerMetadata),
111+
}),
112+
])
113+
114+
case "reasoning-start":
115+
return Effect.sync(() => {
116+
state.currentReasoningID = contentBlockID(event.id)
117+
return [
118+
LLMEvent.reasoningStart({
119+
id: state.currentReasoningID,
120+
providerMetadata: providerMetadata(event.providerMetadata),
121+
}),
122+
]
123+
})
124+
125+
case "reasoning-delta":
126+
return Effect.succeed([
127+
LLMEvent.reasoningDelta({
128+
id: event.id ? contentBlockID(event.id) : (state.currentReasoningID ?? contentBlockID(`reasoning-${state.reasoning++}`)),
129+
text: event.text,
130+
}),
131+
])
132+
133+
case "reasoning-end":
134+
return Effect.sync(() => {
135+
const id = contentBlockID(event.id)
136+
state.currentReasoningID = undefined
137+
return [
138+
LLMEvent.reasoningEnd({
139+
id,
140+
providerMetadata: providerMetadata(event.providerMetadata),
141+
}),
142+
]
143+
})
144+
145+
case "tool-input-start":
146+
return Effect.sync(() => {
147+
state.toolNames[event.id] = event.toolName
148+
return [
149+
LLMEvent.toolInputStart({
150+
id: toolCallID(event.id),
151+
name: event.toolName,
152+
providerMetadata: providerMetadata(event.providerMetadata),
153+
}),
154+
]
155+
})
156+
157+
case "tool-input-delta":
158+
return Effect.succeed([
159+
LLMEvent.toolInputDelta({
160+
id: toolCallID(event.id),
161+
name: state.toolNames[event.id] ?? "unknown",
162+
text: event.delta ?? "",
163+
}),
164+
])
165+
166+
case "tool-input-end":
167+
return Effect.succeed([
168+
LLMEvent.toolInputEnd({
169+
id: toolCallID(event.id),
170+
name: state.toolNames[event.id] ?? "unknown",
171+
}),
172+
])
173+
174+
case "tool-call":
175+
return Effect.sync(() => {
176+
state.toolNames[event.toolCallId] = event.toolName
177+
return [
178+
LLMEvent.toolCall({
179+
id: toolCallID(event.toolCallId),
180+
name: event.toolName,
181+
input: event.input,
182+
providerExecuted: "providerExecuted" in event ? event.providerExecuted : undefined,
183+
providerMetadata: providerMetadata(event.providerMetadata),
184+
}),
185+
]
186+
})
187+
188+
case "tool-result":
189+
return Effect.sync(() => {
190+
const name = state.toolNames[event.toolCallId] ?? "unknown"
191+
delete state.toolNames[event.toolCallId]
192+
return [
193+
LLMEvent.toolResult({
194+
id: toolCallID(event.toolCallId),
195+
name,
196+
result: ToolResultValue.make(event.output),
197+
providerExecuted: "providerExecuted" in event ? event.providerExecuted : undefined,
198+
}),
199+
]
200+
})
201+
202+
case "tool-error":
203+
return Effect.sync(() => {
204+
const name = state.toolNames[event.toolCallId] ?? "unknown"
205+
delete state.toolNames[event.toolCallId]
206+
return [
207+
LLMEvent.toolError({
208+
id: toolCallID(event.toolCallId),
209+
name,
210+
message: errorMessage(event.error),
211+
}),
212+
]
213+
})
214+
215+
case "error":
216+
return Effect.fail(event.error)
217+
218+
default:
219+
return Effect.succeed([])
220+
}
221+
}
222+
223+
// Self-namespace re-export: lets consumers write `import { LLMAISDK } from "./llm-ai-sdk"`
// and access the adapter as LLMAISDK.adapterState / LLMAISDK.toLLMEvents.
export * as LLMAISDK from "./llm-ai-sdk"

packages/opencode/src/session/llm.ts

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ import * as Log from "@opencode-ai/core/util/log"
33
import { Context, Effect, Layer, Record } from "effect"
44
import * as Stream from "effect/Stream"
55
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
6+
import type { LLMEvent } from "@opencode-ai/llm"
67
import { mergeDeep } from "remeda"
78
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
89
import { ProviderTransform } from "@/provider/transform"
@@ -24,10 +25,10 @@ import { InstallationVersion } from "@opencode-ai/core/installation/version"
2425
import { EffectBridge } from "@/effect/bridge"
2526
import * as Option from "effect/Option"
2627
import * as OtelTracer from "@effect/opentelemetry/Tracer"
28+
import { LLMAISDK } from "./llm-ai-sdk"
2729

2830
const log = Log.create({ service: "llm" })
2931
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX
30-
type Result = Awaited<ReturnType<typeof streamText>>
3132

3233
// Avoid re-instantiating remeda's deep merge types in this hot LLM path; the runtime behavior is still mergeDeep.
3334
const mergeOptions = (target: Record<string, any>, source: Record<string, any> | undefined): Record<string, any> =>
@@ -52,10 +53,8 @@ export type StreamRequest = StreamInput & {
5253
abort: AbortSignal
5354
}
5455

55-
export type Event = Result["fullStream"] extends AsyncIterable<infer T> ? T : never
56-
5756
export interface Interface {
58-
readonly stream: (input: StreamInput) => Stream.Stream<Event, unknown>
57+
readonly stream: (input: StreamInput) => Stream.Stream<LLMEvent, unknown>
5958
}
6059

6160
export class Service extends Context.Service<Service, Interface>()("@opencode/LLM") {}
@@ -427,7 +426,11 @@ const live: Layer.Layer<
427426

428427
const result = yield* run({ ...input, abort: ctrl.signal })
429428

430-
return Stream.fromAsyncIterable(result.fullStream, (e) => (e instanceof Error ? e : new Error(String(e))))
429+
const state = LLMAISDK.adapterState()
430+
return Stream.fromAsyncIterable(result.fullStream, (e) => (e instanceof Error ? e : new Error(String(e)))).pipe(
431+
Stream.mapEffect((event) => LLMAISDK.toLLMEvents(state, event)),
432+
Stream.flatMap((events) => Stream.fromIterable(events)),
433+
)
431434
}),
432435
),
433436
)

0 commit comments

Comments
 (0)