1 change: 1 addition & 0 deletions bun.lock

Some generated files are not rendered by default.

15 changes: 13 additions & 2 deletions packages/llm/src/schema/events.ts
@@ -222,6 +222,9 @@ const llmEventTagged = Schema.Union([
]).pipe(Schema.toTaggedUnion("type"))

type WithID<Event extends { readonly id: unknown }, ID> = Omit<Event, "type" | "id"> & { readonly id: ID | string }
type WithUsage<Event extends { readonly usage?: Usage }> = Omit<Event, "type" | "usage"> & {
readonly usage?: Usage | ConstructorParameters<typeof Usage>[0]
}

const responseID = (value: ResponseID | string) => ResponseID.make(value)
const contentBlockID = (value: ContentBlockID | string) => ContentBlockID.make(value)
@@ -252,8 +255,16 @@ export const LLMEvent = Object.assign(llmEventTagged, {
toolCall: (input: WithID<ToolCall, ToolCallID>) => ToolCall.make({ ...input, id: toolCallID(input.id) }),
toolResult: (input: WithID<ToolResult, ToolCallID>) => ToolResult.make({ ...input, id: toolCallID(input.id) }),
toolError: (input: WithID<ToolError, ToolCallID>) => ToolError.make({ ...input, id: toolCallID(input.id) }),
stepFinish: StepFinish.make,
requestFinish: RequestFinish.make,
stepFinish: (input: WithUsage<StepFinish>) =>
StepFinish.make({
...input,
usage: input.usage === undefined ? undefined : input.usage instanceof Usage ? input.usage : new Usage(input.usage),
}),
requestFinish: (input: WithUsage<RequestFinish>) =>
RequestFinish.make({
...input,
usage: input.usage === undefined ? undefined : input.usage instanceof Usage ? input.usage : new Usage(input.usage),
}),
providerError: ProviderErrorEvent.make,
is: {
requestStart: llmEventTagged.guards["request-start"],
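
A usage sketch (not part of the diff): the widened constructors accept either a Usage instance or the plain record form implied by ConstructorParameters<typeof Usage>[0]. The field values below are illustrative, not taken from any call site in this PR.

// Hypothetical call sites for the widened helpers; Usage's exact
// constructor shape is inferred from the WithUsage alias above.
const step = LLMEvent.stepFinish({
  index: 0,
  reason: "stop",
  usage: { inputTokens: 120, outputTokens: 48 }, // plain record, wrapped via new Usage(...) internally
})
const request = LLMEvent.requestFinish({
  reason: "stop",
  usage: new Usage({ inputTokens: 120, outputTokens: 48 }), // an existing instance passes through unchanged
})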
1 change: 1 addition & 0 deletions packages/opencode/package.json
@@ -105,6 +105,7 @@
"@octokit/graphql": "9.0.2",
"@octokit/rest": "catalog:",
"@openauthjs/openauth": "catalog:",
"@opencode-ai/llm": "workspace:*",
"@opencode-ai/plugin": "workspace:*",
"@opencode-ai/script": "workspace:*",
"@opencode-ai/sdk": "workspace:*",
230 changes: 230 additions & 0 deletions packages/opencode/src/session/llm-ai-sdk.ts
@@ -0,0 +1,230 @@
import { FinishReason, LLMEvent, ProviderMetadata, ToolResultValue } from "@opencode-ai/llm"
import { Effect, Schema } from "effect"
import { type streamText } from "ai"
import { errorMessage } from "@/util/error"

type Result = Awaited<ReturnType<typeof streamText>>
type AISDKEvent = Result["fullStream"] extends AsyncIterable<infer T> ? T : never

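// Per-request adapter state: a step counter, counters used to synthesize
// text/reasoning block IDs when the provider omits them, and a
// toolCallId -> toolName map for correlating tool events across the stream.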
export function adapterState() {
return {
step: 0,
text: 0,
reasoning: 0,
currentTextID: undefined as string | undefined,
currentReasoningID: undefined as string | undefined,
toolNames: {} as Record<string, string>,
}
}

function finishReason(value: string | undefined): FinishReason {
return Schema.is(FinishReason)(value) ? value : "unknown"
}

function providerMetadata(value: unknown): ProviderMetadata | undefined {
return Schema.is(ProviderMetadata)(value) ? value : undefined
}

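// Normalizes the AI SDK usage payload: prefers the detailed token breakdowns
// (inputTokenDetails / outputTokenDetails) over the flat fields and drops
// undefined entries, e.g.
//   usage({ inputTokens: 10, inputTokenDetails: { cacheReadTokens: 4 } })
//     -> { inputTokens: 10, cacheReadInputTokens: 4 }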
function usage(value: unknown) {
if (!value || typeof value !== "object") return undefined
const item = value as {
inputTokens?: number
outputTokens?: number
totalTokens?: number
reasoningTokens?: number
cachedInputTokens?: number
inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number }
outputTokenDetails?: { reasoningTokens?: number }
}
const result = Object.fromEntries(
Object.entries({
inputTokens: item.inputTokens,
outputTokens: item.outputTokens,
totalTokens: item.totalTokens,
reasoningTokens: item.outputTokenDetails?.reasoningTokens ?? item.reasoningTokens,
cacheReadInputTokens: item.inputTokenDetails?.cacheReadTokens ?? item.cachedInputTokens,
cacheWriteInputTokens: item.inputTokenDetails?.cacheWriteTokens,
}).filter((entry) => entry[1] !== undefined),
)
return result
}

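// Reuse the provider-supplied ID when present; otherwise keep the current
// synthetic ID, or mint a new text-N / reasoning-N one so deltas group
// under a stable block.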
function currentTextID(state: ReturnType<typeof adapterState>, id: string | undefined) {
state.currentTextID = id ?? state.currentTextID ?? `text-${state.text++}`
return state.currentTextID
}

function currentReasoningID(state: ReturnType<typeof adapterState>, id: string | undefined) {
state.currentReasoningID = id ?? state.currentReasoningID ?? `reasoning-${state.reasoning++}`
return state.currentReasoningID
}

export function toLLMEvents(
state: ReturnType<typeof adapterState>,
event: AISDKEvent,
): Effect.Effect<ReadonlyArray<LLMEvent>, unknown> {
switch (event.type) {
case "start":
return Effect.succeed([])

case "start-step":
return Effect.succeed([LLMEvent.stepStart({ index: state.step })])

case "finish-step":
return Effect.sync(() => [
LLMEvent.stepFinish({
index: state.step++,
reason: finishReason(event.finishReason),
usage: usage(event.usage),
providerMetadata: providerMetadata(event.providerMetadata),
}),
])

case "finish":
return Effect.sync(() => {
state.toolNames = {}
return [
LLMEvent.requestFinish({
reason: finishReason(event.finishReason),
usage: usage(event.totalUsage),
}),
]
})

case "text-start":
return Effect.sync(() => {
state.currentTextID = currentTextID(state, event.id)
return [
LLMEvent.textStart({
id: state.currentTextID,
providerMetadata: providerMetadata(event.providerMetadata),
}),
]
})

case "text-delta":
return Effect.succeed([
LLMEvent.textDelta({
id: currentTextID(state, event.id),
text: event.text,
}),
])

case "text-end":
return Effect.succeed([
LLMEvent.textEnd({
id: currentTextID(state, event.id),
providerMetadata: providerMetadata(event.providerMetadata),
}),
])

case "reasoning-start":
return Effect.sync(() => {
state.currentReasoningID = currentReasoningID(state, event.id)
return [
LLMEvent.reasoningStart({
id: state.currentReasoningID,
providerMetadata: providerMetadata(event.providerMetadata),
}),
]
})

case "reasoning-delta":
return Effect.succeed([
LLMEvent.reasoningDelta({
id: currentReasoningID(state, event.id),
text: event.text,
}),
])

case "reasoning-end":
return Effect.sync(() => {
const id = currentReasoningID(state, event.id)
state.currentReasoningID = undefined
return [
LLMEvent.reasoningEnd({
id,
providerMetadata: providerMetadata(event.providerMetadata),
}),
]
})

case "tool-input-start":
return Effect.sync(() => {
state.toolNames[event.id] = event.toolName
return [
LLMEvent.toolInputStart({
id: event.id,
name: event.toolName,
providerMetadata: providerMetadata(event.providerMetadata),
}),
]
})

case "tool-input-delta":
return Effect.succeed([
LLMEvent.toolInputDelta({
id: event.id,
name: state.toolNames[event.id] ?? "unknown",
text: event.delta ?? "",
}),
])

case "tool-input-end":
return Effect.succeed([
LLMEvent.toolInputEnd({
id: event.id,
name: state.toolNames[event.id] ?? "unknown",
}),
])

case "tool-call":
return Effect.sync(() => {
state.toolNames[event.toolCallId] = event.toolName
return [
LLMEvent.toolCall({
id: event.toolCallId,
name: event.toolName,
input: event.input,
providerExecuted: "providerExecuted" in event ? event.providerExecuted : undefined,
providerMetadata: providerMetadata(event.providerMetadata),
}),
]
})

case "tool-result":
return Effect.sync(() => {
const name = state.toolNames[event.toolCallId] ?? "unknown"
delete state.toolNames[event.toolCallId]
return [
LLMEvent.toolResult({
id: event.toolCallId,
name,
result: ToolResultValue.make(event.output),
providerExecuted: "providerExecuted" in event ? event.providerExecuted : undefined,
}),
]
})

case "tool-error":
return Effect.sync(() => {
const name = state.toolNames[event.toolCallId] ?? "unknown"
delete state.toolNames[event.toolCallId]
return [
LLMEvent.toolError({
id: event.toolCallId,
name,
message: errorMessage(event.error),
}),
]
})

case "error":
return Effect.fail(event.error)

default:
return Effect.succeed([])
}
}

export * as LLMAISDK from "./llm-ai-sdk"
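
A hand-driven sketch of the adapter, for illustration only: the sample event mirrors the text-delta case above, and the cast stands in for AISDKEvent, which this module does not export.

import { Effect } from "effect"
import { LLMAISDK } from "./llm-ai-sdk"

const state = LLMAISDK.adapterState()
const events = await Effect.runPromise(
  LLMAISDK.toLLMEvents(state, { type: "text-delta", id: "t1", text: "hello" } as any),
)
// expected: one LLMEvent.textDelta with id "t1" and text "hello"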
13 changes: 8 additions & 5 deletions packages/opencode/src/session/llm.ts
@@ -3,6 +3,7 @@ import * as Log from "@opencode-ai/core/util/log"
import { Context, Effect, Layer, Record } from "effect"
import * as Stream from "effect/Stream"
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
import type { LLMEvent } from "@opencode-ai/llm"
import { mergeDeep } from "remeda"
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
import { ProviderTransform } from "@/provider/transform"
@@ -24,10 +25,10 @@ import { InstallationVersion } from "@opencode-ai/core/installation/version"
import { EffectBridge } from "@/effect/bridge"
import * as Option from "effect/Option"
import * as OtelTracer from "@effect/opentelemetry/Tracer"
import { LLMAISDK } from "./llm-ai-sdk"

const log = Log.create({ service: "llm" })
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX
type Result = Awaited<ReturnType<typeof streamText>>

// Avoid re-instantiating remeda's deep merge types in this hot LLM path; the runtime behavior is still mergeDeep.
const mergeOptions = (target: Record<string, any>, source: Record<string, any> | undefined): Record<string, any> =>
@@ -52,10 +53,8 @@ export type StreamRequest = StreamInput & {
abort: AbortSignal
}

export type Event = Result["fullStream"] extends AsyncIterable<infer T> ? T : never

export interface Interface {
readonly stream: (input: StreamInput) => Stream.Stream<Event, unknown>
readonly stream: (input: StreamInput) => Stream.Stream<LLMEvent, unknown>
}

export class Service extends Context.Service<Service, Interface>()("@opencode/LLM") {}
@@ -427,7 +426,11 @@ const live: Layer.Layer<

const result = yield* run({ ...input, abort: ctrl.signal })

return Stream.fromAsyncIterable(result.fullStream, (e) => (e instanceof Error ? e : new Error(String(e))))
const state = LLMAISDK.adapterState()
return Stream.fromAsyncIterable(result.fullStream, (e) => (e instanceof Error ? e : new Error(String(e)))).pipe(
Stream.mapEffect((event) => LLMAISDK.toLLMEvents(state, event)),
Stream.flatMap((events) => Stream.fromIterable(events)),
)
}),
),
)
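
With this pipe in place, consumers of the Service see LLMEvent values rather than raw AI SDK stream parts. A consumption sketch; the textDelta guard is an assumption, since only the requestStart guard is visible in the events.ts hunk above.

import { Effect } from "effect"
import * as Stream from "effect/Stream"
import { LLMEvent } from "@opencode-ai/llm"

const printText = (events: Stream.Stream<LLMEvent, unknown>) =>
  Stream.runForEach(events, (event) =>
    Effect.sync(() => {
      // assumed guard; mirrors is.requestStart from events.ts
      if (LLMEvent.is.textDelta(event)) process.stdout.write(event.text)
    }),
  )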