Skip to content

Commit 1d976b7

Browse files
committed
fix(openclaw): Anthropic Messages API SSE streaming, usage tracking, and error format
- MPP SSE streaming emits proper `event: {type}` framing for /v1/messages and omits `data: [DONE]` sentinel that breaks the Anthropic SDK parser
- Anthropic error responses use correct format for /v1/messages endpoints
- Fix MPP streaming usage tracking (token counts were always 0/0 because raw JSON payloads lacked the `data:` prefix the tracker expected)
- Accumulate Anthropic usage from message_start (input) and message_delta (output) events instead of relying on a single final chunk
- Extract Anthropic thinking mode (thinking.budget_tokens) for history
- MPP errors now show EVM wallet address instead of Solana address
- Add route.test.ts with 17 tests covering dual-format SSE tracker
1 parent 536137b commit 1d976b7

4 files changed

Lines changed: 589 additions & 146 deletions

File tree

packages/x402-proxy/CHANGELOG.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## [Unreleased]
99

10+
## [0.10.3] - 2026-04-01
11+
12+
### Fixed
13+
- Anthropic Messages API (`/v1/messages`) SSE streaming - proxy now emits proper `event: {type}` framing and omits the `data: [DONE]` sentinel that breaks the Anthropic SDK parser
14+
- Anthropic error responses use correct format (`{type:"error",error:{type,message}}`) instead of OpenAI format for `/v1/messages` endpoints
15+
- MPP streaming usage tracking - token counts were always 0/0 because raw JSON payloads lacked the `data:` prefix the tracker expected
16+
- Anthropic usage accumulation from `message_start` (input tokens) and `message_delta` (output tokens) events instead of relying on a single final chunk
17+
- Anthropic thinking mode (`thinking.budget_tokens`) now extracted from request body for history tracking
18+
- MPP error messages and payment history now show the EVM wallet address instead of the Solana address
19+
1020
## [0.10.2] - 2026-04-01
1121

1222
### Fixed

packages/x402-proxy/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "x402-proxy",
3-
"version": "0.10.2",
3+
"version": "0.10.3",
44
"description": "curl for x402 paid APIs. Auto-pays any endpoint on Base, Solana, and Tempo. Also works as an OpenClaw plugin.",
55
"type": "module",
66
"sideEffects": false,
Lines changed: 351 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,351 @@
1+
import type { ServerResponse } from "node:http";
2+
import { describe, expect, it } from "vitest";
3+
import { createSseTracker, writeErrorResponse } from "./route.js";
4+
5+
// ── createSseTracker ────────────────────────────────────────────────
6+
7+
describe("createSseTracker", () => {
8+
// ── OpenAI format ─────────────────────────────────────────────────
9+
10+
describe("OpenAI format via pushJson (MPP path)", () => {
11+
it("extracts usage from final chunk with usage field", () => {
12+
const tracker = createSseTracker();
13+
tracker.pushJson('{"choices":[{"delta":{"content":"Hi"}}],"model":"gpt-4"}');
14+
tracker.pushJson(
15+
'{"choices":[],"model":"gpt-4","usage":{"prompt_tokens":10,"completion_tokens":5}}',
16+
);
17+
expect(tracker.result).toEqual({
18+
model: "gpt-4",
19+
inputTokens: 10,
20+
outputTokens: 5,
21+
reasoningTokens: undefined,
22+
cacheRead: undefined,
23+
cacheWrite: undefined,
24+
});
25+
});
26+
27+
it("extracts reasoning tokens and cache details", () => {
28+
const tracker = createSseTracker();
29+
tracker.pushJson(
30+
JSON.stringify({
31+
model: "o1",
32+
usage: {
33+
prompt_tokens: 100,
34+
completion_tokens: 50,
35+
prompt_tokens_details: { cached_tokens: 80, cache_creation_input_tokens: 20 },
36+
completion_tokens_details: { reasoning_tokens: 30 },
37+
},
38+
}),
39+
);
40+
expect(tracker.result).toEqual({
41+
model: "o1",
42+
inputTokens: 100,
43+
outputTokens: 50,
44+
reasoningTokens: 30,
45+
cacheRead: 80,
46+
cacheWrite: 20,
47+
});
48+
});
49+
});
50+
51+
describe("OpenAI format via push (x402 raw SSE path)", () => {
52+
it("extracts usage from SSE-framed data lines", () => {
53+
const tracker = createSseTracker();
54+
tracker.push('data: {"choices":[{"delta":{"content":"Hi"}}],"model":"gpt-4"}\n\n');
55+
tracker.push(
56+
'data: {"choices":[],"model":"gpt-4","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\ndata: [DONE]\n\n',
57+
);
58+
expect(tracker.result).toEqual({
59+
model: "gpt-4",
60+
inputTokens: 10,
61+
outputTokens: 5,
62+
reasoningTokens: undefined,
63+
cacheRead: undefined,
64+
cacheWrite: undefined,
65+
});
66+
});
67+
68+
it("handles chunks split across push calls", () => {
69+
const tracker = createSseTracker();
70+
// Split in the middle of a data line
71+
tracker.push('data: {"model":"gpt-4","usa');
72+
tracker.push('ge":{"prompt_tokens":7,"completion_tokens":3}}\n\ndata: [DONE]\n\n');
73+
expect(tracker.result).toEqual({
74+
model: "gpt-4",
75+
inputTokens: 7,
76+
outputTokens: 3,
77+
reasoningTokens: undefined,
78+
cacheRead: undefined,
79+
cacheWrite: undefined,
80+
});
81+
});
82+
});
83+
84+
// ── Anthropic format ──────────────────────────────────────────────
85+
86+
describe("Anthropic streaming via pushJson (MPP path)", () => {
87+
it("accumulates usage from message_start and message_delta", () => {
88+
const tracker = createSseTracker();
89+
tracker.pushJson(
90+
JSON.stringify({
91+
type: "message_start",
92+
message: {
93+
model: "claude-opus-4-6",
94+
usage: { input_tokens: 100, output_tokens: 0 },
95+
},
96+
}),
97+
);
98+
tracker.pushJson(
99+
JSON.stringify({
100+
type: "content_block_start",
101+
index: 0,
102+
content_block: { type: "text", text: "" },
103+
}),
104+
);
105+
tracker.pushJson(
106+
JSON.stringify({
107+
type: "content_block_delta",
108+
index: 0,
109+
delta: { type: "text_delta", text: "Hello" },
110+
}),
111+
);
112+
tracker.pushJson(
113+
JSON.stringify({
114+
type: "message_delta",
115+
delta: { stop_reason: "end_turn" },
116+
usage: { output_tokens: 15 },
117+
}),
118+
);
119+
tracker.pushJson(JSON.stringify({ type: "message_stop" }));
120+
121+
expect(tracker.result).toEqual({
122+
model: "claude-opus-4-6",
123+
inputTokens: 100,
124+
outputTokens: 15,
125+
cacheRead: undefined,
126+
cacheWrite: undefined,
127+
});
128+
});
129+
130+
it("extracts cache fields from message_start", () => {
131+
const tracker = createSseTracker();
132+
tracker.pushJson(
133+
JSON.stringify({
134+
type: "message_start",
135+
message: {
136+
model: "claude-sonnet-4-6",
137+
usage: {
138+
input_tokens: 200,
139+
output_tokens: 0,
140+
cache_creation_input_tokens: 50,
141+
cache_read_input_tokens: 150,
142+
},
143+
},
144+
}),
145+
);
146+
tracker.pushJson(
147+
JSON.stringify({
148+
type: "message_delta",
149+
delta: { stop_reason: "end_turn" },
150+
usage: { output_tokens: 20 },
151+
}),
152+
);
153+
154+
expect(tracker.result).toEqual({
155+
model: "claude-sonnet-4-6",
156+
inputTokens: 200,
157+
outputTokens: 20,
158+
cacheRead: 150,
159+
cacheWrite: 50,
160+
});
161+
});
162+
});
163+
164+
describe("Anthropic streaming via push (x402 raw SSE path)", () => {
165+
it("parses event-framed Anthropic SSE", () => {
166+
const tracker = createSseTracker();
167+
tracker.push(
168+
[
169+
"event: message_start",
170+
'data: {"type":"message_start","message":{"model":"claude-opus-4-6","usage":{"input_tokens":50}}}',
171+
"",
172+
"event: content_block_delta",
173+
'data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hi"}}',
174+
"",
175+
"event: message_delta",
176+
'data: {"type":"message_delta","delta":{"stop_reason":"end_turn"},"usage":{"output_tokens":8}}',
177+
"",
178+
"event: message_stop",
179+
'data: {"type":"message_stop"}',
180+
"",
181+
"",
182+
].join("\n"),
183+
);
184+
185+
expect(tracker.result).toEqual({
186+
model: "claude-opus-4-6",
187+
inputTokens: 50,
188+
outputTokens: 8,
189+
cacheRead: undefined,
190+
cacheWrite: undefined,
191+
});
192+
});
193+
});
194+
195+
describe("Anthropic non-streaming via pushJson", () => {
196+
it("extracts usage from complete message response", () => {
197+
const tracker = createSseTracker();
198+
tracker.pushJson(
199+
JSON.stringify({
200+
type: "message",
201+
id: "msg_123",
202+
role: "assistant",
203+
model: "claude-opus-4-6",
204+
content: [{ type: "text", text: "Hello" }],
205+
usage: { input_tokens: 42, output_tokens: 7 },
206+
}),
207+
);
208+
209+
expect(tracker.result).toEqual({
210+
model: "claude-opus-4-6",
211+
inputTokens: 42,
212+
outputTokens: 7,
213+
cacheRead: undefined,
214+
cacheWrite: undefined,
215+
});
216+
});
217+
218+
it("extracts cache fields from non-streaming response", () => {
219+
const tracker = createSseTracker();
220+
tracker.pushJson(
221+
JSON.stringify({
222+
type: "message",
223+
model: "claude-sonnet-4-6",
224+
usage: {
225+
input_tokens: 100,
226+
output_tokens: 10,
227+
cache_creation_input_tokens: 30,
228+
cache_read_input_tokens: 70,
229+
},
230+
}),
231+
);
232+
233+
expect(tracker.result).toEqual({
234+
model: "claude-sonnet-4-6",
235+
inputTokens: 100,
236+
outputTokens: 10,
237+
cacheRead: 70,
238+
cacheWrite: 30,
239+
});
240+
});
241+
});
242+
243+
// ── Edge cases ────────────────────────────────────────────────────
244+
245+
describe("edge cases", () => {
246+
it("returns undefined for empty tracker", () => {
247+
const tracker = createSseTracker();
248+
expect(tracker.result).toBeUndefined();
249+
});
250+
251+
it("returns undefined for malformed JSON", () => {
252+
const tracker = createSseTracker();
253+
tracker.pushJson("not json at all");
254+
expect(tracker.result).toBeUndefined();
255+
});
256+
257+
it("returns undefined for JSON without usage or type fields", () => {
258+
const tracker = createSseTracker();
259+
tracker.pushJson('{"foo":"bar"}');
260+
expect(tracker.result).toBeUndefined();
261+
});
262+
263+
it("does not misdetect OpenAI chunks as Anthropic", () => {
264+
const tracker = createSseTracker();
265+
// Full realistic OpenAI streaming sequence
266+
tracker.pushJson(
267+
'{"id":"chatcmpl-abc","object":"chat.completion.chunk","model":"gpt-4","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}',
268+
);
269+
tracker.pushJson(
270+
'{"id":"chatcmpl-abc","object":"chat.completion.chunk","model":"gpt-4","choices":[{"index":0,"delta":{"content":"Hi"},"finish_reason":null}]}',
271+
);
272+
tracker.pushJson(
273+
'{"id":"chatcmpl-abc","object":"chat.completion.chunk","model":"gpt-4","choices":[],"usage":{"prompt_tokens":12,"completion_tokens":3}}',
274+
);
275+
// Should use OpenAI path (has reasoningTokens field), not Anthropic
276+
expect(tracker.result).toEqual({
277+
model: "gpt-4",
278+
inputTokens: 12,
279+
outputTokens: 3,
280+
reasoningTokens: undefined,
281+
cacheRead: undefined,
282+
cacheWrite: undefined,
283+
});
284+
});
285+
286+
it("ignores [DONE] sentinel in SSE stream", () => {
287+
const tracker = createSseTracker();
288+
tracker.push(
289+
'data: {"model":"gpt-4","usage":{"prompt_tokens":5,"completion_tokens":2}}\n\ndata: [DONE]\n\n',
290+
);
291+
expect(tracker.result?.inputTokens).toBe(5);
292+
});
293+
});
294+
});
295+
296+
// ── writeErrorResponse ──────────────────────────────────────────────
297+
298+
describe("writeErrorResponse", () => {
299+
function mockRes() {
300+
let body = "";
301+
let headStatus = 0;
302+
let headHeaders: Record<string, string> = {};
303+
return {
304+
writeHead(status: number, headers: Record<string, string>) {
305+
headStatus = status;
306+
headHeaders = headers;
307+
},
308+
end(data: string) {
309+
body = data;
310+
},
311+
get _status() {
312+
return headStatus;
313+
},
314+
get _headers() {
315+
return headHeaders;
316+
},
317+
get _body() {
318+
return body;
319+
},
320+
} as unknown as ServerResponse & {
321+
_status: number;
322+
_headers: Record<string, string>;
323+
_body: string;
324+
};
325+
}
326+
327+
it("writes OpenAI error format when isAnthropicFormat is false", () => {
328+
const res = mockRes();
329+
writeErrorResponse(res, 402, "Payment failed", "payment_error", "payment_failed", false);
330+
expect(res._status).toBe(402);
331+
expect(JSON.parse(res._body)).toEqual({
332+
error: { message: "Payment failed", type: "payment_error", code: "payment_failed" },
333+
});
334+
});
335+
336+
it("writes Anthropic error format when isAnthropicFormat is true", () => {
337+
const res = mockRes();
338+
writeErrorResponse(res, 402, "Payment failed", "payment_error", "payment_failed", true);
339+
expect(res._status).toBe(402);
340+
expect(JSON.parse(res._body)).toEqual({
341+
type: "error",
342+
error: { type: "payment_error", message: "Payment failed" },
343+
});
344+
});
345+
346+
it("sets Content-Type to application/json", () => {
347+
const res = mockRes();
348+
writeErrorResponse(res, 500, "err", "t", "c", false);
349+
expect(res._headers["Content-Type"]).toBe("application/json");
350+
});
351+
});

0 commit comments

Comments
 (0)