Skip to content

Commit 9dc6feb

Browse files
committed
feat: MCP app sampling support via stock SDK types
Alternative to #477. Instead of forking a stripped-down sampling schema, this reuses the stock `CreateMessageRequest` / `CreateMessageResult` / `CreateMessageResultWithTools` types from `@modelcontextprotocol/sdk` directly — same pattern already used for `tools/call`. - Spec: `sampling/createMessage` listed under `### Standard MCP Messages`, `sampling?: { tools?: {} }` added to `HostCapabilities` (mirrors MCP `ClientCapabilities.sampling` shape for easy pass-through). - SDK: `App.createSamplingMessage()` with overloads that narrow the return type based on whether `params.tools` is set; `AppBridge.oncreatesamplingmessage` setter; `CreateMessageRequest`/`CreateMessageResult*` added to the `AppRequest`/`AppResult` unions. - Picks up SEP-1577 tool-calling support (`tools`, `toolChoice`, `tool_use` content blocks, `stopReason: "toolUse"`, array content) for free — unblocks the "nested agents" use case motivated by the original PR. https://claude.ai/code/session_01ENGWTtsfcyP4S6fTWtUMAh
1 parent 0af4cdc commit 9dc6feb

File tree

10 files changed

+316
-1
lines changed

10 files changed

+316
-1
lines changed

specification/draft/apps.mdx

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -516,6 +516,10 @@ UI iframes can use the following subset of standard MCP protocol messages:
516516

517517
- `resources/read` - Read resource content
518518

519+
**Sampling:**
520+
521+
- `sampling/createMessage` - Request an LLM completion from the host (uses the standard MCP [`CreateMessageRequest`](https://modelcontextprotocol.io/specification/2025-11-25/client/sampling) / `CreateMessageResult` types, including SEP-1577 `tools` / `toolChoice` / `tool_use` content blocks). The host has full discretion over model selection and SHOULD apply rate limiting, cost controls, and user approval (human-in-the-loop). Apps MUST check `hostCapabilities.sampling` before sending this request, and `hostCapabilities.sampling.tools` before including `tools` in the request params.
522+
519523
**Notifications:**
520524

521525
- `notifications/message` - Log messages to host
@@ -662,6 +666,14 @@ interface HostCapabilities {
662666
};
663667
/** Host accepts log messages. */
664668
logging?: {};
669+
/**
670+
* Host supports LLM sampling (sampling/createMessage) from the view.
671+
* Mirrors MCP ClientCapabilities.sampling so hosts can pass it through.
672+
*/
673+
sampling?: {
674+
/** Host supports tool use via `tools` and `toolChoice` params (SEP-1577). */
675+
tools?: {};
676+
};
665677
/** Sandbox configuration applied by the host. */
666678
sandbox?: {
667679
/** Permissions granted by the host (camera, microphone, geolocation, clipboard-write). */

src/app-bridge.examples.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@ import type { Transport } from "@modelcontextprotocol/sdk/shared/transport.js";
1212
import {
1313
CallToolResult,
1414
CallToolResultSchema,
15+
CreateMessageRequest,
16+
CreateMessageResult,
1517
ListResourcesResultSchema,
1618
ReadResourceResultSchema,
1719
ListPromptsResultSchema,
@@ -228,6 +230,26 @@ function AppBridge_oncalltool_forwardToServer(
228230
//#endregion AppBridge_oncalltool_forwardToServer
229231
}
230232

233+
/**
234+
* Example: Forward sampling requests to your LLM provider.
235+
*/
236+
function AppBridge_oncreatesamplingmessage_forwardToLlm(
237+
bridge: AppBridge,
238+
myLlmProvider: {
239+
complete: (
240+
params: CreateMessageRequest["params"],
241+
opts: { signal: AbortSignal },
242+
) => Promise<CreateMessageResult>;
243+
},
244+
) {
245+
//#region AppBridge_oncreatesamplingmessage_forwardToLlm
246+
bridge.oncreatesamplingmessage = async (params, extra) => {
247+
// Apply rate limiting, user approval, cost controls here
248+
return await myLlmProvider.complete(params, { signal: extra.signal });
249+
};
250+
//#endregion AppBridge_oncreatesamplingmessage_forwardToLlm
251+
}
252+
231253
/**
232254
* Example: Forward list resources requests to the MCP server.
233255
*/

src/app-bridge.test.ts

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -712,6 +712,43 @@ describe("App <-> AppBridge integration", () => {
712712
expect(result.content).toEqual(resultContent);
713713
});
714714

715+
it("oncreatesamplingmessage setter registers handler for sampling/createMessage requests", async () => {
716+
// Re-create bridge with sampling capability so App's capability check passes
717+
bridge = new AppBridge(null, testHostInfo, {
718+
...testHostCapabilities,
719+
sampling: { tools: {} },
720+
});
721+
722+
const receivedParams: unknown[] = [];
723+
bridge.oncreatesamplingmessage = async (params) => {
724+
receivedParams.push(params);
725+
return {
726+
role: "assistant",
727+
content: { type: "text", text: "Hello from the model" },
728+
model: "test-model",
729+
stopReason: "endTurn",
730+
};
731+
};
732+
733+
await bridge.connect(bridgeTransport);
734+
await app.connect(appTransport);
735+
736+
expect(app.getHostCapabilities()?.sampling?.tools).toEqual({});
737+
738+
const result = await app.createSamplingMessage({
739+
messages: [{ role: "user", content: { type: "text", text: "Hi" } }],
740+
maxTokens: 50,
741+
});
742+
743+
expect(receivedParams).toHaveLength(1);
744+
expect(receivedParams[0]).toMatchObject({ maxTokens: 50 });
745+
expect(result.model).toEqual("test-model");
746+
expect(result.content).toEqual({
747+
type: "text",
748+
text: "Hello from the model",
749+
});
750+
});
751+
715752
it("ondownloadfile setter registers handler for ui/download-file requests", async () => {
716753
const downloadParams = {
717754
contents: [

src/app-bridge.ts

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,10 @@ import {
55
CallToolRequestSchema,
66
CallToolResult,
77
CallToolResultSchema,
8+
CreateMessageRequest,
9+
CreateMessageRequestSchema,
10+
CreateMessageResult,
11+
CreateMessageResultWithTools,
812
EmptyResult,
913
Implementation,
1014
ListPromptsRequest,
@@ -833,6 +837,49 @@ export class AppBridge extends Protocol<
833837
});
834838
}
835839

840+
/**
841+
* Register a handler for LLM sampling requests from the view.
842+
*
843+
* The view sends standard MCP `sampling/createMessage` requests to obtain
844+
* LLM completions via the host's model connection. The host has full
845+
* discretion over which model to use and SHOULD apply rate limiting,
846+
* cost controls, and user approval (human-in-the-loop) before sampling.
847+
*
848+
* Hosts that register this handler SHOULD advertise `sampling` (and
849+
* `sampling.tools` if tool-calling is supported) in
850+
* {@link McpUiHostCapabilities `McpUiHostCapabilities`}.
851+
*
852+
* @param callback - Handler that receives `CreateMessageRequest` params and
853+
* returns a `CreateMessageResult` (or `CreateMessageResultWithTools` when
854+
* `params.tools` was provided)
855+
* - `params` - Standard MCP sampling params (messages, maxTokens, tools, etc.)
856+
* - `extra` - Request metadata (abort signal, session info)
857+
*
858+
* @example Forward to your LLM provider
859+
* ```ts source="./app-bridge.examples.ts#AppBridge_oncreatesamplingmessage_forwardToLlm"
860+
* bridge.oncreatesamplingmessage = async (params, extra) => {
861+
* // Apply rate limiting, user approval, cost controls here
862+
* return await myLlmProvider.complete(params, { signal: extra.signal });
863+
* };
864+
* ```
865+
*
866+
* @see `CreateMessageRequest` from @modelcontextprotocol/sdk for the request type
867+
* @see `CreateMessageResult` / `CreateMessageResultWithTools` from @modelcontextprotocol/sdk for result types
868+
*/
869+
set oncreatesamplingmessage(
870+
callback: (
871+
params: CreateMessageRequest["params"],
872+
extra: RequestHandlerExtra,
873+
) => Promise<CreateMessageResult | CreateMessageResultWithTools>,
874+
) {
875+
this.setRequestHandler(
876+
CreateMessageRequestSchema,
877+
async (request, extra) => {
878+
return callback(request.params, extra);
879+
},
880+
);
881+
}
882+
836883
/**
837884
* Notify the view that the MCP server's tool list has changed.
838885
*

src/app.examples.ts

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -297,6 +297,54 @@ async function App_callServerTool_fetchWeather(app: App) {
297297
//#endregion App_callServerTool_fetchWeather
298298
}
299299

300+
/**
301+
* Example: Simple LLM completion via host sampling.
302+
*/
303+
async function App_createSamplingMessage_simple(app: App) {
304+
//#region App_createSamplingMessage_simple
305+
const result = await app.createSamplingMessage({
306+
messages: [
307+
{
308+
role: "user",
309+
content: { type: "text", text: "Summarize this in one line." },
310+
},
311+
],
312+
maxTokens: 100,
313+
});
314+
console.log(result.content);
315+
//#endregion App_createSamplingMessage_simple
316+
}
317+
318+
/**
319+
* Example: Agentic loop with tools (requires host sampling.tools capability).
320+
*/
321+
async function App_createSamplingMessage_withTools(
322+
app: App,
323+
messages: import("@modelcontextprotocol/sdk/types.js").SamplingMessage[],
324+
) {
325+
//#region App_createSamplingMessage_withTools
326+
if (!app.getHostCapabilities()?.sampling?.tools) return;
327+
328+
const result = await app.createSamplingMessage({
329+
messages,
330+
maxTokens: 1024,
331+
tools: [
332+
{
333+
name: "get_weather",
334+
description: "Get the current weather",
335+
inputSchema: {
336+
type: "object",
337+
properties: { city: { type: "string" } },
338+
},
339+
},
340+
],
341+
});
342+
if (result.stopReason === "toolUse") {
343+
// result.content may be an array containing tool_use blocks
344+
}
345+
//#endregion App_createSamplingMessage_withTools
346+
}
347+
300348
/**
301349
* Example: Send a text message from user interaction.
302350
*/

src/app.ts

Lines changed: 91 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,11 @@ import {
99
CallToolRequestSchema,
1010
CallToolResult,
1111
CallToolResultSchema,
12+
CreateMessageRequest,
13+
CreateMessageResult,
14+
CreateMessageResultSchema,
15+
CreateMessageResultWithTools,
16+
CreateMessageResultWithToolsSchema,
1217
EmptyResultSchema,
1318
Implementation,
1419
ListToolsRequest,
@@ -638,7 +643,15 @@ export class App extends Protocol<AppRequest, AppNotification, AppResult> {
638643
* @internal
639644
*/
640645
assertCapabilityForMethod(method: AppRequest["method"]): void {
641-
// TODO
646+
switch (method) {
647+
case "sampling/createMessage":
648+
if (!this._hostCapabilities?.sampling) {
649+
throw new Error(
650+
`Host does not support sampling (required for ${method})`,
651+
);
652+
}
653+
break;
654+
}
642655
}
643656

644657
/**
@@ -739,6 +752,83 @@ export class App extends Protocol<AppRequest, AppNotification, AppResult> {
739752
);
740753
}
741754

755+
/**
756+
* Request an LLM completion from the host (standard MCP `sampling/createMessage`).
757+
*
758+
* Enables the app to use the host's model connection for completions. The host
759+
* has full discretion over which model to select and MAY modify or reject the
760+
* request (human-in-the-loop). Check {@link getHostCapabilities `getHostCapabilities`}`()?.sampling`
761+
* before calling — hosts without this capability will reject the request.
762+
*
763+
* This method reuses the stock MCP `CreateMessageRequest` shape. When `params.tools`
764+
* is provided, the result is parsed with the extended schema that permits
765+
* `stopReason: "toolUse"` and array content containing `tool_use` blocks.
766+
*
767+
* @param params - Standard MCP `CreateMessageRequest` params (messages, maxTokens,
768+
* systemPrompt, temperature, modelPreferences, tools, toolChoice, etc.)
769+
* @param options - Request options (timeout, abort signal)
770+
* @returns `CreateMessageResult` (single content block) or `CreateMessageResultWithTools`
771+
* (array content, may include `tool_use` blocks) depending on whether `tools` was set
772+
*
773+
* @throws {Error} If the host rejects the request or does not support sampling
774+
* @throws {Error} If the request times out or the connection is lost
775+
*
776+
* @example Simple completion
777+
* ```ts source="./app.examples.ts#App_createSamplingMessage_simple"
778+
* const result = await app.createSamplingMessage({
779+
* messages: [
780+
* { role: "user", content: { type: "text", text: "Summarize this in one line." } },
781+
* ],
782+
* maxTokens: 100,
783+
* });
784+
* console.log(result.content);
785+
* ```
786+
*
787+
* @example Agentic loop with tools
788+
* ```ts source="./app.examples.ts#App_createSamplingMessage_withTools"
789+
* if (!app.getHostCapabilities()?.sampling?.tools) return;
790+
*
791+
* const result = await app.createSamplingMessage({
792+
* messages,
793+
* maxTokens: 1024,
794+
* tools: [
795+
* {
796+
* name: "get_weather",
797+
* description: "Get the current weather",
798+
* inputSchema: { type: "object", properties: { city: { type: "string" } } },
799+
* },
800+
* ],
801+
* });
802+
* if (result.stopReason === "toolUse") {
803+
* // result.content may be an array containing tool_use blocks
804+
* }
805+
* ```
806+
*
807+
* @see `CreateMessageRequest` from @modelcontextprotocol/sdk for the request type
808+
* @see `CreateMessageResult` / `CreateMessageResultWithTools` from @modelcontextprotocol/sdk for result types
809+
*/
810+
async createSamplingMessage(
811+
params: CreateMessageRequest["params"] & { tools?: undefined },
812+
options?: RequestOptions,
813+
): Promise<CreateMessageResult>;
814+
async createSamplingMessage(
815+
params: CreateMessageRequest["params"],
816+
options?: RequestOptions,
817+
): Promise<CreateMessageResultWithTools>;
818+
async createSamplingMessage(
819+
params: CreateMessageRequest["params"],
820+
options?: RequestOptions,
821+
): Promise<CreateMessageResult | CreateMessageResultWithTools> {
822+
const resultSchema = params.tools
823+
? CreateMessageResultWithToolsSchema
824+
: CreateMessageResultSchema;
825+
return await this.request(
826+
{ method: "sampling/createMessage", params },
827+
resultSchema,
828+
options,
829+
);
830+
}
831+
742832
/**
743833
* Send a message to the host's chat interface.
744834
*

src/generated/schema.json

Lines changed: 26 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/generated/schema.ts

Lines changed: 18 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)