|
| 1 | +#![cfg(all( |
| 2 | + feature = "transport-streamable-http-client", |
| 3 | + feature = "transport-streamable-http-client-reqwest", |
| 4 | + feature = "transport-streamable-http-server", |
| 5 | + not(feature = "local") |
| 6 | +))] |
| 7 | + |
| 8 | +use std::time::Instant; |
| 9 | + |
| 10 | +use rmcp::{ |
| 11 | + ServerHandler, ServiceExt, |
| 12 | + handler::server::{router::tool::ToolRouter, wrapper::Parameters}, |
| 13 | + model::{CallToolRequestParams, ClientInfo, ServerCapabilities, ServerInfo}, |
| 14 | + schemars, tool, tool_handler, tool_router, |
| 15 | + transport::{ |
| 16 | + StreamableHttpClientTransport, |
| 17 | + streamable_http_client::StreamableHttpClientTransportConfig, |
| 18 | + streamable_http_server::{ |
| 19 | + StreamableHttpServerConfig, StreamableHttpService, session::local::LocalSessionManager, |
| 20 | + }, |
| 21 | + }, |
| 22 | +}; |
| 23 | +use tokio_util::sync::CancellationToken; |
| 24 | + |
/// Arguments for the `sum` tool: the two integer addends.
#[derive(Debug, serde::Deserialize, schemars::JsonSchema)]
struct SumRequest {
    /// First addend.
    a: i32,
    /// Second addend.
    b: i32,
}
| 30 | + |
/// Minimal MCP server exposing a single `sum` tool; serves as the
/// counterpart for the streamable-HTTP client latency test below.
#[derive(Debug, Clone)]
struct EchoServer {
    // Populated from the router generated by the `#[tool_router]` macro.
    tool_router: ToolRouter<Self>,
}
| 35 | + |
| 36 | +impl EchoServer { |
| 37 | + fn new() -> Self { |
| 38 | + Self { |
| 39 | + tool_router: Self::tool_router(), |
| 40 | + } |
| 41 | + } |
| 42 | +} |
| 43 | + |
| 44 | +#[tool_router] |
| 45 | +impl EchoServer { |
| 46 | + #[tool(description = "Sum two numbers")] |
| 47 | + fn sum(&self, Parameters(SumRequest { a, b }): Parameters<SumRequest>) -> String { |
| 48 | + (a + b).to_string() |
| 49 | + } |
| 50 | +} |
| 51 | + |
| 52 | +#[tool_handler(router = self.tool_router)] |
| 53 | +impl ServerHandler for EchoServer { |
| 54 | + fn get_info(&self) -> ServerInfo { |
| 55 | + ServerInfo::new(ServerCapabilities::builder().enable_tools().build()) |
| 56 | + } |
| 57 | +} |
| 58 | + |
| 59 | +/// Verify that subsequent tool calls do not regress in latency due to |
| 60 | +/// HTTP/1.1 connection pool exhaustion. Before the fix, each POST SSE |
| 61 | +/// response was dropped without fully consuming the body, preventing |
| 62 | +/// connection reuse and forcing a new TCP connection (~40 ms) per call. |
| 63 | +#[tokio::test] |
| 64 | +async fn test_subsequent_tool_calls_reuse_connections() -> anyhow::Result<()> { |
| 65 | + let ct = CancellationToken::new(); |
| 66 | + |
| 67 | + let service: StreamableHttpService<EchoServer, LocalSessionManager> = |
| 68 | + StreamableHttpService::new( |
| 69 | + || Ok(EchoServer::new()), |
| 70 | + Default::default(), |
| 71 | + StreamableHttpServerConfig::default() |
| 72 | + .with_sse_keep_alive(None) |
| 73 | + .with_cancellation_token(ct.child_token()), |
| 74 | + ); |
| 75 | + |
| 76 | + let router = axum::Router::new().nest_service("/mcp", service); |
| 77 | + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?; |
| 78 | + let addr = listener.local_addr()?; |
| 79 | + |
| 80 | + let server_handle = tokio::spawn({ |
| 81 | + let ct = ct.clone(); |
| 82 | + async move { |
| 83 | + let _ = axum::serve(listener, router) |
| 84 | + .with_graceful_shutdown(async move { ct.cancelled_owned().await }) |
| 85 | + .await; |
| 86 | + } |
| 87 | + }); |
| 88 | + |
| 89 | + let transport = StreamableHttpClientTransport::from_config( |
| 90 | + StreamableHttpClientTransportConfig::with_uri(format!("http://{addr}/mcp")), |
| 91 | + ); |
| 92 | + let client = ClientInfo::default().serve(transport).await?; |
| 93 | + |
| 94 | + // Warm up: first call may include one-time setup costs. |
| 95 | + let args: serde_json::Map<String, serde_json::Value> = |
| 96 | + serde_json::from_value(serde_json::json!({"a": 1, "b": 2}))?; |
| 97 | + let _ = client |
| 98 | + .call_tool(CallToolRequestParams::new("sum").with_arguments(args)) |
| 99 | + .await?; |
| 100 | + |
| 101 | + // Measure subsequent calls. |
| 102 | + let mut durations = Vec::new(); |
| 103 | + for i in 0..5i32 { |
| 104 | + let args: serde_json::Map<String, serde_json::Value> = |
| 105 | + serde_json::from_value(serde_json::json!({"a": i, "b": i + 1}))?; |
| 106 | + let start = Instant::now(); |
| 107 | + let result = client |
| 108 | + .call_tool(CallToolRequestParams::new("sum").with_arguments(args)) |
| 109 | + .await?; |
| 110 | + let elapsed = start.elapsed(); |
| 111 | + durations.push(elapsed); |
| 112 | + |
| 113 | + assert!( |
| 114 | + result.is_error != Some(true), |
| 115 | + "tool call should succeed, got error: {:?}", |
| 116 | + result.content |
| 117 | + ); |
| 118 | + } |
| 119 | + |
| 120 | + let _ = client.cancel().await; |
| 121 | + ct.cancel(); |
| 122 | + server_handle.await?; |
| 123 | + |
| 124 | + // With connection reuse, localhost calls should complete well under 20 ms. |
| 125 | + // Before the fix, they consistently took ~42 ms due to new TCP connections. |
| 126 | + let max_allowed = std::time::Duration::from_millis(20); |
| 127 | + for (i, d) in durations.iter().enumerate() { |
| 128 | + assert!( |
| 129 | + *d < max_allowed, |
| 130 | + "call {} took {:?}, expected < {:?} (connection reuse may be broken)", |
| 131 | + i + 1, |
| 132 | + d, |
| 133 | + max_allowed, |
| 134 | + ); |
| 135 | + } |
| 136 | + |
| 137 | + Ok(()) |
| 138 | +} |