Skip to content

Commit 1888a10

Browse files
committed
test(langchain): add real HTTP server support for agent invoke tests
Add _start_server function to launch actual FastAPI servers for integration testing, modify request functions to accept both app objects and server URLs, and update test fixtures to use real HTTP connections instead of ASGI transport. This enables more realistic testing of agent invoke methods using actual HTTP requests while maintaining backward compatibility with ASGI transport for non-streaming tests. The changes affect test_agent_invoke_methods.py with new server startup logic, enhanced request handling, and proper server cleanup in test fixtures. test(langchain): 为代理调用测试添加真实 HTTP 服务器支持 添加 _start_server 函数以启动实际的 FastAPI 服务器进行集成测试, 修改请求函数以接受 app 对象和服务器 URL, 并更新测试 fixture 以使用真实的 HTTP 连接而不是 ASGI 传输。 这使得使用实际 HTTP 请求对代理调用方法进行更真实的测试 同时保持与 ASGI 传输的向后兼容性以用于非流式测试。 这些更改影响 test_agent_invoke_methods.py 中的新服务器启动逻辑、 增强的请求处理和测试 fixture 中的适当服务器清理。 Change-Id: Icb5b09c2d588021cf72749658b1d07237fc1ef9c Signed-off-by: OhYee <oyohyee@oyohyee.com>
1 parent eb0acdb commit 1888a10

File tree

1 file changed

+137
-27
lines changed

1 file changed

+137
-27
lines changed

tests/unittests/integration/langchain/test_agent_invoke_methods.py

Lines changed: 137 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,39 @@ def _sse(data: Dict[str, Any]) -> str:
8181
return f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
8282

8383

84+
def _start_server(app: FastAPI) -> tuple:
    """Start a real uvicorn server for *app* in a daemon thread.

    Registers a ``/health`` endpoint used to poll for readiness, binds the
    server to a free localhost port, and blocks until the server answers
    (or the startup deadline of ~5 s expires).

    Args:
        app: The FastAPI application to serve.

    Returns:
        tuple: ``(base_url, server, thread)`` — *base_url* is the
        ``http://127.0.0.1:<port>`` root, *server* is the
        ``uvicorn.Server`` (set ``server.should_exit = True`` to stop it),
        and *thread* is the daemon thread running it.

    Raises:
        RuntimeError: if the server does not become reachable in time
        (previously this fell through silently and returned a dead URL).
    """
    # Health-check endpoint so startup can be detected reliably.
    @app.get("/health")
    async def health():
        return {"status": "ok"}

    port = _find_free_port()
    config = uvicorn.Config(
        app, host="127.0.0.1", port=port, log_level="warning"
    )
    server = uvicorn.Server(config)

    thread = threading.Thread(target=server.run, daemon=True)
    thread.start()

    base_url = f"http://127.0.0.1:{port}"
    # Poll until the server responds; each attempt is a short-timeout GET
    # followed by a small sleep on failure (50 * ~0.1-0.3 s total budget).
    for _ in range(50):
        try:
            httpx.get(f"{base_url}/health", timeout=0.2)
            break
        except Exception:
            time.sleep(0.1)
    else:
        # Never became healthy: shut the server thread down and fail loudly
        # instead of handing callers an unreachable base_url.
        server.should_exit = True
        raise RuntimeError(f"uvicorn server at {base_url} failed to start")

    return base_url, server, thread
115+
116+
84117
def _build_mock_openai_app() -> FastAPI:
85118
"""构建本地 OpenAI 协议兼容的简单服务"""
86119
app = FastAPI()
@@ -297,20 +330,36 @@ def parse_sse_events(content: str) -> List[Dict[str, Any]]:
297330

298331

299332
async def request_agui_events(
    server_url_or_app: Union[str, FastAPI],
    messages: List[Dict[str, str]],
    stream: bool = True,
) -> List[Dict[str, Any]]:
    """Send an AG-UI request and return the parsed SSE event list.

    Args:
        server_url_or_app: Either a real server URL (e.g.
            ``"http://127.0.0.1:8000"``) for an actual HTTP connection, or
            a FastAPI app object, which is driven in-process via
            ``httpx.ASGITransport`` (used by non-streaming tests).
        messages: Chat messages to post to the agent endpoint.
        stream: Whether to request a streaming response.

    Returns:
        The SSE events parsed from the response body.
    """
    # Select how the client connects; the request itself is identical in
    # both modes, so issue it once instead of duplicating the POST call.
    if isinstance(server_url_or_app, str):
        # Real HTTP connection to a running server.
        client_kwargs: Dict[str, Any] = {"base_url": server_url_or_app}
    else:
        # In-process ASGI transport (used for non-streaming tests).
        client_kwargs = {
            "transport": httpx.ASGITransport(app=server_url_or_app),
            "base_url": "http://test",
        }

    async with httpx.AsyncClient(**client_kwargs) as client:
        response = await client.post(
            "/ag-ui/agent",
            json={"messages": messages, "stream": stream},
            timeout=60.0,
        )

    assert response.status_code == 200
    return parse_sse_events(response.text)
@@ -670,26 +719,42 @@ def assert_openai_tool_call_response(
670719

671720

672721
async def request_openai_events(
673-
server_app,
722+
server_url_or_app: Union[str, FastAPI],
674723
messages: List[Dict[str, str]],
675724
stream: bool = True,
676725
) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
677-
"""发送 OpenAI 协议请求并返回流式事件列表或响应"""
726+
"""发送 OpenAI 协议请求并返回流式事件列表或响应
727+
728+
Args:
729+
server_url_or_app: 服务器 URL (如 "http://127.0.0.1:8000") 或 FastAPI app 对象
730+
messages: 消息列表
731+
stream: 是否流式响应
732+
"""
678733
payload: Dict[str, Any] = {
679734
"model": "mock-model",
680735
"messages": messages,
681736
"stream": stream,
682737
}
683738

684-
async with httpx.AsyncClient(
685-
transport=httpx.ASGITransport(app=server_app),
686-
base_url="http://test",
687-
) as client:
688-
response = await client.post(
689-
"/openai/v1/chat/completions",
690-
json=payload,
691-
timeout=60.0,
692-
)
739+
if isinstance(server_url_or_app, str):
740+
# 使用真实的 HTTP 连接
741+
async with httpx.AsyncClient(base_url=server_url_or_app) as client:
742+
response = await client.post(
743+
"/openai/v1/chat/completions",
744+
json=payload,
745+
timeout=60.0,
746+
)
747+
else:
748+
# 使用 ASGITransport (用于非流式测试)
749+
async with httpx.AsyncClient(
750+
transport=httpx.ASGITransport(app=server_url_or_app),
751+
base_url="http://test",
752+
) as client:
753+
response = await client.post(
754+
"/openai/v1/chat/completions",
755+
json=payload,
756+
timeout=60.0,
757+
)
693758

694759
assert response.status_code == 200
695760

@@ -779,7 +844,16 @@ async def generator():
779844
return generator()
780845

781846
server = AgentRunServer(invoke_agent=invoke_agent)
782-
return server.app
847+
app = server.app
848+
849+
# 启动真实的 HTTP 服务器
850+
base_url, uvicorn_server, thread = _start_server(app)
851+
852+
yield base_url
853+
854+
# 清理服务器
855+
uvicorn_server.should_exit = True
856+
thread.join(timeout=5)
783857

784858

785859
# =============================================================================
@@ -1643,7 +1717,16 @@ async def generator():
16431717
return await agent.ainvoke(cast(Any, input_data))
16441718

16451719
server = AgentRunServer(invoke_agent=invoke_agent)
1646-
return server.app
1720+
app = server.app
1721+
1722+
# 启动真实的 HTTP 服务器
1723+
base_url, uvicorn_server, thread = _start_server(app)
1724+
1725+
yield base_url
1726+
1727+
# 清理服务器
1728+
uvicorn_server.should_exit = True
1729+
thread.join(timeout=5)
16471730

16481731
@pytest.mark.parametrize(
16491732
"case_key,prompt",
@@ -1704,7 +1787,16 @@ def generator():
17041787
return agent.invoke(cast(Any, input_data))
17051788

17061789
server = AgentRunServer(invoke_agent=invoke_agent)
1707-
return server.app
1790+
app = server.app
1791+
1792+
# 启动真实的 HTTP 服务器
1793+
base_url, uvicorn_server, thread = _start_server(app)
1794+
1795+
yield base_url
1796+
1797+
# 清理服务器
1798+
uvicorn_server.should_exit = True
1799+
thread.join(timeout=5)
17081800

17091801
@pytest.mark.parametrize(
17101802
"case_key,prompt",
@@ -1762,7 +1854,16 @@ async def generator():
17621854
return generator()
17631855

17641856
server = AgentRunServer(invoke_agent=invoke_agent)
1765-
return server.app
1857+
app = server.app
1858+
1859+
# 启动真实的 HTTP 服务器
1860+
base_url, uvicorn_server, thread = _start_server(app)
1861+
1862+
yield base_url
1863+
1864+
# 清理服务器
1865+
uvicorn_server.should_exit = True
1866+
thread.join(timeout=5)
17661867

17671868
@pytest.fixture
17681869
def server_app_async(self, agent_model):
@@ -1796,7 +1897,16 @@ async def generator():
17961897
return generator()
17971898

17981899
server = AgentRunServer(invoke_agent=invoke_agent)
1799-
return server.app
1900+
app = server.app
1901+
1902+
# 启动真实的 HTTP 服务器
1903+
base_url, uvicorn_server, thread = _start_server(app)
1904+
1905+
yield base_url
1906+
1907+
# 清理服务器
1908+
uvicorn_server.should_exit = True
1909+
thread.join(timeout=5)
18001910

18011911
@pytest.mark.parametrize(
18021912
"case_key,prompt",

0 commit comments

Comments
 (0)