37 | 37 | ToolApprovalItem, |
38 | 38 | ToolCallItem, |
39 | 39 | ToolCallOutputItem, |
| 40 | + ToolExecutionConfig, |
40 | 41 | ToolGuardrailFunctionOutput, |
41 | 42 | ToolInputGuardrail, |
42 | 43 | ToolOutputGuardrailData, |
@@ -232,6 +233,122 @@ async def test_plaintext_agent_with_tool_call_is_run_again(): |
232 | 233 | assert isinstance(result.next_step, NextStepRunAgain) |
233 | 234 | |
234 | 235 | |
| 236 | +@pytest.mark.asyncio |
| 237 | +async def test_function_tool_concurrency_default_starts_all_calls(): |
| 238 | + active_count = 0 |
| 239 | + max_seen_count = 0 |
| 240 | + |
| 241 | + async def tracked_tool(value: int) -> str: |
| 242 | + nonlocal active_count, max_seen_count |
| 243 | + active_count += 1 |
| 244 | + max_seen_count = max(max_seen_count, active_count) |
| 245 | + try: |
| 246 | + await asyncio.sleep(0.01) |
| 247 | + return f"ok-{value}" |
| 248 | + finally: |
| 249 | + active_count -= 1 |
| 250 | + |
| 251 | + tool = function_tool(tracked_tool, name_override="tracked_tool") |
| 252 | + agent = Agent(name="test", tools=[tool]) |
| 253 | + response = ModelResponse( |
| 254 | + output=[ |
| 255 | + get_function_tool_call("tracked_tool", json.dumps({"value": 1}), call_id="call_1"), |
| 256 | + get_function_tool_call("tracked_tool", json.dumps({"value": 2}), call_id="call_2"), |
| 257 | + get_function_tool_call("tracked_tool", json.dumps({"value": 3}), call_id="call_3"), |
| 258 | + ], |
| 259 | + usage=Usage(), |
| 260 | + response_id="resp", |
| 261 | + ) |
| 262 | + |
| 263 | + result = await get_execute_result(agent, response) |
| 264 | + |
| 265 | + assert active_count == 0 |
| 266 | + assert max_seen_count == 3 |
| 267 | + assert_item_is_function_tool_call_output(result.generated_items[3], "ok-1") |
| 268 | + assert_item_is_function_tool_call_output(result.generated_items[4], "ok-2") |
| 269 | + assert_item_is_function_tool_call_output(result.generated_items[5], "ok-3") |
| 270 | + |
| 271 | + |
| 272 | +@pytest.mark.asyncio |
| 273 | +async def test_function_tool_concurrency_cap_limits_calls_and_preserves_output_order(): |
| 274 | + active_count = 0 |
| 275 | + max_seen_count = 0 |
| 276 | + |
| 277 | + async def tracked_tool(value: int) -> str: |
| 278 | + nonlocal active_count, max_seen_count |
| 279 | + active_count += 1 |
| 280 | + max_seen_count = max(max_seen_count, active_count) |
| 281 | + try: |
| 282 | + await asyncio.sleep(0.03 if value == 1 else 0.001) |
| 283 | + return f"ok-{value}" |
| 284 | + finally: |
| 285 | + active_count -= 1 |
| 286 | + |
| 287 | + tool = function_tool(tracked_tool, name_override="tracked_tool") |
| 288 | + agent = Agent(name="test", tools=[tool]) |
| 289 | + response = ModelResponse( |
| 290 | + output=[ |
| 291 | + get_function_tool_call("tracked_tool", json.dumps({"value": 1}), call_id="call_1"), |
| 292 | + get_function_tool_call("tracked_tool", json.dumps({"value": 2}), call_id="call_2"), |
| 293 | + get_function_tool_call("tracked_tool", json.dumps({"value": 3}), call_id="call_3"), |
| 294 | + ], |
| 295 | + usage=Usage(), |
| 296 | + response_id="resp", |
| 297 | + ) |
| 298 | + |
| 299 | + result = await get_execute_result( |
| 300 | + agent, |
| 301 | + response, |
| 302 | + run_config=RunConfig(tool_execution=ToolExecutionConfig(max_function_tool_concurrency=2)), |
| 303 | + ) |
| 304 | + |
| 305 | + assert active_count == 0 |
| 306 | + assert max_seen_count == 2 |
| 307 | + assert_item_is_function_tool_call_output(result.generated_items[3], "ok-1") |
| 308 | + assert_item_is_function_tool_call_output(result.generated_items[4], "ok-2") |
| 309 | + assert_item_is_function_tool_call_output(result.generated_items[5], "ok-3") |
| 310 | + |
| 311 | + |
| 312 | +@pytest.mark.asyncio |
| 313 | +async def test_function_tool_concurrency_cap_leaves_queued_calls_unstarted_after_failure(): |
| 314 | + started_tools: list[str] = [] |
| 315 | + |
| 316 | + async def failing_tool() -> str: |
| 317 | + started_tools.append("failing_tool") |
| 318 | + raise RuntimeError("boom") |
| 319 | + |
| 320 | + async def queued_tool() -> str: |
| 321 | + started_tools.append("queued_tool") |
| 322 | + return "should-not-run" |
| 323 | + |
| 324 | + failing = function_tool( |
| 325 | + failing_tool, |
| 326 | + name_override="failing_tool", |
| 327 | + failure_error_function=None, |
| 328 | + ) |
| 329 | + queued = function_tool(queued_tool, name_override="queued_tool") |
| 330 | + agent = Agent(name="test", tools=[failing, queued]) |
| 331 | + response = ModelResponse( |
| 332 | + output=[ |
| 333 | + get_function_tool_call("failing_tool", "{}", call_id="call_1"), |
| 334 | + get_function_tool_call("queued_tool", "{}", call_id="call_2"), |
| 335 | + ], |
| 336 | + usage=Usage(), |
| 337 | + response_id="resp", |
| 338 | + ) |
| 339 | + |
| 340 | + with pytest.raises(UserError, match="Error running tool failing_tool: boom"): |
| 341 | + await get_execute_result( |
| 342 | + agent, |
| 343 | + response, |
| 344 | + run_config=RunConfig( |
| 345 | + tool_execution=ToolExecutionConfig(max_function_tool_concurrency=1) |
| 346 | + ), |
| 347 | + ) |
| 348 | + |
| 349 | + assert started_tools == ["failing_tool"] |
| 350 | + |
| 351 | + |
235 | 352 | @pytest.mark.asyncio |
236 | 353 | async def test_plaintext_agent_hosted_shell_items_without_message_runs_again(): |
237 | 354 | shell_tool = ShellTool(environment={"type": "container_auto"}) |
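
These tests pin down a new per-run concurrency cap on function tool calls: by default all calls start at once, a cap of 2 keeps at most two in flight while preserving the model's call order in the outputs, and after a failure under a cap of 1 the queued call never starts. A minimal usage sketch, assuming `ToolExecutionConfig` and `RunConfig` are exported from the top-level `agents` package (as the import hunk above suggests) and that `Runner.run` accepts a `run_config` argument; the tool and URLs here are hypothetical:

```python
import asyncio

from agents import Agent, RunConfig, Runner, ToolExecutionConfig, function_tool


@function_tool
async def fetch_page(url: str) -> str:
    """Stand-in for a slow, I/O-bound tool call."""
    await asyncio.sleep(0.1)
    return f"contents of {url}"


async def main() -> None:
    agent = Agent(
        name="crawler",
        instructions="Fetch every URL the user mentions.",
        tools=[fetch_page],
    )
    # Cap this run at two in-flight function tool calls; further calls queue,
    # and (per the tests above) outputs keep the model's original call order.
    run_config = RunConfig(
        tool_execution=ToolExecutionConfig(max_function_tool_concurrency=2)
    )
    result = await Runner.run(
        agent,
        "Fetch https://a.example, https://b.example, and https://c.example",
        run_config=run_config,
    )
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())
```

Omitting `tool_execution` falls back to the uncapped behavior exercised by the first test, where all three calls run concurrently.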
|