Skip to content

Commit 4f3276b

Browse files
fix: add comprehensive tool call support to streaming implementation
Fixes a critical gap in the get_response_stream method, where tool calls were completely ignored during streaming. The method now properly:
- Accumulates tool calls during streaming using _process_stream_delta
- Executes tool calls after streaming completes if execute_tool_fn is provided
- Continues the conversation with a follow-up response after tool execution
- Handles both Ollama and other providers correctly
- Maintains real-time content streaming while adding tool support
This makes streaming fully functional for agents that use tools, which is essential for PraisonAI's core functionality. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Mervin Praison <MervinPraison@users.noreply.github.com>
1 parent 6663b9b commit 4f3276b

1 file changed

Lines changed: 68 additions & 2 deletions

File tree

  • src/praisonai-agents/praisonaiagents/llm

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 68 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1631,8 +1631,11 @@ def get_response_stream(
16311631
use_streaming = False
16321632

16331633
if use_streaming:
1634-
# Real-time streaming approach
1634+
# Real-time streaming approach with tool call support
16351635
try:
1636+
tool_calls = []
1637+
response_text = ""
1638+
16361639
for chunk in litellm.completion(
16371640
**self._build_completion_params(
16381641
messages=messages,
@@ -1646,10 +1649,73 @@ def get_response_stream(
16461649
):
16471650
if chunk and chunk.choices and chunk.choices[0].delta:
16481651
delta = chunk.choices[0].delta
1652+
1653+
# Process both content and tool calls using existing helper
1654+
response_text, tool_calls = self._process_stream_delta(
1655+
delta, response_text, tool_calls, formatted_tools
1656+
)
1657+
1658+
# Yield content chunks in real-time as they arrive
16491659
if delta.content:
1650-
# Yield content chunks in real-time
16511660
yield delta.content
1661+
1662+
# After streaming completes, handle tool calls if present
1663+
if tool_calls and execute_tool_fn:
1664+
# Add assistant message with tool calls to conversation
1665+
if self._is_ollama_provider():
1666+
messages.append({
1667+
"role": "assistant",
1668+
"content": response_text
1669+
})
1670+
else:
1671+
serializable_tool_calls = self._serialize_tool_calls(tool_calls)
1672+
messages.append({
1673+
"role": "assistant",
1674+
"content": response_text,
1675+
"tool_calls": serializable_tool_calls
1676+
})
1677+
1678+
# Execute tool calls and add results to conversation
1679+
for tool_call in tool_calls:
1680+
is_ollama = self._is_ollama_provider()
1681+
function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)
1682+
1683+
try:
1684+
# Execute the tool
1685+
tool_result = execute_tool_fn(function_name, arguments)
16521686

1687+
# Add tool result to messages
1688+
tool_message = self._create_tool_message(function_name, tool_result, tool_call_id, is_ollama)
1689+
messages.append(tool_message)
1690+
1691+
except Exception as e:
1692+
logging.error(f"Tool execution error for {function_name}: {e}")
1693+
# Add error message to conversation
1694+
error_message = self._create_tool_message(
1695+
function_name, f"Error executing tool: {e}", tool_call_id, is_ollama
1696+
)
1697+
messages.append(error_message)
1698+
1699+
# Continue conversation after tool execution - get follow-up response
1700+
try:
1701+
follow_up_response = litellm.completion(
1702+
**self._build_completion_params(
1703+
messages=messages,
1704+
tools=formatted_tools,
1705+
temperature=temperature,
1706+
stream=False,
1707+
**kwargs
1708+
)
1709+
)
1710+
1711+
if follow_up_response and follow_up_response.choices:
1712+
follow_up_content = follow_up_response.choices[0].message.content
1713+
if follow_up_content:
1714+
# Yield the follow-up response after tool execution
1715+
yield follow_up_content
1716+
except Exception as e:
1717+
logging.error(f"Follow-up response failed: {e}")
1718+
16531719
except Exception as e:
16541720
logging.error(f"Streaming failed: {e}")
16551721
# Fall back to non-streaming if streaming fails

0 commit comments

Comments
 (0)