-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathllm_caller.py
More file actions
101 lines (93 loc) · 3.57 KB
/
llm_caller.py
File metadata and controls
101 lines (93 loc) · 3.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import json
import os
import openai
from openai.types.responses import ResponseInputParam
from slack_sdk.models.messages.chunk import TaskUpdateChunk
from slack_sdk.web.chat_stream import ChatStream
from agent.tools.dice import roll_dice, roll_dice_definition
def call_llm(
    streamer: ChatStream,
    prompts: ResponseInputParam,
) -> None:
    """
    Stream an LLM response to prompts with an example dice rolling function

    Text deltas are appended to the Slack stream as they arrive. Any
    "roll_dice" function calls the model makes are collected during the
    stream, executed afterwards, surfaced as Slack task updates, and then
    the conversation is continued recursively so the model can respond to
    the tool results.

    https://docs.slack.dev/tools/python-slack-sdk/web#sending-streaming-messages
    https://platform.openai.com/docs/guides/text
    https://platform.openai.com/docs/guides/streaming-responses
    https://platform.openai.com/docs/guides/function-calling

    Args:
        streamer: An open Slack chat stream to append text and task chunks to.
        prompts: The conversation history sent to the model; mutated in place
            with function-call records and their outputs before recursing.
    """
    llm = openai.OpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
    )
    tool_calls = []
    response = llm.responses.create(
        model="gpt-4o-mini",
        input=prompts,
        tools=[
            roll_dice_definition,
        ],
        stream=True,
    )
    for event in response:
        # Markdown text from the LLM response is streamed in chat as it arrives
        if event.type == "response.output_text.delta":
            streamer.append(markdown_text=f"{event.delta}")
        # Function calls are saved for later computation and a new task is shown
        if event.type == "response.output_item.done":
            if event.item.type == "function_call":
                tool_calls.append(event.item)
                if event.item.name == "roll_dice":
                    args = json.loads(event.item.arguments)
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{event.item.call_id}",
                                title=f"Rolling a {args['count']}d{args['sides']}...",
                                status="in_progress",
                            ),
                        ],
                    )
    # Tool calls are performed and tasks are marked as completed in Slack
    if tool_calls:
        for call in tool_calls:
            if call.name == "roll_dice":
                args = json.loads(call.arguments)
                # Echo the function call into the history so the follow-up
                # request pairs it with the function_call_output below
                prompts.append(
                    {
                        "id": call.id,
                        "call_id": call.call_id,
                        "type": "function_call",
                        "name": "roll_dice",
                        "arguments": call.arguments,
                    }
                )
                result = roll_dice(**args)
                prompts.append(
                    {
                        "type": "function_call_output",
                        "call_id": call.call_id,
                        "output": json.dumps(result),
                    }
                )
                # One chunk covers both outcomes; only status/title differ
                if result.get("error") is not None:
                    status, title = "error", f"{result['error']}"
                else:
                    status, title = "complete", f"{result['description']}"
                streamer.append(
                    chunks=[
                        TaskUpdateChunk(
                            id=f"{call.call_id}",
                            title=title,
                            status=status,
                        ),
                    ],
                )
        # Complete the LLM response after making tool calls. Recursing only
        # when tools actually ran prevents unbounded recursion on plain-text
        # responses that produce no new tool output.
        call_llm(streamer, prompts)