Skip to content

Commit b387ecf

Browse files
zimegmwbrooks
and authored
feat: showcase text generation and thinking steps from suggested prompts (#37)
Co-authored-by: Michael Brooks <michael@michaelbrooks.ca>
1 parent 1683353 commit b387ecf

File tree

10 files changed

+302
-103
lines changed

10 files changed

+302
-103
lines changed

README.md

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -150,9 +150,11 @@ Configures the new Slack Assistant features, providing a dedicated side panel UI
150150
- The `assistant_thread_started.py` file, which responds to new app threads with a list of suggested prompts.
151151
- The `message.py` file, which responds to user messages sent to app threads or from the **Chat** and **History** tab with an LLM generated response.
152152

153-
### `/ai`
153+
### `/agent`
154154

155-
The `llm_caller.py` file, which handles OpenAI API integration and message formatting. It includes the `call_llm()` function that sends conversation threads to OpenAI's models.
155+
The `llm_caller.py` file calls the OpenAI API and streams the generated response into a Slack conversation.
156+
157+
The `tools` directory contains app-specific functions for the LLM to call.
156158

157159
## App Distribution / OAuth
158160

agent/llm_caller.py

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
1+
import json
2+
import os
3+
4+
import openai
5+
from openai.types.responses import ResponseInputParam
6+
from slack_sdk.models.messages.chunk import TaskUpdateChunk
7+
from slack_sdk.web.chat_stream import ChatStream
8+
9+
from agent.tools.dice import roll_dice, roll_dice_definition
10+
11+
12+
def call_llm(
    streamer: ChatStream,
    prompts: ResponseInputParam,
):
    """
    Stream an LLM response to prompts with an example dice rolling function.

    Text deltas from the model are appended to the Slack chat stream as they
    arrive. "roll_dice" function calls are surfaced as task chunks, executed,
    recorded back into the conversation, and then this function recurses so
    the model can finish its answer using the tool results.

    Args:
        streamer: Slack chat stream receiving markdown text and task chunks.
        prompts: OpenAI Responses API input list; mutated in place with the
            model's tool-call records and their outputs.

    https://docs.slack.dev/tools/python-slack-sdk/web#sending-streaming-messages
    https://platform.openai.com/docs/guides/text
    https://platform.openai.com/docs/guides/streaming-responses
    https://platform.openai.com/docs/guides/function-calling
    """
    llm = openai.OpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
    )
    tool_calls = []
    response = llm.responses.create(
        model="gpt-4o-mini",
        input=prompts,
        tools=[
            roll_dice_definition,
        ],
        stream=True,
    )
    for event in response:
        # Markdown text from the LLM response is streamed in chat as it arrives
        if event.type == "response.output_text.delta":
            streamer.append(markdown_text=f"{event.delta}")

        # Function calls are saved for later computation and a new task is shown
        if event.type == "response.output_item.done":
            if event.item.type == "function_call":
                tool_calls.append(event.item)
                if event.item.name == "roll_dice":
                    args = json.loads(event.item.arguments)
                    # The tool definition is not strict, so the model may omit
                    # an argument; fall back to roll_dice's own defaults
                    # (sides=6, count=1) instead of raising KeyError mid-stream.
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{event.item.call_id}",
                                title=f"Rolling a {args.get('count', 1)}d{args.get('sides', 6)}...",
                                status="in_progress",
                            ),
                        ],
                    )

    # Tool calls are performed and tasks are marked as completed in Slack
    if tool_calls:
        for call in tool_calls:
            if call.name == "roll_dice":
                args = json.loads(call.arguments)
                # Record the model's tool call in the conversation history so
                # the follow-up request is a valid Responses API transcript
                prompts.append(
                    {
                        "id": call.id,
                        "call_id": call.call_id,
                        "type": "function_call",
                        "name": "roll_dice",
                        "arguments": call.arguments,
                    }
                )
                result = roll_dice(**args)
                # Feed the tool output back for the model's next turn
                prompts.append(
                    {
                        "type": "function_call_output",
                        "call_id": call.call_id,
                        "output": json.dumps(result),
                    }
                )
                if result.get("error") is not None:
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{call.call_id}",
                                title=f"{result['error']}",
                                status="error",
                            ),
                        ],
                    )
                else:
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{call.call_id}",
                                title=f"{result['description']}",
                                status="complete",
                            ),
                        ],
                    )

        # Complete the LLM response after making tool calls.
        # NOTE(review): recurses once per round of tool calls with no depth
        # limit — assumes the model eventually stops requesting tools.
        call_llm(streamer, prompts)

agent/tools/__init__.py

Whitespace-only changes.

agent/tools/dice.py

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
import random
2+
import time
3+
4+
from openai.types.responses import FunctionToolParam
5+
6+
7+
def roll_dice(sides: int = 6, count: int = 1) -> dict:
    """
    Roll `count` dice with `sides` sides each.

    Args:
        sides: Number of faces per die (must be at least 2).
        count: Number of dice to roll (must be at least 1).

    Returns:
        On success: {"rolls": [...], "total": int, "description": str}.
        On invalid input: {"error": str, "rolls": [], "total": 0}.
    """
    # Guard clauses: reject impossible dice before rolling anything.
    # The sides check takes precedence when both arguments are invalid.
    error = None
    if sides < 2:
        error = "A die must have at least 2 sides"
    elif count < 1:
        error = "Must roll at least 1 die"
    if error is not None:
        return {
            "error": error,
            "rolls": [],
            "total": 0,
        }

    # Roll each die independently and sum the results
    results = []
    for _ in range(count):
        results.append(random.randint(1, sides))
    total = sum(results)

    # Add a pause between rolls to demonstrate loading states
    time.sleep(2)

    return {
        "rolls": results,
        "total": total,
        "description": f"Rolled a {count}d{sides} to total {total}",
    }
34+
35+
36+
# Tool definition for OpenAI API
#
# https://platform.openai.com/docs/guides/function-calling
#
# NOTE: "strict" is False, so the model is not forced to match this schema
# exactly; "required" lists both parameters even though roll_dice() itself
# provides matching defaults (sides=6, count=1).
roll_dice_definition: FunctionToolParam = {
    "type": "function",
    "name": "roll_dice",
    "description": "Roll one or more dice with a specified number of sides. Use this when the user wants to roll dice or generate random numbers within a range.",
    "parameters": {
        "type": "object",
        "properties": {
            # Mirrors the `sides` parameter of roll_dice()
            "sides": {
                "type": "integer",
                "description": "The number of sides on the die (e.g., 6 for a standard die, 20 for a d20)",
                "default": 6,
            },
            # Mirrors the `count` parameter of roll_dice()
            "count": {
                "type": "integer",
                "description": "The number of dice to roll",
                "default": 1,
            },
        },
        "required": ["sides", "count"],
    },
    "strict": False,
}

ai/llm_caller.py

Lines changed: 0 additions & 27 deletions
This file was deleted.

app.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
import os
33

44
from dotenv import load_dotenv
5-
65
from slack_bolt import App
76
from slack_bolt.adapter.socket_mode import SocketModeHandler
87
from slack_sdk import WebClient
@@ -22,6 +21,7 @@
2221
token=os.environ.get("SLACK_BOT_TOKEN"),
2322
),
2423
)
24+
2525
# Register Listeners
2626
register_listeners(app)
2727

Lines changed: 13 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
from logging import Logger
2-
from typing import Dict, List
32

43
from slack_bolt import Say, SetSuggestedPrompts
54

@@ -18,24 +17,19 @@ def assistant_thread_started(
1817
logger: Logger instance for error tracking
1918
"""
2019
try:
21-
say("How can I help you?")
22-
23-
prompts: List[Dict[str, str]] = [
24-
{
25-
"title": "What does Slack stand for?",
26-
"message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
27-
},
28-
{
29-
"title": "Write a draft announcement",
30-
"message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
31-
},
32-
{
33-
"title": "Suggest names for my Slack app",
34-
"message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
35-
},
36-
]
37-
38-
set_suggested_prompts(prompts=prompts)
20+
say("What would you like to do today?")
21+
set_suggested_prompts(
22+
prompts=[
23+
{
24+
"title": "Prompt a task with thinking steps",
25+
"message": "Wonder a few deep thoughts.",
26+
},
27+
{
28+
"title": "Roll dice for a random number",
29+
"message": "Roll two 12-sided dice and three 6-sided dice for a pseudo-random score.",
30+
},
31+
]
32+
)
3933
except Exception as e:
4034
logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
4135
say(f":warning: Something went wrong! ({e})")

0 commit comments

Comments
 (0)