Skip to content

Commit 242297a

Browse files
committed
feat: showcase text generation and thinking steps from suggested prompts
1 parent 1683353 commit 242297a

4 files changed

Lines changed: 104 additions & 68 deletions

File tree

ai/llm_caller.py

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,27 +1,17 @@
11
import os
2-
from typing import Dict, List
32

43
import openai
54
from openai import Stream
65
from openai.types.responses import ResponseStreamEvent
76

8-
DEFAULT_SYSTEM_CONTENT = """
9-
You're an assistant in a Slack workspace.
10-
Users in the workspace will ask you to help them write something or to think better about a specific topic.
11-
You'll respond to those questions in a professional way.
12-
When you include markdown text, convert them to Slack compatible ones.
13-
When a prompt has Slack's special syntax like <@USER_ID> or <#CHANNEL_ID>, you must keep them as-is in your response.
14-
"""
15-
167

178
def call_llm(
18-
messages_in_thread: List[Dict[str, str]],
19-
system_content: str = DEFAULT_SYSTEM_CONTENT,
9+
prompt: str,
2010
) -> Stream[ResponseStreamEvent]:
2111
openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
22-
messages = [{"role": "system", "content": system_content}]
23-
messages.extend(messages_in_thread)
2412
response = openai_client.responses.create(
25-
model="gpt-4o-mini", input=messages, stream=True
13+
model="gpt-4o-mini",
14+
input=prompt,
15+
stream=True,
2616
)
2717
return response
Lines changed: 13 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
from logging import Logger
2-
from typing import Dict, List
32

43
from slack_bolt import Say, SetSuggestedPrompts
54

@@ -18,24 +17,19 @@ def assistant_thread_started(
1817
logger: Logger instance for error tracking
1918
"""
2019
try:
21-
say("How can I help you?")
22-
23-
prompts: List[Dict[str, str]] = [
24-
{
25-
"title": "What does Slack stand for?",
26-
"message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
27-
},
28-
{
29-
"title": "Write a draft announcement",
30-
"message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
31-
},
32-
{
33-
"title": "Suggest names for my Slack app",
34-
"message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
35-
},
36-
]
37-
38-
set_suggested_prompts(prompts=prompts)
20+
say("What would you like to do today?")
21+
set_suggested_prompts(
22+
prompts=[
23+
{
24+
"title": "Prompt a task with thinking steps",
25+
"message": "Wonder a few deep thoughts.",
26+
},
27+
{
28+
"title": "Generate a release announcement",
29+
"message": "Please write detailed changelog notes for a feature that almost seems to be magic.",
30+
},
31+
]
32+
)
3933
except Exception as e:
4034
logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
4135
say(f":warning: Something went wrong! ({e})")

listeners/assistant/message.py

Lines changed: 85 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
1+
import time
12
from logging import Logger
2-
from typing import Dict, List
33

44
from slack_bolt import BoltContext, Say, SetStatus
55
from slack_sdk import WebClient
6+
from slack_sdk.models.messages.chunk import MarkdownTextChunk, TaskUpdateChunk
67

78
from ai.llm_caller import call_llm
89

@@ -13,6 +14,7 @@ def message(
1314
client: WebClient,
1415
context: BoltContext,
1516
logger: Logger,
17+
message: dict,
1618
payload: dict,
1719
say: Say,
1820
set_status: SetStatus,
@@ -34,47 +36,96 @@ def message(
3436
thread_ts = payload["thread_ts"]
3537
user_id = context.user_id
3638

37-
set_status(
38-
status="thinking...",
39-
loading_messages=[
40-
"Teaching the hamsters to type faster…",
41-
"Untangling the internet cables…",
42-
"Consulting the office goldfish…",
43-
"Polishing up the response just for you…",
44-
"Convincing the AI to stop overthinking…",
45-
],
46-
)
47-
48-
replies = client.conversations_replies(
49-
channel=context.channel_id,
50-
ts=context.thread_ts,
51-
oldest=context.thread_ts,
52-
limit=10,
53-
)
54-
messages_in_thread: List[Dict[str, str]] = []
55-
for message in replies["messages"]:
56-
role = "user" if message.get("bot_id") is None else "assistant"
57-
messages_in_thread.append({"role": role, "content": message["text"]})
58-
59-
returned_message = call_llm(messages_in_thread)
60-
6139
streamer = client.chat_stream(
6240
channel=channel_id,
6341
recipient_team_id=team_id,
6442
recipient_user_id=user_id,
6543
thread_ts=thread_ts,
6644
)
6745

68-
# Loop over OpenAI response stream
69-
# https://platform.openai.com/docs/api-reference/responses/create
70-
for event in returned_message:
71-
if event.type == "response.output_text.delta":
72-
streamer.append(markdown_text=f"{event.delta}")
73-
else:
74-
continue
46+
# This first example shows a generated text response for the provided prompt
47+
if message["text"] != "Wonder a few deep thoughts.":
48+
set_status(
49+
status="thinking...",
50+
loading_messages=[
51+
"Teaching the hamsters to type faster…",
52+
"Untangling the internet cables…",
53+
"Consulting the office goldfish…",
54+
"Polishing up the response just for you…",
55+
"Convincing the AI to stop overthinking…",
56+
],
57+
)
58+
59+
# Loop over OpenAI response stream
60+
# https://platform.openai.com/docs/api-reference/responses/create
61+
for event in call_llm(message["text"]):
62+
if event.type == "response.output_text.delta":
63+
streamer.append(markdown_text=f"{event.delta}")
64+
else:
65+
continue
66+
67+
feedback_block = create_feedback_block()
68+
streamer.stop(
69+
blocks=feedback_block,
70+
)
71+
72+
# The second example shows detailed thinking steps similar to tool calls
73+
else:
74+
streamer.append(
75+
chunks=[
76+
MarkdownTextChunk(
77+
text="Hello.\nI have received the task. ",
78+
),
79+
MarkdownTextChunk(
80+
text="This task appears manageable.\nThat is good.",
81+
),
82+
TaskUpdateChunk(
83+
id="001",
84+
title="Understanding the task...",
85+
status="in_progress",
86+
details="- Identify the goal\n- Identify constraints\n- Pretending this is obvious",
87+
),
88+
TaskUpdateChunk(
89+
id="002",
90+
title="Performing acrobatics...",
91+
status="pending",
92+
),
93+
],
94+
)
95+
time.sleep(4)
96+
97+
streamer.append(
98+
chunks=[
99+
TaskUpdateChunk(
100+
id="001",
101+
title="Understanding the task...",
102+
status="complete",
103+
details="- Identified the goal\n- Identified constraints\n- Pretended this was obvious",
104+
output="We'll continue to ramble now",
105+
),
106+
TaskUpdateChunk(
107+
id="002",
108+
title="Performing acrobatics...",
109+
status="in_progress",
110+
details="- Jumping atop ropes\n- Juggling bowling pins\n- Riding a single wheel too",
111+
),
112+
],
113+
)
114+
time.sleep(4)
75115

76-
feedback_block = create_feedback_block()
77-
streamer.stop(blocks=feedback_block)
116+
streamer.stop(
117+
chunks=[
118+
TaskUpdateChunk(
119+
id="002",
120+
title="Performing acrobatics...",
121+
status="complete",
122+
details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too",
123+
),
124+
MarkdownTextChunk(
125+
text="The crowd appears to be astounded and applauds :popcorn:"
126+
),
127+
],
128+
)
78129

79130
except Exception as e:
80131
logger.exception(f"Failed to handle a user message event: {e}")

listeners/events/app_mentioned.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from slack_sdk import WebClient
55

66
from ai.llm_caller import call_llm
7+
78
from ..views.feedback_block import create_feedback_block
89

910

@@ -38,7 +39,7 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say:
3839
],
3940
)
4041

41-
returned_message = call_llm([{"role": "user", "content": text}])
42+
returned_message = call_llm(text)
4243

4344
streamer = client.chat_stream(
4445
channel=channel_id,

0 commit comments

Comments
 (0)