Skip to content
18 changes: 4 additions & 14 deletions ai/llm_caller.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,17 @@
import os

import openai
from openai import Stream
from openai.types.responses import ResponseStreamEvent


def call_llm(
    prompt: str,
) -> Stream[ResponseStreamEvent]:
    """Send a single prompt to OpenAI and return the streaming response.

    Args:
        prompt: The text to send as the model input.

    Returns:
        A stream of response events; callers iterate it and handle the
        incremental events (e.g. "response.output_text.delta") themselves.
    """
    # The API key is read from the environment at call time rather than at
    # import time, so configuration changes take effect per call.
    openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    response = openai_client.responses.create(
        model="gpt-4o-mini",
        input=prompt,
        stream=True,
    )
    return response
32 changes: 13 additions & 19 deletions listeners/assistant/assistant_thread_started.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from logging import Logger
from typing import Dict, List

from slack_bolt import Say, SetSuggestedPrompts

Expand All @@ -18,24 +17,19 @@ def assistant_thread_started(
logger: Logger instance for error tracking
"""
try:
say("How can I help you?")

prompts: List[Dict[str, str]] = [
{
"title": "What does Slack stand for?",
"message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
},
{
"title": "Write a draft announcement",
"message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
},
{
"title": "Suggest names for my Slack app",
"message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
},
]

set_suggested_prompts(prompts=prompts)
say("What would you like to do today?")
set_suggested_prompts(
prompts=[
{
"title": "Prompt a task with thinking steps",
"message": "Wonder a few deep thoughts.",
},
{
"title": "Generate a release announcement",
"message": "Please write detailed changelog notes for a feature that almost seems to be magic.",
},
]
)
except Exception as e:
logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
say(f":warning: Something went wrong! ({e})")
118 changes: 84 additions & 34 deletions listeners/assistant/message.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import time
from logging import Logger
from typing import Dict, List

from slack_bolt import BoltContext, Say, SetStatus
from slack_sdk import WebClient
from slack_sdk.models.messages.chunk import MarkdownTextChunk, TaskUpdateChunk

from ai.llm_caller import call_llm

Expand All @@ -13,6 +14,7 @@ def message(
client: WebClient,
context: BoltContext,
logger: Logger,
message: dict,
payload: dict,
say: Say,
set_status: SetStatus,
Expand All @@ -34,47 +36,95 @@ def message(
thread_ts = payload["thread_ts"]
user_id = context.user_id

set_status(
status="thinking...",
loading_messages=[
"Teaching the hamsters to type faster…",
"Untangling the internet cables…",
"Consulting the office goldfish…",
"Polishing up the response just for you…",
"Convincing the AI to stop overthinking…",
],
)

replies = client.conversations_replies(
channel=context.channel_id,
ts=context.thread_ts,
oldest=context.thread_ts,
limit=10,
)
messages_in_thread: List[Dict[str, str]] = []
for message in replies["messages"]:
role = "user" if message.get("bot_id") is None else "assistant"
messages_in_thread.append({"role": role, "content": message["text"]})

returned_message = call_llm(messages_in_thread)

streamer = client.chat_stream(
channel=channel_id,
recipient_team_id=team_id,
recipient_user_id=user_id,
thread_ts=thread_ts,
)

# Loop over OpenAI response stream
# https://platform.openai.com/docs/api-reference/responses/create
for event in returned_message:
if event.type == "response.output_text.delta":
streamer.append(markdown_text=f"{event.delta}")
else:
continue
# This first example shows a generated text response for the provided prompt
Comment thread
zimeg marked this conversation as resolved.
Outdated
if message["text"] != "Wonder a few deep thoughts.":
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

suggestion: It took me a few re-reads before I noticed the inverted logic (!=).

At first, I kept wondering why "Wonder a few deep thoughts." would be passed directly to the LLM, while the else statement handled the example.

Non-blocker, but could we flip this logic to be == "Wonder a few deep thoughts.": and have the else handle the general LLM response?

set_status(
status="thinking...",
loading_messages=[
"Teaching the hamsters to type faster…",
"Untangling the internet cables…",
"Consulting the office goldfish…",
"Polishing up the response just for you…",
"Convincing the AI to stop overthinking…",
],
)

# Loop over OpenAI response stream
# https://platform.openai.com/docs/api-reference/responses/create
for event in call_llm(message["text"]):
if event.type == "response.output_text.delta":
streamer.append(markdown_text=f"{event.delta}")
else:
continue

feedback_block = create_feedback_block()
streamer.stop(
blocks=feedback_block,
)

# The second example shows detailed thinking steps similar to tool calls
Comment thread
zimeg marked this conversation as resolved.
Outdated
else:
streamer.append(
chunks=[
MarkdownTextChunk(
text="Hello.\nI have received the task. ",
),
MarkdownTextChunk(
text="This task appears manageable.\nThat is good.",
),
TaskUpdateChunk(
id="001",
title="Understanding the task...",
status="in_progress",
details="- Identifying the goal\n- Identifying constraints",
),
TaskUpdateChunk(
id="002",
title="Performing acrobatics...",
status="pending",
),
],
)
time.sleep(4)
Comment on lines +53 to +74
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

praise: Love this example. It's dead simple and easy to grok. It's ripe to be altered, experimented, and hacked on for folks playing with the sample. 🙌🏻


streamer.append(
chunks=[
TaskUpdateChunk(
id="001",
title="Understanding the task...",
status="complete",
details="\n- Pretending this was obvious",
output="We'll continue to ramble now",
),
TaskUpdateChunk(
id="002",
title="Performing acrobatics...",
status="in_progress",
),
],
)
time.sleep(4)
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice! I've been experimenting with something similar on my own, and I like this idea. Showing what the agent is doing — like tool calls or thinking steps — is great.


feedback_block = create_feedback_block()
streamer.stop(blocks=feedback_block)
streamer.stop(
chunks=[
TaskUpdateChunk(
id="002",
title="Performing acrobatics...",
status="complete",
details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too",
),
MarkdownTextChunk(
text="The crowd appears to be astouned and applauds :popcorn:"
Comment thread
zimeg marked this conversation as resolved.
Outdated
),
],
)

except Exception as e:
logger.exception(f"Failed to handle a user message event: {e}")
Expand Down
3 changes: 2 additions & 1 deletion listeners/events/app_mentioned.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from slack_sdk import WebClient

from ai.llm_caller import call_llm

from ..views.feedback_block import create_feedback_block


Expand Down Expand Up @@ -38,7 +39,7 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say:
],
)

returned_message = call_llm([{"role": "user", "content": text}])
returned_message = call_llm(text)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⭐ nice


streamer = client.chat_stream(
channel=channel_id,
Expand Down