Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,9 @@ Configures the new Slack Assistant features, providing a dedicated side panel UI

### `/ai`

The `llm_caller.py` file, which handles OpenAI API integration and message formatting. It includes the `call_llm()` function that sends conversation threads to OpenAI's models.
The `llm_caller.py` file calls the OpenAI API and streams the generated response into a Slack conversation.

The `tools` directory contains app-specific functions for the LLM to call.

## App Distribution / OAuth

Expand Down
112 changes: 93 additions & 19 deletions ai/llm_caller.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,101 @@
import json
import os
from typing import Dict, List

import openai
from openai import Stream
from openai.types.responses import ResponseStreamEvent
from openai.types.responses import ResponseInputParam
from slack_sdk.models.messages.chunk import TaskUpdateChunk
from slack_sdk.web.chat_stream import ChatStream

DEFAULT_SYSTEM_CONTENT = """
You're an assistant in a Slack workspace.
Users in the workspace will ask you to help them write something or to think better about a specific topic.
You'll respond to those questions in a professional way.
When you include markdown text, convert them to Slack compatible ones.
When a prompt has Slack's special syntax like <@USER_ID> or <#CHANNEL_ID>, you must keep them as-is in your response.
"""
from ai.tools.dice import roll_dice, roll_dice_definition


def call_llm(
    streamer: ChatStream,
    prompts: ResponseInputParam,
):
    """
    Stream an LLM response to prompts with an example dice rolling function.

    Args:
        streamer: Slack chat stream used to append markdown text and task
            update chunks as the response arrives.
        prompts: Conversation input for the OpenAI Responses API; tool call
            records and outputs are appended to it in place.

    https://docs.slack.dev/tools/python-slack-sdk/web#sending-streaming-messages
    https://platform.openai.com/docs/guides/text
    https://platform.openai.com/docs/guides/streaming-responses
    https://platform.openai.com/docs/guides/function-calling
    """
    llm = openai.OpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
    )
    tool_calls = []
    response = llm.responses.create(
        model="gpt-4o-mini",
        input=prompts,
        tools=[
            roll_dice_definition,
        ],
        stream=True,
    )
    for event in response:
        # Markdown text from the LLM response is streamed in chat as it arrives
        if event.type == "response.output_text.delta":
            streamer.append(markdown_text=f"{event.delta}")

        # Function calls are saved for later computation and a new task is shown
        if event.type == "response.output_item.done":
            if event.item.type == "function_call":
                tool_calls.append(event.item)
                if event.item.name == "roll_dice":
                    args = json.loads(event.item.arguments)
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{event.item.call_id}",
                                title=f"Rolling a {args['count']}d{args['sides']}...",
                                status="in_progress",
                            ),
                        ],
                    )

    # Tool calls are performed and tasks are marked as completed in Slack
    if tool_calls:
        for call in tool_calls:
            if call.name == "roll_dice":
                args = json.loads(call.arguments)
                # Echo the function call back into the prompt history so the
                # follow-up request can pair it with its output below
                prompts.append(
                    {
                        "id": call.id,
                        "call_id": call.call_id,
                        "type": "function_call",
                        "name": "roll_dice",
                        "arguments": call.arguments,
                    }
                )
                result = roll_dice(**args)
                prompts.append(
                    {
                        "type": "function_call_output",
                        "call_id": call.call_id,
                        "output": json.dumps(result),
                    }
                )
                if result.get("error") is not None:
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{call.call_id}",
                                title=f"{result['error']}",
                                status="error",
                            ),
                        ],
                    )
                else:
                    streamer.append(
                        chunks=[
                            TaskUpdateChunk(
                                id=f"{call.call_id}",
                                title=f"{result['description']}",
                                status="complete",
                            ),
                        ],
                    )

        # Complete the LLM response after making tool calls
        # NOTE(review): recursion continues until a response with no tool
        # calls — assumes the model eventually stops requesting tools
        call_llm(streamer, prompts)
Empty file added ai/tools/__init__.py
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

suggestion: I think it's reasonable to rename ai/ to agent/ if we want, but it can also happen in a later PR.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@mwbrooks Awesome callout! Let's make it in this PR as part of 6ab8795 with the addition of tools 🤖

Empty file.
58 changes: 58 additions & 0 deletions ai/tools/dice.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import random
import time

from openai.types.responses import FunctionToolParam


def roll_dice(sides: int = 6, count: int = 1) -> dict:
    """Roll `count` dice, each with `sides` sides, and sum the results.

    Args:
        sides: Number of faces per die; must be at least 2.
        count: Number of dice to roll; must be at least 1.

    Returns:
        dict: On success, the individual `rolls`, their `total`, and a
        human-readable `description`. On invalid input, an `error` message
        with empty `rolls` and a `total` of 0.
    """
    if sides < 2:
        return {
            "error": "A die must have at least 2 sides",
            "rolls": [],
            "total": 0,
        }
    if count < 1:
        return {
            "error": "Must roll at least 1 die",
            "rolls": [],
            "total": 0,
        }

    outcomes = []
    for _ in range(count):
        outcomes.append(random.randint(1, sides))
    total = sum(outcomes)

    # Add a pause between rolls to demonstrate loading states
    time.sleep(2)

    return {
        "rolls": outcomes,
        "total": total,
        "description": f"Rolled a {count}d{sides} to total {total}",
    }


# Tool definition for OpenAI API
#
# https://platform.openai.com/docs/guides/function-calling
# JSON-schema tool definition handed to the OpenAI Responses API so the
# model can request a `roll_dice` call with structured arguments.
roll_dice_definition: FunctionToolParam = {
    "type": "function",
    "name": "roll_dice",
    "description": "Roll one or more dice with a specified number of sides. Use this when the user wants to roll dice or generate random numbers within a range.",
    "parameters": {
        "type": "object",
        "properties": {
            "sides": {
                "type": "integer",
                "description": "The number of sides on the die (e.g., 6 for a standard die, 20 for a d20)",
                "default": 6,
            },
            "count": {
                "type": "integer",
                "description": "The number of dice to roll",
                "default": 1,
            },
        },
        # NOTE(review): both parameters are marked required even though each
        # declares a default — the model must always supply them; confirm
        # this is intended rather than letting defaults apply
        "required": ["sides", "count"],
    },
    # Non-strict mode: the API does not enforce exact schema adherence
    "strict": False,
}
2 changes: 1 addition & 1 deletion app.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import os

from dotenv import load_dotenv

from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk import WebClient
Expand All @@ -22,6 +21,7 @@
token=os.environ.get("SLACK_BOT_TOKEN"),
),
)

# Register Listeners
register_listeners(app)

Expand Down
32 changes: 13 additions & 19 deletions listeners/assistant/assistant_thread_started.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from logging import Logger
from typing import Dict, List

from slack_bolt import Say, SetSuggestedPrompts

Expand All @@ -18,24 +17,19 @@ def assistant_thread_started(
logger: Logger instance for error tracking
"""
try:
say("How can I help you?")

prompts: List[Dict[str, str]] = [
{
"title": "What does Slack stand for?",
"message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
},
{
"title": "Write a draft announcement",
"message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
},
{
"title": "Suggest names for my Slack app",
"message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
},
]

set_suggested_prompts(prompts=prompts)
say("What would you like to do today?")
set_suggested_prompts(
prompts=[
{
"title": "Prompt a task with thinking steps",
"message": "Wonder a few deep thoughts.",
},
{
"title": "Roll dice for a random number",
"message": "Roll two 12-sided dice and three 6-sided dice for a psuedo-random score.",
},
]
)
except Exception as e:
logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
say(f":warning: Something went wrong! ({e})")
Loading