-
Notifications
You must be signed in to change notification settings - Fork 17
feat: showcase text generation and thinking steps from suggested prompts #37
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 2 commits
242297a
ea6c722
4a5de84
ac2816f
6ab8795
e2f6fa2
7ddcac0
d6af288
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,27 +1,17 @@ | ||
| import os | ||
| from typing import Dict, List | ||
|
|
||
| import openai | ||
| from openai import Stream | ||
| from openai.types.responses import ResponseStreamEvent | ||
|
|
||
| DEFAULT_SYSTEM_CONTENT = """ | ||
| You're an assistant in a Slack workspace. | ||
| Users in the workspace will ask you to help them write something or to think better about a specific topic. | ||
| You'll respond to those questions in a professional way. | ||
| When you include markdown text, convert them to Slack compatible ones. | ||
| When a prompt has Slack's special syntax like <@USER_ID> or <#CHANNEL_ID>, you must keep them as-is in your response. | ||
| """ | ||
|
|
||
|
|
||
| def call_llm( | ||
| messages_in_thread: List[Dict[str, str]], | ||
| system_content: str = DEFAULT_SYSTEM_CONTENT, | ||
| prompt: str, | ||
| ) -> Stream[ResponseStreamEvent]: | ||
| openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY")) | ||
| messages = [{"role": "system", "content": system_content}] | ||
| messages.extend(messages_in_thread) | ||
| response = openai_client.responses.create( | ||
| model="gpt-4o-mini", input=messages, stream=True | ||
| model="gpt-4o-mini", | ||
| input=prompt, | ||
| stream=True, | ||
| ) | ||
| return response |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,8 +1,9 @@ | ||
| import time | ||
| from logging import Logger | ||
| from typing import Dict, List | ||
|
|
||
| from slack_bolt import BoltContext, Say, SetStatus | ||
| from slack_sdk import WebClient | ||
| from slack_sdk.models.messages.chunk import MarkdownTextChunk, TaskUpdateChunk | ||
|
|
||
| from ai.llm_caller import call_llm | ||
|
|
||
|
|
@@ -13,6 +14,7 @@ def message( | |
| client: WebClient, | ||
| context: BoltContext, | ||
| logger: Logger, | ||
| message: dict, | ||
| payload: dict, | ||
| say: Say, | ||
| set_status: SetStatus, | ||
|
|
@@ -34,47 +36,95 @@ def message( | |
| thread_ts = payload["thread_ts"] | ||
| user_id = context.user_id | ||
|
|
||
| set_status( | ||
| status="thinking...", | ||
| loading_messages=[ | ||
| "Teaching the hamsters to type faster…", | ||
| "Untangling the internet cables…", | ||
| "Consulting the office goldfish…", | ||
| "Polishing up the response just for you…", | ||
| "Convincing the AI to stop overthinking…", | ||
| ], | ||
| ) | ||
|
|
||
| replies = client.conversations_replies( | ||
| channel=context.channel_id, | ||
| ts=context.thread_ts, | ||
| oldest=context.thread_ts, | ||
| limit=10, | ||
| ) | ||
| messages_in_thread: List[Dict[str, str]] = [] | ||
| for message in replies["messages"]: | ||
| role = "user" if message.get("bot_id") is None else "assistant" | ||
| messages_in_thread.append({"role": role, "content": message["text"]}) | ||
|
|
||
| returned_message = call_llm(messages_in_thread) | ||
|
|
||
| streamer = client.chat_stream( | ||
| channel=channel_id, | ||
| recipient_team_id=team_id, | ||
| recipient_user_id=user_id, | ||
| thread_ts=thread_ts, | ||
| ) | ||
|
|
||
| # Loop over OpenAI response stream | ||
| # https://platform.openai.com/docs/api-reference/responses/create | ||
| for event in returned_message: | ||
| if event.type == "response.output_text.delta": | ||
| streamer.append(markdown_text=f"{event.delta}") | ||
| else: | ||
| continue | ||
| # This first example shows a generated text response for the provided prompt | ||
| if message["text"] != "Wonder a few deep thoughts.": | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. suggestion: It took me a few re-reads before I noticed the inverted logic (`!=`). At first, I kept wondering why the first example was the fallthrough case. Non-blocker, but could we flip this logic to be `==`? |
||
| set_status( | ||
| status="thinking...", | ||
| loading_messages=[ | ||
| "Teaching the hamsters to type faster…", | ||
| "Untangling the internet cables…", | ||
| "Consulting the office goldfish…", | ||
| "Polishing up the response just for you…", | ||
| "Convincing the AI to stop overthinking…", | ||
| ], | ||
| ) | ||
|
|
||
| # Loop over OpenAI response stream | ||
| # https://platform.openai.com/docs/api-reference/responses/create | ||
| for event in call_llm(message["text"]): | ||
| if event.type == "response.output_text.delta": | ||
| streamer.append(markdown_text=f"{event.delta}") | ||
| else: | ||
| continue | ||
|
|
||
| feedback_block = create_feedback_block() | ||
| streamer.stop( | ||
| blocks=feedback_block, | ||
| ) | ||
|
|
||
| # The second example shows detailed thinking steps similar to tool calls | ||
|
zimeg marked this conversation as resolved.
Outdated
|
||
| else: | ||
| streamer.append( | ||
| chunks=[ | ||
| MarkdownTextChunk( | ||
| text="Hello.\nI have received the task. ", | ||
| ), | ||
| MarkdownTextChunk( | ||
| text="This task appears manageable.\nThat is good.", | ||
| ), | ||
| TaskUpdateChunk( | ||
| id="001", | ||
| title="Understanding the task...", | ||
| status="in_progress", | ||
| details="- Identifying the goal\n- Identifying constraints", | ||
| ), | ||
| TaskUpdateChunk( | ||
| id="002", | ||
| title="Performing acrobatics...", | ||
| status="pending", | ||
| ), | ||
| ], | ||
| ) | ||
| time.sleep(4) | ||
|
Comment on lines
+53
to
+74
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. praise: Love this example. It's dead simple and easy to grok. It's ripe to be altered, experimented, and hacked on for folks playing with the sample. 🙌🏻 |
||
|
|
||
| streamer.append( | ||
| chunks=[ | ||
| TaskUpdateChunk( | ||
| id="001", | ||
| title="Understanding the task...", | ||
| status="complete", | ||
| details="\n- Pretending this was obvious", | ||
| output="We'll continue to ramble now", | ||
| ), | ||
| TaskUpdateChunk( | ||
| id="002", | ||
| title="Performing acrobatics...", | ||
| status="in_progress", | ||
| ), | ||
| ], | ||
| ) | ||
| time.sleep(4) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Nice! I've been experimenting with something similar on my own, and I like this idea. Featuring what the agent is doing (tool calls or thinking steps) is great. |
||
|
|
||
| feedback_block = create_feedback_block() | ||
| streamer.stop(blocks=feedback_block) | ||
| streamer.stop( | ||
| chunks=[ | ||
| TaskUpdateChunk( | ||
| id="002", | ||
| title="Performing acrobatics...", | ||
| status="complete", | ||
| details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too", | ||
| ), | ||
| MarkdownTextChunk( | ||
| text="The crowd appears to be astounded and applauds :popcorn:"
|
zimeg marked this conversation as resolved.
Outdated
|
||
| ), | ||
| ], | ||
| ) | ||
|
|
||
| except Exception as e: | ||
| logger.exception(f"Failed to handle a user message event: {e}") | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4,6 +4,7 @@ | |
| from slack_sdk import WebClient | ||
|
|
||
| from ai.llm_caller import call_llm | ||
|
|
||
| from ..views.feedback_block import create_feedback_block | ||
|
|
||
|
|
||
|
|
@@ -38,7 +39,7 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say: | |
| ], | ||
| ) | ||
|
|
||
| returned_message = call_llm([{"role": "user", "content": text}]) | ||
| returned_message = call_llm(text) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. ⭐ nice |
||
|
|
||
| streamer = client.chat_stream( | ||
| channel=channel_id, | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.