diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6d48bd87d..55006cf15 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
flask \
- "praisonai>=2.2.24" \
+ "praisonai>=2.2.25" \
"praisonai[api]" \
gunicorn \
markdown
diff --git a/docker/Dockerfile.chat b/docker/Dockerfile.chat
index af908a230..4e604d907 100644
--- a/docker/Dockerfile.chat
+++ b/docker/Dockerfile.chat
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
praisonai_tools \
- "praisonai>=2.2.24" \
+ "praisonai>=2.2.25" \
"praisonai[chat]" \
"embedchain[github,youtube]"
diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
index a08ce7d4c..c34486ae6 100644
--- a/docker/Dockerfile.dev
+++ b/docker/Dockerfile.dev
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
praisonai_tools \
- "praisonai>=2.2.24" \
+ "praisonai>=2.2.25" \
"praisonai[ui]" \
"praisonai[chat]" \
"praisonai[realtime]" \
diff --git a/docker/Dockerfile.ui b/docker/Dockerfile.ui
index 00874a219..b36e047a6 100644
--- a/docker/Dockerfile.ui
+++ b/docker/Dockerfile.ui
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
# Install Python packages (using latest versions)
RUN pip install --no-cache-dir \
praisonai_tools \
- "praisonai>=2.2.24" \
+ "praisonai>=2.2.25" \
"praisonai[ui]" \
"praisonai[crewai]"
diff --git a/docker/README.md b/docker/README.md
index 0cfe033ee..bd6f62d36 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -121,7 +121,7 @@ healthcheck:
## š¦ Package Versions
All Docker images use consistent, up-to-date versions:
-- PraisonAI: `>=2.2.24`
+- PraisonAI: `>=2.2.25`
- PraisonAI Agents: `>=0.0.92`
- Python: `3.11-slim`
@@ -218,7 +218,7 @@ docker-compose up -d
### Version Pinning
To use specific versions, update the Dockerfile:
```dockerfile
-RUN pip install "praisonai==2.2.24" "praisonaiagents==0.0.92"
+RUN pip install "praisonai==2.2.25" "praisonaiagents==0.0.92"
```
## š Production Deployment
diff --git a/docs/api/praisonai/deploy.html b/docs/api/praisonai/deploy.html
index 6219474c8..e10be8698 100644
--- a/docs/api/praisonai/deploy.html
+++ b/docs/api/praisonai/deploy.html
@@ -110,7 +110,7 @@
Raises
file.write("FROM python:3.11-slim\n")
file.write("WORKDIR /app\n")
file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==2.2.24 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==2.2.25 gunicorn markdown\n")
file.write("EXPOSE 8080\n")
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
diff --git a/docs/developers/local-development.mdx b/docs/developers/local-development.mdx
index ad9fb5c64..20169e950 100644
--- a/docs/developers/local-development.mdx
+++ b/docs/developers/local-development.mdx
@@ -27,7 +27,7 @@ WORKDIR /app
COPY . .
-RUN pip install flask praisonai==2.2.24 watchdog
+RUN pip install flask praisonai==2.2.25 watchdog
EXPOSE 5555
diff --git a/docs/ui/chat.mdx b/docs/ui/chat.mdx
index f12e854f0..996b6bde6 100644
--- a/docs/ui/chat.mdx
+++ b/docs/ui/chat.mdx
@@ -155,7 +155,7 @@ To facilitate local development with live reload, you can use Docker. Follow the
COPY . .
- RUN pip install flask praisonai==2.2.24 watchdog
+ RUN pip install flask praisonai==2.2.25 watchdog
EXPOSE 5555
diff --git a/docs/ui/code.mdx b/docs/ui/code.mdx
index 3d373ba49..61ae4004b 100644
--- a/docs/ui/code.mdx
+++ b/docs/ui/code.mdx
@@ -208,7 +208,7 @@ To facilitate local development with live reload, you can use Docker. Follow the
COPY . .
- RUN pip install flask praisonai==2.2.24 watchdog
+ RUN pip install flask praisonai==2.2.25 watchdog
EXPOSE 5555
diff --git a/src/praisonai-agents/pyproject.toml b/src/praisonai-agents/pyproject.toml
index e68c6d145..1cecce340 100644
--- a/src/praisonai-agents/pyproject.toml
+++ b/src/praisonai-agents/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "praisonaiagents"
-version = "0.0.95"
+version = "0.0.96"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
requires-python = ">=3.10"
authors = [
diff --git a/src/praisonai-agents/uv.lock b/src/praisonai-agents/uv.lock
index 57caad367..732692a7f 100644
--- a/src/praisonai-agents/uv.lock
+++ b/src/praisonai-agents/uv.lock
@@ -2233,7 +2233,7 @@ wheels = [
[[package]]
name = "praisonaiagents"
-version = "0.0.95"
+version = "0.0.96"
source = { editable = "." }
dependencies = [
{ name = "mcp" },
diff --git a/src/praisonai/praisonai.rb b/src/praisonai/praisonai.rb
index a5c5f9273..04fce8802 100644
--- a/src/praisonai/praisonai.rb
+++ b/src/praisonai/praisonai.rb
@@ -3,8 +3,8 @@ class Praisonai < Formula
desc "AI tools for various AI applications"
homepage "https://github.com/MervinPraison/PraisonAI"
- url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.24.tar.gz"
- sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.24.tar.gz | shasum -a 256`.split.first
+ url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.25.tar.gz"
+ sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v2.2.25.tar.gz | shasum -a 256`.split.first
license "MIT"
depends_on "python@3.11"
diff --git a/src/praisonai/praisonai/deploy.py b/src/praisonai/praisonai/deploy.py
index 7dc53ed1d..81d842fda 100644
--- a/src/praisonai/praisonai/deploy.py
+++ b/src/praisonai/praisonai/deploy.py
@@ -56,7 +56,7 @@ def create_dockerfile(self):
file.write("FROM python:3.11-slim\n")
file.write("WORKDIR /app\n")
file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==2.2.24 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==2.2.25 gunicorn markdown\n")
file.write("EXPOSE 8080\n")
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
diff --git a/src/praisonai/praisonai/ui/chat.py b/src/praisonai/praisonai/ui/chat.py
index fc5ebd77a..539e15615 100644
--- a/src/praisonai/praisonai/ui/chat.py
+++ b/src/praisonai/praisonai/ui/chat.py
@@ -12,7 +12,7 @@
from dotenv import load_dotenv
from PIL import Image
from tavily import TavilyClient
-from crawl4ai import AsyncAsyncWebCrawler
+from crawl4ai import AsyncWebCrawler
# Local application/library imports
import chainlit as cl
diff --git a/src/praisonai/praisonai/ui/code.py b/src/praisonai/praisonai/ui/code.py
index 402ca91b0..0a6fc5656 100644
--- a/src/praisonai/praisonai/ui/code.py
+++ b/src/praisonai/praisonai/ui/code.py
@@ -12,7 +12,7 @@
from PIL import Image
from context import ContextGatherer
from tavily import TavilyClient
-from crawl4ai import AsyncAsyncWebCrawler
+from crawl4ai import AsyncWebCrawler
# Local application/library imports
import chainlit as cl
diff --git a/src/praisonai/praisonai/ui/realtime.py b/src/praisonai/praisonai/ui/realtime.py
index 38d740aa4..83503f8e5 100644
--- a/src/praisonai/praisonai/ui/realtime.py
+++ b/src/praisonai/praisonai/ui/realtime.py
@@ -229,7 +229,7 @@ def import_tools_from_file(file_path):
@cl.on_chat_start
async def start():
initialize_db()
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview")
+ model_name = os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview-2024-12-17")
cl.user_session.set("model_name", model_name)
cl.user_session.set("message_history", []) # Initialize message history
logger.debug(f"Model name: {model_name}")
@@ -238,7 +238,7 @@ async def start():
# TextInput(
# id="model_name",
# label="Enter the Model Name",
- # placeholder="e.g., gpt-4o-mini-realtime-preview",
+ # placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
# initial=model_name
# )
# ]
@@ -382,7 +382,8 @@ async def on_audio_start():
openai_realtime = cl.user_session.get("openai_realtime")
if not openai_realtime.is_connected():
- await openai_realtime.connect()
+ model_name = cl.user_session.get("model_name", "gpt-4o-mini-realtime-preview-2024-12-17")
+ await openai_realtime.connect(model_name)
logger.info("Connected to OpenAI realtime")
return True
@@ -394,11 +395,22 @@ async def on_audio_start():
@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.InputAudioChunk):
- openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
+ openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
+
+ if not openai_realtime:
+ logger.debug("No realtime client available")
+ return
+
if openai_realtime.is_connected():
- await openai_realtime.append_input_audio(chunk.data)
+ try:
+ success = await openai_realtime.append_input_audio(chunk.data)
+ if not success:
+ logger.debug("Failed to append audio data - connection may be lost")
+ except Exception as e:
+ logger.debug(f"Error processing audio chunk: {e}")
+ # Optionally try to reconnect here if needed
else:
- logger.info("RealtimeClient is not connected")
+ logger.debug("RealtimeClient is not connected - audio chunk ignored")
@cl.on_audio_end
@cl.on_chat_end
@@ -423,14 +435,14 @@ def auth_callback(username: str, password: str):
@cl.on_chat_resume
async def on_chat_resume(thread: ThreadDict):
logger.info(f"Resuming chat: {thread['id']}")
- model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview"
+ model_name = os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview-2024-12-17"
logger.debug(f"Model name: {model_name}")
settings = cl.ChatSettings(
[
TextInput(
id="model_name",
label="Enter the Model Name",
- placeholder="e.g., gpt-4o-mini-realtime-preview",
+ placeholder="e.g., gpt-4o-mini-realtime-preview-2024-12-17",
initial=model_name
)
]
diff --git a/src/praisonai/praisonai/ui/realtimeclient/__init__.py b/src/praisonai/praisonai/ui/realtimeclient/__init__.py
index e6931c310..b12027f4c 100644
--- a/src/praisonai/praisonai/ui/realtimeclient/__init__.py
+++ b/src/praisonai/praisonai/ui/realtimeclient/__init__.py
@@ -6,6 +6,7 @@
import numpy as np
import json
import websockets
+from websockets.exceptions import ConnectionClosed
from datetime import datetime
from collections import defaultdict
import base64
@@ -97,29 +98,66 @@ def __init__(self, url=None, api_key=None):
self.ws = None
def is_connected(self):
- return self.ws is not None
+ if self.ws is None:
+ return False
+ # Some websockets versions don't have a closed attribute
+ try:
+ return not self.ws.closed
+ except AttributeError:
+ # Fallback: check if websocket is still alive by checking state
+ try:
+ return hasattr(self.ws, 'state') and self.ws.state.name == 'OPEN'
+            except Exception:
+ # Last fallback: assume connected if ws exists
+ return True
def log(self, *args):
logger.debug(f"[Websocket/{datetime.utcnow().isoformat()}]", *args)
- async def connect(self, model='gpt-4o-realtime-preview-2024-10-01'):
+ async def connect(self, model='gpt-4o-mini-realtime-preview-2024-12-17'):
if self.is_connected():
raise Exception("Already connected")
- self.ws = await websockets.connect(f"{self.url}?model={model}", extra_headers={
+
+ headers = {
'Authorization': f'Bearer {self.api_key}',
'OpenAI-Beta': 'realtime=v1'
- })
+ }
+
+ # Try different header parameter names for compatibility
+ try:
+ self.ws = await websockets.connect(f"{self.url}?model={model}", additional_headers=headers)
+ except TypeError:
+ # Fallback to older websockets versions
+ try:
+ self.ws = await websockets.connect(f"{self.url}?model={model}", extra_headers=headers)
+ except TypeError:
+ # Last fallback - some versions might not support headers parameter
+ raise Exception("Websockets library version incompatible. Please update websockets to version 11.0 or higher.")
+
self.log(f"Connected to {self.url}")
asyncio.create_task(self._receive_messages())
async def _receive_messages(self):
- async for message in self.ws:
- event = json.loads(message)
- if event['type'] == "error":
- logger.error("ERROR", event)
- self.log("received:", event)
- self.dispatch(f"server.{event['type']}", event)
- self.dispatch("server.*", event)
+ try:
+ async for message in self.ws:
+ event = json.loads(message)
+ if event['type'] == "error":
+ logger.error(f"OpenAI Realtime API Error: {event}")
+ self.log("received:", event)
+ self.dispatch(f"server.{event['type']}", event)
+ self.dispatch("server.*", event)
+ except ConnectionClosed as e:
+            logger.info(f"WebSocket connection closed: {e}")
+ # Mark connection as closed
+ self.ws = None
+ # Dispatch disconnection event
+ self.dispatch("disconnected", {"reason": str(e)})
+ except Exception as e:
+ logger.warning(f"WebSocket receive loop ended: {e}")
+ # Mark connection as closed
+ self.ws = None
+ # Dispatch disconnection event
+ self.dispatch("disconnected", {"reason": str(e)})
async def send(self, event_name, data=None):
if not self.is_connected():
@@ -135,16 +173,33 @@ async def send(self, event_name, data=None):
self.dispatch(f"client.{event_name}", event)
self.dispatch("client.*", event)
self.log("sent:", event)
- await self.ws.send(json.dumps(event))
+
+ try:
+ await self.ws.send(json.dumps(event))
+ except ConnectionClosed as e:
+ logger.info(f"WebSocket connection closed during send: {e}")
+ # Mark connection as closed if send fails
+ self.ws = None
+ raise Exception(f"WebSocket connection lost: {e}")
+ except Exception as e:
+ logger.error(f"Failed to send WebSocket message: {e}")
+ # Mark connection as closed if send fails
+ self.ws = None
+ raise Exception(f"WebSocket connection lost: {e}")
def _generate_id(self, prefix):
return f"{prefix}{int(datetime.utcnow().timestamp() * 1000)}"
async def disconnect(self):
if self.ws:
- await self.ws.close()
- self.ws = None
- self.log(f"Disconnected from {self.url}")
+ try:
+ await self.ws.close()
+ logger.info(f"Disconnected from {self.url}")
+ except Exception as e:
+ logger.warning(f"Error during WebSocket close: {e}")
+ finally:
+ self.ws = None
+            self.log("WebSocket connection cleaned up")
class RealtimeConversation:
default_frequency = config.features.audio.sample_rate
@@ -341,8 +396,7 @@ def _process_audio_delta(self, event):
return None, None
array_buffer = base64_to_array_buffer(delta)
append_values = array_buffer.tobytes()
- # TODO: make it work
- # item['formatted']['audio'] = merge_int16_arrays(item['formatted']['audio'], append_values)
+ item['formatted']['audio'].append(append_values)
return item, {'audio': append_values}
def _process_text_delta(self, event):
@@ -381,7 +435,6 @@ def __init__(self, url=None, api_key=None):
"tools": [],
"tool_choice": "auto",
"temperature": 0.8,
- "max_response_output_tokens": 4096,
}
self.session_config = {}
self.transcription_models = [{"model": "whisper-1"}]
@@ -431,8 +484,13 @@ def _log_event(self, event):
self.dispatch("realtime.event", realtime_event)
def _on_session_created(self, event):
- print(f"Session created: {event}")
- logger.debug(f"Session created: {event}")
+ try:
+ session_id = event.get('session', {}).get('id', 'unknown')
+ model = event.get('session', {}).get('model', 'unknown')
+ logger.info(f"OpenAI Realtime session created - ID: {session_id}, Model: {model}")
+ except Exception as e:
+ logger.warning(f"Error processing session created event: {e}")
+ logger.debug(f"Session event details: {event}")
self.session_created = True
def _process_event(self, event, *args):
@@ -497,10 +555,15 @@ def reset(self):
self._add_api_event_handlers()
return True
- async def connect(self):
+ async def connect(self, model=None):
if self.is_connected():
raise Exception("Already connected, use .disconnect() first")
- await self.realtime.connect()
+
+ # Use provided model or default
+ if model is None:
+ model = 'gpt-4o-mini-realtime-preview-2024-12-17'
+
+ await self.realtime.connect(model)
await self.update_session()
return True
@@ -516,6 +579,7 @@ async def disconnect(self):
self.conversation.clear()
if self.realtime.is_connected():
await self.realtime.disconnect()
+ logger.info("RealtimeClient disconnected")
def get_turn_detection_type(self):
return self.session_config.get("turn_detection", {}).get("type")
@@ -579,11 +643,22 @@ async def send_user_message_content(self, content=[]):
return True
async def append_input_audio(self, array_buffer):
+ if not self.is_connected():
+ logger.warning("Cannot append audio: RealtimeClient is not connected")
+ return False
+
if len(array_buffer) > 0:
- await self.realtime.send("input_audio_buffer.append", {
- "audio": array_buffer_to_base64(np.array(array_buffer)),
- })
- self.input_audio_buffer.extend(array_buffer)
+ try:
+ await self.realtime.send("input_audio_buffer.append", {
+ "audio": array_buffer_to_base64(np.array(array_buffer)),
+ })
+ self.input_audio_buffer.extend(array_buffer)
+ except Exception as e:
+ logger.error(f"Failed to append input audio: {e}")
+ # Connection might be lost, mark as disconnected
+ if "connection" in str(e).lower() or "closed" in str(e).lower():
+ logger.warning("WebSocket connection appears to be lost. Audio input will be queued until reconnection.")
+ return False
return True
async def create_response(self):
@@ -650,4 +725,17 @@ async def _send_chainlit_message(self, item):
logger.debug(f"Unhandled item type:\n{json.dumps(item, indent=2)}")
# Additional debug logging
- logger.debug(f"Processed Chainlit message for item: {item.get('id', 'unknown')}")
\ No newline at end of file
+ logger.debug(f"Processed Chainlit message for item: {item.get('id', 'unknown')}")
+
+ async def ensure_connected(self):
+ """Check connection health and attempt reconnection if needed"""
+ if not self.is_connected():
+ try:
+ logger.info("Attempting to reconnect to OpenAI Realtime API...")
+ model = 'gpt-4o-mini-realtime-preview-2024-12-17'
+ await self.connect(model)
+ return True
+ except Exception as e:
+ logger.error(f"Failed to reconnect: {e}")
+ return False
+ return True
\ No newline at end of file
diff --git a/src/praisonai/praisonai/ui/realtimeclient/realtimedocs.txt b/src/praisonai/praisonai/ui/realtimeclient/realtimedocs.txt
deleted file mode 100644
index 982a6914b..000000000
--- a/src/praisonai/praisonai/ui/realtimeclient/realtimedocs.txt
+++ /dev/null
@@ -1,1484 +0,0 @@
-Client events
-Beta
-These are events that the OpenAI Realtime WebSocket server will accept from the client.
-
-Learn more about the Realtime API.
-
-session.update
-Beta
-Send this event to update the sessionās default configuration.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "session.update".
-
-session
-object
-
-Session configuration to update.
-
-
-Show properties
-session.update
-{
- "event_id": "event_123",
- "type": "session.update",
- "session": {
- "modalities": ["text", "audio"],
- "instructions": "Your knowledge cutoff is 2023-10. You are a helpful assistant.",
- "voice": "alloy",
- "input_audio_format": "pcm16",
- "output_audio_format": "pcm16",
- "input_audio_transcription": {
- "enabled": true,
- "model": "whisper-1"
- },
- "turn_detection": {
- "type": "server_vad",
- "threshold": 0.5,
- "prefix_padding_ms": 300,
- "silence_duration_ms": 200
- },
- "tools": [
- {
- "type": "function",
- "name": "get_weather",
- "description": "Get the current weather for a location.",
- "parameters": {
- "type": "object",
- "properties": {
- "location": { "type": "string" }
- },
- "required": ["location"]
- }
- }
- ],
- "tool_choice": "auto",
- "temperature": 0.8,
- "max_output_tokens": null
- }
-}
-input_audio_buffer.append
-Beta
-Send this event to append audio bytes to the input audio buffer.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.append".
-
-audio
-string
-
-Base64-encoded audio bytes.
-
-input_audio_buffer.append
-{
- "event_id": "event_456",
- "type": "input_audio_buffer.append",
- "audio": "Base64EncodedAudioData"
-}
-input_audio_buffer.commit
-Beta
-Send this event to commit audio bytes to a user message.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.commit".
-
-input_audio_buffer.commit
-{
- "event_id": "event_789",
- "type": "input_audio_buffer.commit"
-}
-input_audio_buffer.clear
-Beta
-Send this event to clear the audio bytes in the buffer.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.clear".
-
-input_audio_buffer.clear
-{
- "event_id": "event_012",
- "type": "input_audio_buffer.clear"
-}
-conversation.item.create
-Beta
-Send this event when adding an item to the conversation.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "conversation.item.create".
-
-previous_item_id
-string
-
-The ID of the preceding item after which the new item will be inserted.
-
-item
-object
-
-The item to add to the conversation.
-
-
-Show properties
-conversation.item.create
-{
- "event_id": "event_345",
- "type": "conversation.item.create",
- "previous_item_id": null,
- "item": {
- "id": "msg_001",
- "type": "message",
- "status": "completed",
- "role": "user",
- "content": [
- {
- "type": "input_text",
- "text": "Hello, how are you?"
- }
- ]
- }
-}
-conversation.item.truncate
-Beta
-Send this event when you want to truncate a previous assistant messageās audio.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "conversation.item.truncate".
-
-item_id
-string
-
-The ID of the assistant message item to truncate.
-
-content_index
-integer
-
-The index of the content part to truncate.
-
-audio_end_ms
-integer
-
-Inclusive duration up to which audio is truncated, in milliseconds.
-
-conversation.item.truncate
-{
- "event_id": "event_678",
- "type": "conversation.item.truncate",
- "item_id": "msg_002",
- "content_index": 0,
- "audio_end_ms": 1500
-}
-conversation.item.delete
-Beta
-Send this event when you want to remove any item from the conversation history.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "conversation.item.delete".
-
-item_id
-string
-
-The ID of the item to delete.
-
-conversation.item.delete
-{
- "event_id": "event_901",
- "type": "conversation.item.delete",
- "item_id": "msg_003"
-}
-response.create
-Beta
-Send this event to trigger a response generation.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "response.create".
-
-response
-object
-
-Configuration for the response.
-
-
-Show properties
-response.create
-{
- "event_id": "event_234",
- "type": "response.create",
- "response": {
- "modalities": ["text", "audio"],
- "instructions": "Please assist the user.",
- "voice": "alloy",
- "output_audio_format": "pcm16",
- "tools": [
- {
- "type": "function",
- "name": "calculate_sum",
- "description": "Calculates the sum of two numbers.",
- "parameters": {
- "type": "object",
- "properties": {
- "a": { "type": "number" },
- "b": { "type": "number" }
- },
- "required": ["a", "b"]
- }
- }
- ],
- "tool_choice": "auto",
- "temperature": 0.7,
- "max_output_tokens": 150
- }
-}
-response.cancel
-Beta
-Send this event to cancel an in-progress response.
-
-event_id
-string
-
-Optional client-generated ID used to identify this event.
-
-type
-string
-
-The event type, must be "response.cancel".
-
-response.cancel
-{
- "event_id": "event_567",
- "type": "response.cancel"
-}
-Server events
-Beta
-These are events emitted from the OpenAI Realtime WebSocket server to the client.
-
-Learn more about the Realtime API.
-
-error
-Beta
-Returned when an error occurs.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "error".
-
-error
-object
-
-Details of the error.
-
-
-Show properties
-error
-{
- "event_id": "event_890",
- "type": "error",
- "error": {
- "type": "invalid_request_error",
- "code": "invalid_event",
- "message": "The 'type' field is missing.",
- "param": null,
- "event_id": "event_567"
- }
-}
-session.created
-Beta
-Returned when a session is created. Emitted automatically when a new connection is established.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "session.created".
-
-session
-object
-
-The session resource.
-
-
-Show properties
-session.created
-{
- "event_id": "event_1234",
- "type": "session.created",
- "session": {
- "id": "sess_001",
- "object": "realtime.session",
- "model": "gpt-4o-realtime-preview-2024-10-01",
- "modalities": ["text", "audio"],
- "instructions": "",
- "voice": "alloy",
- "input_audio_format": "pcm16",
- "output_audio_format": "pcm16",
- "input_audio_transcription": null,
- "turn_detection": {
- "type": "server_vad",
- "threshold": 0.5,
- "prefix_padding_ms": 300,
- "silence_duration_ms": 200
- },
- "tools": [],
- "tool_choice": "auto",
- "temperature": 0.8,
- "max_output_tokens": null
- }
-}
-session.updated
-Beta
-Returned when a session is updated.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "session.updated".
-
-session
-object
-
-The updated session resource.
-
-
-Show properties
-session.updated
-{
- "event_id": "event_5678",
- "type": "session.updated",
- "session": {
- "id": "sess_001",
- "object": "realtime.session",
- "model": "gpt-4o-realtime-preview-2024-10-01",
- "modalities": ["text"],
- "instructions": "New instructions",
- "voice": "alloy",
- "input_audio_format": "pcm16",
- "output_audio_format": "pcm16",
- "input_audio_transcription": {
- "enabled": true,
- "model": "whisper-1"
- },
- "turn_detection": {
- "type": "none"
- },
- "tools": [],
- "tool_choice": "none",
- "temperature": 0.7,
- "max_output_tokens": 200
- }
-}
-conversation.created
-Beta
-Returned when a conversation is created. Emitted right after session creation.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "conversation.created".
-
-conversation
-object
-
-The conversation resource.
-
-
-Show properties
-conversation.created
-{
- "event_id": "event_9101",
- "type": "conversation.created",
- "conversation": {
- "id": "conv_001",
- "object": "realtime.conversation"
- }
-}
-input_audio_buffer.committed
-Beta
-Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.committed".
-
-previous_item_id
-string
-
-The ID of the preceding item after which the new item will be inserted.
-
-item_id
-string
-
-The ID of the user message item that will be created.
-
-input_audio_buffer.committed
-{
- "event_id": "event_1121",
- "type": "input_audio_buffer.committed",
- "previous_item_id": "msg_001",
- "item_id": "msg_002"
-}
-input_audio_buffer.cleared
-Beta
-Returned when the input audio buffer is cleared by the client.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.cleared".
-
-input_audio_buffer.cleared
-{
- "event_id": "event_1314",
- "type": "input_audio_buffer.cleared"
-}
-input_audio_buffer.speech_started
-Beta
-Returned in server turn detection mode when speech is detected.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.speech_started".
-
-audio_start_ms
-integer
-
-Milliseconds since the session started when speech was detected.
-
-item_id
-string
-
-The ID of the user message item that will be created when speech stops.
-
-input_audio_buffer.speech_started
-{
- "event_id": "event_1516",
- "type": "input_audio_buffer.speech_started",
- "audio_start_ms": 1000,
- "item_id": "msg_003"
-}
-input_audio_buffer.speech_stopped
-Beta
-Returned in server turn detection mode when speech stops.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "input_audio_buffer.speech_stopped".
-
-audio_end_ms
-integer
-
-Milliseconds since the session started when speech stopped.
-
-item_id
-string
-
-The ID of the user message item that will be created.
-
-input_audio_buffer.speech_stopped
-{
- "event_id": "event_1718",
- "type": "input_audio_buffer.speech_stopped",
- "audio_end_ms": 2000,
- "item_id": "msg_003"
-}
-conversation.item.created
-Beta
-Returned when a conversation item is created.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "conversation.item.created".
-
-previous_item_id
-string
-
-The ID of the preceding item.
-
-item
-object
-
-The item that was created.
-
-
-Show properties
-conversation.item.created
-{
- "event_id": "event_1920",
- "type": "conversation.item.created",
- "previous_item_id": "msg_002",
- "item": {
- "id": "msg_003",
- "object": "realtime.item",
- "type": "message",
- "status": "completed",
- "role": "user",
- "content": [
- {
- "type": "input_audio",
- "transcript": null
- }
- ]
- }
-}
-conversation.item.input_audio_transcription.completed
-Beta
-Returned when input audio transcription is enabled and a transcription succeeds.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "conversation.item.input_audio_transcription.completed".
-
-item_id
-string
-
-The ID of the user message item.
-
-content_index
-integer
-
-The index of the content part containing the audio.
-
-transcript
-string
-
-The transcribed text.
-
-conversation.item.input_audio_transcription.completed
-{
- "event_id": "event_2122",
- "type": "conversation.item.input_audio_transcription.completed",
- "item_id": "msg_003",
- "content_index": 0,
- "transcript": "Hello, how are you?"
-}
-conversation.item.input_audio_transcription.failed
-Beta
-Returned when input audio transcription is configured, and a transcription request for a user message failed.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "conversation.item.input_audio_transcription.failed".
-
-item_id
-string
-
-The ID of the user message item.
-
-content_index
-integer
-
-The index of the content part containing the audio.
-
-error
-object
-
-Details of the transcription error.
-
-
-Show properties
-conversation.item.input_audio_transcription.failed
-{
- "event_id": "event_2324",
- "type": "conversation.item.input_audio_transcription.failed",
- "item_id": "msg_003",
- "content_index": 0,
- "error": {
- "type": "transcription_error",
- "code": "audio_unintelligible",
- "message": "The audio could not be transcribed.",
- "param": null
- }
-}
-conversation.item.truncated
-Beta
-Returned when an earlier assistant audio message item is truncated by the client.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "conversation.item.truncated".
-
-item_id
-string
-
-The ID of the assistant message item that was truncated.
-
-content_index
-integer
-
-The index of the content part that was truncated.
-
-audio_end_ms
-integer
-
-The duration up to which the audio was truncated, in milliseconds.
-
-conversation.item.truncated
-{
- "event_id": "event_2526",
- "type": "conversation.item.truncated",
- "item_id": "msg_004",
- "content_index": 0,
- "audio_end_ms": 1500
-}
-conversation.item.deleted
-Beta
-Returned when an item in the conversation is deleted.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "conversation.item.deleted".
-
-item_id
-string
-
-The ID of the item that was deleted.
-
-conversation.item.deleted
-{
- "event_id": "event_2728",
- "type": "conversation.item.deleted",
- "item_id": "msg_005"
-}
-response.created
-Beta
-Returned when a new Response is created. The first event of response creation, where the response is in an initial state of "in_progress".
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.created".
-
-response
-object
-
-The response resource.
-
-
-Show properties
-response.created
-{
- "event_id": "event_2930",
- "type": "response.created",
- "response": {
- "id": "resp_001",
- "object": "realtime.response",
- "status": "in_progress",
- "status_details": null,
- "output": [],
- "usage": null
- }
-}
-response.done
-Beta
-Returned when a Response is done streaming. Always emitted, no matter the final state.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.done".
-
-response
-object
-
-The response resource.
-
-
-Show properties
-response.done
-{
- "event_id": "event_3132",
- "type": "response.done",
- "response": {
- "id": "resp_001",
- "object": "realtime.response",
- "status": "completed",
- "status_details": null,
- "output": [
- {
- "id": "msg_006",
- "object": "realtime.item",
- "type": "message",
- "status": "completed",
- "role": "assistant",
- "content": [
- {
- "type": "text",
- "text": "Sure, how can I assist you today?"
- }
- ]
- }
- ],
- "usage": {
- "total_tokens": 50,
- "input_tokens": 20,
- "output_tokens": 30
- }
- }
-}
-response.output_item.added
-Beta
-Returned when a new Item is created during response generation.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.output_item.added".
-
-response_id
-string
-
-The ID of the response to which the item belongs.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-item
-object
-
-The item that was added.
-
-
-Show properties
-response.output_item.added
-{
- "event_id": "event_3334",
- "type": "response.output_item.added",
- "response_id": "resp_001",
- "output_index": 0,
- "item": {
- "id": "msg_007",
- "object": "realtime.item",
- "type": "message",
- "status": "in_progress",
- "role": "assistant",
- "content": []
- }
-}
-response.output_item.done
-Beta
-Returned when an Item is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.output_item.done".
-
-response_id
-string
-
-The ID of the response to which the item belongs.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-item
-object
-
-The completed item.
-
-
-Show properties
-response.output_item.done
-{
- "event_id": "event_3536",
- "type": "response.output_item.done",
- "response_id": "resp_001",
- "output_index": 0,
- "item": {
- "id": "msg_007",
- "object": "realtime.item",
- "type": "message",
- "status": "completed",
- "role": "assistant",
- "content": [
- {
- "type": "text",
- "text": "Sure, I can help with that."
- }
- ]
- }
-}
-response.content_part.added
-Beta
-Returned when a new content part is added to an assistant message item during response generation.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.content_part.added".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item to which the content part was added.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-part
-object
-
-The content part that was added.
-
-
-Show properties
-response.content_part.added
-{
- "event_id": "event_3738",
- "type": "response.content_part.added",
- "response_id": "resp_001",
- "item_id": "msg_007",
- "output_index": 0,
- "content_index": 0,
- "part": {
- "type": "text",
- "text": ""
- }
-}
-response.content_part.done
-Beta
-Returned when a content part is done streaming in an assistant message item. Also emitted when a Response is interrupted, incomplete, or cancelled.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.content_part.done".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-part
-object
-
-The content part that is done.
-
-
-Show properties
-response.content_part.done
-{
- "event_id": "event_3940",
- "type": "response.content_part.done",
- "response_id": "resp_001",
- "item_id": "msg_007",
- "output_index": 0,
- "content_index": 0,
- "part": {
- "type": "text",
- "text": "Sure, I can help with that."
- }
-}
-response.text.delta
-Beta
-Returned when the text value of a "text" content part is updated.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.text.delta".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-delta
-string
-
-The text delta.
-
-response.text.delta
-{
- "event_id": "event_4142",
- "type": "response.text.delta",
- "response_id": "resp_001",
- "item_id": "msg_007",
- "output_index": 0,
- "content_index": 0,
- "delta": "Sure, I can h"
-}
-response.text.done
-Beta
-Returned when the text value of a "text" content part is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.text.done".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-text
-string
-
-The final text content.
-
-response.text.done
-{
- "event_id": "event_4344",
- "type": "response.text.done",
- "response_id": "resp_001",
- "item_id": "msg_007",
- "output_index": 0,
- "content_index": 0,
- "text": "Sure, I can help with that."
-}
-response.audio_transcript.delta
-Beta
-Returned when the model-generated transcription of audio output is updated.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.audio_transcript.delta".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-delta
-string
-
-The transcript delta.
-
-response.audio_transcript.delta
-{
- "event_id": "event_4546",
- "type": "response.audio_transcript.delta",
- "response_id": "resp_001",
- "item_id": "msg_008",
- "output_index": 0,
- "content_index": 0,
- "delta": "Hello, how can I a"
-}
-response.audio_transcript.done
-Beta
-Returned when the model-generated transcription of audio output is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.audio_transcript.done".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-transcript
-string
-
-The final transcript of the audio.
-
-response.audio_transcript.done
-{
- "event_id": "event_4748",
- "type": "response.audio_transcript.done",
- "response_id": "resp_001",
- "item_id": "msg_008",
- "output_index": 0,
- "content_index": 0,
- "transcript": "Hello, how can I assist you today?"
-}
-response.audio.delta
-Beta
-Returned when the model-generated audio is updated.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.audio.delta".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-delta
-string
-
-Base64-encoded audio data delta.
-
-response.audio.delta
-{
- "event_id": "event_4950",
- "type": "response.audio.delta",
- "response_id": "resp_001",
- "item_id": "msg_008",
- "output_index": 0,
- "content_index": 0,
- "delta": "Base64EncodedAudioDelta"
-}
-response.audio.done
-Beta
-Returned when the model-generated audio is done. Also emitted when a Response is interrupted, incomplete, or cancelled.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.audio.done".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-content_index
-integer
-
-The index of the content part in the item's content array.
-
-response.audio.done
-{
- "event_id": "event_5152",
- "type": "response.audio.done",
- "response_id": "resp_001",
- "item_id": "msg_008",
- "output_index": 0,
- "content_index": 0
-}
-response.function_call_arguments.delta
-Beta
-Returned when the model-generated function call arguments are updated.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.function_call_arguments.delta".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the function call item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-call_id
-string
-
-The ID of the function call.
-
-delta
-string
-
-The arguments delta as a JSON string.
-
-response.function_call_arguments.delta
-{
- "event_id": "event_5354",
- "type": "response.function_call_arguments.delta",
- "response_id": "resp_002",
- "item_id": "fc_001",
- "output_index": 0,
- "call_id": "call_001",
- "delta": "{\"location\": \"San\""
-}
-response.function_call_arguments.done
-Beta
-Returned when the model-generated function call arguments are done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "response.function_call_arguments.done".
-
-response_id
-string
-
-The ID of the response.
-
-item_id
-string
-
-The ID of the function call item.
-
-output_index
-integer
-
-The index of the output item in the response.
-
-call_id
-string
-
-The ID of the function call.
-
-arguments
-string
-
-The final arguments as a JSON string.
-
-response.function_call_arguments.done
-{
- "event_id": "event_5556",
- "type": "response.function_call_arguments.done",
- "response_id": "resp_002",
- "item_id": "fc_001",
- "output_index": 0,
- "call_id": "call_001",
- "arguments": "{\"location\": \"San Francisco\"}"
-}
-rate_limits.updated
-Beta
-Emitted after every "response.done" event to indicate the updated rate limits.
-
-event_id
-string
-
-The unique ID of the server event.
-
-type
-string
-
-The event type, must be "rate_limits.updated".
-
-rate_limits
-array
-
-List of rate limit information.
-
-
-Show properties
-rate_limits.updated
-{
- "event_id": "event_5758",
- "type": "rate_limits.updated",
- "rate_limits": [
- {
- "name": "requests",
- "limit": 1000,
- "remaining": 999,
- "reset_seconds": 60
- },
- {
- "name": "tokens",
- "limit": 50000,
- "remaining": 49950,
- "reset_seconds": 60
- }
- ]
-}
\ No newline at end of file
diff --git a/src/praisonai/pyproject.toml b/src/praisonai/pyproject.toml
index 02b163114..afa4046a0 100644
--- a/src/praisonai/pyproject.toml
+++ b/src/praisonai/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "PraisonAI"
-version = "2.2.24"
+version = "2.2.25"
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
readme = "README.md"
license = ""
@@ -12,7 +12,7 @@ dependencies = [
"rich>=13.7",
"markdown>=3.5",
"pyparsing>=3.0.0",
- "praisonaiagents>=0.0.95",
+ "praisonaiagents>=0.0.96",
"python-dotenv>=0.19.0",
"instructor>=1.3.3",
"PyYAML>=6.0",
@@ -95,7 +95,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.15", "crewai"]
[tool.poetry]
name = "PraisonAI"
-version = "2.2.24"
+version = "2.2.25"
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
authors = ["Mervin Praison"]
license = ""
@@ -113,7 +113,7 @@ python = ">=3.10,<3.13"
rich = ">=13.7"
markdown = ">=3.5"
pyparsing = ">=3.0.0"
-praisonaiagents = ">=0.0.95"
+praisonaiagents = ">=0.0.96"
python-dotenv = ">=0.19.0"
instructor = ">=1.3.3"
PyYAML = ">=6.0"
diff --git a/src/praisonai/uv.lock b/src/praisonai/uv.lock
index 588df2f0d..0deb6fb4d 100644
--- a/src/praisonai/uv.lock
+++ b/src/praisonai/uv.lock
@@ -3931,7 +3931,7 @@ wheels = [
[[package]]
name = "praisonai"
-version = "2.2.24"
+version = "2.2.25"
source = { editable = "." }
dependencies = [
{ name = "instructor" },
@@ -4073,7 +4073,7 @@ requires-dist = [
{ name = "plotly", marker = "extra == 'realtime'", specifier = ">=5.24.0" },
{ name = "praisonai-tools", marker = "extra == 'autogen'", specifier = ">=0.0.15" },
{ name = "praisonai-tools", marker = "extra == 'crewai'", specifier = ">=0.0.15" },
- { name = "praisonaiagents", specifier = ">=0.0.95" },
+ { name = "praisonaiagents", specifier = ">=0.0.96" },
{ name = "pyautogen", marker = "extra == 'autogen'", specifier = ">=0.2.19" },
{ name = "pydantic", marker = "extra == 'chat'", specifier = "<=2.10.1" },
{ name = "pydantic", marker = "extra == 'code'", specifier = "<=2.10.1" },
@@ -4130,7 +4130,7 @@ wheels = [
[[package]]
name = "praisonaiagents"
-version = "0.0.95"
+version = "0.0.96"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mcp" },
@@ -4138,9 +4138,9 @@ dependencies = [
{ name = "pydantic" },
{ name = "rich" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/6a/7c/c6ae58b98d652f3c986683e72883eda5a3a4c2de054c01f6674ebdb0ae26/praisonaiagents-0.0.95.tar.gz", hash = "sha256:3b71869c3c73241e7dee109232d492545ea2a397fbfd9c922d4c9cfd41d93094", size = 127328 }
+sdist = { url = "https://files.pythonhosted.org/packages/10/53/2e1facaed976a6c428b67756108d2370b5ca9249356c7c71b7d6e1a28421/praisonaiagents-0.0.96.tar.gz", hash = "sha256:96ff46fbdb54162cf721c94fb04e95eae3f0d9305fd1939bad31fa1aca7b4231", size = 127366 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/00/d2/63c8725b03d4f4bb1387594ed2cf93d1d544a6216ec7c374536ebfe6bf2a/praisonaiagents-0.0.95-py3-none-any.whl", hash = "sha256:04528314fd23240b4cb388149d8f181714753e6a3b68d429de46f66fc2c94857", size = 145811 },
+ { url = "https://files.pythonhosted.org/packages/4c/d1/89935abb42bd1c1cda1981583e4b6a2058b5c79e157a062489f581dba9b2/praisonaiagents-0.0.96-py3-none-any.whl", hash = "sha256:6d70884a7033e124510c13c80eb1e19dbad6c04d58eb8f0cfceb30fb9c91f85f", size = 145812 },
]
[[package]]