Skip to content

Commit af4838b

Browse files
open-ai-sdk-conversation-id-update
1 parent 8405305 commit af4838b

File tree

16 files changed

+726
-165
lines changed

16 files changed

+726
-165
lines changed

apps/docs/integrations/openai.mdx

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,9 @@ import { withSupermemory } from "@supermemory/tools/openai"
4444
const openai = new OpenAI()
4545

4646
// Wrap client with memory - memories auto-injected into system prompts
47-
const client = withSupermemory(openai, "user-123", {
47+
const client = withSupermemory(openai, {
48+
containerTag: "user-123",
49+
conversationId: "conversation-456",
4850
mode: "full", // "profile" | "query" | "full"
4951
addMemory: "always", // "always" | "never"
5052
})
@@ -62,21 +64,17 @@ const response = await client.chat.completions.create({
6264
### Configuration Options
6365

6466
```typescript
65-
const client = withSupermemory(openai, "user-123", {
66-
// Memory search mode
67-
mode: "full", // "profile" (user profile only), "query" (search only), "full" (both)
68-
69-
// Auto-save conversations as memories
70-
addMemory: "always", // "always" | "never"
71-
72-
// Group messages into conversations
73-
conversationId: "conv-456",
74-
75-
// Enable debug logging
76-
verbose: true,
77-
78-
// Custom API endpoint
79-
baseUrl: "https://custom.api.com"
67+
const client = withSupermemory(openai, {
68+
// Required options
69+
containerTag: "user-123", // Scopes memories to this user
70+
conversationId: "conversation-456", // Groups messages into conversations
71+
72+
// Optional settings
73+
mode: "full", // "profile" (user profile only), "query" (search only), "full" (both)
74+
addMemory: "always", // "always" | "never" - auto-save conversations as memories
75+
verbose: true, // Enable debug logging
76+
apiKey: "sm_...", // Supermemory API key (or use SUPERMEMORY_API_KEY env var)
77+
baseUrl: "https://custom.api.com" // Custom API endpoint
8078
})
8179
```
8280

@@ -91,7 +89,11 @@ const client = withSupermemory(openai, "user-123", {
9189
### Works with Responses API Too
9290

9391
```typescript
94-
const client = withSupermemory(openai, "user-123", { mode: "full" })
92+
const client = withSupermemory(openai, {
93+
containerTag: "user-123",
94+
conversationId: "conversation-456",
95+
mode: "full",
96+
})
9597

9698
// Memories injected into instructions
9799
const response = await client.responses.create({

packages/openai-sdk-python/README.md

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,8 @@ async def main():
4444
# Wrap with Supermemory middleware
4545
openai_with_memory = with_supermemory(
4646
openai,
47-
container_tag="user-123", # Unique identifier for user's memories
47+
container_tag="user-123", # Unique identifier for user's memories
48+
conversation_id="chat-session-1", # Required: groups messages into conversations
4849
options=OpenAIMiddlewareOptions(
4950
mode="full", # "profile", "query", or "full"
5051
verbose=True, # Enable logging
@@ -122,7 +123,7 @@ from supermemory_openai import with_supermemory
122123

123124
# Sync client
124125
openai = OpenAI(api_key="your-openai-api-key")
125-
openai_with_memory = with_supermemory(openai, "user-123")
126+
openai_with_memory = with_supermemory(openai, "user-123", "chat-session-1")
126127

127128
# Works the same way
128129
response = openai_with_memory.chat.completions.create(
@@ -137,12 +138,12 @@ response = openai_with_memory.chat.completions.create(
137138

138139
```python
139140
# Async context manager (recommended)
140-
async with with_supermemory(openai, "user-123") as client:
141+
async with with_supermemory(openai, "user-123", "chat-session-1") as client:
141142
response = await client.chat.completions.create(...)
142143
# Background tasks automatically waited for on exit
143144

144145
# Manual cleanup
145-
client = with_supermemory(openai, "user-123")
146+
client = with_supermemory(openai, "user-123", "chat-session-1")
146147
response = await client.chat.completions.create(...)
147148
await client.wait_for_background_tasks() # Ensure memory is saved
148149
```
@@ -160,6 +161,7 @@ Injects all static and dynamic profile memories into every request. Best for mai
160161
openai_with_memory = with_supermemory(
161162
openai,
162163
"user-123",
164+
"chat-session-1",
163165
OpenAIMiddlewareOptions(mode="profile")
164166
)
165167
```
@@ -171,6 +173,7 @@ Only searches for memories relevant to the current user message. More efficient
171173
openai_with_memory = with_supermemory(
172174
openai,
173175
"user-123",
176+
"chat-session-1",
174177
OpenAIMiddlewareOptions(mode="query")
175178
)
176179
```
@@ -182,6 +185,7 @@ Combines both profile and query modes - includes all profile memories plus relev
182185
openai_with_memory = with_supermemory(
183186
openai,
184187
"user-123",
188+
"chat-session-1",
185189
OpenAIMiddlewareOptions(mode="full")
186190
)
187191
```
@@ -206,8 +210,8 @@ from supermemory_openai import with_supermemory, OpenAIMiddlewareOptions
206210
openai_with_memory = with_supermemory(
207211
openai_client,
208212
container_tag="user-123",
213+
conversation_id="chat-session-456",
209214
options=OpenAIMiddlewareOptions(
210-
conversation_id="chat-session-456", # Group messages into conversations
211215
verbose=True, # Enable detailed logging
212216
mode="full", # Use both profile and query
213217
add_memory="always" # Auto-save conversations
@@ -292,13 +296,15 @@ Wraps an OpenAI client with automatic memory injection middleware.
292296
def with_supermemory(
293297
openai_client: Union[OpenAI, AsyncOpenAI],
294298
container_tag: str,
299+
conversation_id: str,
295300
options: Optional[OpenAIMiddlewareOptions] = None
296301
) -> Union[OpenAI, AsyncOpenAI]
297302
```
298303

299304
**Parameters:**
300305
- `openai_client`: OpenAI or AsyncOpenAI client instance
301306
- `container_tag`: Unique identifier for memory storage (e.g., user ID)
307+
- `conversation_id`: Required identifier for grouping messages into conversations
302308
- `options`: Configuration options (see `OpenAIMiddlewareOptions`)
303309

304310
#### `OpenAIMiddlewareOptions`
@@ -308,7 +314,6 @@ Configuration dataclass for middleware behavior.
308314
```python
309315
@dataclass
310316
class OpenAIMiddlewareOptions:
311-
conversation_id: Optional[str] = None # Group messages into conversations
312317
verbose: bool = False # Enable detailed logging
313318
mode: Literal["profile", "query", "full"] = "profile" # Memory injection mode
314319
add_memory: Literal["always", "never"] = "never" # Auto-save behavior
@@ -349,7 +354,7 @@ from supermemory_openai import (
349354

350355
try:
351356
# This will raise SupermemoryConfigurationError if API key is missing
352-
client = with_supermemory(openai_client, "user-123")
357+
client = with_supermemory(openai_client, "user-123", "chat-session-1")
353358

354359
response = await client.chat.completions.create(
355360
messages=[{"role": "user", "content": "Hello"}],

packages/openai-sdk-python/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
44

55
[project]
66
name = "supermemory-openai-sdk"
7-
version = "1.0.2"
7+
version = "1.1.0"
88
description = "Memory tools for OpenAI function calling with supermemory"
99
readme = "README.md"
1010
license = "MIT"

packages/openai-sdk-python/src/supermemory_openai/middleware.py

Lines changed: 22 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
class OpenAIMiddlewareOptions:
3333
"""Configuration options for OpenAI middleware."""
3434

35-
conversation_id: Optional[str] = None
3635
verbose: bool = False
3736
mode: Literal["profile", "query", "full"] = "profile"
3837
add_memory: Literal["always", "never"] = "never"
@@ -263,10 +262,12 @@ def __init__(
263262
self,
264263
openai_client: Union[OpenAI, AsyncOpenAI],
265264
container_tag: str,
265+
conversation_id: str,
266266
options: Optional[OpenAIMiddlewareOptions] = None,
267267
):
268268
self._client: Union[OpenAI, AsyncOpenAI] = openai_client
269269
self._container_tag: str = container_tag
270+
self._conversation_id: str = conversation_id
270271
self._options: OpenAIMiddlewareOptions = options or OpenAIMiddlewareOptions()
271272
self._logger: Logger = create_logger(self._options.verbose)
272273

@@ -336,12 +337,12 @@ async def _create_with_memory_async(
336337
if user_message and user_message.strip():
337338
content = (
338339
get_conversation_content(messages)
339-
if self._options.conversation_id
340+
if self._conversation_id
340341
else user_message
341342
)
342343
custom_id = (
343-
f"conversation:{self._options.conversation_id}"
344-
if self._options.conversation_id
344+
f"conversation:{self._conversation_id}"
345+
if self._conversation_id
345346
else None
346347
)
347348

@@ -399,7 +400,7 @@ def handle_task_exception(task_obj):
399400
"Starting memory search",
400401
{
401402
"container_tag": self._container_tag,
402-
"conversation_id": self._options.conversation_id,
403+
"conversation_id": self._conversation_id,
403404
"mode": self._options.mode,
404405
},
405406
)
@@ -430,12 +431,12 @@ def _create_with_memory_sync(
430431
if user_message and user_message.strip():
431432
content = (
432433
get_conversation_content(messages)
433-
if self._options.conversation_id
434+
if self._conversation_id
434435
else user_message
435436
)
436437
custom_id = (
437-
f"conversation:{self._options.conversation_id}"
438-
if self._options.conversation_id
438+
f"conversation:{self._conversation_id}"
439+
if self._conversation_id
439440
else None
440441
)
441442

@@ -483,7 +484,7 @@ def _create_with_memory_sync(
483484
"Starting memory search",
484485
{
485486
"container_tag": self._container_tag,
486-
"conversation_id": self._options.conversation_id,
487+
"conversation_id": self._conversation_id,
487488
"mode": self._options.mode,
488489
},
489490
)
@@ -615,19 +616,17 @@ def __getattr__(self, name: str) -> Any:
615616
def with_supermemory(
616617
openai_client: Union[OpenAI, AsyncOpenAI],
617618
container_tag: str,
619+
conversation_id: str,
618620
options: Optional[OpenAIMiddlewareOptions] = None,
619621
) -> Union[OpenAI, AsyncOpenAI]:
620622
"""
621623
Wraps an OpenAI client with SuperMemory middleware to automatically inject relevant memories
622624
into the system prompt based on the user's message content.
623625
624-
This middleware searches the supermemory API for relevant memories using the container tag
625-
and user message, then either appends memories to an existing system prompt or creates
626-
a new system prompt with the memories.
627-
628626
Args:
629627
openai_client: The OpenAI client to wrap with SuperMemory middleware
630628
container_tag: The container tag/identifier for memory search (e.g., user ID, project ID)
629+
conversation_id: Conversation ID to group messages into a single document
631630
options: Optional configuration options for the middleware
632631
633632
Returns:
@@ -638,19 +637,17 @@ def with_supermemory(
638637
from supermemory_openai import with_supermemory, OpenAIMiddlewareOptions
639638
from openai import OpenAI
640639
641-
# Create OpenAI client with supermemory middleware
642640
openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
643641
openai_with_supermemory = with_supermemory(
644642
openai,
645643
"user-123",
644+
"conversation-456",
646645
OpenAIMiddlewareOptions(
647-
conversation_id="conversation-456",
648646
mode="full",
649647
add_memory="always"
650648
)
651649
)
652650
653-
# Use normally - memories will be automatically injected
654651
response = await openai_with_supermemory.chat.completions.create(
655652
model="gpt-4",
656653
messages=[
@@ -663,6 +660,14 @@ def with_supermemory(
663660
ValueError: When SUPERMEMORY_API_KEY environment variable is not set
664661
Exception: When supermemory API request fails
665662
"""
666-
wrapper = SupermemoryOpenAIWrapper(openai_client, container_tag, options)
663+
if not conversation_id or not conversation_id.strip():
664+
raise ValueError(
665+
"[supermemory] conversation_id is required and cannot be empty. "
666+
"Pass a unique identifier (e.g., session ID, chat ID) as the third argument to with_supermemory(). "
667+
"This ensures messages are grouped into the same document for a conversation. "
668+
"Example: with_supermemory(openai_client, 'user-123', 'conversation-456')"
669+
)
670+
671+
wrapper = SupermemoryOpenAIWrapper(openai_client, container_tag, conversation_id, options)
667672
# Return the wrapper, which delegates all attributes to the original client
668673
return cast(Union[OpenAI, AsyncOpenAI], wrapper)

0 commit comments

Comments (0)