@@ -57,44 +57,45 @@ const addTool = addMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
5757
5858#### AI SDK Middleware with Supermemory
5959
60- - ` withSupermemory ` will take advantage of the supermemory profile v4 endpoint, personalized based on the container tag
61- - You can provide the Supermemory API key via the ` apiKey ` option to ` withSupermemory ` (recommended for browser usage), or fall back to ` SUPERMEMORY_API_KEY ` in the environment for server usage.
60+ - ` withSupermemory ` wraps any language model with supermemory capabilities using the v4 profile endpoint
61+ - You can provide the Supermemory API key via the ` apiKey ` option (recommended for browser usage), or fall back to ` SUPERMEMORY_API_KEY ` in the environment for server usage
6262- ** Per-turn caching** : Memory injection is cached for tool-call continuations within the same user turn. The middleware detects when the AI SDK is continuing a multi-step flow (e.g., after a tool call) and reuses the cached memories instead of making redundant API calls. A fresh fetch occurs on each new user message turn.
6363
6464``` typescript
6565import { generateText } from " ai"
6666import { withSupermemory } from " @supermemory/tools/ai-sdk"
6767import { openai } from " @ai-sdk/openai"
6868
69- const modelWithMemory = withSupermemory (openai (" gpt-5" ), " user_id_life" )
69+ const modelWithMemory = withSupermemory (openai (" gpt-4" ), {
70+ containerTag: " user-123" ,
71+ customId: " conversation-456" ,
72+ })
7073
7174const result = await generateText ({
72- model: modelWithMemory ,
73- messages: [{ role: " user" , content: " where do i live?" }],
75+ model: modelWithMemory ,
76+ messages: [{ role: " user" , content: " where do i live?" }],
7477})
7578
7679console .log (result .text )
7780```
7881
79- #### Conversation Grouping
82+ #### Configuration Options
8083
81- Use the ` conversationId ` option to group messages into a single document for contextual memory generation :
84+ The ` withSupermemory ` function accepts a model and a configuration object:
8285
8386``` typescript
84- import { generateText } from " ai"
85- import { withSupermemory } from " @supermemory/tools/ai-sdk"
86- import { openai } from " @ai-sdk/openai"
87-
88- const modelWithMemory = withSupermemory (openai (" gpt-5" ), " user_id_life" , {
89- conversationId: " conversation-456"
87+ withSupermemory (model , {
88+ containerTag: string , // Required: User/container identifier for memory scoping
89+ customId: string , // Required: Conversation ID for grouping messages
90+ mode?: " profile" | " query" | " full" , // Memory retrieval mode (default: "profile")
91+ addMemory?: " always" | " never" , // Auto-save conversations (default: "always")
92+ searchMode?: " memories" | " hybrid" | " documents" , // Search mode (default: "memories")
93+ searchLimit?: number , // Max search results for hybrid/documents mode (default: 10)
94+ verbose?: boolean , // Enable detailed logging (default: false)
95+ apiKey?: string , // Supermemory API key (falls back to env var)
96+ baseUrl?: string , // Custom API base URL
97+ promptTemplate?: (data : MemoryPromptData ) => string , // Custom memory formatting
9098})
91-
92- const result = await generateText ({
93- model: modelWithMemory ,
94- messages: [{ role: " user" , content: " where do i live?" }],
95- })
96-
97- console .log (result .text )
9899```
99100
100101#### Verbose Mode
@@ -106,21 +107,23 @@ import { generateText } from "ai"
106107import { withSupermemory } from " @supermemory/tools/ai-sdk"
107108import { openai } from " @ai-sdk/openai"
108109
109- const modelWithMemory = withSupermemory (openai (" gpt-5" ), " user_id_life" , {
110- verbose: true
110+ const modelWithMemory = withSupermemory (openai (" gpt-4" ), {
111+ containerTag: " user-123" ,
112+ customId: " conv-456" ,
113+ verbose: true ,
111114})
112115
113116const result = await generateText ({
114- model: modelWithMemory ,
115- messages: [{ role: " user" , content: " where do i live?" }],
117+ model: modelWithMemory ,
118+ messages: [{ role: " user" , content: " where do i live?" }],
116119})
117120
118121console .log (result .text )
119122```
120123
121124When verbose mode is enabled, you'll see console output like:
122125```
123- [supermemory] Searching memories for container: user_id_life
126+ [supermemory] Searching memories for container: user-123
124127[supermemory] User message: where do i live?
125128[supermemory] System prompt exists: false
126129[supermemory] Found 3 memories
@@ -139,11 +142,10 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk"
139142import { openai } from " @ai-sdk/openai"
140143
141144// Uses profile mode by default - gets all user profile memories
142- const modelWithMemory = withSupermemory (openai (" gpt-4" ), " user-123" )
143-
144- // Explicitly specify profile mode
145- const modelWithProfile = withSupermemory (openai (" gpt-4" ), " user-123" , {
146- mode: " profile"
145+ const modelWithMemory = withSupermemory (openai (" gpt-4" ), {
146+ containerTag: " user-123" ,
147+ customId: " conv-456" ,
148+ mode: " profile" ,
147149})
148150
149151const result = await generateText ({
@@ -158,8 +160,10 @@ import { generateText } from "ai"
158160import { withSupermemory } from " @supermemory/tools/ai-sdk"
159161import { openai } from " @ai-sdk/openai"
160162
161- const modelWithQuery = withSupermemory (openai (" gpt-4" ), " user-123" , {
162- mode: " query"
163+ const modelWithQuery = withSupermemory (openai (" gpt-4" ), {
164+ containerTag: " user-123" ,
165+ customId: " conv-456" ,
166+ mode: " query" ,
163167})
164168
165169const result = await generateText ({
@@ -174,8 +178,10 @@ import { generateText } from "ai"
174178import { withSupermemory } from " @supermemory/tools/ai-sdk"
175179import { openai } from " @ai-sdk/openai"
176180
177- const modelWithFull = withSupermemory (openai (" gpt-4" ), " user-123" , {
178- mode: " full"
181+ const modelWithFull = withSupermemory (openai (" gpt-4" ), {
182+ containerTag: " user-123" ,
183+ customId: " conv-456" ,
184+ mode: " full" ,
179185})
180186
181187const result = await generateText ({
@@ -184,38 +190,58 @@ const result = await generateText({
184190})
185191```
186192
187- #### Automatic Memory Capture
193+ #### RAG with Hybrid Search
188194
189- The middleware can automatically save user messages as memories :
195+ Use ` searchMode: "hybrid" ` to search both memories AND document chunks (recommended for RAG applications):
190196
191- ** Always Save Memories** - Automatically stores every user message as a memory:
192197``` typescript
193198import { generateText } from " ai"
194199import { withSupermemory } from " @supermemory/tools/ai-sdk"
195200import { openai } from " @ai-sdk/openai"
196201
197- const modelWithAutoSave = withSupermemory (openai (" gpt-4" ), " user-123" , {
198- addMemory: " always"
202+ const ragModel = withSupermemory (openai (" gpt-4" ), {
203+ containerTag: " user-123" ,
204+ customId: " conv-456" ,
205+ mode: " full" ,
206+ searchMode: " hybrid" , // Search both memories and document chunks
207+ searchLimit: 15 , // Return up to 15 results
199208})
200209
201210const result = await generateText ({
202- model: modelWithAutoSave ,
203- messages: [{ role: " user" , content: " I prefer React with TypeScript for my projects " }],
211+ model: ragModel ,
212+ messages: [{ role: " user" , content: " What's in my documents about quarterly goals? " }],
204213})
205- // This message will be automatically saved as a memory
206214```
207215
208- ** Never Save Memories (Default)** - Only retrieves memories without storing new ones:
216+ #### Automatic Memory Capture
217+
218+ The middleware can automatically save conversations as memories:
219+
220+ ** Always Save Memories (Default)** - Automatically stores conversations:
209221``` typescript
210- const modelWithNoSave = withSupermemory (openai (" gpt-4" ), " user-123" )
222+ import { generateText } from " ai"
223+ import { withSupermemory } from " @supermemory/tools/ai-sdk"
224+ import { openai } from " @ai-sdk/openai"
225+
226+ const modelWithAutoSave = withSupermemory (openai (" gpt-4" ), {
227+ containerTag: " user-123" ,
228+ customId: " conv-456" ,
229+ addMemory: " always" ,
230+ })
231+
232+ const result = await generateText ({
233+ model: modelWithAutoSave ,
234+ messages: [{ role: " user" , content: " I prefer React with TypeScript for my projects" }],
235+ })
236+ // This conversation will be automatically saved as a memory
211237```
212238
213- ** Combined Options ** - Use verbose logging with specific modes and memory storage :
239+ ** Never Save Memories ** - Only retrieves memories without storing new ones:
214240``` typescript
215- const modelWithOptions = withSupermemory (openai (" gpt-4" ), " user-123 " , {
216- mode : " profile " ,
217- addMemory : " always " ,
218- verbose: true
241+ const modelWithNoSave = withSupermemory (openai (" gpt-4" ), {
242+ containerTag : " user-123 " ,
243+ customId : " conv-456 " ,
244+ addMemory: " never " ,
219245})
220246```
221247
@@ -239,7 +265,9 @@ ${data.generalSearchMemories}
239265</user_memories>
240266` .trim ()
241267
242- const modelWithCustomPrompt = withSupermemory (openai (" gpt-4" ), " user-123" , {
268+ const modelWithCustomPrompt = withSupermemory (openai (" gpt-4" ), {
269+ containerTag: " user-123" ,
270+ customId: " conv-456" ,
243271 mode: " full" ,
244272 promptTemplate: customPrompt ,
245273})
@@ -646,23 +674,30 @@ Without `strict: true`, optional fields like `includeFullDocs` and `limit` won't
646674
647675### withSupermemory Middleware Options
648676
649- The ` withSupermemory ` middleware accepts additional configuration options :
677+ The ` withSupermemory ` middleware accepts a model and a configuration object:
650678
651679``` typescript
652- interface WithSupermemoryOptions {
653- conversationId? : string
654- verbose? : boolean
655- mode? : " profile" | " query" | " full"
656- addMemory? : " always" | " never"
657- /** Optional Supermemory API key. Use this in browser environments. */
658- apiKey? : string
680+ interface WithSupermemoryConfig {
681+ containerTag: string // Required: User/container identifier for memory scoping
682+ customId: string // Required: Conversation ID for grouping messages
683+ verbose? : boolean // Enable detailed logging (default: false)
684+ mode? : " profile" | " query" | " full" // Memory retrieval mode (default: "profile")
685+ searchMode? : " memories" | " hybrid" | " documents" // Search mode (default: "memories")
686+ searchLimit? : number // Max search results for hybrid/documents mode (default: 10)
687+ addMemory? : " always" | " never" // Auto-save conversations (default: "always")
688+ apiKey? : string // Supermemory API key (falls back to SUPERMEMORY_API_KEY env var)
689+ baseUrl? : string // Custom API base URL
690+ promptTemplate? : (data : MemoryPromptData ) => string // Custom memory formatting
659691}
660692```
661693
662- - ** conversationId** : Optional conversation ID to group messages into a single document for contextual memory generation
694+ - ** containerTag** : Required. The container tag/identifier for memory search (e.g., user ID, project ID)
695+ - ** customId** : Required. Custom ID to group messages into a single document for contextual memory generation
663696- ** verbose** : Enable detailed logging of memory search and injection process (default: false)
664697- ** mode** : Memory search mode - "profile" (default), "query", or "full"
665- - ** addMemory** : Automatic memory storage mode - "always" or "never" (default: "never")
698+ - ** searchMode** : Search mode - "memories" (default), "hybrid" (memories + chunks), or "documents" (chunks only)
699+ - ** searchLimit** : Maximum number of search results when using hybrid/documents mode (default: 10)
700+ - ** addMemory** : Automatic memory storage mode - "always" (default) or "never"
666701
667702## Available Tools
668703
0 commit comments