Skip to content

Commit 1da2ae0

Browse files
updated ai sdk object structure
1 parent 7b99822 commit 1da2ae0

File tree

10 files changed

+367
-361
lines changed

10 files changed

+367
-361
lines changed

apps/docs/integrations/ai-sdk.mdx

Lines changed: 7 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,7 @@ import { generateText } from "ai"
3535
import { withSupermemory } from "@supermemory/tools/vercel"
3636
import { openai } from "@ai-sdk/openai"
3737

38-
const modelWithMemory = withSupermemory({
39-
model: openai("gpt-4"),
38+
const modelWithMemory = withSupermemory(openai("gpt-4"), {
4039
containerTag: "user-123",
4140
customId: "conv-456"
4241
})
@@ -51,8 +50,7 @@ const result = await generateText({
5150
**Memory saving is enabled by default.** The middleware automatically saves conversations to memory. To disable memory saving:
5251

5352
```typescript
54-
const modelWithMemory = withSupermemory({
55-
model: openai("gpt-4"),
53+
const modelWithMemory = withSupermemory(openai("gpt-4"), {
5654
containerTag: "user-123",
5755
customId: "conv-456",
5856
addMemory: "never"
@@ -65,8 +63,7 @@ const result = await generateText({
6563
**Profile Mode (Default)** - Retrieves the user's complete profile:
6664

6765
```typescript
68-
const model = withSupermemory({
69-
model: openai("gpt-4"),
66+
const model = withSupermemory(openai("gpt-4"), {
7067
containerTag: "user-123",
7168
customId: "conv-456",
7269
mode: "profile"
@@ -76,8 +73,7 @@ const model = withSupermemory({
7673
**Query Mode** - Searches memories based on the user's message:
7774

7875
```typescript
79-
const model = withSupermemory({
80-
model: openai("gpt-4"),
76+
const model = withSupermemory(openai("gpt-4"), {
8177
containerTag: "user-123",
8278
customId: "conv-456",
8379
mode: "query"
@@ -87,8 +83,7 @@ const model = withSupermemory({
8783
**Full Mode** - Combines profile AND query-based search:
8884

8985
```typescript
90-
const model = withSupermemory({
91-
model: openai("gpt-4"),
86+
const model = withSupermemory(openai("gpt-4"), {
9287
containerTag: "user-123",
9388
customId: "conv-456",
9489
mode: "full"
@@ -113,8 +108,7 @@ const claudePrompt = (data: MemoryPromptData) => `
113108
</context>
114109
`.trim()
115110

116-
const model = withSupermemory({
117-
model: anthropic("claude-3-sonnet"),
111+
const model = withSupermemory(anthropic("claude-3-sonnet"), {
118112
containerTag: "user-123",
119113
customId: "conv-456",
120114
mode: "full",
@@ -125,8 +119,7 @@ const model = withSupermemory({
125119
### Verbose Logging
126120

127121
```typescript
128-
const model = withSupermemory({
129-
model: openai("gpt-4"),
122+
const model = withSupermemory(openai("gpt-4"), {
130123
containerTag: "user-123",
131124
customId: "conv-456",
132125
verbose: true

packages/tools/README.md

Lines changed: 95 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -57,44 +57,45 @@ const addTool = addMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
5757

5858
#### AI SDK Middleware with Supermemory
5959

60-
- `withSupermemory` will take advantage supermemory profile v4 endpoint personalized based on container tag
61-
- You can provide the Supermemory API key via the `apiKey` option to `withSupermemory` (recommended for browser usage), or fall back to `SUPERMEMORY_API_KEY` in the environment for server usage.
60+
- `withSupermemory` wraps any language model with supermemory capabilities using the v4 profile endpoint
61+
- You can provide the Supermemory API key via the `apiKey` option (recommended for browser usage), or fall back to `SUPERMEMORY_API_KEY` in the environment for server usage
6262
- **Per-turn caching**: Memory injection is cached for tool-call continuations within the same user turn. The middleware detects when the AI SDK is continuing a multi-step flow (e.g., after a tool call) and reuses the cached memories instead of making redundant API calls. A fresh fetch occurs on each new user message turn.
6363

6464
```typescript
6565
import { generateText } from "ai"
6666
import { withSupermemory } from "@supermemory/tools/ai-sdk"
6767
import { openai } from "@ai-sdk/openai"
6868

69-
const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life")
69+
const modelWithMemory = withSupermemory(openai("gpt-4"), {
70+
containerTag: "user-123",
71+
customId: "conversation-456",
72+
})
7073

7174
const result = await generateText({
72-
model: modelWithMemory,
73-
messages: [{ role: "user", content: "where do i live?" }],
75+
model: modelWithMemory,
76+
messages: [{ role: "user", content: "where do i live?" }],
7477
})
7578

7679
console.log(result.text)
7780
```
7881

79-
#### Conversation Grouping
82+
#### Configuration Options
8083

81-
Use the `conversationId` option to group messages into a single document for contextual memory generation:
84+
The `withSupermemory` function accepts a model and a configuration object:
8285

8386
```typescript
84-
import { generateText } from "ai"
85-
import { withSupermemory } from "@supermemory/tools/ai-sdk"
86-
import { openai } from "@ai-sdk/openai"
87-
88-
const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", {
89-
conversationId: "conversation-456"
87+
withSupermemory(model, {
88+
containerTag: string, // Required: User/container identifier for memory scoping
89+
customId: string, // Required: Conversation ID for grouping messages
90+
mode?: "profile" | "query" | "full", // Memory retrieval mode (default: "profile")
91+
addMemory?: "always" | "never", // Auto-save conversations (default: "always")
92+
searchMode?: "memories" | "hybrid" | "documents", // Search mode (default: "memories")
93+
searchLimit?: number, // Max search results for hybrid/documents mode (default: 10)
94+
verbose?: boolean, // Enable detailed logging (default: false)
95+
apiKey?: string, // Supermemory API key (falls back to env var)
96+
baseUrl?: string, // Custom API base URL
97+
promptTemplate?: (data: MemoryPromptData) => string, // Custom memory formatting
9098
})
91-
92-
const result = await generateText({
93-
model: modelWithMemory,
94-
messages: [{ role: "user", content: "where do i live?" }],
95-
})
96-
97-
console.log(result.text)
9899
```
99100

100101
#### Verbose Mode
@@ -106,21 +107,23 @@ import { generateText } from "ai"
106107
import { withSupermemory } from "@supermemory/tools/ai-sdk"
107108
import { openai } from "@ai-sdk/openai"
108109

109-
const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", {
110-
verbose: true
110+
const modelWithMemory = withSupermemory(openai("gpt-4"), {
111+
containerTag: "user-123",
112+
customId: "conv-456",
113+
verbose: true,
111114
})
112115

113116
const result = await generateText({
114-
model: modelWithMemory,
115-
messages: [{ role: "user", content: "where do i live?" }],
117+
model: modelWithMemory,
118+
messages: [{ role: "user", content: "where do i live?" }],
116119
})
117120

118121
console.log(result.text)
119122
```
120123

121124
When verbose mode is enabled, you'll see console output like:
122125
```
123-
[supermemory] Searching memories for container: user_id_life
126+
[supermemory] Searching memories for container: user-123
124127
[supermemory] User message: where do i live?
125128
[supermemory] System prompt exists: false
126129
[supermemory] Found 3 memories
@@ -139,11 +142,10 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk"
139142
import { openai } from "@ai-sdk/openai"
140143

141144
// Uses profile mode by default - gets all user profile memories
142-
const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123")
143-
144-
// Explicitly specify profile mode
145-
const modelWithProfile = withSupermemory(openai("gpt-4"), "user-123", {
146-
mode: "profile"
145+
const modelWithMemory = withSupermemory(openai("gpt-4"), {
146+
containerTag: "user-123",
147+
customId: "conv-456",
148+
mode: "profile",
147149
})
148150

149151
const result = await generateText({
@@ -158,8 +160,10 @@ import { generateText } from "ai"
158160
import { withSupermemory } from "@supermemory/tools/ai-sdk"
159161
import { openai } from "@ai-sdk/openai"
160162

161-
const modelWithQuery = withSupermemory(openai("gpt-4"), "user-123", {
162-
mode: "query"
163+
const modelWithQuery = withSupermemory(openai("gpt-4"), {
164+
containerTag: "user-123",
165+
customId: "conv-456",
166+
mode: "query",
163167
})
164168

165169
const result = await generateText({
@@ -174,8 +178,10 @@ import { generateText } from "ai"
174178
import { withSupermemory } from "@supermemory/tools/ai-sdk"
175179
import { openai } from "@ai-sdk/openai"
176180

177-
const modelWithFull = withSupermemory(openai("gpt-4"), "user-123", {
178-
mode: "full"
181+
const modelWithFull = withSupermemory(openai("gpt-4"), {
182+
containerTag: "user-123",
183+
customId: "conv-456",
184+
mode: "full",
179185
})
180186

181187
const result = await generateText({
@@ -184,38 +190,58 @@ const result = await generateText({
184190
})
185191
```
186192

187-
#### Automatic Memory Capture
193+
#### RAG with Hybrid Search
188194

189-
The middleware can automatically save user messages as memories:
195+
Use `searchMode: "hybrid"` to search both memories AND document chunks (recommended for RAG applications):
190196

191-
**Always Save Memories** - Automatically stores every user message as a memory:
192197
```typescript
193198
import { generateText } from "ai"
194199
import { withSupermemory } from "@supermemory/tools/ai-sdk"
195200
import { openai } from "@ai-sdk/openai"
196201

197-
const modelWithAutoSave = withSupermemory(openai("gpt-4"), "user-123", {
198-
addMemory: "always"
202+
const ragModel = withSupermemory(openai("gpt-4"), {
203+
containerTag: "user-123",
204+
customId: "conv-456",
205+
mode: "full",
206+
searchMode: "hybrid", // Search both memories and document chunks
207+
searchLimit: 15, // Return up to 15 results
199208
})
200209

201210
const result = await generateText({
202-
model: modelWithAutoSave,
203-
messages: [{ role: "user", content: "I prefer React with TypeScript for my projects" }],
211+
model: ragModel,
212+
messages: [{ role: "user", content: "What's in my documents about quarterly goals?" }],
204213
})
205-
// This message will be automatically saved as a memory
206214
```
207215

208-
**Never Save Memories (Default)** - Only retrieves memories without storing new ones:
216+
#### Automatic Memory Capture
217+
218+
The middleware can automatically save conversations as memories:
219+
220+
**Always Save Memories (Default)** - Automatically stores conversations:
209221
```typescript
210-
const modelWithNoSave = withSupermemory(openai("gpt-4"), "user-123")
222+
import { generateText } from "ai"
223+
import { withSupermemory } from "@supermemory/tools/ai-sdk"
224+
import { openai } from "@ai-sdk/openai"
225+
226+
const modelWithAutoSave = withSupermemory(openai("gpt-4"), {
227+
containerTag: "user-123",
228+
customId: "conv-456",
229+
addMemory: "always",
230+
})
231+
232+
const result = await generateText({
233+
model: modelWithAutoSave,
234+
messages: [{ role: "user", content: "I prefer React with TypeScript for my projects" }],
235+
})
236+
// This conversation will be automatically saved as a memory
211237
```
212238

213-
**Combined Options** - Use verbose logging with specific modes and memory storage:
239+
**Never Save Memories** - Only retrieves memories without storing new ones:
214240
```typescript
215-
const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", {
216-
mode: "profile",
217-
addMemory: "always",
218-
verbose: true
241+
const modelWithNoSave = withSupermemory(openai("gpt-4"), {
242+
containerTag: "user-123",
243+
customId: "conv-456",
244+
addMemory: "never",
219245
})
220246
```
221247

@@ -239,7 +265,9 @@ ${data.generalSearchMemories}
239265
</user_memories>
240266
`.trim()
241267

242-
const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), "user-123", {
268+
const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), {
269+
containerTag: "user-123",
270+
customId: "conv-456",
243271
mode: "full",
244272
promptTemplate: customPrompt,
245273
})
@@ -646,23 +674,30 @@ Without `strict: true`, optional fields like `includeFullDocs` and `limit` won't
646674

647675
### withSupermemory Middleware Options
648676

649-
The `withSupermemory` middleware accepts additional configuration options:
677+
The `withSupermemory` middleware accepts a model and a configuration object:
650678

651679
```typescript
652-
interface WithSupermemoryOptions {
653-
conversationId?: string
654-
verbose?: boolean
655-
mode?: "profile" | "query" | "full"
656-
addMemory?: "always" | "never"
657-
/** Optional Supermemory API key. Use this in browser environments. */
658-
apiKey?: string
680+
interface WithSupermemoryConfig {
681+
containerTag: string // Required: User/container identifier for memory scoping
682+
customId: string // Required: Conversation ID for grouping messages
683+
verbose?: boolean // Enable detailed logging (default: false)
684+
mode?: "profile" | "query" | "full" // Memory retrieval mode (default: "profile")
685+
searchMode?: "memories" | "hybrid" | "documents" // Search mode (default: "memories")
686+
searchLimit?: number // Max search results for hybrid/documents mode (default: 10)
687+
addMemory?: "always" | "never" // Auto-save conversations (default: "always")
688+
apiKey?: string // Supermemory API key (falls back to SUPERMEMORY_API_KEY env var)
689+
baseUrl?: string // Custom API base URL
690+
promptTemplate?: (data: MemoryPromptData) => string // Custom memory formatting
659691
}
660692
```
661693

662-
- **conversationId**: Optional conversation ID to group messages into a single document for contextual memory generation
694+
- **containerTag**: Required. The container tag/identifier for memory search (e.g., user ID, project ID)
695+
- **customId**: Required. Custom ID to group messages into a single document for contextual memory generation
663696
- **verbose**: Enable detailed logging of memory search and injection process (default: false)
664697
- **mode**: Memory search mode - "profile" (default), "query", or "full"
665-
- **addMemory**: Automatic memory storage mode - "always" or "never" (default: "never")
698+
- **searchMode**: Search mode - "memories" (default), "hybrid" (memories + chunks), or "documents" (chunks only)
699+
- **searchLimit**: Maximum number of search results when using hybrid/documents mode (default: 10)
700+
- **addMemory**: Automatic memory storage mode - "always" (default) or "never"
666701

667702
## Available Tools
668703

packages/tools/src/shared/cache.ts

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,12 @@ import type { MemoryMode } from "./types"
55
* Generic memory cache for storing per-turn memories to avoid redundant API calls.
66
* Used to cache memory retrieval results during tool-call loops within the same turn.
77
*/
8-
export class MemoryCache<T = string> {
9-
private cache: LRUCache<string, T> = new LRUCache({ max: 100 })
8+
export class MemoryCache<T extends {} = string> {
9+
private cache: LRUCache<string, T>
10+
11+
constructor() {
12+
this.cache = new LRUCache<string, T>({ max: 100 })
13+
}
1014

1115
/**
1216
* Generates a cache key for the current turn based on context parameters.

packages/tools/src/shared/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ export type {
33
MemoryPromptData,
44
PromptTemplate,
55
MemoryMode,
6+
SearchMode,
67
AddMemoryMode,
78
Logger,
89
ProfileStructure,

packages/tools/src/shared/memory-client.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import { deduplicateMemories } from "../tools-shared"
22
import type {
33
Logger,
44
MemoryMode,
5+
SearchMode,
56
MemoryPromptData,
67
ProfileStructure,
78
PromptTemplate,
@@ -72,6 +73,15 @@ export interface BuildMemoriesTextOptions {
7273
apiKey: string
7374
logger: Logger
7475
promptTemplate?: PromptTemplate
76+
/**
77+
* Search mode for memory retrieval:
78+
* - "memories": Search only memory entries (default)
79+
* - "hybrid": Search both memories AND document chunks
80+
* - "documents": Search only document chunks
81+
*/
82+
searchMode?: SearchMode
83+
/** Maximum number of search results to return (default: 10) */
84+
searchLimit?: number
7585
}
7686

7787
/**

packages/tools/src/shared/types.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,14 @@ export type PromptTemplate = (data: MemoryPromptData) => string
4747
*/
4848
export type MemoryMode = "profile" | "query" | "full"
4949

50+
/**
51+
* Search mode for memory retrieval:
52+
* - "memories": Search only memory entries (default)
53+
* - "hybrid": Search both memories AND document chunks (recommended for RAG)
54+
* - "documents": Search only document chunks
55+
*/
56+
export type SearchMode = "memories" | "hybrid" | "documents"
57+
5058
/**
5159
* Memory persistence mode:
5260
* - "always": Automatically save conversations as memories

0 commit comments

Comments (0)