diff --git a/apps/docs/ai-sdk/overview.mdx b/apps/docs/ai-sdk/overview.mdx
index 0c9a48f49..535f0cef2 100644
--- a/apps/docs/ai-sdk/overview.mdx
+++ b/apps/docs/ai-sdk/overview.mdx
@@ -40,6 +40,7 @@ const result = await generateText({
```typescript
const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123", {
+ conversationId: "conv-1",
addMemory: "always"
})
```
diff --git a/apps/docs/ai-sdk/user-profiles.mdx b/apps/docs/ai-sdk/user-profiles.mdx
index df8c5430f..09977272d 100644
--- a/apps/docs/ai-sdk/user-profiles.mdx
+++ b/apps/docs/ai-sdk/user-profiles.mdx
@@ -50,6 +50,7 @@ All of this happens transparently - you write code as if using a normal model, b
```typescript
const model = withSupermemory(openai("gpt-5"), "user-123", {
+ conversationId: "conv-1",
addMemory: "always"
})
```
@@ -117,6 +118,32 @@ const result = await generateText({
// Uses both profile (user's expertise) AND search (previous debugging sessions)
```
+### Hybrid Search Mode
+
+Use `searchMode: "hybrid"` to search both memories AND document chunks.
+
+```typescript
+const model = withSupermemory(openai("gpt-4"), "user-123", {
+ mode: "full",
+ searchMode: "hybrid", // Search memories + document chunks
+ searchLimit: 15 // Max results (default: 10)
+})
+
+const result = await generateText({
+ model,
+ messages: [{
+ role: "user",
+ content: "What's in my documents about quarterly goals?"
+ }]
+})
+// Searches both extracted memories AND raw document content
+```
+
+**Search Mode Options:**
+- `"memories"` (default) - Search only memory entries
+- `"hybrid"` - Search memories + document chunks
+- `"documents"` - Search only document chunks
+
## Custom Prompt Templates
Customize how memories are formatted and injected into the system prompt using the `promptTemplate` option. This is useful for:
diff --git a/apps/docs/integrations/ai-sdk.mdx b/apps/docs/integrations/ai-sdk.mdx
index a9e2d6f8a..a4561bf12 100644
--- a/apps/docs/integrations/ai-sdk.mdx
+++ b/apps/docs/integrations/ai-sdk.mdx
@@ -32,10 +32,14 @@ Automatically inject user profiles into every LLM call for instant personalizati
```typescript
import { generateText } from "ai"
-import { withSupermemory } from "@supermemory/tools/ai-sdk"
+import { withSupermemory } from "@supermemory/tools/vercel"
import { openai } from "@ai-sdk/openai"
-const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123")
+const modelWithMemory = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-456"
+})
const result = await generateText({
model: modelWithMemory,
@@ -44,11 +48,14 @@ const result = await generateText({
```
- **Memory saving is disabled by default.** The middleware only retrieves existing memories. To automatically save new memories:
+ **Memory saving is enabled by default.** The middleware automatically saves conversations to memory. To disable memory saving:
```typescript
- const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123", {
- addMemory: "always"
+ const modelWithMemory = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-456",
+ addMemory: "never"
})
```
@@ -58,19 +65,34 @@ const result = await generateText({
**Profile Mode (Default)** - Retrieves the user's complete profile:
```typescript
-const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "profile" })
+const model = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "profile"
+})
```
**Query Mode** - Searches memories based on the user's message:
```typescript
-const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "query" })
+const model = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "query"
+})
```
**Full Mode** - Combines profile AND query-based search:
```typescript
-const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "full" })
+const model = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "full"
+})
```
### Custom Prompt Templates
@@ -78,7 +100,7 @@ const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "full" })
Customize how memories are formatted. The template receives `userMemories`, `generalSearchMemories`, and `searchResults` (raw array for filtering by metadata):
```typescript
-import { withSupermemory, type MemoryPromptData } from "@supermemory/tools/ai-sdk"
+import { withSupermemory, type MemoryPromptData } from "@supermemory/tools/vercel"
const claudePrompt = (data: MemoryPromptData) => `
@@ -91,7 +113,10 @@ const claudePrompt = (data: MemoryPromptData) => `
`.trim()
-const model = withSupermemory(anthropic("claude-3-sonnet"), "user-123", {
+const model = withSupermemory({
+ model: anthropic("claude-3-sonnet"),
+ containerTag: "user-123",
+ customId: "conv-456",
mode: "full",
promptTemplate: claudePrompt
})
@@ -100,7 +125,10 @@ const model = withSupermemory(anthropic("claude-3-sonnet"), "user-123", {
### Verbose Logging
```typescript
-const model = withSupermemory(openai("gpt-4"), "user-123", {
+const model = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-456",
verbose: true
})
// Console output shows memory retrieval details
diff --git a/apps/web/components/memory-graph/memory-graph-wrapper.tsx b/apps/web/components/memory-graph/memory-graph-wrapper.tsx
index 655f2482e..6b28f6a18 100644
--- a/apps/web/components/memory-graph/memory-graph-wrapper.tsx
+++ b/apps/web/components/memory-graph/memory-graph-wrapper.tsx
@@ -71,10 +71,12 @@ export function MemoryGraph({
maxNodes={maxNodes}
canvasRef={canvasRef}
totalCount={totalCount}
- colors={{
- bg: "transparent",
- edgeDerives: "#9ca3af",
- } as any}
+ colors={
+ {
+ bg: "transparent",
+ edgeDerives: "#9ca3af",
+ } as any
+ }
{...rest}
>
{children}
diff --git a/bun.lock b/bun.lock
index 879532b4a..b5dd4edf5 100644
--- a/bun.lock
+++ b/bun.lock
@@ -314,7 +314,7 @@
},
"packages/tools": {
"name": "@supermemory/tools",
- "version": "1.4.1",
+ "version": "1.5.0",
"dependencies": {
"@ai-sdk/anthropic": "^2.0.25",
"@ai-sdk/openai": "^2.0.23",
diff --git a/packages/docs-test/tests/integrations/ai-sdk.ts b/packages/docs-test/tests/integrations/ai-sdk.ts
index f6d4f1d2b..596f36121 100644
--- a/packages/docs-test/tests/integrations/ai-sdk.ts
+++ b/packages/docs-test/tests/integrations/ai-sdk.ts
@@ -13,17 +13,27 @@ async function testMiddleware() {
console.log("=== Middleware ===")
// Basic wrapper
- const model = withSupermemory(openai("gpt-4"), "user-123")
+ const model = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-001",
+ })
console.log("✓ withSupermemory basic")
// With addMemory option
- const modelWithAdd = withSupermemory(openai("gpt-4"), "user-123", {
+ const modelWithAdd = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-002",
addMemory: "always",
})
console.log("✓ withSupermemory with addMemory")
// With verbose logging
- const modelVerbose = withSupermemory(openai("gpt-4"), "user-123", {
+ const modelVerbose = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-003",
verbose: true,
})
console.log("✓ withSupermemory with verbose")
@@ -32,17 +42,26 @@ async function testMiddleware() {
async function testSearchModes() {
console.log("\n=== Search Modes ===")
- const profileModel = withSupermemory(openai("gpt-4"), "user-123", {
+ const profileModel = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-004",
mode: "profile",
})
console.log("✓ mode: profile")
- const queryModel = withSupermemory(openai("gpt-4"), "user-123", {
+ const queryModel = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-005",
mode: "query",
})
console.log("✓ mode: query")
- const fullModel = withSupermemory(openai("gpt-4"), "user-123", {
+ const fullModel = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-006",
mode: "full",
})
console.log("✓ mode: full")
@@ -61,14 +80,13 @@ async function testCustomPrompt() {
`.trim()
- const model = withSupermemory(
- anthropic("claude-3-sonnet-20240229"),
- "user-123",
- {
- mode: "full",
- promptTemplate: claudePrompt,
- },
- )
+ const model = withSupermemory({
+ model: anthropic("claude-3-sonnet-20240229"),
+ containerTag: "user-123",
+ customId: "conv-007",
+ mode: "full",
+ promptTemplate: claudePrompt,
+ })
console.log("✓ Custom prompt template")
}
@@ -76,14 +94,21 @@ async function testTools() {
console.log("\n=== Memory Tools ===")
// All tools
- const tools = supermemoryTools("YOUR_API_KEY")
+ const tools = supermemoryTools({
+ apiKey: "YOUR_API_KEY",
+ })
console.log("✓ supermemoryTools")
// Individual tools
- const searchTool = searchMemoriesTool("API_KEY", { projectId: "personal" })
+ const searchTool = searchMemoriesTool({
+ apiKey: "API_KEY",
+ projectId: "personal",
+ })
console.log("✓ searchMemoriesTool")
- const addTool = addMemoryTool("API_KEY")
+ const addTool = addMemoryTool({
+ apiKey: "API_KEY",
+ })
console.log("✓ addMemoryTool")
// Combined
@@ -91,7 +116,7 @@ async function testTools() {
searchMemories: searchTool,
addMemory: addTool,
}
- console.log("✓ Combined tools object")
+ console.log("✓ Combined tools object", Object.keys(toolsObj))
}
async function main() {
diff --git a/packages/memory-graph/src/components/memory-graph.tsx b/packages/memory-graph/src/components/memory-graph.tsx
index 66631fd85..49e08c502 100644
--- a/packages/memory-graph/src/components/memory-graph.tsx
+++ b/packages/memory-graph/src/components/memory-graph.tsx
@@ -197,7 +197,11 @@ export function MemoryGraph({
setViewportVersion((v) => v + 1)
}
- const { hasMore: more, isLoadingMore: loading, onLoadMore: load } = loadMoreRef.current
+ const {
+ hasMore: more,
+ isLoadingMore: loading,
+ onLoadMore: load,
+ } = loadMoreRef.current
if (!more || loading || !load || !viewportRef.current) return
const vp = viewportRef.current
@@ -205,14 +209,17 @@ export function MemoryGraph({
if (currentNodes.length === 0) return
const topLeft = vp.screenToWorld(0, 0)
- const bottomRight = vp.screenToWorld(containerSize.width, containerSize.height)
+ const bottomRight = vp.screenToWorld(
+ containerSize.width,
+ containerSize.height,
+ )
const viewW = bottomRight.x - topLeft.x
const viewH = bottomRight.y - topLeft.y
- let minX = Infinity
- let minY = Infinity
- let maxX = -Infinity
- let maxY = -Infinity
+ let minX = Number.POSITIVE_INFINITY
+ let minY = Number.POSITIVE_INFINITY
+ let maxX = Number.NEGATIVE_INFINITY
+ let maxY = Number.NEGATIVE_INFINITY
for (const n of currentNodes) {
if (n.x < minX) minX = n.x
if (n.y < minY) minY = n.y
@@ -613,7 +620,6 @@ export function MemoryGraph({
colors={colors}
/>
-
{!isLoading && !nodes.some((n) => n.type === "document") && children && (
{children}
)}
diff --git a/packages/tools/README.md b/packages/tools/README.md
index 2d03411d3..60ab814a8 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -66,7 +66,11 @@ import { generateText } from "ai"
import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
-const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life")
+const modelWithMemory = withSupermemory({
+ model: openai("gpt-4o"),
+ containerTag: "user_id_life",
+ customId: "conversation-123",
+})
const result = await generateText({
model: modelWithMemory,
@@ -78,15 +82,17 @@ console.log(result.text)
#### Conversation Grouping
-Use the `conversationId` option to group messages into a single document for contextual memory generation:
+Use the `customId` parameter to group messages into a single document for contextual memory generation:
```typescript
import { generateText } from "ai"
import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
-const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", {
- conversationId: "conversation-456"
+const modelWithMemory = withSupermemory({
+ model: openai("gpt-4o"),
+ containerTag: "user_id_life",
+ customId: "conversation-456", // Groups all messages in this conversation
})
const result = await generateText({
@@ -106,8 +112,11 @@ import { generateText } from "ai"
import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
-const modelWithMemory = withSupermemory(openai("gpt-5"), "user_id_life", {
- verbose: true
+const modelWithMemory = withSupermemory({
+ model: openai("gpt-4o"),
+ containerTag: "user_id_life",
+ customId: "conversation-123",
+ verbose: true,
})
const result = await generateText({
@@ -139,11 +148,18 @@ import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
// Uses profile mode by default - gets all user profile memories
-const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123")
+const modelWithMemory = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-001",
+})
// Explicitly specify profile mode
-const modelWithProfile = withSupermemory(openai("gpt-4"), "user-123", {
- mode: "profile"
+const modelWithProfile = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-001",
+ mode: "profile",
})
const result = await generateText({
@@ -158,8 +174,11 @@ import { generateText } from "ai"
import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
-const modelWithQuery = withSupermemory(openai("gpt-4"), "user-123", {
- mode: "query"
+const modelWithQuery = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-002",
+ mode: "query",
})
const result = await generateText({
@@ -174,8 +193,11 @@ import { generateText } from "ai"
import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
-const modelWithFull = withSupermemory(openai("gpt-4"), "user-123", {
- mode: "full"
+const modelWithFull = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-003",
+ mode: "full",
})
const result = await generateText({
@@ -184,18 +206,47 @@ const result = await generateText({
})
```
+**Hybrid Search Mode (RAG)** - Search both memories AND document chunks:
+```typescript
+import { generateText } from "ai"
+import { withSupermemory } from "@supermemory/tools/ai-sdk"
+import { openai } from "@ai-sdk/openai"
+
+const modelWithHybrid = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-004",
+ mode: "full",
+ searchMode: "hybrid", // Search memories + document chunks
+ searchLimit: 15, // Max results (default: 10)
+})
+
+const result = await generateText({
+ model: modelWithHybrid,
+ messages: [{ role: "user", content: "What's in my documents about quarterly goals?" }],
+})
+```
+
+Search mode options:
+- `"memories"` (default) - Search only memory entries
+- `"hybrid"` - Search memories + document chunks (recommended for RAG)
+- `"documents"` - Search only document chunks
+
#### Automatic Memory Capture
The middleware can automatically save user messages as memories:
-**Always Save Memories** - Automatically stores every user message as a memory:
+**Always Save Memories (Default)** - Automatically stores every user message as a memory:
```typescript
import { generateText } from "ai"
import { withSupermemory } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"
-const modelWithAutoSave = withSupermemory(openai("gpt-4"), "user-123", {
- addMemory: "always"
+const modelWithAutoSave = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-005",
+ addMemory: "always", // Default behavior - saves conversations
})
const result = await generateText({
@@ -205,17 +256,25 @@ const result = await generateText({
// This message will be automatically saved as a memory
```
-**Never Save Memories (Default)** - Only retrieves memories without storing new ones:
+**Never Save Memories** - Only retrieves memories without storing new ones:
```typescript
-const modelWithNoSave = withSupermemory(openai("gpt-4"), "user-123")
+const modelWithNoSave = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-006",
+ addMemory: "never", // Read-only mode
+})
```
**Combined Options** - Use verbose logging with specific modes and memory storage:
```typescript
-const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", {
+const modelWithOptions = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-007",
mode: "profile",
addMemory: "always",
- verbose: true
+ verbose: true,
})
```
@@ -239,7 +298,10 @@ ${data.generalSearchMemories}
`.trim()
-const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), "user-123", {
+const modelWithCustomPrompt = withSupermemory({
+ model: openai("gpt-4"),
+ containerTag: "user-123",
+ customId: "conv-008",
mode: "full",
promptTemplate: customPrompt,
})
@@ -646,23 +708,35 @@ Without `strict: true`, optional fields like `includeFullDocs` and `limit` won't
### withSupermemory Middleware Options
-The `withSupermemory` middleware accepts additional configuration options:
+The `withSupermemory` middleware accepts configuration via an options object:
```typescript
interface WithSupermemoryOptions {
- conversationId?: string
+ model: LanguageModel // Required: The language model to wrap
+ containerTag: string // Required: User/container ID for scoping memories
+ customId: string // Required: Conversation ID to group messages
verbose?: boolean
mode?: "profile" | "query" | "full"
+ searchMode?: "memories" | "hybrid" | "documents"
+ searchLimit?: number
addMemory?: "always" | "never"
- /** Optional Supermemory API key. Use this in browser environments. */
- apiKey?: string
+ apiKey?: string // Optional: Supermemory API key (falls back to env var)
+ baseUrl?: string // Optional: Custom API endpoint
+ promptTemplate?: (data: MemoryPromptData) => string
}
```
-- **conversationId**: Optional conversation ID to group messages into a single document for contextual memory generation
+- **model**: Required. The language model to wrap with supermemory capabilities
+- **containerTag**: Required. User/container ID for scoping memories (e.g., user ID, project ID)
+- **customId**: Required. Conversation ID to group messages into a single document for contextual memory generation
- **verbose**: Enable detailed logging of memory search and injection process (default: false)
- **mode**: Memory search mode - "profile" (default), "query", or "full"
-- **addMemory**: Automatic memory storage mode - "always" or "never" (default: "never")
+- **searchMode**: Search mode - "memories" (default), "hybrid", or "documents". Use "hybrid" for RAG applications
+- **searchLimit**: Maximum number of search results when using hybrid/documents mode (default: 10)
+- **addMemory**: Automatic memory storage mode - "always" (default) or "never"
+- **apiKey**: Optional Supermemory API key. Use this in browser environments. Falls back to SUPERMEMORY_API_KEY env var
+- **baseUrl**: Optional custom base URL for the Supermemory API
+- **promptTemplate**: Optional custom function to format memory data into the system prompt
## Available Tools
diff --git a/packages/tools/package.json b/packages/tools/package.json
index 8d192aba6..1575c7f4b 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
- "version": "1.4.01",
+ "version": "1.5.0",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
diff --git a/packages/tools/src/ai-sdk.ts b/packages/tools/src/ai-sdk.ts
index 77f3ad03f..a4e9f3aa9 100644
--- a/packages/tools/src/ai-sdk.ts
+++ b/packages/tools/src/ai-sdk.ts
@@ -220,38 +220,6 @@ export const documentListTool = (
})
}
-export const documentDeleteTool = (
- apiKey: string,
- config?: SupermemoryToolsConfig,
-) => {
- const client = new Supermemory({
- apiKey,
- ...(config?.baseUrl ? { baseURL: config.baseUrl } : {}),
- })
-
- return tool({
- description: TOOL_DESCRIPTIONS.documentDelete,
- inputSchema: z.object({
- documentId: z.string().describe(PARAMETER_DESCRIPTIONS.documentId),
- }),
- execute: async ({ documentId }) => {
- try {
- await client.documents.delete({ docId: documentId })
-
- return {
- success: true,
- message: `Document ${documentId} deleted successfully`,
- }
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : "Unknown error",
- }
- }
- },
- })
-}
-
export const documentAddTool = (
apiKey: string,
config?: SupermemoryToolsConfig,
@@ -368,7 +336,6 @@ export function supermemoryTools(
addMemory: addMemoryTool(apiKey, config),
getProfile: getProfileTool(apiKey, config),
documentList: documentListTool(apiKey, config),
- documentDelete: documentDeleteTool(apiKey, config),
documentAdd: documentAddTool(apiKey, config),
memoryForget: memoryForgetTool(apiKey, config),
}
diff --git a/packages/tools/src/shared/index.ts b/packages/tools/src/shared/index.ts
index 5a6e0f7ba..f866f3cdd 100644
--- a/packages/tools/src/shared/index.ts
+++ b/packages/tools/src/shared/index.ts
@@ -3,6 +3,7 @@ export type {
MemoryPromptData,
PromptTemplate,
MemoryMode,
+ SearchMode,
AddMemoryMode,
Logger,
ProfileStructure,
@@ -34,9 +35,12 @@ export {
// Memory client
export {
supermemoryProfileSearch,
+ supermemoryHybridSearch,
buildMemoriesText,
extractQueryText,
getLastUserMessageText,
type BuildMemoriesTextOptions,
type GenericMessage,
+ type SearchResultItem,
+ type SearchResponse,
} from "./memory-client"
diff --git a/packages/tools/src/shared/memory-client.ts b/packages/tools/src/shared/memory-client.ts
index 58754c895..b1fdf3016 100644
--- a/packages/tools/src/shared/memory-client.ts
+++ b/packages/tools/src/shared/memory-client.ts
@@ -5,12 +5,36 @@ import type {
MemoryPromptData,
ProfileStructure,
PromptTemplate,
+ SearchMode,
} from "./types"
import {
convertProfileToMarkdown,
defaultPromptTemplate,
} from "./prompt-builder"
+/**
+ * Search result item from the Supermemory search API.
+ * Contains either a memory field (for memory results) or a chunk field (for document chunks).
+ */
+export interface SearchResultItem {
+ id: string
+ similarity: number
+ memory?: string
+ chunk?: string
+ title?: string
+ content?: string
+ metadata?: Record<string, unknown>
+}
+
+/**
+ * Response structure from the Supermemory search API.
+ */
+export interface SearchResponse {
+ results: SearchResultItem[]
+ total: number
+ timing: number
+}
+
/**
* Fetches profile and search results from the Supermemory API.
*
@@ -61,6 +85,59 @@ export const supermemoryProfileSearch = async (
}
}
+/**
+ * Performs a hybrid search using the Supermemory search API.
+ * Hybrid search returns both memories AND document chunks.
+ *
+ * @param containerTag - The container tag/user ID for scoping memories
+ * @param queryText - The search query text
+ * @param searchMode - The search mode: "memories", "hybrid", or "documents"
+ * @param baseUrl - The API base URL
+ * @param apiKey - The API key for authentication
+ * @param limit - Maximum number of results to return (default: 10)
+ * @returns The search response with results containing either memory or chunk fields
+ */
+export const supermemoryHybridSearch = async (
+ containerTag: string,
+ queryText: string,
+ searchMode: SearchMode,
+ baseUrl: string,
+ apiKey: string,
+ limit = 10,
+): Promise<SearchResponse> => {
+ const payload = JSON.stringify({
+ q: queryText,
+ containerTag: containerTag,
+ searchMode: searchMode,
+ limit: limit,
+ })
+
+ try {
+ const response = await fetch(`${baseUrl}/v4/search`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ },
+ body: payload,
+ })
+
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => "Unknown error")
+ throw new Error(
+ `Supermemory search failed: ${response.status} ${response.statusText}. ${errorText}`,
+ )
+ }
+
+ return await response.json()
+ } catch (error) {
+ if (error instanceof Error) {
+ throw error
+ }
+ throw new Error(`Supermemory API request failed: ${error}`)
+ }
+}
+
/**
* Options for building memories text.
*/
@@ -72,12 +149,48 @@ export interface BuildMemoriesTextOptions {
apiKey: string
logger: Logger
promptTemplate?: PromptTemplate
+ /**
+ * Search mode for memory retrieval:
+ * - "memories": Search only memory entries (default)
+ * - "hybrid": Search both memories AND document chunks (recommended for RAG)
+ * - "documents": Search only document chunks
+ */
+ searchMode?: SearchMode
+ /** Maximum number of search results to return (default: 10) */
+ searchLimit?: number
+}
+
+/**
+ * Formats search results (memories and/or chunks) into a readable string.
+ */
+const formatSearchResults = (
+ results: SearchResultItem[],
+ includeChunks: boolean,
+): string => {
+ if (results.length === 0) return ""
+
+ const formattedResults = results
+ .map((result) => {
+ if (result.memory) {
+ return `- ${result.memory}`
+ }
+ if (result.chunk && includeChunks) {
+ return `- [Document] ${result.chunk}`
+ }
+ return null
+ })
+ .filter(Boolean)
+
+ return formattedResults.join("\n")
}
/**
* Fetches memories from the API, deduplicates them, and formats them into
* the final string to be injected into the system prompt.
*
+ * When searchMode is "hybrid" or "documents", uses the search API to retrieve
+ * both memories and document chunks. Otherwise, uses the profile API.
+ *
* @param options - Configuration for building memories text
* @returns The final formatted memories string ready for injection
*/
@@ -92,69 +205,144 @@ export const buildMemoriesText = async (
apiKey,
logger,
promptTemplate = defaultPromptTemplate,
+ searchMode = "memories",
+ searchLimit = 10,
} = options
- const memoriesResponse = await supermemoryProfileSearch(
- containerTag,
- queryText,
- baseUrl,
- apiKey,
- )
+ const useHybridSearch = searchMode === "hybrid" || searchMode === "documents"
- const memoryCountStatic = memoriesResponse.profile.static?.length || 0
- const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+ let userMemories = ""
+ let generalSearchMemories = ""
+ let rawSearchResults: Array<{
+ memory: string
+ metadata?: Record<string, unknown>
+ }> = []
- logger.info("Memory search completed", {
- containerTag,
- memoryCountStatic,
- memoryCountDynamic,
- queryText:
- queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
- mode,
- })
+ if (useHybridSearch && queryText) {
+ logger.info("Using hybrid search mode", {
+ containerTag,
+ searchMode,
+ queryText:
+ queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+ })
- const deduplicated = deduplicateMemories({
- static: memoriesResponse.profile.static,
- dynamic: memoriesResponse.profile.dynamic,
- searchResults: memoriesResponse.searchResults?.results,
- })
+ const searchResponse = await supermemoryHybridSearch(
+ containerTag,
+ queryText,
+ searchMode,
+ baseUrl,
+ apiKey,
+ searchLimit,
+ )
- logger.debug("Memory deduplication completed", {
- static: {
- original: memoryCountStatic,
- deduplicated: deduplicated.static.length,
- },
- dynamic: {
- original: memoryCountDynamic,
- deduplicated: deduplicated.dynamic.length,
- },
- searchResults: {
- original: memoriesResponse.searchResults?.results?.length,
- deduplicated: deduplicated.searchResults?.length,
- },
- })
+ logger.info("Hybrid search completed", {
+ containerTag,
+ resultCount: searchResponse.results.length,
+ timing: searchResponse.timing,
+ searchMode,
+ })
+
+ const includeChunks = searchMode === "hybrid" || searchMode === "documents"
+ generalSearchMemories = formatSearchResults(
+ searchResponse.results,
+ includeChunks,
+ )
- const userMemories =
- mode !== "query"
- ? convertProfileToMarkdown({
- profile: {
- static: deduplicated.static,
- dynamic: deduplicated.dynamic,
- },
- searchResults: { results: [] },
- })
- : ""
- const generalSearchMemories =
- mode !== "profile"
- ? `Search results for user's recent message: \n${deduplicated.searchResults
- .map((memory) => `- ${memory}`)
- .join("\n")}`
- : ""
+ if (generalSearchMemories) {
+ generalSearchMemories = `Search results for user's recent message:\n${generalSearchMemories}`
+ }
+
+ rawSearchResults = searchResponse.results.map((r) => ({
+ memory: r.memory || r.chunk || "",
+ metadata: r.metadata,
+ }))
+
+ if (mode !== "query") {
+ const profileResponse = await supermemoryProfileSearch(
+ containerTag,
+ "",
+ baseUrl,
+ apiKey,
+ )
+
+ const deduplicated = deduplicateMemories({
+ static: profileResponse.profile.static,
+ dynamic: profileResponse.profile.dynamic,
+ searchResults: [],
+ })
+
+ userMemories = convertProfileToMarkdown({
+ profile: {
+ static: deduplicated.static,
+ dynamic: deduplicated.dynamic,
+ },
+ searchResults: { results: [] },
+ })
+ }
+ } else {
+ const memoriesResponse = await supermemoryProfileSearch(
+ containerTag,
+ queryText,
+ baseUrl,
+ apiKey,
+ )
+
+ const memoryCountStatic = memoriesResponse.profile.static?.length || 0
+ const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+
+ logger.info("Memory search completed", {
+ containerTag,
+ memoryCountStatic,
+ memoryCountDynamic,
+ queryText:
+ queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+ mode,
+ })
+
+ const deduplicated = deduplicateMemories({
+ static: memoriesResponse.profile.static,
+ dynamic: memoriesResponse.profile.dynamic,
+ searchResults: memoriesResponse.searchResults?.results,
+ })
+
+ logger.debug("Memory deduplication completed", {
+ static: {
+ original: memoryCountStatic,
+ deduplicated: deduplicated.static.length,
+ },
+ dynamic: {
+ original: memoryCountDynamic,
+ deduplicated: deduplicated.dynamic.length,
+ },
+ searchResults: {
+ original: memoriesResponse.searchResults?.results?.length,
+ deduplicated: deduplicated.searchResults?.length,
+ },
+ })
+
+ userMemories =
+ mode !== "query"
+ ? convertProfileToMarkdown({
+ profile: {
+ static: deduplicated.static,
+ dynamic: deduplicated.dynamic,
+ },
+ searchResults: { results: [] },
+ })
+ : ""
+ generalSearchMemories =
+ mode !== "profile"
+ ? `Search results for user's recent message: \n${deduplicated.searchResults
+ .map((memory) => `- ${memory}`)
+ .join("\n")}`
+ : ""
+ rawSearchResults = memoriesResponse.searchResults?.results ?? []
+ }
const promptData: MemoryPromptData = {
userMemories,
generalSearchMemories,
- searchResults: memoriesResponse.searchResults?.results ?? [],
+ searchResults: rawSearchResults,
}
const memories = promptTemplate(promptData)
diff --git a/packages/tools/src/shared/types.ts b/packages/tools/src/shared/types.ts
index 421785f52..70427eae6 100644
--- a/packages/tools/src/shared/types.ts
+++ b/packages/tools/src/shared/types.ts
@@ -47,6 +47,14 @@ export type PromptTemplate = (data: MemoryPromptData) => string
*/
export type MemoryMode = "profile" | "query" | "full"
+/**
+ * Search mode for memory retrieval:
+ * - "memories": Search only memory entries (default)
+ * - "hybrid": Search both memories AND document chunks (recommended for RAG)
+ * - "documents": Search only document chunks
+ */
+export type SearchMode = "memories" | "hybrid" | "documents"
+
/**
* Memory persistence mode:
* - "always": Automatically save conversations as memories
@@ -117,6 +125,15 @@ export interface SupermemoryBaseOptions {
threadId?: string
/** Memory retrieval mode */
mode?: MemoryMode
+ /**
+ * Search mode for memory retrieval:
+ * - "memories": Search only memory entries (default)
+ * - "hybrid": Search both memories AND document chunks (recommended for RAG)
+ * - "documents": Search only document chunks
+ */
+ searchMode?: SearchMode
+ /** Maximum number of search results to return when using hybrid/documents mode (default: 10) */
+ searchLimit?: number
/** Memory persistence mode */
addMemory?: AddMemoryMode
/** Enable detailed logging of memory search and injection */
diff --git a/packages/tools/src/tools-shared.ts b/packages/tools/src/tools-shared.ts
index ac5dfcd39..551f2604a 100644
--- a/packages/tools/src/tools-shared.ts
+++ b/packages/tools/src/tools-shared.ts
@@ -12,8 +12,6 @@ export const TOOL_DESCRIPTIONS = {
"Get user profile containing static memories (permanent facts) and dynamic memories (recent context). Optionally include search results by providing a query.",
documentList:
"List stored documents with optional filtering by container tag, status, and pagination. Useful for browsing or managing saved content.",
- documentDelete:
- "Delete a document and its associated memories by document ID or customId. Deletes are permanent. Use when user wants to remove saved content.",
documentAdd:
"Add a new document (URL, text, or content) to memory. The content is queued for processing, and memories will be extracted automatically.",
memoryForget:
@@ -33,7 +31,6 @@ export const PARAMETER_DESCRIPTIONS = {
offset: "Number of items to skip for pagination (default: 0)",
status:
"Filter documents by processing status (e.g., 'completed', 'processing', 'failed')",
- documentId: "The unique identifier of the document to operate on",
content: "The content to add - can be text, URL, or other supported formats",
title: "Optional title for the document",
description: "Optional description for the document",
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index beeef093b..28a2f482e 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -12,9 +12,13 @@ import {
} from "./middleware"
import type { PromptTemplate, MemoryPromptData } from "./memory-prompt"
-interface WrapVercelLanguageModelOptions {
- /** Optional conversation ID to group messages for contextual memory generation */
- conversationId?: string
+interface WrapVercelLanguageModelOptions<T> {
+ /** The language model to wrap with supermemory capabilities */
+ model: T
+ /** The container tag/identifier for memory search (e.g., user ID, project ID) */
+ containerTag: string
+ /** Custom ID to group messages into a single document. Required. */
+ customId: string
/** Enable detailed logging of memory search and injection */
verbose?: boolean
/**
@@ -24,9 +28,18 @@ interface WrapVercelLanguageModelOptions {
* - "full": Combines both profile and query-based results
*/
mode?: "profile" | "query" | "full"
+ /**
+ * Search mode for memory retrieval:
+ * - "memories": Search only memory entries (default)
+ * - "hybrid": Search both memories AND document chunks (recommended for RAG)
+ * - "documents": Search only document chunks
+ */
+ searchMode?: "memories" | "hybrid" | "documents"
+ /** Maximum number of search results to return when using hybrid/documents mode (default: 10) */
+ searchLimit?: number
/**
* Memory persistence mode:
- * - "always": Automatically save conversations as memories
+ * - "always": Automatically save conversations as memories (default)
* - "never": Only retrieve memories, don't store new ones
*/
addMemory?: "always" | "never"
@@ -63,13 +76,15 @@ interface WrapVercelLanguageModelOptions {
* Supports both Vercel AI SDK 5 (LanguageModelV2) and SDK 6 (LanguageModelV3) via runtime
* detection of `model.specificationVersion`.
*
- * @param model - The language model to wrap with supermemory capabilities (V2 or V3)
- * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
- * @param options - Optional configuration options for the middleware
- * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
+ * @param options - Configuration object containing model and Supermemory options
+ * @param options.model - The language model to wrap with supermemory capabilities (V2 or V3)
+ * @param options.containerTag - Required. The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options.customId - Required. Custom ID to group messages into a single document
* @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
* @param options.mode - Optional mode for memory search: "profile", "query", or "full" (default: "profile")
- * @param options.addMemory - Optional mode for memory search: "always", "never" (default: "never")
+ * @param options.searchMode - Optional search mode: "memories" (default), "hybrid" (memories + chunks), or "documents" (chunks only)
+ * @param options.searchLimit - Optional maximum number of search results when using hybrid/documents mode (default: 10)
+ * @param options.addMemory - Optional mode for memory persistence: "always" (default - saves conversations), "never" (read-only mode)
* @param options.apiKey - Optional Supermemory API key to use instead of the environment variable
* @param options.baseUrl - Optional base URL for the Supermemory API (default: "https://api.supermemory.ai")
*
@@ -77,30 +92,44 @@ interface WrapVercelLanguageModelOptions {
*
* @example
* ```typescript
- * import { withSupermemory } from "@supermemory/tools/ai-sdk"
+ * import { withSupermemory } from "@supermemory/tools/vercel"
* import { openai } from "@ai-sdk/openai"
+ * import { generateText } from "ai"
*
- * const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123", {
- * conversationId: "conversation-456",
+ * // Basic usage with profile memories
+ * const modelWithMemory = withSupermemory({
+ * model: openai("gpt-4"),
+ * containerTag: "user-123",
+ * customId: "conv-456",
* mode: "full",
* addMemory: "always"
* })
*
+ * // RAG usage with hybrid search (memories + document chunks)
+ * const ragModel = withSupermemory({
+ * model: openai("gpt-4"),
+ * containerTag: "user-123",
+ * customId: "conv-789",
+ * mode: "full",
+ * searchMode: "hybrid", // Search both memories and document chunks
+ * searchLimit: 15,
+ * })
+ *
* const result = await generateText({
- * model: modelWithMemory,
- * messages: [{ role: "user", content: "What's my favorite programming language?" }]
+ * model: ragModel,
+ * messages: [{ role: "user", content: "What's in my documents about quarterly goals?" }]
* })
* ```
*
* @throws {Error} When neither `options.apiKey` nor `process.env.SUPERMEMORY_API_KEY` are set
+ * @throws {Error} When `options.customId` is empty or invalid
* @throws {Error} When supermemory API request fails
*/
const wrapVercelLanguageModel = <T>(
- model: T,
- containerTag: string,
- options?: WrapVercelLanguageModelOptions,
+ options: WrapVercelLanguageModelOptions<T>,
): T => {
- const providedApiKey = options?.apiKey ?? process.env.SUPERMEMORY_API_KEY
+ const { model, containerTag, customId, ...restOptions } = options
+ const providedApiKey = restOptions.apiKey ?? process.env.SUPERMEMORY_API_KEY
if (!providedApiKey) {
throw new Error(
@@ -108,15 +137,24 @@ const wrapVercelLanguageModel = (
)
}
+ // Validate customId is not empty
+ if (!customId || typeof customId !== "string" || customId.trim() === "") {
+ throw new Error(
+ "customId is required and must be a non-empty string — provide it via `options.customId`",
+ )
+ }
+
const ctx = createSupermemoryContext({
containerTag,
apiKey: providedApiKey,
- conversationId: options?.conversationId,
- verbose: options?.verbose ?? false,
- mode: options?.mode ?? "profile",
- addMemory: options?.addMemory ?? "never",
- baseUrl: options?.baseUrl,
- promptTemplate: options?.promptTemplate,
+ customId,
+ verbose: restOptions.verbose ?? false,
+ mode: restOptions.mode ?? "profile",
+ searchMode: restOptions.searchMode ?? "memories",
+ searchLimit: restOptions.searchLimit ?? 10,
+ addMemory: restOptions.addMemory ?? "always",
+ baseUrl: restOptions.baseUrl,
+ promptTemplate: restOptions.promptTemplate,
})
const wrappedModel = {
@@ -130,14 +168,19 @@ const wrapVercelLanguageModel = (
const result = await model.doGenerate(transformedParams as any)
const userMessage = getLastUserMessage(params)
- if (ctx.addMemory === "always" && userMessage && userMessage.trim()) {
+ if (
+ ctx.addMemory === "always" &&
+ ctx.customId &&
+ userMessage &&
+ userMessage.trim()
+ ) {
const assistantResponseText = extractAssistantResponseText(
result.content as unknown[],
)
saveMemoryAfterResponse(
ctx.client,
ctx.containerTag,
- ctx.conversationId,
+ ctx.customId,
assistantResponseText,
params,
ctx.logger,
@@ -180,13 +223,14 @@ const wrapVercelLanguageModel = (
const userMessage = getLastUserMessage(params)
if (
ctx.addMemory === "always" &&
+ ctx.customId &&
userMessage &&
userMessage.trim()
) {
saveMemoryAfterResponse(
ctx.client,
ctx.containerTag,
- ctx.conversationId,
+ ctx.customId,
generatedText,
params,
ctx.logger,
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 8c31b86fd..b0a96ae7b 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -11,6 +11,7 @@ import {
type Logger,
type PromptTemplate,
type MemoryMode,
+ type SearchMode,
} from "../shared"
import {
type LanguageModelCallOptions,
@@ -19,7 +20,7 @@ import {
} from "./util"
import { extractQueryText, injectMemoriesIntoParams } from "./memory-prompt"
-const getConversationContent = (params: LanguageModelCallOptions) => {
+const _getConversationContent = (params: LanguageModelCallOptions) => {
return params.prompt
.filter((msg) => msg.role !== "system" && msg.role !== "tool")
.map((msg) => {
@@ -99,58 +100,34 @@ const convertToConversationMessages = (
}
export const saveMemoryAfterResponse = async (
- client: Supermemory,
+ _client: Supermemory,
containerTag: string,
- conversationId: string | undefined,
+ customId: string,
assistantResponseText: string,
params: LanguageModelCallOptions,
logger: Logger,
apiKey: string,
baseUrl: string,
): Promise<void> => {
- const customId = conversationId ? `conversation:${conversationId}` : undefined
-
try {
- if (customId && conversationId) {
- const conversationMessages = convertToConversationMessages(
- params,
- assistantResponseText,
- )
-
- const response = await addConversation({
- conversationId,
- messages: conversationMessages,
- containerTags: [containerTag],
- apiKey,
- baseUrl,
- })
-
- logger.info("Conversation saved successfully via /v4/conversations", {
- containerTag,
- conversationId,
- messageCount: conversationMessages.length,
- responseId: response.id,
- })
- return
- }
-
- const userMessage = getLastUserMessage(params)
- const content = conversationId
- ? `${getConversationContent(params)} \n\n Assistant: ${assistantResponseText}`
- : `User: ${userMessage} \n\n Assistant: ${assistantResponseText}`
-
- const response = await client.add({
- content,
+ const conversationMessages = convertToConversationMessages(
+ params,
+ assistantResponseText,
+ )
+
+ const response = await addConversation({
+ conversationId: customId,
+ messages: conversationMessages,
containerTags: [containerTag],
- customId,
+ apiKey,
+ baseUrl,
})
- logger.info("Memory saved successfully via /v3/documents", {
+ logger.info("Conversation saved successfully via /v4/conversations", {
containerTag,
customId,
- content,
- contentLength: content.length,
- memoryId: response.id,
+ messageCount: conversationMessages.length,
+ responseId: response.id,
})
} catch (error) {
logger.error("Error saving memory", {
@@ -167,8 +144,8 @@ interface SupermemoryMiddlewareOptions {
containerTag: string
/** Supermemory API key */
apiKey: string
- /** Optional conversation ID to group messages for contextual memory generation */
- conversationId?: string
+ /** Custom ID to group messages into a single document. Required. */
+ customId: string
/** Enable detailed logging of memory search and injection */
verbose?: boolean
/**
@@ -178,6 +155,15 @@ interface SupermemoryMiddlewareOptions {
* - "full": Combines both profile and query-based results
*/
mode?: MemoryMode
+ /**
+ * Search mode for memory retrieval:
+ * - "memories": Search only memory entries (default)
+ * - "hybrid": Search both memories AND document chunks (recommended for RAG)
+ * - "documents": Search only document chunks
+ */
+ searchMode?: SearchMode
+ /** Maximum number of search results to return (default: 10) */
+ searchLimit?: number
/**
* Memory persistence mode:
* - "always": Automatically save conversations as memories
@@ -194,8 +180,10 @@ interface SupermemoryMiddlewareContext {
client: Supermemory
logger: Logger
containerTag: string
- conversationId?: string
+ customId: string
mode: MemoryMode
+ searchMode: SearchMode
+ searchLimit: number
addMemory: "always" | "never"
normalizedBaseUrl: string
apiKey: string
@@ -213,9 +201,11 @@ export const createSupermemoryContext = (
const {
containerTag,
apiKey,
- conversationId,
+ customId,
verbose = false,
mode = "profile",
+ searchMode = "memories",
+ searchLimit = 10,
addMemory = "never",
baseUrl,
promptTemplate,
@@ -235,8 +225,10 @@ export const createSupermemoryContext = (
client,
logger,
containerTag,
- conversationId,
+ customId,
mode,
+ searchMode,
+ searchLimit,
addMemory,
normalizedBaseUrl,
apiKey,
@@ -255,7 +247,7 @@ const makeTurnKey = (
): string => {
return MemoryCache.makeTurnKey(
ctx.containerTag,
- ctx.conversationId,
+ ctx.customId,
ctx.mode,
userMessage,
)
@@ -296,8 +288,9 @@ export const transformParamsWithMemory = async (
ctx.logger.info("Starting memory search", {
containerTag: ctx.containerTag,
- conversationId: ctx.conversationId,
+ customId: ctx.customId,
mode: ctx.mode,
+ searchMode: ctx.searchMode,
isNewTurn,
cacheHit: false,
})
@@ -312,6 +305,8 @@ export const transformParamsWithMemory = async (
apiKey: ctx.apiKey,
logger: ctx.logger,
promptTemplate: ctx.promptTemplate,
+ searchMode: ctx.searchMode,
+ searchLimit: ctx.searchLimit,
})
ctx.memoryCache.set(turnKey, memories)
diff --git a/packages/tools/test/claude-memory-real-example.ts b/packages/tools/test/claude-memory-real-example.ts
index bb6070d48..dbd03b21b 100644
--- a/packages/tools/test/claude-memory-real-example.ts
+++ b/packages/tools/test/claude-memory-real-example.ts
@@ -119,10 +119,16 @@ export async function realClaudeMemoryExample() {
const toolResults = []
if (responseData.content) {
- const memoryToolCalls = responseData.content.filter(
- (block: any): block is { type: 'tool_use'; id: string; name: 'memory'; input: { command: MemoryCommand; path: string } } =>
- block.type === "tool_use" && block.name === "memory",
- )
+ const memoryToolCalls = responseData.content.filter(
+ (
+ block: any,
+ ): block is {
+ type: "tool_use"
+ id: string
+ name: "memory"
+ input: { command: MemoryCommand; path: string }
+ } => block.type === "tool_use" && block.name === "memory",
+ )
const results = await Promise.all(
memoryToolCalls.map((block: any) => {
@@ -196,10 +202,16 @@ export async function processClaudeResponse(
const toolResults = []
if (claudeResponseData.content) {
- const memoryToolCalls = claudeResponseData.content.filter(
- (block: any): block is { type: 'tool_use'; id: string; name: 'memory'; input: { command: MemoryCommand; path: string } } =>
- block.type === "tool_use" && block.name === "memory",
- )
+ const memoryToolCalls = claudeResponseData.content.filter(
+ (
+ block: any,
+ ): block is {
+ type: "tool_use"
+ id: string
+ name: "memory"
+ input: { command: MemoryCommand; path: string }
+ } => block.type === "tool_use" && block.name === "memory",
+ )
const results = await Promise.all(
memoryToolCalls.map((block: any) =>
diff --git a/packages/tools/test/with-supermemory/integration.test.ts b/packages/tools/test/with-supermemory/integration.test.ts
index 4c112cb05..323ca6674 100644
--- a/packages/tools/test/with-supermemory/integration.test.ts
+++ b/packages/tools/test/with-supermemory/integration.test.ts
@@ -96,14 +96,13 @@ describe.skipIf(!shouldRunIntegration)(
const { model, getCapturedGenerateParams } =
createIntegrationMockModel()
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
await wrapped.doGenerate({
prompt: [
@@ -125,18 +124,16 @@ describe.skipIf(!shouldRunIntegration)(
const { model } = createIntegrationMockModel()
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const conversationId = `test-generate-${Date.now()}`
+ const customId = `test-generate-${Date.now()}`
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- addMemory: "always",
- conversationId,
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ addMemory: "always",
+ })
await wrapped.doGenerate({
prompt: [
@@ -166,21 +163,19 @@ describe.skipIf(!shouldRunIntegration)(
fetchSpy.mockRestore()
})
- it("should work with conversationId for grouped memories", async () => {
+ it("should work with customId for grouped memories", async () => {
const { model, getCapturedGenerateParams } =
createIntegrationMockModel()
- const conversationId = `test-conversation-${Date.now()}`
+ const customId = `test-conversation-${Date.now()}`
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- conversationId,
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
await wrapped.doGenerate({
prompt: [
@@ -203,14 +198,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should fetch memories and stream response", async () => {
const { model, getCapturedStreamParams } = createIntegrationMockModel()
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-stream-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
const { stream } = await wrapped.doStream({
prompt: [
@@ -240,18 +234,16 @@ describe.skipIf(!shouldRunIntegration)(
const { model } = createIntegrationMockModel()
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const conversationId = `test-stream-${Date.now()}`
+ const customId = `test-stream-${Date.now()}`
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- addMemory: "always",
- conversationId,
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ addMemory: "always",
+ })
const { stream } = await wrapped.doStream({
prompt: [
@@ -286,14 +278,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should handle text-delta chunks correctly", async () => {
const { model } = createIntegrationMockModel()
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-chunks-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
const { stream } = await wrapped.doStream({
prompt: [
@@ -327,14 +318,13 @@ describe.skipIf(!shouldRunIntegration)(
const { model } = createIntegrationMockModel()
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-profile-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
await wrapped.doGenerate({
prompt: [
@@ -368,14 +358,13 @@ describe.skipIf(!shouldRunIntegration)(
const { model } = createIntegrationMockModel()
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "query",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-query-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "query",
+ })
await wrapped.doGenerate({
prompt: [
@@ -409,14 +398,13 @@ describe.skipIf(!shouldRunIntegration)(
const { model } = createIntegrationMockModel()
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "full",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-full-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "full",
+ })
await wrapped.doGenerate({
prompt: [
@@ -456,15 +444,14 @@ describe.skipIf(!shouldRunIntegration)(
generalSearchMemories: string
}) => `${data.userMemories}`
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- promptTemplate: customTemplate,
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-template-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ promptTemplate: customTemplate,
+ })
await wrapped.doGenerate({
prompt: [
@@ -485,15 +472,14 @@ describe.skipIf(!shouldRunIntegration)(
const { model, getCapturedGenerateParams } =
createIntegrationMockModel()
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- verbose: true,
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-verbose-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ verbose: true,
+ })
await wrapped.doGenerate({
prompt: [
@@ -514,15 +500,14 @@ describe.skipIf(!shouldRunIntegration)(
const fetchSpy = vi.spyOn(globalThis, "fetch")
// Use the configured base URL (or default)
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-baseurl-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ })
await wrapped.doGenerate({
prompt: [
@@ -556,14 +541,13 @@ describe.skipIf(!shouldRunIntegration)(
new Error("Model error"),
)
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-error-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
await expect(
wrapped.doGenerate({
@@ -580,14 +564,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should handle invalid API key gracefully", async () => {
const { model } = createIntegrationMockModel()
- const wrapped = withSupermemory(
+ const wrapped = withSupermemory({
model,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: "invalid-api-key-12345",
- mode: "profile",
- },
- )
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `test-invalid-key-${Date.now()}`,
+ apiKey: "invalid-api-key-12345",
+ mode: "profile",
+ })
await expect(
wrapped.doGenerate({
diff --git a/packages/tools/test/with-supermemory/unit.test.ts b/packages/tools/test/with-supermemory/unit.test.ts
index b20eb6f2b..ef9e272ef 100644
--- a/packages/tools/test/with-supermemory/unit.test.ts
+++ b/packages/tools/test/with-supermemory/unit.test.ts
@@ -73,7 +73,11 @@ describe("Unit: withSupermemory", () => {
const mockModel = createMockLanguageModel()
expect(() => {
- withSupermemory(mockModel, TEST_CONFIG.containerTag)
+ withSupermemory({
+ model: mockModel,
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "test-conv-123",
+ })
}).toThrow("SUPERMEMORY_API_KEY is not set")
})
@@ -81,7 +85,11 @@ describe("Unit: withSupermemory", () => {
process.env.SUPERMEMORY_API_KEY = "test-key"
const mockModel = createMockLanguageModel()
- const wrappedModel = withSupermemory(mockModel, TEST_CONFIG.containerTag)
+ const wrappedModel = withSupermemory({
+ model: mockModel,
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "test-conv-456",
+ })
expect(wrappedModel).toBeDefined()
expect(wrappedModel.specificationVersion).toBe("v2")
@@ -106,6 +114,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-cache-123",
mode: "profile",
})
@@ -138,6 +147,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-continuation-456",
mode: "profile",
})
@@ -210,6 +220,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-refetch-789",
mode: "profile",
})
@@ -270,6 +281,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-error-101",
mode: "profile",
})
@@ -291,6 +303,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-empty-102",
mode: "query",
})
@@ -308,6 +321,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-empty-content-103",
mode: "query",
})
@@ -335,6 +349,7 @@ describe("Unit: withSupermemory", () => {
const ctx = createSupermemoryContext({
containerTag: TEST_CONFIG.containerTag,
apiKey: TEST_CONFIG.apiKey,
+ customId: "test-mutate-104",
mode: "profile",
})