Skip to content

Commit 4decc4f

Browse files
feat(honcho): add self-log read tools and arbitrary-peer query escape hatch
Closes the read-side gap — self-conclusions written via observed='self' had no mid-session retrieval surface. Adds:

- query_self_conclusions: semantic search on the AI peer's own self-log
- list_self_conclusions: paginated audit of self-conclusions
- query_peer_conclusions: cross-peer query with explicit observer/observed peer names — escape hatch for multi-agent setups (Claude + Codex + Hermes) and cell-B queries (user's evaluations of a specific agent)

Three-tool pattern per operation: sharp facades for the common cases (_user_, _self_) plus a peer-name-addressable variant for arbitrary edges. Facades win on MCP discoverability; the escape hatch scales to N agents without tool sprawl.

Read API now mirrors the write API. list_self_conclusions and query_self_conclusions are mode-independent — aiPeer always observes itself regardless of unified vs directional config.
1 parent f253980 commit 4decc4f

2 files changed

Lines changed: 235 additions & 2 deletions

File tree

plugins/honcho/dist/mcp-server.js

Lines changed: 114 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29740,7 +29740,7 @@ For either target, the test before writing is: "would future-me find this useful
2974029740
},
2974129741
{
2974229742
name: "query_conclusions",
29743-
description: "Semantic search across saved conclusions about the user. Use this to find specific knowledge Honcho has derived \u2014 more targeted than list_conclusions (which is just paginated). Returns conclusions ranked by relevance.",
29743+
description: "Semantic search across saved conclusions about the user. Returns conclusions ranked by relevance. For your own self-log, use query_self_conclusions. For cross-peer queries (multi-agent setups), use query_peer_conclusions.",
2974429744
inputSchema: {
2974529745
type: "object",
2974629746
properties: {
@@ -29757,6 +29757,49 @@ For either target, the test before writing is: "would future-me find this useful
2975729757
required: ["query"]
2975829758
}
2975929759
},
29760+
{
29761+
name: "query_self_conclusions",
29762+
description: "Semantic search across YOUR cross-session self-log (conclusions you've written about yourself as the AI peer via create_conclusion(observed='self')). Use to retrieve patterns, discoveries, and course-corrections you've logged in past sessions on a related topic. Symmetric with query_conclusions but scoped to the AI peer.",
29763+
inputSchema: {
29764+
type: "object",
29765+
properties: {
29766+
query: { type: "string", description: "Semantic query" },
29767+
top_k: { type: "number", description: "Max results (default 10)", default: 10 }
29768+
},
29769+
required: ["query"]
29770+
}
29771+
},
29772+
{
29773+
name: "list_self_conclusions",
29774+
description: "Paginated list of YOUR self-log conclusions. Use to audit what you've recorded or find IDs for deletion. For semantic search, prefer query_self_conclusions.",
29775+
inputSchema: {
29776+
type: "object",
29777+
properties: {
29778+
page: { type: "number", description: "Page number (1-indexed)", default: 1 },
29779+
size: { type: "number", description: "Results per page (max 50)", default: 20 }
29780+
}
29781+
}
29782+
},
29783+
{
29784+
name: "query_peer_conclusions",
29785+
description: "Semantic search across conclusions on an arbitrary (observer, observed) peer edge. Escape hatch for multi-agent setups (Claude + Codex + Hermes etc.) where you need to address peers by name rather than alias. Common cases: (a) cross-peer queries \u2014 what does another agent know about the user; (b) cell-B queries \u2014 what does the user think about a specific agent (observer=user, observed=<agent>). For self-log queries, prefer query_self_conclusions; for default user queries, prefer query_conclusions.",
29786+
inputSchema: {
29787+
type: "object",
29788+
properties: {
29789+
query: { type: "string", description: "Semantic query" },
29790+
observed_peer: {
29791+
type: "string",
29792+
description: "Peer name being observed (the subject of the conclusions). Required."
29793+
},
29794+
observer_peer: {
29795+
type: "string",
29796+
description: "Peer name doing the observing. Defaults to the AI peer (caller). Pass user peer name to query cell B (user's evaluations of an agent)."
29797+
},
29798+
top_k: { type: "number", description: "Max results (default 10)", default: 10 }
29799+
},
29800+
required: ["query", "observed_peer"]
29801+
}
29802+
},
2976029803
{
2976129804
name: "schedule_dream",
2976229805
description: "Trigger Honcho's background memory consolidation (a 'dream'). Honcho will merge redundant conclusions and derive higher-level insights about the user. Use after a long or insight-rich session to improve memory quality. Returns immediately; consolidation runs async on Honcho's side.",
@@ -30163,6 +30206,76 @@ For either target, the test before writing is: "would future-me find this useful
3016330206
};
3016430207
}
3016530208
}
30209+
if (name === "query_self_conclusions" || name === "list_self_conclusions") {
30210+
try {
30211+
const aiPeer = await honcho.peer(config2.aiPeer);
30212+
const conclusionScope = aiPeer.conclusionsOf(config2.aiPeer);
30213+
if (name === "query_self_conclusions") {
30214+
const query = args?.query;
30215+
const topK = args?.top_k ?? 10;
30216+
const conclusions = await conclusionScope.query(query, topK);
30217+
const items2 = (conclusions ?? []).map((c) => ({
30218+
id: c.id,
30219+
content: c.content,
30220+
sessionId: c.sessionId ?? c.session_id,
30221+
createdAt: c.createdAt ?? c.created_at
30222+
}));
30223+
return { content: [{ type: "text", text: JSON.stringify(items2, null, 2) }] };
30224+
}
30225+
const page = args?.page ?? 1;
30226+
const size = Math.min(args?.size ?? 20, 100);
30227+
const result = await conclusionScope.list({ page, size });
30228+
const items = result.items.map((c) => ({
30229+
id: c.id,
30230+
content: c.content,
30231+
createdAt: c.createdAt
30232+
}));
30233+
return {
30234+
content: [{
30235+
type: "text",
30236+
text: JSON.stringify({ items, total: result.total, page: result.page, pages: result.pages }, null, 2)
30237+
}]
30238+
};
30239+
} catch (error3) {
30240+
return {
30241+
content: [{ type: "text", text: `Error: ${error3 instanceof Error ? error3.message : String(error3)}` }],
30242+
isError: true
30243+
};
30244+
}
30245+
}
30246+
if (name === "query_peer_conclusions") {
30247+
try {
30248+
const query = args?.query;
30249+
const observedPeerName = args?.observed_peer;
30250+
const observerPeerName = args?.observer_peer ?? config2.aiPeer;
30251+
const topK = args?.top_k ?? 10;
30252+
if (!observedPeerName) {
30253+
return {
30254+
content: [{ type: "text", text: JSON.stringify({ success: false, error: "observed_peer is required" }) }],
30255+
isError: true
30256+
};
30257+
}
30258+
const observer = await honcho.peer(observerPeerName);
30259+
const conclusions = await observer.conclusionsOf(observedPeerName).query(query, topK);
30260+
const items = (conclusions ?? []).map((c) => ({
30261+
id: c.id,
30262+
content: c.content,
30263+
sessionId: c.sessionId ?? c.session_id,
30264+
createdAt: c.createdAt ?? c.created_at
30265+
}));
30266+
return {
30267+
content: [{
30268+
type: "text",
30269+
text: JSON.stringify({ observer: observerPeerName, observed: observedPeerName, items }, null, 2)
30270+
}]
30271+
};
30272+
} catch (error3) {
30273+
return {
30274+
content: [{ type: "text", text: `Error: ${error3 instanceof Error ? error3.message : String(error3)}` }],
30275+
isError: true
30276+
};
30277+
}
30278+
}
3016630279
const sessionName = getSessionName(cwd);
3016730280
try {
3016830281
const session = await honcho.session(sessionName);

plugins/honcho/src/mcp/server.ts

Lines changed: 121 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -661,7 +661,7 @@ export async function runMcpServer(): Promise<void> {
661661
},
662662
{
663663
name: "query_conclusions",
664-
description: "Semantic search across saved conclusions about the user. Use this to find specific knowledge Honcho has derived — more targeted than list_conclusions (which is just paginated). Returns conclusions ranked by relevance.",
664+
description: "Semantic search across saved conclusions about the user. Returns conclusions ranked by relevance. For your own self-log, use query_self_conclusions. For cross-peer queries (multi-agent setups), use query_peer_conclusions.",
665665
inputSchema: {
666666
type: "object",
667667
properties: {
@@ -678,6 +678,49 @@ export async function runMcpServer(): Promise<void> {
678678
required: ["query"],
679679
},
680680
},
681+
{
682+
name: "query_self_conclusions",
683+
description: "Semantic search across YOUR cross-session self-log (conclusions you've written about yourself as the AI peer via create_conclusion(observed='self')). Use to retrieve patterns, discoveries, and course-corrections you've logged in past sessions on a related topic. Symmetric with query_conclusions but scoped to the AI peer.",
684+
inputSchema: {
685+
type: "object",
686+
properties: {
687+
query: { type: "string", description: "Semantic query" },
688+
top_k: { type: "number", description: "Max results (default 10)", default: 10 },
689+
},
690+
required: ["query"],
691+
},
692+
},
693+
{
694+
name: "list_self_conclusions",
695+
description: "Paginated list of YOUR self-log conclusions. Use to audit what you've recorded or find IDs for deletion. For semantic search, prefer query_self_conclusions.",
696+
inputSchema: {
697+
type: "object",
698+
properties: {
699+
page: { type: "number", description: "Page number (1-indexed)", default: 1 },
700+
size: { type: "number", description: "Results per page (max 50)", default: 20 },
701+
},
702+
},
703+
},
704+
{
705+
name: "query_peer_conclusions",
706+
description: "Semantic search across conclusions on an arbitrary (observer, observed) peer edge. Escape hatch for multi-agent setups (Claude + Codex + Hermes etc.) where you need to address peers by name rather than alias. Common cases: (a) cross-peer queries — what does another agent know about the user; (b) cell-B queries — what does the user think about a specific agent (observer=user, observed=<agent>). For self-log queries, prefer query_self_conclusions; for default user queries, prefer query_conclusions.",
707+
inputSchema: {
708+
type: "object",
709+
properties: {
710+
query: { type: "string", description: "Semantic query" },
711+
observed_peer: {
712+
type: "string",
713+
description: "Peer name being observed (the subject of the conclusions). Required.",
714+
},
715+
observer_peer: {
716+
type: "string",
717+
description: "Peer name doing the observing. Defaults to the AI peer (caller). Pass user peer name to query cell B (user's evaluations of an agent).",
718+
},
719+
top_k: { type: "number", description: "Max results (default 10)", default: 10 },
720+
},
721+
required: ["query", "observed_peer"],
722+
},
723+
},
681724
{
682725
name: "schedule_dream",
683726
description: "Trigger Honcho's background memory consolidation (a 'dream'). Honcho will merge redundant conclusions and derive higher-level insights about the user. Use after a long or insight-rich session to improve memory quality. Returns immediately; consolidation runs async on Honcho's side.",
@@ -1120,6 +1163,83 @@ export async function runMcpServer(): Promise<void> {
11201163
}
11211164
}
11221165

1166+
// Self-log facade: aiPeer observes itself. Independent of observation mode.
1167+
if (name === "query_self_conclusions" || name === "list_self_conclusions") {
1168+
try {
1169+
const aiPeer = await honcho.peer(config.aiPeer);
1170+
const conclusionScope = aiPeer.conclusionsOf(config.aiPeer);
1171+
1172+
if (name === "query_self_conclusions") {
1173+
const query = args?.query as string;
1174+
const topK = (args?.top_k as number) ?? 10;
1175+
const conclusions = await conclusionScope.query(query, topK);
1176+
const items = (conclusions ?? []).map((c: any) => ({
1177+
id: c.id,
1178+
content: c.content,
1179+
sessionId: c.sessionId ?? c.session_id,
1180+
createdAt: c.createdAt ?? c.created_at,
1181+
}));
1182+
return { content: [{ type: "text", text: JSON.stringify(items, null, 2) }] };
1183+
}
1184+
1185+
const page = (args?.page as number) ?? 1;
1186+
const size = Math.min((args?.size as number) ?? 20, 100);
1187+
const result = await conclusionScope.list({ page, size });
1188+
const items = result.items.map((c: any) => ({
1189+
id: c.id,
1190+
content: c.content,
1191+
createdAt: c.createdAt,
1192+
}));
1193+
return {
1194+
content: [{
1195+
type: "text",
1196+
text: JSON.stringify({ items, total: result.total, page: result.page, pages: result.pages }, null, 2),
1197+
}],
1198+
};
1199+
} catch (error) {
1200+
return {
1201+
content: [{ type: "text", text: `Error: ${error instanceof Error ? error.message : String(error)}` }],
1202+
isError: true,
1203+
};
1204+
}
1205+
}
1206+
1207+
// Escape hatch: arbitrary (observer, observed) peer-name addressing for
1208+
// multi-agent setups. Default observer = aiPeer (caller).
1209+
if (name === "query_peer_conclusions") {
1210+
try {
1211+
const query = args?.query as string;
1212+
const observedPeerName = args?.observed_peer as string;
1213+
const observerPeerName = (args?.observer_peer as string) ?? config.aiPeer;
1214+
const topK = (args?.top_k as number) ?? 10;
1215+
if (!observedPeerName) {
1216+
return {
1217+
content: [{ type: "text", text: JSON.stringify({ success: false, error: "observed_peer is required" }) }],
1218+
isError: true,
1219+
};
1220+
}
1221+
const observer = await honcho.peer(observerPeerName);
1222+
const conclusions = await observer.conclusionsOf(observedPeerName).query(query, topK);
1223+
const items = (conclusions ?? []).map((c: any) => ({
1224+
id: c.id,
1225+
content: c.content,
1226+
sessionId: c.sessionId ?? c.session_id,
1227+
createdAt: c.createdAt ?? c.created_at,
1228+
}));
1229+
return {
1230+
content: [{
1231+
type: "text",
1232+
text: JSON.stringify({ observer: observerPeerName, observed: observedPeerName, items }, null, 2),
1233+
}],
1234+
};
1235+
} catch (error) {
1236+
return {
1237+
content: [{ type: "text", text: `Error: ${error instanceof Error ? error.message : String(error)}` }],
1238+
isError: true,
1239+
};
1240+
}
1241+
}
1242+
11231243
// ── Honcho session tools ──
11241244

11251245
const sessionName = getSessionName(cwd);

0 commit comments

Comments (0)