Skip to content

Commit 7878c04

Browse files
ozgesolidkeyclaude authored and committed
Add Local LLM agent support, agent naming, and API server port takeover
Local LLM:
- New "Local LLM" option in Agent Setup Wizard
- Auto-detects running Ollama and LM Studio with model listing
- Bridge script (examples/agent-local-llm.mjs) connects local LLM to LOGAN's chat via OpenAI-compatible API with tool dispatch
- Configurable endpoint and model in wizard

Agent naming:
- Optional name field in wizard configure step (default: "wolvie")
- Agent name shown in chat status bar with model info
- Name passed to LLM system prompt so agent responds in character

API server port takeover:
- New /api/shutdown endpoint for graceful server handoff
- On EADDRINUSE, new instance requests old server to shut down
- Falls back to lsof-based kill if old instance unresponsive
- Eliminates "MCP bridge disabled" errors on re-launch

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent f8b9e57 commit 7878c04

5 files changed

Lines changed: 622 additions & 22 deletions

File tree

examples/agent-local-llm.mjs

Lines changed: 375 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,375 @@
1+
#!/usr/bin/env node
2+
/**
3+
* LOGAN Chat Agent — Local LLM Bridge
4+
*
5+
* Connects a local LLM (Ollama, LM Studio, or any OpenAI-compatible API)
6+
* to LOGAN's Chat tab. The LLM gets context about the open log file and
7+
* can use LOGAN's analysis tools via function-calling-style prompts.
8+
*
9+
* Usage:
10+
* LLM_ENDPOINT=http://localhost:11434/v1 LLM_MODEL=llama3 node examples/agent-local-llm.mjs
11+
*
12+
* Environment variables:
13+
* LLM_ENDPOINT — Base URL (default: http://localhost:11434/v1)
14+
* LLM_MODEL — Model name (default: llama3)
15+
*
16+
* Prerequisites:
17+
* - LOGAN is running with a file open
18+
* - Ollama/LM Studio running locally
19+
*/
20+
21+
import { readFileSync } from 'fs';
22+
import { join } from 'path';
23+
import { homedir } from 'os';
24+
import http from 'http';
25+
import https from 'https';
26+
27+
// --- Config ---
// Port-discovery file written by LOGAN on startup; read below to find the API server.
const PORT_FILE = join(homedir(), '.logan', 'mcp-port');
// How long the main loop waits for a user chat message before exiting.
const WAIT_TIMEOUT = 300; // seconds
// Base URL of the OpenAI-compatible API (default is Ollama's local endpoint).
const LLM_ENDPOINT = process.env.LLM_ENDPOINT || 'http://localhost:11434/v1';
// Model name sent with every chat-completion request.
const LLM_MODEL = process.env.LLM_MODEL || 'llama3';
// Display name used in chat, the SSE subscription, and the system prompt.
const AGENT_NAME = process.env.AGENT_NAME || 'wolvie';
33+
34+
// --- Read LOGAN port ---
// LOGAN writes its API server port to ~/.logan/mcp-port on startup; a missing
// or unreadable file means LOGAN is not running, so bail out immediately.
let port;
try {
  port = Number.parseInt(readFileSync(PORT_FILE, 'utf-8').trim(), 10);
} catch {
  console.error(`ERROR: LOGAN is not running (no ${PORT_FILE})`);
  process.exit(1);
}

// Guard against a corrupt or empty port file: parseInt would yield NaN (or a
// nonsense value) and every later request would fail with confusing errors.
if (!Number.isInteger(port) || port <= 0 || port > 65535) {
  console.error(`ERROR: Invalid port value in ${PORT_FILE}`);
  process.exit(1);
}

const BASE = `http://127.0.0.1:${port}`;
44+
45+
// --- HTTP helpers ---

/**
 * Issue a JSON request against LOGAN's local API server.
 *
 * @param {string} method - HTTP method ('GET', 'POST', ...).
 * @param {string} path - Request path, e.g. '/api/status'.
 * @param {object} [body] - Optional payload; serialized as JSON when present.
 * @returns {Promise<object>} Parsed JSON response.
 *   Rejects on network error, 60s timeout, or a non-JSON response body.
 */
function loganApi(method, path, body) {
  return new Promise((resolve, reject) => {
    const data = body ? JSON.stringify(body) : undefined;

    const headers = { 'Content-Type': 'application/json' };
    if (data) headers['Content-Length'] = Buffer.byteLength(data);

    const req = http.request(
      { hostname: '127.0.0.1', port, path, method, headers, timeout: 60000 },
      (res) => {
        let raw = '';
        res.setEncoding('utf8');
        res.on('data', (piece) => { raw += piece; });
        res.on('end', () => {
          try {
            resolve(JSON.parse(raw));
          } catch {
            reject(new Error('Invalid JSON response'));
          }
        });
      }
    );

    req.on('error', reject);
    req.on('timeout', () => {
      req.destroy();
      reject(new Error('Timeout'));
    });

    if (data) req.write(data);
    req.end();
  });
}
80+
81+
/**
 * Send a chat-completion request to the local LLM via its OpenAI-compatible API.
 *
 * @param {Array<{role: string, content: string}>} messages - Full message list.
 * @returns {Promise<string>} The assistant's reply text (trimmed; '' if the
 *   response has no choices). Rejects on network error, 120s timeout, a
 *   non-2xx HTTP status, or an unparseable response body.
 */
function llmChat(messages) {
  return new Promise((resolve, reject) => {
    const url = new URL(`${LLM_ENDPOINT}/chat/completions`);
    const isHttps = url.protocol === 'https:';
    const lib = isHttps ? https : http;

    const payload = JSON.stringify({
      model: LLM_MODEL,
      messages,
      temperature: 0.7,
      max_tokens: 2048,
      stream: false,
    });

    const req = lib.request(
      {
        hostname: url.hostname,
        port: url.port || (isHttps ? 443 : 80),
        // Include url.search so a query string in LLM_ENDPOINT is preserved
        // (the original sent url.pathname only and silently dropped it).
        path: url.pathname + url.search,
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Content-Length': Buffer.byteLength(payload),
        },
        timeout: 120000,
      },
      (res) => {
        const chunks = [];
        res.on('data', (c) => chunks.push(c));
        res.on('end', () => {
          const bodyText = Buffer.concat(chunks).toString();
          // Fail loudly on HTTP errors (e.g. 404 for an unknown model) instead
          // of resolving to an empty string when the error body is valid JSON.
          if (res.statusCode < 200 || res.statusCode >= 300) {
            reject(new Error(`LLM HTTP ${res.statusCode}: ${bodyText.substring(0, 200)}`));
            return;
          }
          try {
            const data = JSON.parse(bodyText);
            const content = data.choices?.[0]?.message?.content || '';
            resolve(content.trim());
          } catch (e) {
            reject(new Error(`LLM response parse error: ${e.message}`));
          }
        });
      }
    );
    req.on('error', reject);
    req.on('timeout', () => { req.destroy(); reject(new Error('LLM request timeout')); });
    req.write(payload);
    req.end();
  });
}
127+
128+
/**
 * Post a chat message to LOGAN as this agent and echo it to the console.
 *
 * @param {string} message - Text to show in LOGAN's chat tab.
 */
async function sendMessage(message) {
  await loganApi('POST', '/api/agent-message', { message });
  console.log(`[agent] ${message}`);
}
132+
133+
/**
 * Block until the user sends a chat message over LOGAN's SSE event stream.
 *
 * @param {number} [timeoutSec=120] - Seconds to wait before giving up.
 * @returns {Promise<string|null>} The user's message text, or null on timeout,
 *   stream end, or connection error (never rejects).
 */
function waitForMessage(timeoutSec = 120) {
  return new Promise((resolve) => {
    // Declared up front: the original referenced `req` from the timeout
    // callback before its `const` declaration (a TDZ hazard if http.get
    // were to throw synchronously).
    let req = null;
    let settled = false;

    // Resolve exactly once, always tearing down both the timer and request.
    const finish = (value) => {
      if (settled) return;
      settled = true;
      clearTimeout(timer);
      if (req) req.destroy();
      resolve(value);
    };

    const timer = setTimeout(() => finish(null), timeoutSec * 1000);

    req = http.get(
      {
        hostname: '127.0.0.1',
        port,
        path: `/api/events?name=${encodeURIComponent(AGENT_NAME)}`,
        headers: { Accept: 'text/event-stream' },
      },
      (res) => {
        let buf = '';
        res.on('data', (chunk) => {
          buf += chunk.toString();
          // SSE frames are separated by a blank line; keep any trailing
          // partial frame in the buffer for the next chunk.
          const frames = buf.split('\n\n');
          buf = frames.pop();

          for (const frame of frames) {
            const dataLine = frame.split('\n').find((l) => l.startsWith('data: '));
            if (!dataLine) continue;
            try {
              const msg = JSON.parse(dataLine.slice(6));
              // Only user-authored messages wake the agent; its own echoes
              // and system events are ignored.
              if (msg.from === 'user') {
                finish(msg.text);
                return;
              }
            } catch { /* ignore malformed frames */ }
          }
        });
        res.on('end', () => finish(null));
      }
    );

    req.on('error', () => finish(null));
  });
}
181+
182+
// --- Build context from LOGAN ---

/**
 * Assemble a short textual summary of the file currently open in LOGAN
 * (path, line counts, active filter, and the first 20 lines as a sample)
 * for seeding the LLM's system prompt.
 *
 * @returns {Promise<string>} Context text; a fallback sentence when no file
 *   is open or LOGAN cannot be reached.
 */
async function getLogContext() {
  let status;
  try {
    status = await loganApi('GET', '/api/status');
  } catch {
    return 'Unable to fetch LOGAN status.';
  }

  if (!status.isFileOpen) return 'No file is currently open in LOGAN.';

  const parts = [`File: ${status.filePath}`, `Lines: ${status.totalLines}`];
  if (status.isFiltered) parts.push(`Filtered: ${status.filteredLineCount} visible`);

  // The sample is best-effort; a failure here should not lose the summary.
  try {
    const sample = await loganApi('POST', '/api/get-lines', { startLine: 0, count: 20 });
    if (sample.success && sample.lines?.length > 0) {
      parts.push('\nSample (first 20 lines):');
      parts.push(sample.lines.map((line) => line.text).join('\n'));
    }
  } catch { /* skip sample */ }

  return parts.join('\n');
}
206+
207+
// --- Command detection & LOGAN tool calls ---

// Ordered regex → tool mappings; executeToolIfDetected takes the FIRST match,
// so broader phrasings (e.g. 'crashes') shadow later ones when both apply.
const TOOL_PATTERNS = [
  { pattern: /\b(?:search|find|grep)\s+(?:for\s+)?["']?(.+?)["']?\s*$/i, action: 'search' },
  { pattern: /\b(?:analyze|analysis|triage|overview)\b/i, action: 'analyze' },
  { pattern: /\b(?:crash|crashes|fatal|panic)\b/i, action: 'crashes' },
  { pattern: /\b(?:filter)\s+(.+)/i, action: 'filter' },
  { pattern: /\b(?:clear filter|remove filter|unfilter)\b/i, action: 'clear-filter' },
  { pattern: /\b(?:time.?gaps?|gaps?)\b/i, action: 'time-gaps' },
  { pattern: /\b(?:go to|goto|jump to|navigate to)\s+(?:line\s+)?(\d+)/i, action: 'navigate' },
  // Fixed: the original separator [-to]+ was a character class that also
  // matched stray runs like "t", "oo", or "-t-"; use an explicit "-"/"to".
  { pattern: /\b(?:show|get|read)\s+lines?\s+(\d+)\s*(?:-|to)\s*(\d+)/i, action: 'get-lines' },
];
219+
220+
/**
 * Scan a user message for tool-style commands (see TOOL_PATTERNS) and execute
 * the corresponding LOGAN API call for the first pattern that matches.
 *
 * @param {string} userMsg - Raw chat message from the user.
 * @returns {Promise<string|null>} Human-readable tool output, a 'Tool error:'
 *   string if the API call failed, or null when no pattern matched.
 */
async function executeToolIfDetected(userMsg) {
  for (const { pattern, action } of TOOL_PATTERNS) {
    const match = userMsg.match(pattern);
    if (!match) continue;

    try {
      switch (action) {
        case 'search': {
          const result = await loganApi('POST', '/api/search', { pattern: match[1], maxResults: 20 });
          if (result.success && result.matches?.length > 0) {
            return `Found ${result.matches.length} matches for "${match[1]}":\n` +
              result.matches.slice(0, 10).map(m => ` Line ${m.lineNumber + 1}: ${m.lineText?.substring(0, 120)}`).join('\n');
          }
          return `No matches found for "${match[1]}"`;
        }
        case 'analyze': {
          const result = await loganApi('POST', '/api/analyze');
          if (result.success && result.analysis) {
            const a = result.analysis;
            const parts = ['Log Analysis:'];
            if (a.levelCounts) parts.push('Levels: ' + Object.entries(a.levelCounts).map(([k, v]) => `${k}:${v}`).join(', '));
            if (a.crashes?.length) parts.push(`Crashes: ${a.crashes.length} found`);
            if (a.components?.length) parts.push(`Components: ${a.components.length} detected`);
            return parts.join('\n');
          }
          return 'Analysis completed but returned no data.';
        }
        case 'crashes': {
          const result = await loganApi('POST', '/api/investigate-crashes');
          if (result.success) return JSON.stringify(result, null, 2).substring(0, 1000);
          return 'No crash data found.';
        }
        case 'filter': {
          await loganApi('POST', '/api/filter', { includePatterns: [match[1]] });
          return `Filter applied: "${match[1]}"`;
        }
        case 'clear-filter': {
          await loganApi('POST', '/api/clear-filter');
          return 'Filter cleared.';
        }
        case 'time-gaps': {
          const result = await loganApi('POST', '/api/time-gaps');
          if (result.success && result.gaps?.length > 0) {
            return `Found ${result.gaps.length} time gaps:\n` +
              result.gaps.slice(0, 5).map(g => ` ${g.duration} gap at line ${g.lineNumber}`).join('\n');
          }
          return 'No significant time gaps found.';
        }
        case 'navigate': {
          // API lines are 0-based; the user speaks in 1-based line numbers.
          await loganApi('POST', '/api/navigate', { line: Number.parseInt(match[1], 10) - 1 });
          return `Navigated to line ${match[1]}.`;
        }
        case 'get-lines': {
          const start = Number.parseInt(match[1], 10) - 1;
          // Inclusive range → count = end - start; clamp so a reversed range
          // like "lines 10 to 5" never sends a non-positive count.
          const count = Math.max(1, Number.parseInt(match[2], 10) - start);
          const result = await loganApi('POST', '/api/get-lines', { startLine: start, count });
          if (result.success && result.lines?.length > 0) {
            return result.lines.map(l => `${l.lineNumber + 1}: ${l.text}`).join('\n');
          }
          return 'Could not fetch lines.';
        }
      }
    } catch (e) {
      return `Tool error: ${e.message}`;
    }
  }
  return null; // no tool matched
}
288+
289+
// --- Conversation state ---
// Full transcript: [0] holds the system prompt; user/assistant turns are
// appended by the main loop (which truncates to the most recent 20 messages).
const conversationHistory = [];

// NOTE: the tool phrases listed below ("search for X", "go to line N", ...)
// must stay in sync with TOOL_PATTERNS above — the LLM only learns them here.
const SYSTEM_PROMPT = `You are ${AGENT_NAME}, a helpful log analysis assistant connected to LOGAN, a log viewer tool.
The user may call you "${AGENT_NAME}". Always respond in character.
You help the user analyze and understand log files. You can:
- Search for patterns (user says "search for X")
- Analyze the log (user says "analyze" or "triage")
- Investigate crashes (user says "crashes")
- Filter lines (user says "filter X")
- Find time gaps (user says "time gaps")
- Navigate to lines (user says "go to line N")
- Read specific lines (user says "show lines N to M")

When tool results are provided in [TOOL RESULT], use them to give a clear, concise answer.
Keep responses focused and practical. Don't repeat the raw data — summarize and explain what matters.`;
305+
306+
// --- Main loop ---
// Top-level ESM script body: connect, seed the conversation with log context,
// then alternate waitForMessage → optional tool call → LLM reply until the
// user quits or the wait times out.

console.log('=== LOGAN Local LLM Agent ===');
console.log(`LLM: ${LLM_ENDPOINT} (model: ${LLM_MODEL})`);
console.log(`Connecting to LOGAN on port ${port}...`);

// Get initial context (file path, counts, sample lines) and bake it into the
// system message so the LLM knows what file it is talking about.
const logContext = await getLogContext();
conversationHistory.push({ role: 'system', content: SYSTEM_PROMPT + '\n\nCurrent log file context:\n' + logContext });

await sendMessage(`Hey! I'm ${AGENT_NAME}, powered by ${LLM_MODEL}. Ask me about the log file — I can search, analyze, filter, and more.`);

while (true) {
  console.log('[waiting for user message...]');
  // null means timeout / stream end / connection error — end the session.
  const userMsg = await waitForMessage(WAIT_TIMEOUT);

  if (userMsg === null) {
    await sendMessage('Session timed out. Run me again when ready!');
    break;
  }

  console.log(`[user] ${userMsg}`);

  // Explicit quit keywords bypass the LLM entirely.
  if (/^(stop|quit|exit|bye|goodbye)$/i.test(userMsg.trim())) {
    await sendMessage('Goodbye!');
    break;
  }

  // Check if user message triggers a LOGAN tool
  const toolResult = await executeToolIfDetected(userMsg);

  // Build the user message for the LLM — tool output is appended so the model
  // can summarize it rather than being asked to invent data.
  let llmUserMsg = userMsg;
  if (toolResult) {
    llmUserMsg = `${userMsg}\n\n[TOOL RESULT]\n${toolResult}`;
  }

  conversationHistory.push({ role: 'user', content: llmUserMsg });

  // Keep conversation history manageable (last 20 messages); the system
  // message at index 0 is always retained.
  const messages = conversationHistory.length > 21
    ? [conversationHistory[0], ...conversationHistory.slice(-20)]
    : conversationHistory;

  try {
    await sendMessage('Thinking...');
    const reply = await llmChat(messages);
    conversationHistory.push({ role: 'assistant', content: reply });

    // Send reply (split if very long) — /s flag lets chunks span newlines.
    if (reply.length > 2000) {
      const chunks = reply.match(/.{1,2000}/gs) || [reply];
      for (const chunk of chunks) {
        await sendMessage(chunk);
      }
    } else {
      await sendMessage(reply || 'I couldn\'t generate a response. Could you rephrase?');
    }
  } catch (e) {
    console.error(`[LLM error] ${e.message}`);
    // If LLM fails but we have a tool result, send that directly
    if (toolResult) {
      await sendMessage(toolResult);
    } else {
      await sendMessage(`LLM error: ${e.message}. Is ${LLM_ENDPOINT} running?`);
    }
  }
}

console.log('=== Agent exited ===');

0 commit comments

Comments
 (0)