|
1 | | - |
2 | | -import { sendChatParticipantRequest } from "@vscode/chat-extension-utils"; |
3 | | -import { traceInfo, traceWarn } from "../../common/logging"; |
4 | | -import * as vscode from "vscode"; |
5 | | -import { PythonEnvsPrompt } from "./prompts"; |
6 | | -import { PromptElementAndProps } from "@vscode/chat-extension-utils/dist/toolsPrompt"; |
7 | | -import { ChatEnvironmentErrorInfo, ERROR_CHAT_CONTEXT_QUEUE } from "./send_prompt"; |
8 | | -import { PythonHelperChatParticipant } from "./pythonHelperChatParticipant"; |
| 1 | +import { traceInfo, traceWarn } from '../../common/logging'; |
| 2 | +import * as vscode from 'vscode'; |
| 3 | +import { PythonEnvsPrompt, PythonEnvsPromptProps } from './prompts'; |
| 4 | +import { PythonHelperChatParticipant } from './pythonHelperChatParticipant'; |
| 5 | +import { PromptElement, renderPrompt, UserMessage } from '@vscode/prompt-tsx'; |
| 6 | +import { LanguageModelTextPart, LanguageModelToolCallPart } from 'vscode'; |
| 7 | +import { ToolCallRound, ToolResultMetadata } from '@vscode/chat-extension-utils/dist/toolsPrompt'; |
9 | 8 |
|
// Stable identifier for the Python helper chat participant; must match the
// participant id declared in package.json's `chatParticipants` contribution
// (NOTE(review): confirm against the manifest — not visible from this file).
export const CHAT_PARTICIPANT_ID = 'python-helper';
// The @-mention users type in the chat input to address this participant.
export const CHAT_PARTICIPANT_AT_MENTION = `@${CHAT_PARTICIPANT_ID}`;
|
| 12 | +export function registerChatParticipant(_context: vscode.ExtensionContext) { |
| 13 | + traceInfo('Registering python helper chat participant'); // Log registration start |
13 | 14 |
|
| 15 | + // Define the request handler for the chat participant |
| 16 | + const requestHandler: vscode.ChatRequestHandler = async ( |
| 17 | + request: vscode.ChatRequest, // prompt made in pip file |
| 18 | + chatContext: vscode.ChatContext, // history |
| 19 | + stream: vscode.ChatResponseStream, |
| 20 | + token: vscode.CancellationToken, |
| 21 | + ) => { |
| 22 | + traceWarn('Python helper chat participant invoked with request:', request); // Log request details |
14 | 23 |
|
15 | | -export function registerChatParticipant( |
16 | | - _context: vscode.ExtensionContext, |
17 | | -) { |
18 | | - traceInfo('Registering python helper chat participant'); |
19 | | - const participant = vscode.chat.createChatParticipant(CHAT_PARTICIPANT_ID, |
20 | | - async ( |
21 | | - request: vscode.ChatRequest, |
22 | | - chatContext: vscode.ChatContext, |
23 | | - stream: vscode.ChatResponseStream, |
24 | | - token: vscode.CancellationToken |
25 | | - ) => { |
26 | | - traceWarn('Python helper chat participant invoked with request:', request); |
27 | | - const userPrompt = request.prompt; |
28 | | - |
29 | | - |
30 | | - const prompt: PromptElementAndProps<PythonEnvsPrompt> = { |
31 | | - promptElement: PythonEnvsPrompt, |
32 | | - props: { |
33 | | - title: 'Python Environment', |
34 | | - description: 'Provide information about the Python environment.', |
35 | | - request: { |
36 | | - prompt: '' + userPrompt + ' What is the current Python environment? What packages are installed?', |
37 | | - }, |
38 | | - }, |
39 | | - }; |
40 | | - |
41 | | - const { result } = sendChatParticipantRequest( |
42 | | - request, |
43 | | - chatContext, |
44 | | - { |
45 | | - prompt, |
46 | | - requestJustification: vscode.l10n.t('Tell me about my environment.'), |
47 | | - responseStreamOptions: { |
48 | | - stream, |
49 | | - references: false, |
50 | | - responseText: true, |
51 | | - }, |
| 24 | + // gather the available tools |
| 25 | + const first100tools = vscode.lm.tools.slice(0, 100); |
| 26 | + const tools: vscode.LanguageModelChatTool[] = first100tools.map((tool): vscode.LanguageModelChatTool => { |
| 27 | + return { |
| 28 | + name: tool.name, |
| 29 | + description: tool.description, |
| 30 | + inputSchema: tool.inputSchema ?? {}, |
| 31 | + }; |
| 32 | + }); |
| 33 | + traceInfo('Tools prepared:', tools); // Log tools |
| 34 | + |
| 35 | + const userPrompt = request.prompt; |
| 36 | + traceInfo('User prompt received:', userPrompt); // Log user prompt |
| 37 | + |
| 38 | + const refTools = request.toolReferences; |
| 39 | + const refToolInvToken = request.toolInvocationToken; |
| 40 | + const refRef = request.references; |
| 41 | + const refCom = request.command; |
| 42 | + const refPrompt = request.prompt; |
| 43 | + let model = request.model; |
| 44 | + traceInfo('References received:', refTools, refToolInvToken, refRef, refCom, refPrompt, model); // Log references |
| 45 | + |
| 46 | + // takes the info and creates a prompt using the PythonEnvsPrompt |
| 47 | + const result = await renderPrompt<PythonEnvsPromptProps>( |
| 48 | + PythonEnvsPrompt, // Extract the constructor PythonEnvsPromptProps |
| 49 | + { |
| 50 | + title: 'Python Environment', |
| 51 | + description: 'Provide information about the Python environment.', |
| 52 | + request: { |
| 53 | + prompt: '' + userPrompt + ' What is the current Python environment? What packages are installed?', |
52 | 54 | }, |
53 | | - token, |
54 | | - ); |
| 55 | + }, |
| 56 | + { modelMaxPromptTokens: model.maxInputTokens }, |
| 57 | + model, |
| 58 | + ); |
| 59 | + |
| 60 | + // result of building the prompt |
| 61 | + let messages = result.messages; |
| 62 | + |
| 63 | + const options: vscode.LanguageModelChatRequestOptions = { |
| 64 | + justification: 'To make a request to @toolsTSX', |
| 65 | + tools: tools, |
| 66 | + }; |
| 67 | + |
| 68 | + const toolReferences = [...request.toolReferences]; |
| 69 | + const accumulatedToolResults: Record<string, vscode.LanguageModelToolResult> = {}; |
| 70 | + const toolCallRounds: ToolCallRound[] = []; |
| 71 | + const runWithTools = async (): Promise<void> => { |
| 72 | + const requestedTool = toolReferences.shift(); |
| 73 | + |
| 74 | + if (requestedTool) { |
| 75 | + // NOT WORKING::: If a toolReference is present, force the model to call that tool |
| 76 | + options.toolMode = vscode.LanguageModelChatToolMode.Required; |
| 77 | + options.tools = vscode.lm.tools.filter((tool) => tool.name === requestedTool.name); |
| 78 | + } else { |
| 79 | + options.toolMode = undefined; |
| 80 | + options.tools = [...tools]; |
| 81 | + } |
| 82 | + console.log('Requested tool:', requestedTool); // Log requested tool |
| 83 | + |
| 84 | + // Send the request to the model |
| 85 | + const response = await model.sendRequest(messages, options, token); |
| 86 | + traceInfo('Chat participant response sent:', response); // Log response |
| 87 | + |
| 88 | + // Stream the response back to VS Code |
| 89 | + let responseStr = ''; |
| 90 | + const toolCalls: vscode.LanguageModelToolCallPart[] = []; |
| 91 | + |
| 92 | + for await (const chunk of response.stream) { |
| 93 | + if (chunk instanceof LanguageModelTextPart) { |
| 94 | + stream.markdown(chunk.value); |
| 95 | + responseStr += chunk.value; // Accumulate the response string |
| 96 | + } else if (chunk instanceof LanguageModelToolCallPart) { |
| 97 | + // If the response contains vscode.LanguageModelToolCallPart, then you should re-send the prompt with a ToolCall element for each of those. |
| 98 | + console.log('TOOL CALL', chunk); |
| 99 | + toolCalls.push(chunk); |
| 100 | + } |
| 101 | + } |
55 | 102 |
|
56 | | - return await result; |
57 | | - } |
58 | | - ); |
| 103 | + if (toolCalls.length) { |
| 104 | + traceInfo('Tool calls detected:', toolCalls); // Log tool calls |
| 105 | + |
| 106 | + // If the model called any tools, then we do another round- render the prompt with those tool calls (rendering the PromptElements will invoke the tools) |
| 107 | + // and include the tool results in the prompt for the next request. |
| 108 | + toolCallRounds.push({ |
| 109 | + response: responseStr, |
| 110 | + toolCalls, |
| 111 | + }); |
| 112 | + |
| 113 | + const result = await renderPrompt<PythonEnvsPromptProps>( |
| 114 | + PythonEnvsPrompt, // Extract the constructor PythonEnvsPromptProps |
| 115 | + { |
| 116 | + title: 'Python Environment', |
| 117 | + description: 'Provide information about the Python environment.', |
| 118 | + request: { |
| 119 | + prompt: |
| 120 | + '' + |
| 121 | + userPrompt + |
| 122 | + ' What is the current Python environment? What packages are installed?', |
| 123 | + }, |
| 124 | + }, |
| 125 | + { modelMaxPromptTokens: model.maxInputTokens }, |
| 126 | + model, |
| 127 | + ); |
| 128 | + |
| 129 | + // result of building the prompt |
| 130 | + let messages = result.messages; |
| 131 | + const toolResultMetadata = result.metadatas.getAll(ToolResultMetadata); |
| 132 | + if (toolResultMetadata?.length) { |
| 133 | + // Cache tool results for later, so they can be incorporated into later prompts without calling the tool again |
| 134 | + toolResultMetadata.forEach((meta) => (accumulatedToolResults[meta.toolCallId] = meta.result)); |
| 135 | + } |
| 136 | + |
| 137 | + // This loops until the model doesn't want to call any more tools, then the request is done. |
| 138 | + return runWithTools(); |
| 139 | + } |
| 140 | + }; |
| 141 | + await runWithTools(); // Ensure tools are run before proceeding |
| 142 | + |
| 143 | + // end of request handler |
| 144 | + }; |
| 145 | + |
| 146 | + const participant = vscode.chat.createChatParticipant(CHAT_PARTICIPANT_ID, requestHandler); |
59 | 147 | participant.iconPath = new vscode.ThemeIcon('python'); |
60 | 148 |
|
| 149 | + traceInfo('Chat participant created and registered'); // Log participant creation |
| 150 | + |
61 | 151 | // Register using our singleton manager |
62 | 152 | return PythonHelperChatParticipant.register(participant, _context); |
63 | | - |
64 | | - |
65 | 153 | } |
66 | | - |
|
0 commit comments