CodeCompanion doesn't have compaction built in (yet). If you're using Anthropic or the OpenAI Responses API, it leverages their server-side tools to compact the chat (source: https://codecompanion.olimorris.dev/architecture#how-context-is-managed). But I started work on built-in compaction last night. There is currently a /compact slash command, but it's not good and I'm not happy with it.
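A minimal sketch of what that server-side path looks like, assuming the standard `setup()` entry point and default options for everything else (the `interactions.chat.adapter` key is the same one used in the config further down):

```lua
-- Minimal sketch, not the plugin's documented compaction setup:
-- select an adapter whose provider compacts the chat server-side.
require("codecompanion").setup({
  interactions = {
    chat = {
      adapter = "anthropic", -- Anthropic / OpenAI Responses handle compaction upstream
    },
  },
})
```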
Hello,
I have a custom llama.cpp setup and I've noticed that no auto-compaction is happening, no matter which settings I try. I've made sure the context size is set and enabled both compaction and the background adapter, but no luck.
Am I doing something wrong?
My config is this:
{ "olimorris/codecompanion.nvim", version = "^19.0.0", opts = { interactions = { chat = { adapter = "llama", opts = { compaction = { enabled = true, trigger = 0.8 } }, }, }, background = { adapter = "llama", chat = { opts = { enabled = true } } }, adapters = { http = { llama = function() local url = "http://127.0.0.1:9099" local model = (function() local resp = require('plenary.curl').get(url .. '/props', { timeout = 150, }) if resp.status ~= 200 then error("model discovery: http code " .. resp.status) end local body = vim.json.decode(resp.body) if type(body) ~= 'table' or type(body.default_generation_settings) ~= 'table' or not body.model_alias or not body.default_generation_settings.n_ctx then vim.print(body) error('model discovery: unexpected response') end return { name = body.model_alias, ctx = 10 * 1024 -- for the compaction debugging --ctx = body.default_generation_settings.n_ctx } end)() return require("codecompanion.adapters").extend("openai_compatible", { env = { url = url, api_key = "USER", chat_url = "/v1/chat/completions", }, opts = { stream = true, tools = true, vision = false, }, schema = { model = { default = model.name }, num_ctx = { default = model.ctx }, }, meta = { context_window = model.ctx, }, handlers = { form_messages = function(self, messages) local system_content = {} local other_messages = {} -- 1. Separate system messages from everything else for _, msg in ipairs(messages) do if msg.role == "system" then table.insert(system_content, msg.content) else table.insert(other_messages, msg) end end local final_messages = {} -- 2. If there are system messages, merge them into ONE message at the top if #system_content > 0 then table.insert(final_messages, { role = "system", content = table.concat(system_content, "\n\n"), }) end -- 3. Append all the user/assistant messages for _, msg in ipairs(other_messages) do table.insert(final_messages, msg) end -- 4. Pass the cleaned messages to the standard OpenAI handler local openai = require "codecompanion.adapters.http.openai" return openai.handlers.form_messages(self, final_messages) end, parse_message_meta = function(self, data) local extra = data.extra if extra and extra.reasoning_content then data.output.reasoning = { content = extra.reasoning_content } if data.output.content == "" then data.output.content = nil end end return data end, }, }) end, } } }, dependencies = { "nvim-lua/plenary.nvim", "nvim-treesitter/nvim-treesitter", "ravitemer/mcphub.nvim", { "MeanderingProgrammer/render-markdown.nvim", ft = { "markdown", "codecompanion" }, opts = { debounce = 300 } }, }, }Beta Was this translation helpful? Give feedback.