Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
267618d
Refactor LLM param resolution into adapters
andrejvysny Apr 22, 2026
a0eba84
Merge upstream main into provider adapters
andrejvysny Apr 22, 2026
dcb4aff
Remove unused session_hf_token parameter from NativeAdapter.build_params
andrejvysny Apr 22, 2026
2f85650
Unify model catalog around provider adapters
andrejvysny Apr 22, 2026
f1f9116
Add OpenCode Go provider adapter
andrejvysny Apr 22, 2026
a5bdf29
Merge upstream main into provider adapters
andrejvysny Apr 23, 2026
4550dd5
Refactor LLM param resolution into provider adapters
andrejvysny Apr 23, 2026
e5b113a
Merge upstream main into provider adapters
andrejvysny Apr 23, 2026
a4a7532
Trim provider adapters to params and validation
andrejvysny Apr 23, 2026
d559a37
Clean provider error handling
andrejvysny Apr 23, 2026
a3a1b8f
Add Phase 1 provider adapters
andrejvysny Apr 23, 2026
8b1abd9
Build adapter-driven model catalog and picker
andrejvysny Apr 23, 2026
958c893
Merge remote-tracking branch 'origin/main' into feat/provider-adapters
andrejvysny Apr 24, 2026
88a95a6
feat: add error handling for missing provider adapter in _resolve_llm…
andrejvysny Apr 24, 2026
590496f
refactor: revert unnecessary formatting changes
andrejvysny Apr 24, 2026
cc6cff7
refactor: cleanup before merge
andrejvysny Apr 24, 2026
0307e23
Merge feat/provider-adapters (PR#55) into phase-2 branch
andrejvysny Apr 24, 2026
25d9331
Add GeminiAdapter for Google Gemini models
andrejvysny Apr 24, 2026
4ac4f1a
chore: remove outdated TODO list
andrejvysny Apr 24, 2026
42f36ea
Fix review blockers and cleanup for PR merge
andrejvysny Apr 24, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 5 additions & 43 deletions agent/core/agent_loop.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from agent.config import Config
from agent.core.doom_loop import check_for_doom_loop
from agent.core.llm_errors import friendly_llm_error_message, render_llm_error_message
from agent.core.llm_params import _resolve_llm_params
from agent.core.prompt_caching import with_prompt_caching
from agent.core.session import Event, OpType, Session
Expand Down Expand Up @@ -191,44 +192,7 @@ async def _heal_effort_and_rebuild_params(

def _friendly_error_message(error: Exception) -> str | None:
"""Return a user-friendly message for known error types, or None to fall back to traceback."""
err_str = str(error).lower()

if "authentication" in err_str or "unauthorized" in err_str or "invalid x-api-key" in err_str:
return (
"Authentication failed — your API key is missing or invalid.\n\n"
"To fix this, set the API key for your model provider:\n"
" • Anthropic: export ANTHROPIC_API_KEY=sk-...\n"
" • OpenAI: export OPENAI_API_KEY=sk-...\n"
" • HF Router: export HF_TOKEN=hf_...\n\n"
"You can also add it to a .env file in the project root.\n"
"To switch models, use the /model command."
)

if "insufficient" in err_str and "credit" in err_str:
return (
"Insufficient API credits. Please check your account balance "
"at your model provider's dashboard."
)

if "not supported by provider" in err_str or "no provider supports" in err_str:
return (
"The model isn't served by the provider you pinned.\n\n"
"Drop the ':<provider>' suffix to let the HF router auto-pick a "
"provider, or use '/model' (no arg) to see which providers host "
"which models."
)

if "model_not_found" in err_str or (
"model" in err_str
and ("not found" in err_str or "does not exist" in err_str)
):
return (
"Model not found. Use '/model' to list suggestions, or paste an "
"HF model id like 'MiniMaxAI/MiniMax-M2.7'. Availability is shown "
"when you switch."
)

return None
return friendly_llm_error_message(error)


async def _compact_and_notify(session: Session) -> None:
Expand Down Expand Up @@ -870,11 +834,9 @@ async def _exec_tool(
continue

except Exception as e:
import traceback

error_msg = _friendly_error_message(e)
if error_msg is None:
error_msg = str(e) + "\n" + traceback.format_exc()
logger.info("Agent turn failed: %s", e)
logger.debug("Agent turn failed", exc_info=True)
error_msg = render_llm_error_message(e)

await session.send_event(
Event(
Expand Down
147 changes: 147 additions & 0 deletions agent/core/llm_errors.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
"""Shared LLM error classification and user-facing messages."""

from __future__ import annotations

from typing import Literal

# Closed set of error categories the classifier can emit; "unknown" is the
# fallback when no marker matches.
LlmErrorType = Literal[
    "auth",
    "credits",
    "model",
    "provider",
    "rate_limit",
    "network",
    "unknown",
]

# Each *_MARKERS tuple holds lowercase substrings matched against the
# lowercased exception text (see classify_llm_error). Matching is plain
# substring containment, so entries should be distinctive phrases.

# Credential problems: missing/invalid API keys, 401s, provider-specific
# auth error codes (Anthropic "invalid x-api-key", Google "api_key_invalid").
_AUTH_MARKERS = (
    "authentication failed",
    "authentication_error",
    "authentication error",
    "unauthorized",
    "invalid x-api-key",
    "invalid api key",
    "incorrect api key",
    "didn't provide an api key",
    "did not provide an api key",
    "no api key provided",
    "provide your api key",
    "x-api-key header is required",
    "api key header is required",
    "api key required",
    "api key is missing or invalid",
    "api_key_invalid",
    "401",
)
# Billing/quota exhaustion: includes broad terms ("quota", "billing") and the
# HTTP 402 status, so this is checked after auth but before everything else.
_CREDITS_MARKERS = (
    "insufficient credit",
    "insufficient credits",
    "out of credits",
    "insufficient_quota",
    "credit balance is too low",
    "balance is too low",
    "purchase credits",
    "plans & billing",
    "quota",
    "billing",
    "payment required",
    "402",
)
# Throttling: HTTP 429 and the common phrasings providers use for it.
_RATE_LIMIT_MARKERS = ("429", "rate limit", "too many requests")
# Transient transport/service failures (timeouts, 502/503-style outages,
# provider overload). Intentionally broad — these are all "retry later".
_NETWORK_MARKERS = (
    "timeout",
    "timed out",
    "connect",
    "connection error",
    "connection refused",
    "connection reset",
    "network",
    "service unavailable",
    "bad gateway",
    "overloaded",
    "capacity",
)


def _has_any(err_str: str, markers: tuple[str, ...]) -> bool:
return any(marker in err_str for marker in markers)


def classify_llm_error(error: Exception) -> LlmErrorType:
    """Classify common provider/API failures from the exception text.

    The check order is deliberate: credential and billing problems take
    precedence over model/provider routing issues, which in turn take
    precedence over transient rate-limit and network failures.
    """
    text = str(error).lower()

    if _has_any(text, _AUTH_MARKERS):
        return "auth"
    if _has_any(text, _CREDITS_MARKERS):
        return "credits"

    # Pinned-provider routing failures (HF router style messages).
    provider_phrases = ("not supported by provider", "no provider supports")
    if _has_any(text, provider_phrases):
        return "provider"

    # Missing model: either an explicit error code, or the word "model"
    # paired with a generic not-found phrase.
    explicit_model_codes = ("model_not_found", "unknown model")
    missing_phrases = ("not found", "does not exist", "not available")
    if _has_any(text, explicit_model_codes):
        return "model"
    if "model" in text and _has_any(text, missing_phrases):
        return "model"

    if _has_any(text, _RATE_LIMIT_MARKERS):
        return "rate_limit"
    if _has_any(text, _NETWORK_MARKERS):
        return "network"
    return "unknown"


def friendly_llm_error_message(error: Exception) -> str | None:
    """Return a clean user-facing message for common LLM failures.

    Classifies *error* and looks up a canned remediation message for the
    category. Returns None for unclassified errors so callers can decide
    how to render the raw exception themselves.
    """
    # One message per actionable category; "unknown" is deliberately absent
    # so the dict lookup falls through to None.
    messages: dict[LlmErrorType, str] = {
        "auth": (
            "Authentication failed — your API key is missing or invalid.\n\n"
            "To fix this, set the API key for your model provider:\n"
            " • Anthropic: export ANTHROPIC_API_KEY=sk-...\n"
            " • OpenAI: export OPENAI_API_KEY=sk-...\n"
            " • HF Router: export HF_TOKEN=hf_...\n\n"
            "You can also add it to a .env file in the project root.\n"
            "To switch models, use the /model command."
        ),
        "credits": (
            "Insufficient API credits or quota for this model/provider.\n\n"
            "Check billing for the current provider, or switch models with /model."
        ),
        "provider": (
            "The model isn't served by the provider you pinned.\n\n"
            "Drop the ':<provider>' suffix to let the HF router auto-pick a "
            "provider, or use '/model' (no arg) to see which providers host "
            "which models."
        ),
        "model": (
            "Model not found. Use '/model' to list suggestions, or paste an "
            "HF model id like 'MiniMaxAI/MiniMax-M2.7'. Availability is shown "
            "when you switch."
        ),
        "rate_limit": (
            "Rate limit reached. Wait a moment and retry, or switch models/providers "
            "with /model."
        ),
        "network": (
            "The model provider is unavailable or timed out. Retry in a moment."
        ),
    }
    return messages.get(classify_llm_error(error))


def render_llm_error_message(error: Exception) -> str:
    """Return the message safe to show to users.

    Prefers the friendly canned message; falls back to the raw exception
    text when the error does not map to a known category.
    """
    friendly = friendly_llm_error_message(error)
    if friendly:
        return friendly
    return str(error)


def health_error_type(error: Exception) -> str:
    """Map LLM failures to the backend health endpoint error_type values.

    The health endpoint only distinguishes these four categories; every
    other classification (including "model" and "provider") is reported
    as "unknown".
    """
    reportable = ("auth", "credits", "rate_limit", "network")
    kind = classify_llm_error(error)
    return kind if kind in reportable else "unknown"
Loading