Skip to content

Commit 360760c

Browse files
author
octopus
committed
feat: add MiniMax as alternative LLM provider
Add multi-provider LLM factory with auto-detection from API keys. MiniMax M2.7/M2.7-highspeed models supported via OpenAI-compatible API with temperature clamping and configurable base URL. - New llm_provider.py module with create_llm() factory - Provider auto-detection: MINIMAX_API_KEY → minimax, else openai - LLM_PROVIDER, LLM_BASE_URL, LLM_TEMPERATURE env vars - 28 unit tests + 3 integration tests - Updated .env.example and README with MiniMax docs
1 parent 4a776bd commit 360760c

File tree

8 files changed

+454
-5
lines changed

8 files changed

+454
-5
lines changed

.env.example

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,19 @@
11
OPENAI_API_KEY=
22

3+
# LLM provider — auto-detected from API keys if unset
4+
# Options: openai, minimax
5+
# LLM_PROVIDER=openai
6+
37
# LLM model — strong models are required for reliable UI generation
4-
# Recommended: gpt-5.4, gpt-5.4-pro, claude-opus-4-6, gemini-3.1-pro
8+
# Recommended: gpt-5.4, gpt-5.4-pro, claude-opus-4-6, gemini-3.1-pro, MiniMax-M2.7
59
LLM_MODEL=gpt-5.4-2026-03-05
610

11+
# Custom base URL for OpenAI-compatible providers (overrides provider preset)
12+
# LLM_BASE_URL=
13+
14+
# MiniMax (https://www.minimaxi.com) — set key to auto-select MiniMax provider
15+
# MINIMAX_API_KEY=
16+
717
# Rate limiting (per IP) — disabled by default
818
RATE_LIMIT_ENABLED=false
919
RATE_LIMIT_WINDOW_MS=60000

README.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,11 @@ make dev # Start all services
3030
> | `gpt-5.4` / `gpt-5.4-pro` | OpenAI |
3131
> | `claude-opus-4-6` | Anthropic |
3232
> | `gemini-3.1-pro` | Google |
33+
> | `MiniMax-M2.7` / `MiniMax-M2.7-highspeed` | [MiniMax](https://www.minimaxi.com) |
3334
>
3435
> Smaller or weaker models will produce broken layouts, missing interactivity, or incomplete visualizations.
36+
>
37+
> **Using MiniMax:** Set `MINIMAX_API_KEY` in your `.env` — the provider is auto-detected. Defaults to `MiniMax-M2.7` (1M context window). See [MiniMax docs](https://www.minimaxi.com/document/introduction) for API keys.
3538
3639
- **App**: http://localhost:3000
3740
- **Agent**: http://localhost:8123

apps/agent/main.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,10 @@
33
It defines the workflow graph, state, tools, nodes and edges.
44
"""
55

6-
import os
7-
86
from copilotkit import CopilotKitMiddleware
97
from langchain.agents import create_agent
10-
from langchain_openai import ChatOpenAI
118

9+
from src.llm_provider import create_llm
1210
from src.query import query_data
1311
from src.todos import AgentState, todo_tools
1412
from src.form import generate_form
@@ -19,7 +17,7 @@
1917
_skills_text = load_all_skills()
2018

2119
agent = create_agent(
22-
model=ChatOpenAI(model=os.environ.get("LLM_MODEL", "gpt-5.4-2026-03-05")),
20+
model=create_llm(),
2321
tools=[query_data, *todo_tools, generate_form, *template_tools],
2422
middleware=[CopilotKitMiddleware()],
2523
state_schema=AgentState,

apps/agent/src/llm_provider.py

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
"""
2+
LLM provider factory for multi-provider support.
3+
4+
Supports OpenAI (default), MiniMax, and any OpenAI-compatible provider
5+
via the LLM_BASE_URL environment variable.
6+
7+
Provider auto-detection priority:
8+
1. Explicit LLM_PROVIDER env var
9+
2. MINIMAX_API_KEY present → minimax
10+
3. OPENAI_API_KEY present → openai (default)
11+
12+
MiniMax models use the OpenAI-compatible API at https://api.minimax.io/v1
13+
with temperature clamped to (0.0, 1.0].
14+
"""
15+
16+
import os
17+
from langchain_openai import ChatOpenAI
18+
19+
20+
# Provider presets: base_url and api_key env var name.
# - "base_url": endpoint for the OpenAI-compatible API; None means the
#   client library's built-in default endpoint is used.
# - "api_key_env": name of the environment variable holding the API key.
# - "default_model" (optional): model used when LLM_MODEL is unset.
PROVIDER_PRESETS = {
    "openai": {
        "base_url": None,  # uses default
        "api_key_env": "OPENAI_API_KEY",
    },
    "minimax": {
        "base_url": "https://api.minimax.io/v1",
        "api_key_env": "MINIMAX_API_KEY",
        "default_model": "MiniMax-M2.7",
    },
}

# Models that require temperature clamping to (0.0, 1.0]
# (providers listed here get their temperature clamped in create_llm()).
_CLAMP_TEMPERATURE_PROVIDERS = {"minimax"}
35+
36+
37+
def _detect_provider() -> str:
    """Resolve the LLM provider name from environment variables.

    Resolution order: an explicit LLM_PROVIDER value (trimmed, lowercased)
    wins; otherwise the presence of MINIMAX_API_KEY selects "minimax";
    otherwise the default is "openai".
    """
    configured = os.environ.get("LLM_PROVIDER", "").strip().lower()
    if configured:
        return configured
    return "minimax" if os.environ.get("MINIMAX_API_KEY") else "openai"
47+
48+
49+
def create_llm() -> ChatOpenAI:
    """
    Create a ChatOpenAI-compatible LLM instance based on environment config.

    Environment variables:
        LLM_PROVIDER    – Provider name: "openai" | "minimax" (auto-detected if unset)
        LLM_MODEL       – Model name (provider-specific default if unset)
        LLM_BASE_URL    – Custom base URL (overrides provider preset)
        LLM_TEMPERATURE – Temperature value (default: 0.7)
        OPENAI_API_KEY  – OpenAI API key
        MINIMAX_API_KEY – MiniMax API key

    Raises:
        ValueError: if LLM_TEMPERATURE is set to a non-empty, non-numeric value.
    """
    provider = _detect_provider()
    # Unknown provider names resolve to an empty preset, i.e. OpenAI defaults.
    preset = PROVIDER_PRESETS.get(provider, {})

    # `or` (rather than a .get default) treats empty-string values — the
    # common ".env" form `LLM_MODEL=` — the same as unset.
    model = os.environ.get("LLM_MODEL") or preset.get("default_model") or "gpt-5.4-2026-03-05"
    base_url = os.environ.get("LLM_BASE_URL") or preset.get("base_url")

    # Resolve API key: provider-specific variable first, then OPENAI_API_KEY
    # as a fallback (useful for OpenAI-compatible endpoints set via LLM_BASE_URL).
    api_key_env = preset.get("api_key_env", "OPENAI_API_KEY")
    api_key = os.environ.get(api_key_env) or os.environ.get("OPENAI_API_KEY", "")

    # Parse temperature. Using `or "0.7"` instead of .get's default so that
    # an empty string (`LLM_TEMPERATURE=` in a .env file) falls back to 0.7
    # rather than crashing in float("").
    temperature = float(os.environ.get("LLM_TEMPERATURE") or "0.7")

    # Clamp temperature for providers that require it (MiniMax: (0.0, 1.0])
    if provider in _CLAMP_TEMPERATURE_PROVIDERS:
        temperature = max(0.01, min(temperature, 1.0))

    kwargs = {
        "model": model,
        "temperature": temperature,
    }

    # Pass optional settings only when present so the client library's own
    # defaults (including its OPENAI_API_KEY lookup) still apply otherwise.
    if base_url:
        kwargs["base_url"] = base_url

    if api_key:
        kwargs["api_key"] = api_key

    return ChatOpenAI(**kwargs)

apps/agent/tests/__init__.py

Whitespace-only changes.

apps/agent/tests/conftest.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
"""Configure imports so tests can import from the agent src directory."""
2+
3+
import importlib
4+
import sys
5+
import types
6+
from pathlib import Path
7+
8+
# The system has a `src` package installed globally that shadows our local
9+
# `src/` directory. Remove it so that the agent's own `src` package is found.
10+
agent_root = Path(__file__).resolve().parents[1]
11+
src_dir = agent_root / "src"
12+
13+
# Remove any pre-existing `src` module from the cache
14+
for key in list(sys.modules):
15+
if key == "src" or key.startswith("src."):
16+
del sys.modules[key]
17+
18+
# Ensure agent root is first on the path
19+
if str(agent_root) not in sys.path:
20+
sys.path.insert(0, str(agent_root))
21+
22+
# Register our local src as a namespace package so submodule imports work
23+
src_mod = types.ModuleType("src")
24+
src_mod.__path__ = [str(src_dir)]
25+
src_mod.__package__ = "src"
26+
sys.modules["src"] = src_mod

0 commit comments

Comments
 (0)