Skip to content

Commit 3832126

Browse files
committed
api: fix OpenAI model names, strip provider prefix, use $TEST_MODEL in tests
1 parent e993068 commit 3832126

File tree

2 files changed: +8 additions, -5 deletions

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -540,7 +540,7 @@ Custom headers for claude-specific behavior:
540540
import litellm
541541
542542
response = litellm.completion(
543-
model="openai/haiku",
543+
model="claude-code/haiku",
544544
messages=[{"role": "user", "content": "hello"}],
545545
api_base="http://localhost:8080/openai/v1",
546546
api_key="your-secret-token", # or any string if no token set

api_server.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -524,6 +524,9 @@ async def openai_chat_completions(
524524
if not prompt:
525525
raise HTTPException(status_code=400, detail="no user message provided")
526526

527+
# strip provider prefix (e.g. "openai/haiku" → "haiku")
528+
model = req.model.split("/", 1)[-1] if "/" in req.model else req.model
529+
527530
no_continue = x_claude_continue is None or x_claude_continue.lower() not in ("1", "true", "yes")
528531

529532
cid = f"chatcmpl-{uuid.uuid4().hex[:12]}"
@@ -532,7 +535,7 @@ async def openai_chat_completions(
532535
if not req.stream:
533536
text = await _run_claude_text(
534537
prompt,
535-
model=req.model,
538+
model=model,
536539
system_prompt=system_prompt,
537540
workspace=x_claude_workspace,
538541
no_continue=no_continue,
@@ -542,7 +545,7 @@ async def openai_chat_completions(
542545
"id": cid,
543546
"object": "chat.completion",
544547
"created": created,
545-
"model": req.model,
548+
"model": model,
546549
"choices": [
547550
{
548551
"index": 0,
@@ -558,15 +561,15 @@ async def openai_chat_completions(
558561
if workspace in busy_workspaces:
559562
raise HTTPException(status_code=409, detail="workspace busy, retry later")
560563

561-
args = _build_oai_run_args(prompt, req.model, system_prompt, True, req.reasoning_effort, no_continue)
564+
args = _build_oai_run_args(prompt, model, system_prompt, True, req.reasoning_effort, no_continue)
562565
env = _build_env()
563566

564567
busy_workspaces[workspace] = None # type: ignore[assignment]
565568

566569
if req.stream:
567570

568571
async def _sse():
569-
model_name = req.model
572+
model_name = model
570573
finish_reason = "stop"
571574
try:
572575
proc = await asyncio.create_subprocess_exec(

Comments (0) — 0 commit comments