Skip to content

Commit c167f3a

Browse files
fix(server): update /v1/chat/completions endpoint to be backwards compatible
1 parent f2e7beb commit c167f3a

2 files changed

Lines changed: 27 additions & 0 deletions

File tree

nemoguardrails/server/schemas/openai.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,16 @@ class OpenAIChatCompletionRequest(BaseModel):
5454
...,
5555
description="The LLM model to use for chat completion (e.g., 'gpt-4o', 'llama-3.1-8b').",
5656
)
57+
58+
@field_validator("model", mode="before")
59+
@classmethod
60+
def normalize_model(cls, v: Any) -> Optional[str]:
61+
if isinstance(v, dict):
62+
if not isinstance(v.get("id"), str):
63+
raise ValueError("Model object must contain a string 'id' field")
64+
return v["id"]
65+
return v
66+
5767
stream: Optional[bool] = Field(
5868
default=False,
5969
description="If set, partial message deltas will be sent as server-sent events.",

tests/server/test_openai_integration.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,23 @@ def test_openai_client_chat_completion(openai_client):
8686
assert hasattr(response, "created")
8787

8888

89+
def test_chat_completion_model_as_dict():
    """The chat-completions endpoint accepts an OpenAI-style model object.

    When ``model`` arrives as a dict, the server should use its ``id`` field
    as the model name and respond exactly as it would for a plain string.
    """
    client = TestClient(api.app)
    payload = {
        "model": {"id": "gpt-4o", "name": "gpt-4o", "maxLength": 10000},
        "messages": [{"role": "user", "content": "hi"}],
        "guardrails": {"config_id": "with_custom_llm"},
    }

    response = client.post("/v1/chat/completions", json=payload)

    assert response.status_code == 200
    body = response.json()
    # The dict was normalized down to its string id.
    assert body["model"] == "gpt-4o"
    choice = body["choices"][0]
    assert choice["message"]["content"] == "Custom LLM response"
    assert choice["finish_reason"] == "stop"
104+
105+
89106
def test_openai_client_chat_completion_parameterized(openai_client):
90107
response = openai_client.chat.completions.create(
91108
model="gpt-4o",

0 commit comments

Comments
 (0)