Skip to content

Commit b74f135

Browse files
feat: skip temperature for opus-4-7 (#790)
1 parent dca81eb commit b74f135

4 files changed

Lines changed: 179 additions & 13 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath-langchain"
3-
version = "0.9.35"
3+
version = "0.9.36"
44
description = "Python SDK that enables developers to build and deploy LangGraph agents to the UiPath Cloud Platform"
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.11"

src/uipath_langchain/chat/chat_model_factory.py

Lines changed: 32 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@
2020
}
2121

2222

23+
def _should_skip_temperature(model_info: dict[str, Any]) -> bool:
24+
details = model_info.get("modelDetails") or {}
25+
return bool(details.get("shouldSkipTemperature", False))
26+
27+
2328
def _fetch_discovery(agenthub_config: str) -> list[dict[str, Any]]:
2429
"""Fetch available models from LLM Gateway discovery endpoint."""
2530
from uipath.platform import UiPath
@@ -34,7 +39,7 @@ def _fetch_discovery(agenthub_config: str) -> list[dict[str, Any]]:
3439
def _create_openai_llm(
3540
model: str,
3641
api_flavor: APIFlavor,
37-
temperature: float,
42+
temperature: float | None,
3843
max_tokens: int,
3944
agenthub_config: str,
4045
byo_connection_id: str | None = None,
@@ -45,29 +50,33 @@ def _create_openai_llm(
4550

4651
azure_open_ai_latest_api_version = "2025-04-01-preview"
4752

53+
sampling_kwargs: dict[str, Any] = {}
54+
if temperature is not None:
55+
sampling_kwargs["temperature"] = temperature
56+
4857
match api_flavor:
4958
case APIFlavor.OPENAI_RESPONSES:
5059
return UiPathChatOpenAI(
5160
use_responses_api=True,
5261
model_name=model,
53-
temperature=temperature,
5462
max_tokens=max_tokens,
5563
api_version=azure_open_ai_latest_api_version,
5664
agenthub_config=agenthub_config,
5765
byo_connection_id=byo_connection_id,
5866
output_version="v1",
67+
**sampling_kwargs,
5968
**kwargs,
6069
)
6170
case APIFlavor.OPENAI_COMPLETIONS:
6271
return UiPathChatOpenAI(
6372
use_responses_api=False,
6473
model_name=model,
65-
temperature=temperature,
6674
max_tokens=max_tokens,
6775
api_version=azure_open_ai_latest_api_version,
6876
agenthub_config=agenthub_config,
6977
byo_connection_id=byo_connection_id,
7078
output_version="v1",
79+
**sampling_kwargs,
7180
**kwargs,
7281
)
7382
case _:
@@ -77,7 +86,7 @@ def _create_openai_llm(
7786
def _create_bedrock_llm(
7887
model: str,
7988
api_flavor: APIFlavor,
80-
temperature: float,
89+
temperature: float | None,
8190
max_tokens: int,
8291
agenthub_config: str,
8392
byo_connection_id: str | None = None,
@@ -89,25 +98,29 @@ def _create_bedrock_llm(
8998
UiPathChatBedrockConverse,
9099
)
91100

101+
sampling_kwargs: dict[str, Any] = {}
102+
if temperature is not None:
103+
sampling_kwargs["temperature"] = temperature
104+
92105
match api_flavor:
93106
case APIFlavor.AWS_BEDROCK_CONVERSE:
94107
return UiPathChatBedrockConverse(
95108
model_name=model,
96-
temperature=temperature,
97109
max_tokens=max_tokens,
98110
agenthub_config=agenthub_config,
99111
byo_connection_id=byo_connection_id,
100112
output_version="v1",
113+
**sampling_kwargs,
101114
**kwargs,
102115
)
103116
case APIFlavor.AWS_BEDROCK_INVOKE:
104117
return UiPathChatBedrock(
105118
model_name=model,
106-
temperature=temperature,
107119
max_tokens=max_tokens,
108120
agenthub_config=agenthub_config,
109121
byo_connection_id=byo_connection_id,
110122
output_version="v1",
123+
**sampling_kwargs,
111124
**kwargs,
112125
)
113126
case _:
@@ -117,7 +130,7 @@ def _create_bedrock_llm(
117130
def _create_vertex_llm(
118131
model: str,
119132
api_flavor: APIFlavor,
120-
temperature: float,
133+
temperature: float | None,
121134
max_tokens: int | None,
122135
agenthub_config: str,
123136
byo_connection_id: str | None = None,
@@ -126,15 +139,19 @@ def _create_vertex_llm(
126139
"""Create UiPathChatVertex for Gemini models via LLMGateway."""
127140
from uipath_langchain.chat.vertex import UiPathChatVertex
128141

142+
sampling_kwargs: dict[str, Any] = {}
143+
if temperature is not None:
144+
sampling_kwargs["temperature"] = temperature
145+
129146
match api_flavor:
130147
case APIFlavor.VERTEX_GEMINI_GENERATE_CONTENT:
131148
return UiPathChatVertex(
132149
model_name=model,
133-
temperature=temperature,
134150
max_tokens=max_tokens,
135151
agenthub_config=agenthub_config,
136152
byo_connection_id=byo_connection_id,
137153
output_version="v1",
154+
**sampling_kwargs,
138155
**kwargs,
139156
)
140157
case APIFlavor.VERTEX_ANTHROPIC_CLAUDE:
@@ -243,12 +260,16 @@ def get_chat_model(
243260
vendor, api_flavor = _compute_vendor_and_api_flavor(model_info)
244261
model_name: str = model_info.get("modelName", model)
245262

263+
effective_temperature: float | None = (
264+
None if _should_skip_temperature(model_info) else temperature
265+
)
266+
246267
match LLMProvider(vendor):
247268
case LLMProvider.OPENAI:
248269
return _create_openai_llm(
249270
model_name,
250271
api_flavor,
251-
temperature,
272+
effective_temperature,
252273
max_tokens,
253274
agenthub_config,
254275
byo_connection_id,
@@ -258,7 +279,7 @@ def get_chat_model(
258279
return _create_bedrock_llm(
259280
model_name,
260281
api_flavor,
261-
temperature,
282+
effective_temperature,
262283
max_tokens,
263284
agenthub_config,
264285
byo_connection_id,
@@ -268,7 +289,7 @@ def get_chat_model(
268289
return _create_vertex_llm(
269290
model_name,
270291
api_flavor,
271-
temperature,
292+
effective_temperature,
272293
max_tokens,
273294
agenthub_config,
274295
byo_connection_id,

tests/chat/test_chat_model_factory.py

Lines changed: 145 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
_API_FLAVOR_TO_PROVIDER,
77
_DEFAULT_API_FLAVOR,
88
_compute_vendor_and_api_flavor,
9+
get_chat_model,
910
)
1011
from uipath_langchain.chat.types import APIFlavor, LLMProvider
1112

@@ -320,3 +321,147 @@ def test_default_flavors_map_back_to_same_provider(self):
320321
f"Default flavor {default_flavor} for {provider} "
321322
f"maps to {mapped_provider} instead"
322323
)
324+
325+
326+
class TestGetChatModelTemperatureGating:
327+
"""End-to-end tests that call ``get_chat_model`` and assert how
328+
``temperature`` is forwarded to the underlying LangChain chat class.
329+
330+
The gate is driven by discovery's ``modelDetails.shouldSkipTemperature``:
331+
when True, ``temperature`` must be omitted from the constructor kwargs;
332+
when False/absent, it must be passed through as-is.
333+
"""
334+
335+
def test_opus_4_7_bedrock_converse_omits_temperature(self, mocker):
336+
"""flag=True + Bedrock Converse: UiPathChatBedrockConverse must be
337+
instantiated without a ``temperature`` kwarg."""
338+
pytest.importorskip("langchain_aws")
339+
mocker.patch(
340+
"uipath_langchain.chat.chat_model_factory._get_model_info",
341+
return_value={
342+
"modelName": "anthropic.claude-opus-4-7",
343+
"vendor": "AwsBedrock",
344+
"apiFlavor": "AwsBedrockConverse",
345+
"modelDetails": {"shouldSkipTemperature": True},
346+
},
347+
)
348+
mock_cls = mocker.patch(
349+
"uipath_langchain.chat.bedrock.UiPathChatBedrockConverse"
350+
)
351+
352+
get_chat_model(
353+
model="anthropic.claude-opus-4-7",
354+
temperature=0.0,
355+
max_tokens=4096,
356+
agenthub_config="cfg",
357+
)
358+
359+
_, kwargs = mock_cls.call_args
360+
assert "temperature" not in kwargs
361+
362+
def test_sonnet_4_5_bedrock_converse_forwards_temperature(self, mocker):
363+
"""flag=False: UiPathChatBedrockConverse receives the exact caller
364+
temperature."""
365+
pytest.importorskip("langchain_aws")
366+
mocker.patch(
367+
"uipath_langchain.chat.chat_model_factory._get_model_info",
368+
return_value={
369+
"modelName": "anthropic.claude-sonnet-4-5-20250929-v1:0",
370+
"vendor": "AwsBedrock",
371+
"apiFlavor": "AwsBedrockConverse",
372+
"modelDetails": {"shouldSkipTemperature": False},
373+
},
374+
)
375+
mock_cls = mocker.patch(
376+
"uipath_langchain.chat.bedrock.UiPathChatBedrockConverse"
377+
)
378+
379+
get_chat_model(
380+
model="anthropic.claude-sonnet-4-5-20250929-v1:0",
381+
temperature=0.7,
382+
max_tokens=4096,
383+
agenthub_config="cfg",
384+
)
385+
386+
_, kwargs = mock_cls.call_args
387+
assert kwargs.get("temperature") == 0.7
388+
389+
def test_gpt_openai_responses_forwards_temperature_when_flag_absent(self, mocker):
390+
"""Older discovery payloads have ``modelDetails: null``; the gate
391+
must default to not-skipping and UiPathChatOpenAI must receive the
392+
caller temperature."""
393+
pytest.importorskip("langchain_openai")
394+
mocker.patch(
395+
"uipath_langchain.chat.chat_model_factory._get_model_info",
396+
return_value={
397+
"modelName": "gpt-5-2025-08-07",
398+
"vendor": "OpenAi",
399+
"apiFlavor": "OpenAiResponses",
400+
"modelDetails": None,
401+
},
402+
)
403+
mock_cls = mocker.patch("uipath_langchain.chat.openai.UiPathChatOpenAI")
404+
405+
get_chat_model(
406+
model="gpt-5-2025-08-07",
407+
temperature=0.3,
408+
max_tokens=2048,
409+
agenthub_config="cfg",
410+
)
411+
412+
_, kwargs = mock_cls.call_args
413+
assert kwargs.get("temperature") == 0.3
414+
415+
def test_byom_custom_name_honors_discovery_flag(self, mocker):
416+
"""BYOM display names don't match any known alias, but the discovery
417+
flag still identifies the underlying model — the gate must use it
418+
and the leaf client must be built without a temperature kwarg."""
419+
pytest.importorskip("langchain_aws")
420+
mocker.patch(
421+
"uipath_langchain.chat.chat_model_factory._get_model_info",
422+
return_value={
423+
"modelName": "Custom BYOM Opus 4.7",
424+
"vendor": "AwsBedrock",
425+
"apiFlavor": "AwsBedrockConverse",
426+
"modelDetails": {"shouldSkipTemperature": True},
427+
},
428+
)
429+
mock_cls = mocker.patch(
430+
"uipath_langchain.chat.bedrock.UiPathChatBedrockConverse"
431+
)
432+
433+
get_chat_model(
434+
model="Custom BYOM Opus 4.7",
435+
temperature=0.7,
436+
max_tokens=4096,
437+
agenthub_config="cfg",
438+
)
439+
440+
_, kwargs = mock_cls.call_args
441+
assert "temperature" not in kwargs
442+
443+
def test_gemini_vertex_forwards_temperature(self, mocker):
444+
"""Third vendor path: flag=False on a Vertex Gemini model must
445+
forward the caller temperature to UiPathChatVertex."""
446+
pytest.importorskip("langchain_google_genai")
447+
pytest.importorskip("google.genai")
448+
mocker.patch(
449+
"uipath_langchain.chat.chat_model_factory._get_model_info",
450+
return_value={
451+
"modelName": "gemini-2.5-pro",
452+
"vendor": "VertexAi",
453+
"apiFlavor": "GeminiGenerateContent",
454+
"modelDetails": {"shouldSkipTemperature": False},
455+
},
456+
)
457+
mock_cls = mocker.patch("uipath_langchain.chat.vertex.UiPathChatVertex")
458+
459+
get_chat_model(
460+
model="gemini-2.5-pro",
461+
temperature=0.5,
462+
max_tokens=2048,
463+
agenthub_config="cfg",
464+
)
465+
466+
_, kwargs = mock_cls.call_args
467+
assert kwargs.get("temperature") == 0.5

uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)