Skip to content

Commit 848e00b

Browse files
feat: Using provider/model syntax in modelName examples within openapi spec
1 parent e1927ea commit 848e00b

File tree

4 files changed: +22 additions, −22 deletions

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 7
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/browserbase%2Fstagehand-1705ff86e7ec80d6be2ddbb0e3cbee821f3e95d68fa6a48c790f586e3470e678.yml
3-
openapi_spec_hash: cf0d4dad078a7f7c1256b437e349b911
4-
config_hash: 3c21550e2c94cad4339d3093d794beb0
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/browserbase%2Fstagehand-119383e808f394a7676e901bac8b97b6d7402d187d03452fd8d62b31d4085580.yml
3+
openapi_spec_hash: 8a8d7be19d95f849098690863fe9a71a
4+
config_hash: 1f709f8775e13029dc60064ef3a94355

src/stagehand/types/model_config_param.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
class ModelConfigObject(TypedDict, total=False):
1414
model_name: Required[Annotated[str, PropertyInfo(alias="modelName")]]
15-
"""Model name string without prefix (e.g., 'gpt-5-nano', 'claude-4.5-opus')"""
15+
"""Model name string (e.g., 'openai/gpt-5-nano', 'anthropic/claude-4.5-opus')"""
1616

1717
api_key: Annotated[str, PropertyInfo(alias="apiKey")]
1818
"""API key for the model provider"""

tests/api_resources/test_sessions.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -691,15 +691,15 @@ def test_path_params_observe_overload_2(self, client: Stagehand) -> None:
691691
@parametrize
692692
def test_method_start(self, client: Stagehand) -> None:
693693
session = client.sessions.start(
694-
model_name="gpt-4o",
694+
model_name="openai/gpt-4o",
695695
)
696696
assert_matches_type(SessionStartResponse, session, path=["response"])
697697

698698
@pytest.mark.skip(reason="Prism tests are disabled")
699699
@parametrize
700700
def test_method_start_with_all_params(self, client: Stagehand) -> None:
701701
session = client.sessions.start(
702-
model_name="gpt-4o",
702+
model_name="openai/gpt-4o",
703703
act_timeout_ms=0,
704704
browser={
705705
"cdp_url": "ws://localhost:9222",
@@ -787,7 +787,7 @@ def test_method_start_with_all_params(self, client: Stagehand) -> None:
787787
@parametrize
788788
def test_raw_response_start(self, client: Stagehand) -> None:
789789
response = client.sessions.with_raw_response.start(
790-
model_name="gpt-4o",
790+
model_name="openai/gpt-4o",
791791
)
792792

793793
assert response.is_closed is True
@@ -799,7 +799,7 @@ def test_raw_response_start(self, client: Stagehand) -> None:
799799
@parametrize
800800
def test_streaming_response_start(self, client: Stagehand) -> None:
801801
with client.sessions.with_streaming_response.start(
802-
model_name="gpt-4o",
802+
model_name="openai/gpt-4o",
803803
) as response:
804804
assert not response.is_closed
805805
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1480,15 +1480,15 @@ async def test_path_params_observe_overload_2(self, async_client: AsyncStagehand
14801480
@parametrize
14811481
async def test_method_start(self, async_client: AsyncStagehand) -> None:
14821482
session = await async_client.sessions.start(
1483-
model_name="gpt-4o",
1483+
model_name="openai/gpt-4o",
14841484
)
14851485
assert_matches_type(SessionStartResponse, session, path=["response"])
14861486

14871487
@pytest.mark.skip(reason="Prism tests are disabled")
14881488
@parametrize
14891489
async def test_method_start_with_all_params(self, async_client: AsyncStagehand) -> None:
14901490
session = await async_client.sessions.start(
1491-
model_name="gpt-4o",
1491+
model_name="openai/gpt-4o",
14921492
act_timeout_ms=0,
14931493
browser={
14941494
"cdp_url": "ws://localhost:9222",
@@ -1576,7 +1576,7 @@ async def test_method_start_with_all_params(self, async_client: AsyncStagehand)
15761576
@parametrize
15771577
async def test_raw_response_start(self, async_client: AsyncStagehand) -> None:
15781578
response = await async_client.sessions.with_raw_response.start(
1579-
model_name="gpt-4o",
1579+
model_name="openai/gpt-4o",
15801580
)
15811581

15821582
assert response.is_closed is True
@@ -1588,7 +1588,7 @@ async def test_raw_response_start(self, async_client: AsyncStagehand) -> None:
15881588
@parametrize
15891589
async def test_streaming_response_start(self, async_client: AsyncStagehand) -> None:
15901590
async with async_client.sessions.with_streaming_response.start(
1591-
model_name="gpt-4o",
1591+
model_name="openai/gpt-4o",
15921592
) as response:
15931593
assert not response.is_closed
15941594
assert response.http_request.headers.get("X-Stainless-Lang") == "python"

tests/test_client.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -891,7 +891,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
891891
respx_mock.post("/v1/sessions/start").mock(side_effect=httpx.TimeoutException("Test timeout error"))
892892

893893
with pytest.raises(APITimeoutError):
894-
client.sessions.with_streaming_response.start(model_name="gpt-4o").__enter__()
894+
client.sessions.with_streaming_response.start(model_name="openai/gpt-4o").__enter__()
895895

896896
assert _get_open_connections(client) == 0
897897

@@ -901,7 +901,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
901901
respx_mock.post("/v1/sessions/start").mock(return_value=httpx.Response(500))
902902

903903
with pytest.raises(APIStatusError):
904-
client.sessions.with_streaming_response.start(model_name="gpt-4o").__enter__()
904+
client.sessions.with_streaming_response.start(model_name="openai/gpt-4o").__enter__()
905905
assert _get_open_connections(client) == 0
906906

907907
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -930,7 +930,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
930930

931931
respx_mock.post("/v1/sessions/start").mock(side_effect=retry_handler)
932932

933-
response = client.sessions.with_raw_response.start(model_name="gpt-4o")
933+
response = client.sessions.with_raw_response.start(model_name="openai/gpt-4o")
934934

935935
assert response.retries_taken == failures_before_success
936936
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -955,7 +955,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
955955
respx_mock.post("/v1/sessions/start").mock(side_effect=retry_handler)
956956

957957
response = client.sessions.with_raw_response.start(
958-
model_name="gpt-4o", extra_headers={"x-stainless-retry-count": Omit()}
958+
model_name="openai/gpt-4o", extra_headers={"x-stainless-retry-count": Omit()}
959959
)
960960

961961
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@@ -980,7 +980,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
980980
respx_mock.post("/v1/sessions/start").mock(side_effect=retry_handler)
981981

982982
response = client.sessions.with_raw_response.start(
983-
model_name="gpt-4o", extra_headers={"x-stainless-retry-count": "42"}
983+
model_name="openai/gpt-4o", extra_headers={"x-stainless-retry-count": "42"}
984984
)
985985

986986
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
@@ -1870,7 +1870,7 @@ async def test_retrying_timeout_errors_doesnt_leak(
18701870
respx_mock.post("/v1/sessions/start").mock(side_effect=httpx.TimeoutException("Test timeout error"))
18711871

18721872
with pytest.raises(APITimeoutError):
1873-
await async_client.sessions.with_streaming_response.start(model_name="gpt-4o").__aenter__()
1873+
await async_client.sessions.with_streaming_response.start(model_name="openai/gpt-4o").__aenter__()
18741874

18751875
assert _get_open_connections(async_client) == 0
18761876

@@ -1882,7 +1882,7 @@ async def test_retrying_status_errors_doesnt_leak(
18821882
respx_mock.post("/v1/sessions/start").mock(return_value=httpx.Response(500))
18831883

18841884
with pytest.raises(APIStatusError):
1885-
await async_client.sessions.with_streaming_response.start(model_name="gpt-4o").__aenter__()
1885+
await async_client.sessions.with_streaming_response.start(model_name="openai/gpt-4o").__aenter__()
18861886
assert _get_open_connections(async_client) == 0
18871887

18881888
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -1911,7 +1911,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
19111911

19121912
respx_mock.post("/v1/sessions/start").mock(side_effect=retry_handler)
19131913

1914-
response = await client.sessions.with_raw_response.start(model_name="gpt-4o")
1914+
response = await client.sessions.with_raw_response.start(model_name="openai/gpt-4o")
19151915

19161916
assert response.retries_taken == failures_before_success
19171917
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -1936,7 +1936,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
19361936
respx_mock.post("/v1/sessions/start").mock(side_effect=retry_handler)
19371937

19381938
response = await client.sessions.with_raw_response.start(
1939-
model_name="gpt-4o", extra_headers={"x-stainless-retry-count": Omit()}
1939+
model_name="openai/gpt-4o", extra_headers={"x-stainless-retry-count": Omit()}
19401940
)
19411941

19421942
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@@ -1961,7 +1961,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
19611961
respx_mock.post("/v1/sessions/start").mock(side_effect=retry_handler)
19621962

19631963
response = await client.sessions.with_raw_response.start(
1964-
model_name="gpt-4o", extra_headers={"x-stainless-retry-count": "42"}
1964+
model_name="openai/gpt-4o", extra_headers={"x-stainless-retry-count": "42"}
19651965
)
19661966

19671967
assert response.http_request.headers.get("x-stainless-retry-count") == "42"

Commit comments: 0