feat(together-ai): update model YAMLs [bot] #919
Conversation
/test-models
Gateway test results
Failures (48)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
    for _tc in _message.tool_calls:
        print(f"Function: {_tc.function.name}")
        print(f"Arguments: {_tc.function.arguments}")
else:
    print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
    raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant with access to tools. You MUST strictly use the provided tools to answer. Never respond with plain text when a tool is available."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant with access to tools. You MUST strictly call multiple tools in parallel whenever possible. Never call them sequentially."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            for _tc in delta.tool_calls:
                _tool_call_indices.add(_tc.index)
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
    raise Exception(
        f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
        f"got {len(_tool_call_indices)}"
    )
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant. You MUST think step by step and show your reasoning. Never skip reasoning steps."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant. You MUST think step by step and show your reasoning. Never skip reasoning steps."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            for _tc in delta.tool_calls:
                _tool_call_indices.add(_tc.index)
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
    raise Exception(
        f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
        f"got {len(_tool_call_indices)}"
    )
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            for _tc in delta.tool_calls:
                _tool_call_indices.add(_tc.index)
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
    raise Exception(
        f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
        f"got {len(_tool_call_indices)}"
    )
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
import json
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response_schema = json.loads('''{
"title": "CalendarEvent",
"type": "object",
"properties": {
"name": { "type": "string" },
"date": { "type": "string" },
"participants": {
"type": "array",
"items": { "type": "string" }
}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}''')
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday. Extract the event details as JSON."},
],
response_format={"type": "json_schema", "json_schema": {"name": "CalendarEvent", "schema": response_schema}},
stream=False,
)
import json as _json
_content = response.choices[0].message.content
print(_content)
if not _content:
    raise Exception("VALIDATION FAILED: structured-output - response content is empty")
_parsed = _json.loads(_content)
if "name" not in _parsed or "date" not in _parsed or "participants" not in _parsed:
    raise Exception("VALIDATION FAILED: structured-output - missing expected fields (name, date, participants)")
if not isinstance(_parsed.get("participants"), list):
    raise Exception("VALIDATION FAILED: structured-output - 'participants' is not a list, schema not enforced")
if set(_parsed.keys()) != {"name", "date", "participants"}:
    raise Exception(
        f"VALIDATION FAILED: structured-output - unexpected keys present: {set(_parsed.keys())}"
    )
print("VALIDATION: structured-output SUCCESS")
Error: Code snippet
from openai import OpenAI
import json
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response_schema = json.loads('''{
"title": "CalendarEvent",
"type": "object",
"properties": {
"name": { "type": "string" },
"date": { "type": "string" },
"participants": {
"type": "array",
"items": { "type": "string" }
}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}''')
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday. Extract the event details as JSON."},
],
response_format={"type": "json_schema", "json_schema": {"name": "CalendarEvent", "schema": response_schema}},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            _accumulated += delta.content
            print(delta.content, end="", flush=True)
if not _accumulated:
    raise Exception("VALIDATION FAILED: structured-output stream - no content received")
_parsed = _json.loads(_accumulated)
if "name" not in _parsed or "date" not in _parsed or "participants" not in _parsed:
    raise Exception("VALIDATION FAILED: structured-output stream - missing expected fields (name, date, participants)")
if not isinstance(_parsed.get("participants"), list):
    raise Exception("VALIDATION FAILED: structured-output stream - 'participants' is not a list, schema not enforced")
if set(_parsed.keys()) != {"name", "date", "participants"}:
    raise Exception(
        f"VALIDATION FAILED: structured-output stream - unexpected keys present: {set(_parsed.keys())}"
    )
print("\nVALIDATION: structured-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3-Coder-Next-FP8",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
    for _tc in _message.tool_calls:
        print(f"Function: {_tc.function.name}")
        print(f"Arguments: {_tc.function.arguments}")
else:
    print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
    raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            _accumulated += delta.content
            print(delta.content, end="", flush=True)
if not _accumulated:
    raise Exception("VALIDATION FAILED: json-output stream - no content received")
_json.loads(_accumulated)
print("\nVALIDATION: json-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=False,
)
import json as _json
_content = response.choices[0].message.content
print(_content)
if not _content:
    raise Exception("VALIDATION FAILED: json-output - response content is empty")
_json.loads(_content)
print("VALIDATION: json-output SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=True,
)
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=False,
)
print(response.choices[0].message.content)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1-Original",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
import json
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response_schema = json.loads('''{
"title": "CalendarEvent",
"type": "object",
"properties": {
"name": { "type": "string" },
"date": { "type": "string" },
"participants": {
"type": "array",
"items": { "type": "string" }
}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}''')
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday. Extract the event details as JSON."},
],
response_format={"type": "json_schema", "json_schema": {"name": "CalendarEvent", "schema": response_schema}},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            _accumulated += delta.content
            print(delta.content, end="", flush=True)
if not _accumulated:
    raise Exception("VALIDATION FAILED: structured-output stream - no content received")
_parsed = _json.loads(_accumulated)
if "name" not in _parsed or "date" not in _parsed or "participants" not in _parsed:
    raise Exception("VALIDATION FAILED: structured-output stream - missing expected fields (name, date, participants)")
if not isinstance(_parsed.get("participants"), list):
    raise Exception("VALIDATION FAILED: structured-output stream - 'participants' is not a list, schema not enforced")
if set(_parsed.keys()) != {"name", "date", "participants"}:
    raise Exception(
        f"VALIDATION FAILED: structured-output stream - unexpected keys present: {set(_parsed.keys())}"
    )
print("\nVALIDATION: structured-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            for _tc in delta.tool_calls:
                _tool_call_indices.add(_tc.index)
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
    raise Exception(
        f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
        f"got {len(_tool_call_indices)}"
    )
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
    for _tc in _message.tool_calls:
        print(f"Function: {_tc.function.name}")
        print(f"Arguments: {_tc.function.arguments}")
else:
    print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
    raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
import json
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response_schema = json.loads('''{
"title": "CalendarEvent",
"type": "object",
"properties": {
"name": { "type": "string" },
"date": { "type": "string" },
"participants": {
"type": "array",
"items": { "type": "string" }
}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}''')
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday. Extract the event details as JSON."},
],
response_format={"type": "json_schema", "json_schema": {"name": "CalendarEvent", "schema": response_schema}},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            _accumulated += delta.content
            print(delta.content, end="", flush=True)
if not _accumulated:
    raise Exception("VALIDATION FAILED: structured-output stream - no content received")
_parsed = _json.loads(_accumulated)
if "name" not in _parsed or "date" not in _parsed or "participants" not in _parsed:
    raise Exception("VALIDATION FAILED: structured-output stream - missing expected fields (name, date, participants)")
if not isinstance(_parsed.get("participants"), list):
    raise Exception("VALIDATION FAILED: structured-output stream - 'participants' is not a list, schema not enforced")
if set(_parsed.keys()) != {"name", "date", "participants"}:
    raise Exception(
        f"VALIDATION FAILED: structured-output stream - unexpected keys present: {set(_parsed.keys())}"
    )
print("\nVALIDATION: structured-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=False,
)
print(response.choices[0].message.content)
Error: Code snippet
from openai import OpenAI
import json
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response_schema = json.loads('''{
"title": "CalendarEvent",
"type": "object",
"properties": {
"name": { "type": "string" },
"date": { "type": "string" },
"participants": {
"type": "array",
"items": { "type": "string" }
}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}''')
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday. Extract the event details as JSON."},
],
response_format={"type": "json_schema", "json_schema": {"name": "CalendarEvent", "schema": response_schema}},
stream=False,
)
import json as _json
_content = response.choices[0].message.content
print(_content)
if not _content:
    raise Exception("VALIDATION FAILED: structured-output - response content is empty")
_parsed = _json.loads(_content)
if "name" not in _parsed or "date" not in _parsed or "participants" not in _parsed:
    raise Exception("VALIDATION FAILED: structured-output - missing expected fields (name, date, participants)")
if not isinstance(_parsed.get("participants"), list):
    raise Exception("VALIDATION FAILED: structured-output - 'participants' is not a list, schema not enforced")
if set(_parsed.keys()) != {"name", "date", "participants"}:
    raise Exception(
        f"VALIDATION FAILED: structured-output - unexpected keys present: {set(_parsed.keys())}"
    )
print("VALIDATION: structured-output SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
_accumulated += delta.content
print(delta.content, end="", flush=True)
if not _accumulated:
raise Exception("VALIDATION FAILED: json-output stream - no content received")
_json.loads(_accumulated)
print("\nVALIDATION: json-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=False,
)
import json as _json
_content = response.choices[0].message.content
print(_content)
if not _content:
raise Exception("VALIDATION FAILED: json-output - response content is empty")
_json.loads(_content)
print("VALIDATION: json-output SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/MiniMaxAI-MiniMax-M2.5",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=True,
)
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
Skipped (12)
/test-models
Gateway test results
Failures (28)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
for _tc in _message.tool_calls:
print(f"Function: {_tc.function.name}")
print(f"Arguments: {_tc.function.arguments}")
else:
print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3-Coder-Next-FP8",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
_accumulated += delta.content
print(delta.content, end="", flush=True)
if not _accumulated:
raise Exception("VALIDATION FAILED: json-output stream - no content received")
_json.loads(_accumulated)
print("\nVALIDATION: json-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant with access to tools. You MUST strictly use the provided tools to answer. Never respond with plain text when a tool is available."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant with access to tools. You MUST strictly call multiple tools in parallel whenever possible. Never call them sequentially."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant. You MUST think step by step and show your reasoning. Never skip reasoning steps."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant. You MUST think step by step and show your reasoning. Never skip reasoning steps."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")Skipped (14)
/test-models
Cursor Bugbot has reviewed your changes and found 2 potential issues.
❌ Bugbot Autofix is OFF. To automatically fix reported issues with cloud agents, enable autofix in the Cursor dashboard.
Reviewed by Cursor Bugbot for commit 438d118.
  mode: chat
  model: MiniMaxAI/MiniMax-M2.5
- provisioning: serverless
+ provisioning: provisioned
MiniMax availability is misclassified
Medium Severity
MiniMaxAI/MiniMax-M2.5 is still listed by Together as a serverless model with a larger context window, but this change marks it as provisioned and lowers its context_window. Consumers may hide the serverless model or reject prompts that are actually valid.
Additional Locations (1)
Reviewed by Cursor Bugbot for commit 438d118.
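For context, a minimal sketch of the kind of YAML entry the report is describing, assuming only the field names visible in the hunk above (mode, model, provisioning) plus the context_window field the report mentions; the numeric values are placeholders for illustration, not values from this PR:

# hypothetical excerpt, not the actual file in this PR
  mode: chat
  model: MiniMaxAI/MiniMax-M2.5
- provisioning: serverless
- context_window: 262144   # placeholder for the larger serverless limit
+ provisioning: provisioned
+ context_window: 131072   # placeholder for the smaller provisioned limit

If the serverless listing is still live on Together, keeping a separate serverless entry (or leaving this one serverless) would sidestep the failure modes the report lists.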
  region: "*"
  features:
    - function_calling
    - json_output
Original R1 capabilities are overstated
Medium Severity
DeepSeek-R1-Original did not gain native function_calling or json_output; those capabilities belong to newer R1 variants. This metadata can make clients send unsupported tool or JSON-mode requests to the original model.
Reviewed by Cursor Bugbot for commit 438d118.
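Again as a rough sketch, assuming the features layout from the hunk above; the model identifiers and the idea of an empty feature list are assumptions for illustration, not the file's actual contents:

# hypothetical excerpt, not the actual file in this PR
model: deepseek-ai/DeepSeek-R1-Original   # assumed identifier for the original R1 entry
region: "*"
features: []        # per the report, no native function_calling or json_output here
---
model: deepseek-ai/DeepSeek-R1            # newer variant where the capabilities do apply
region: "*"
features:
  - function_calling
  - json_output

Keeping the capability flags on the newer variants only would stop clients from routing tool or JSON-mode requests to the original model.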
Gateway test results
Failures (28)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
import json
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response_schema = json.loads('''{
"title": "CalendarEvent",
"type": "object",
"properties": {
"name": { "type": "string" },
"date": { "type": "string" },
"participants": {
"type": "array",
"items": { "type": "string" }
}
},
"required": ["name", "date", "participants"],
"additionalProperties": false
}''')
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3",
messages=[
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday. Extract the event details as JSON."},
],
response_format={"type": "json_schema", "json_schema": {"name": "CalendarEvent", "schema": response_schema}},
stream=True,
)
import json as _json
_accumulated = ""
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
_accumulated += delta.content
print(delta.content, end="", flush=True)
if not _accumulated:
raise Exception("VALIDATION FAILED: structured-output stream - no content received")
_parsed = _json.loads(_accumulated)
if "name" not in _parsed or "date" not in _parsed or "participants" not in _parsed:
raise Exception("VALIDATION FAILED: structured-output stream - missing expected fields (name, date, participants)")
if not isinstance(_parsed.get("participants"), list):
raise Exception("VALIDATION FAILED: structured-output stream - 'participants' is not a list, schema not enforced")
if set(_parsed.keys()) != {"name", "date", "participants"}:
raise Exception(
f"VALIDATION FAILED: structured-output stream - unexpected keys present: {set(_parsed.keys())}"
)
print("\nVALIDATION: structured-output stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-V3.1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/openai-gpt-oss-120b",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3-Coder-Next-FP8",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant with access to tools. You MUST strictly call multiple tools in parallel whenever possible. Never call them sequentially."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant. You MUST think step by step and show your reasoning. Never skip reasoning steps."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant with access to tools. You MUST strictly use the provided tools to answer. Never respond with plain text when a tool is available."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/moonshotai-Kimi-K2.5",
messages=[
{"role": "system", "content": "You are a helpful assistant. You MUST think step by step and show your reasoning. Never skip reasoning steps."},
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hi, how can I help you"},
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/zai-org-GLM-5",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
for _tc in _message.tool_calls:
print(f"Function: {_tc.function.name}")
print(f"Arguments: {_tc.function.arguments}")
else:
print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/deepseek-ai-DeepSeek-R1",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London and Paris. You MUST make both tool calls strictly in parallel, not sequentially."},
],
tools=tools,
tool_choice="auto",
parallel_tool_calls=True,
stream=True,
)
_tool_call_indices = set()
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
for _tc in delta.tool_calls:
_tool_call_indices.add(_tc.index)
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if len(_tool_call_indices) < 1:
raise Exception(
f"VALIDATION FAILED: parallel-tool-call stream - expected at least 1 tool call, "
f"got {len(_tool_call_indices)}"
)
print(f"\nNumber of parallel tool calls: {len(_tool_call_indices)}")
print("VALIDATION: parallel-tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.5-9B",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")Skipped (14)
Auto-generated by poc-agent for provider together-ai.
Note
Medium Risk
Primarily metadata changes, but updates to provisioning, context_window limits, and new deprecation dates could affect model selection and runtime routing if consumers rely on these fields.
Overview
Updates Together.ai provider model YAMLs with refreshed capabilities and lifecycle metadata.
Notable changes include reduced context_window for MiniMax-M2.5*, provisioning updates (e.g., MiniMax-M2.5 and DeepSeek-R1-Original to provisioned), new features/modalities flags (e.g., system_messages, video/image inputs), added deprecationDate on several Qwen/DeepSeek models, and expanded/normalized sources, plus a few status/thinking flag adjustments (e.g., imagen-4.0-preview to status: preview).
Reviewed by Cursor Bugbot for commit 438d118. Bugbot is set up for automated code reviews on this repo. Configure here.
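As a rough illustration of the risk called out in the note above: a consumer that filters models by provisioning, context_window, or deprecationDate would change its routing when those fields change. The sketch below is a minimal, hypothetical Python example; the YAML shape, field values, and the is_routable helper are assumptions made for illustration and are not taken from this repo's schema or code.
from datetime import date
import yaml  # PyYAML, assumed available in the consumer's environment

# Hypothetical model YAML entry, loosely mirroring the fields mentioned in the summary above.
EXAMPLE_MODEL_YAML = """
name: example-together-ai-model
provisioning: provisioned
context_window: 131072
deprecationDate: 2026-01-01
"""

def is_routable(model: dict, today: date, min_context: int = 32768) -> bool:
    # A consumer relying on these metadata fields might gate routing like this.
    if model.get("provisioning") != "provisioned":
        return False
    deprecation = model.get("deprecationDate")  # PyYAML parses unquoted ISO dates into datetime.date
    if deprecation is not None and today >= deprecation:
        return False
    # A reduced context_window can silently exclude a model for long-context requests.
    return model.get("context_window", 0) >= min_context

model = yaml.safe_load(EXAMPLE_MODEL_YAML)
print(is_routable(model, date.today()))
Under these assumptions, tightening context_window or adding a deprecationDate flips is_routable from True to False, which is why such metadata-only edits are flagged as medium risk.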