Skip to content

Commit 5dfa09a

Browse files
committed
fix(openai): support gpt-5-nano parameters
1 parent c22aa7a commit 5dfa09a

File tree

2 files changed

+58
-13
lines changed

2 files changed

+58
-13
lines changed

README.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@ $openai = new OpenAI(
7070
```
7171

7272
Available OpenAI Models:
73+
- `MODEL_GPT_5_NANO`: GPT-5 Nano - Small GPT-5 variant optimized for low latency and cost-sensitive workloads
7374
- `MODEL_GPT_4_5_PREVIEW`: GPT-4.5 Preview - OpenAI's most advanced model with enhanced reasoning, broader knowledge, and improved instruction following
7475
- `MODEL_GPT_4_1`: GPT-4.1 - Advanced large language model with strong reasoning capabilities and improved context handling
7576
- `MODEL_GPT_4O`: GPT-4o - Multimodal model optimized for both text and image processing with faster response times
@@ -245,4 +246,4 @@ We truly ❤️ pull requests! If you wish to help, you can learn more about how
245246

246247
## Copyright and license
247248

248-
The MIT License (MIT) [http://www.opensource.org/licenses/mit-license.php](http://www.opensource.org/licenses/mit-license.php)
249+
The MIT License (MIT) [http://www.opensource.org/licenses/mit-license.php](http://www.opensource.org/licenses/mit-license.php)

src/Agents/Adapters/OpenAI.php

Lines changed: 56 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,11 @@
1111

1212
class OpenAI extends Adapter
1313
{
14+
/**
15+
* GPT-5 Nano - Small GPT-5 variant optimized for low latency and cost-sensitive workloads
16+
*/
17+
public const MODEL_GPT_5_NANO = 'gpt-5-nano';
18+
1419
/**
1520
* GPT-4.5 Preview - OpenAI's most advanced model with enhanced reasoning, broader knowledge, and improved instruction following
1621
*/
@@ -76,6 +81,11 @@ class OpenAI extends Adapter
7681
*/
7782
protected int $timeout;
7883

84+
/**
85+
* @var bool
86+
*/
87+
protected bool $hasWarnedTemperatureOverride = false;
88+
7989
/**
8090
* Create a new OpenAI adapter
8191
*
@@ -125,7 +135,8 @@ public function isSchemaSupported(): bool
125135
*/
126136
public function send(array $messages, ?callable $listener = null): Message
127137
{
128-
if ($this->getAgent() === null) {
138+
$agent = $this->getAgent();
139+
if ($agent === null) {
129140
throw new \Exception('Agent not set');
130141
}
131142

@@ -147,11 +158,11 @@ public function send(array $messages, ?callable $listener = null): Message
147158
}
148159

149160
$instructions = [];
150-
foreach ($this->getAgent()->getInstructions() as $name => $content) {
161+
foreach ($agent->getInstructions() as $name => $content) {
151162
$instructions[] = '# '.$name."\n\n".$content;
152163
}
153164

154-
$systemMessage = $this->getAgent()->getDescription().
165+
$systemMessage = $agent->getDescription().
155166
(empty($instructions) ? '' : "\n\n".implode("\n\n", $instructions));
156167

157168
if (! empty($systemMessage)) {
@@ -164,10 +175,14 @@ public function send(array $messages, ?callable $listener = null): Message
164175
$payload = [
165176
'model' => $this->model,
166177
'messages' => $formattedMessages,
167-
'temperature' => $this->temperature,
168178
];
179+
$temperature = $this->temperature;
180+
if ($this->usesDefaultTemperatureOnly()) {
181+
$temperature = 1.0;
182+
}
183+
$payload['temperature'] = $temperature;
169184

170-
$schema = $this->getAgent()->getSchema();
185+
$schema = $agent->getSchema();
171186
if ($schema !== null) {
172187
$payload['response_format'] = [
173188
'type' => 'json_schema',
@@ -187,13 +202,7 @@ public function send(array $messages, ?callable $listener = null): Message
187202
$payload['stream'] = true;
188203
}
189204

190-
// Use 'max_completion_tokens' for o-series models, else 'max_tokens'
191-
$oSeriesModels = [
192-
self::MODEL_O3,
193-
self::MODEL_O3_MINI,
194-
self::MODEL_O4_MINI,
195-
];
196-
if (in_array($this->model, $oSeriesModels)) {
205+
if ($this->usesMaxCompletionTokens()) {
197206
$payload['max_completion_tokens'] = $this->maxTokens;
198207
} else {
199208
$payload['max_tokens'] = $this->maxTokens;
@@ -306,6 +315,7 @@ protected function process(Chunk $chunk, ?callable $listener): string
306315
public function getModels(): array
307316
{
308317
return [
318+
self::MODEL_GPT_5_NANO,
309319
self::MODEL_GPT_4_5_PREVIEW,
310320
self::MODEL_GPT_4_1,
311321
self::MODEL_GPT_4O,
@@ -315,6 +325,40 @@ public function getModels(): array
315325
];
316326
}
317327

328+
/**
 * OpenAI expects max_completion_tokens for these models.
 *
 * Newer reasoning-oriented models (and GPT-5 Nano) reject the legacy
 * 'max_tokens' request field, so the payload builder consults this
 * predicate to pick the right key.
 */
protected function usesMaxCompletionTokens(): bool
{
    // match() compares with === exactly like in_array(..., true),
    // so this is a strict model-name lookup with no type juggling.
    return match ($this->model) {
        self::MODEL_GPT_5_NANO,
        self::MODEL_O4_MINI,
        self::MODEL_O3,
        self::MODEL_O3_MINI => true,
        default => false,
    };
}
340+
341+
/**
 * Some models only accept the default temperature (1).
 *
 * Side effect: the first time a caller-supplied temperature would be
 * overridden for such a model, a warning is written via error_log().
 * The warning is emitted at most once per adapter instance
 * (tracked by $hasWarnedTemperatureOverride).
 */
protected function usesDefaultTemperatureOnly(): bool
{
    // Models that reject any temperature other than the API default of 1.0.
    $fixedTemperatureModels = [
        self::MODEL_GPT_5_NANO,
    ];

    if (! in_array($this->model, $fixedTemperatureModels, true)) {
        return false;
    }

    // Warn once when the configured temperature will be ignored.
    if ($this->temperature !== 1.0 && ! $this->hasWarnedTemperatureOverride) {
        $this->hasWarnedTemperatureOverride = true;
        error_log(
            "OpenAI adapter warning: model '{$this->model}' only supports temperature=1.0. "
            ."Overriding provided value {$this->temperature}. "
            .'Set temperature to 1.0 to remove this warning.'
        );
    }

    return true;
}
361+
318362
/**
319363
* Get current model
320364
*

0 commit comments

Comments
 (0)