Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ $openai = new OpenAI(
```

Available OpenAI Models:
- `MODEL_GPT_5_NANO`: GPT-5 Nano - Small GPT-5 variant optimized for low latency and cost-sensitive workloads
- `MODEL_GPT_4_5_PREVIEW`: GPT-4.5 Preview - OpenAI's most advanced model with enhanced reasoning, broader knowledge, and improved instruction following
- `MODEL_GPT_4_1`: GPT-4.1 - Advanced large language model with strong reasoning capabilities and improved context handling
- `MODEL_GPT_4O`: GPT-4o - Multimodal model optimized for both text and image processing with faster response times
Expand Down Expand Up @@ -245,4 +246,4 @@ We truly ❤️ pull requests! If you wish to help, you can learn more about how

## Copyright and license

The MIT License (MIT) [http://www.opensource.org/licenses/mit-license.php](http://www.opensource.org/licenses/mit-license.php)
The MIT License (MIT) [http://www.opensource.org/licenses/mit-license.php](http://www.opensource.org/licenses/mit-license.php)
68 changes: 56 additions & 12 deletions src/Agents/Adapters/OpenAI.php
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,11 @@

class OpenAI extends Adapter
{
/**
* GPT-5 Nano - Small GPT-5 variant optimized for low latency and cost-sensitive workloads
*/
public const MODEL_GPT_5_NANO = 'gpt-5-nano';

/**
* GPT-4.5 Preview - OpenAI's most advanced model with enhanced reasoning, broader knowledge, and improved instruction following
*/
Expand Down Expand Up @@ -76,6 +81,11 @@ class OpenAI extends Adapter
*/
protected int $timeout;

/**
* @var bool
*/
protected bool $hasWarnedTemperatureOverride = false;

/**
* Create a new OpenAI adapter
*
Expand Down Expand Up @@ -125,7 +135,8 @@ public function isSchemaSupported(): bool
*/
public function send(array $messages, ?callable $listener = null): Message
{
if ($this->getAgent() === null) {
$agent = $this->getAgent();
if ($agent === null) {
throw new \Exception('Agent not set');
}

Expand All @@ -147,11 +158,11 @@ public function send(array $messages, ?callable $listener = null): Message
}

$instructions = [];
foreach ($this->getAgent()->getInstructions() as $name => $content) {
foreach ($agent->getInstructions() as $name => $content) {
$instructions[] = '# '.$name."\n\n".$content;
}

$systemMessage = $this->getAgent()->getDescription().
$systemMessage = $agent->getDescription().
(empty($instructions) ? '' : "\n\n".implode("\n\n", $instructions));

if (! empty($systemMessage)) {
Expand All @@ -164,10 +175,14 @@ public function send(array $messages, ?callable $listener = null): Message
$payload = [
'model' => $this->model,
'messages' => $formattedMessages,
'temperature' => $this->temperature,
];
$temperature = $this->temperature;
if ($this->usesDefaultTemperatureOnly()) {
$temperature = 1.0;
}
$payload['temperature'] = $temperature;
Comment thread
ChiragAgg5k marked this conversation as resolved.

$schema = $this->getAgent()->getSchema();
$schema = $agent->getSchema();
if ($schema !== null) {
Comment thread
coderabbitai[bot] marked this conversation as resolved.
$payload['response_format'] = [
'type' => 'json_schema',
Expand All @@ -187,13 +202,7 @@ public function send(array $messages, ?callable $listener = null): Message
$payload['stream'] = true;
}

// Use 'max_completion_tokens' for o-series models, else 'max_tokens'
$oSeriesModels = [
self::MODEL_O3,
self::MODEL_O3_MINI,
self::MODEL_O4_MINI,
];
if (in_array($this->model, $oSeriesModels)) {
if ($this->usesMaxCompletionTokens()) {
$payload['max_completion_tokens'] = $this->maxTokens;
} else {
$payload['max_tokens'] = $this->maxTokens;
Expand Down Expand Up @@ -306,6 +315,7 @@ protected function process(Chunk $chunk, ?callable $listener): string
public function getModels(): array
{
return [
self::MODEL_GPT_5_NANO,
self::MODEL_GPT_4_5_PREVIEW,
self::MODEL_GPT_4_1,
self::MODEL_GPT_4O,
Expand All @@ -315,6 +325,40 @@ public function getModels(): array
];
}

/**
* OpenAI expects max_completion_tokens for these models.
*/
/**
 * Whether the configured model requires the `max_completion_tokens`
 * request field instead of the legacy `max_tokens` field.
 */
protected function usesMaxCompletionTokens(): bool
{
    $maxCompletionTokenModels = [
        self::MODEL_GPT_5_NANO,
        self::MODEL_O4_MINI,
        self::MODEL_O3,
        self::MODEL_O3_MINI,
    ];

    return in_array($this->model, $maxCompletionTokenModels, true);
}

/**
* Some models only accept the default temperature (1).
*/
/**
 * Whether the configured model only accepts the default temperature (1.0).
 *
 * As a side effect, logs a one-time warning when a non-default temperature
 * was configured for such a model, since the caller will override it.
 */
protected function usesDefaultTemperatureOnly(): bool
{
    $defaultTemperatureModels = [
        self::MODEL_GPT_5_NANO,
    ];

    if (! in_array($this->model, $defaultTemperatureModels, true)) {
        return false;
    }

    // Warn once per adapter instance if the configured temperature will be ignored.
    if ($this->temperature !== 1.0 && ! $this->hasWarnedTemperatureOverride) {
        $this->hasWarnedTemperatureOverride = true;
        error_log(
            "OpenAI adapter warning: model '{$this->model}' only supports temperature=1.0. "
            ."Overriding provided value {$this->temperature}. "
            .'Set temperature to 1.0 to remove this warning.'
        );
    }

    return true;
}
Comment thread
ChiragAgg5k marked this conversation as resolved.

/**
* Get current model
*
Expand Down
Loading