|
11 | 11 | class OpenAI extends Adapter |
12 | 12 | { |
/**
 * GPT-4.5 Preview — OpenAI's most advanced model, with enhanced reasoning,
 * broader knowledge, and improved instruction following.
 */
public const MODEL_GPT_4_5_PREVIEW = 'gpt-4.5-preview';

/**
 * GPT-4.1 — advanced large language model with strong reasoning
 * capabilities and improved context handling.
 */
public const MODEL_GPT_4_1 = 'gpt-4.1';

/**
 * GPT-4o — multimodal model optimized for both text and image
 * processing, with faster response times.
 */
public const MODEL_GPT_4O = 'gpt-4o';

/**
 * o4-mini — compact reasoning model offering good performance with
 * higher throughput and lower latency.
 */
public const MODEL_O4_MINI = 'o4-mini';

/**
 * o3 — balanced reasoning model with good performance on general
 * language tasks and efficient resource usage.
 */
public const MODEL_O3 = 'o3';

/**
 * o3-mini — streamlined reasoning model optimized for speed and
 * efficiency while remaining capable on routine tasks.
 */
public const MODEL_O3_MINI = 'o3-mini';
27 | 42 |
|
28 | 43 | /** |
29 | 44 | * Default OpenAI API endpoint |
@@ -74,7 +89,7 @@ class OpenAI extends Adapter |
74 | 89 | */ |
75 | 90 | public function __construct( |
76 | 91 | string $apiKey, |
77 | | - string $model = self::MODEL_GPT_3_5_TURBO, |
| 92 | + string $model = self::MODEL_O3_MINI, |
78 | 93 | int $maxTokens = 1024, |
79 | 94 | float $temperature = 1.0, |
80 | 95 | ?string $endpoint = null, |
@@ -138,11 +153,22 @@ public function send(array $messages, ?callable $listener = null): Message |
138 | 153 | $payload = [ |
139 | 154 | 'model' => $this->model, |
140 | 155 | 'messages' => $formattedMessages, |
141 | | - 'max_tokens' => $this->maxTokens, |
142 | 156 | 'temperature' => $this->temperature, |
143 | 157 | 'stream' => true, |
144 | 158 | ]; |
145 | 159 |
|
| 160 | + // Use 'max_completion_tokens' for o-series models, else 'max_tokens' |
| 161 | + $oSeriesModels = [ |
| 162 | + self::MODEL_O3, |
| 163 | + self::MODEL_O3_MINI, |
| 164 | + self::MODEL_O4_MINI, |
| 165 | + ]; |
| 166 | + if (in_array($this->model, $oSeriesModels)) { |
| 167 | + $payload['max_completion_tokens'] = $this->maxTokens; |
| 168 | + } else { |
| 169 | + $payload['max_tokens'] = $this->maxTokens; |
| 170 | + } |
| 171 | + |
146 | 172 | $content = ''; |
147 | 173 | $response = $client->fetch( |
148 | 174 | $this->endpoint, |
@@ -230,9 +256,12 @@ protected function process(Chunk $chunk, ?callable $listener): string |
/**
 * List the OpenAI model identifiers supported by this adapter.
 *
 * @return array<int, string> Supported model name constants, newest first.
 */
public function getModels(): array
{
    $supported = [
        self::MODEL_GPT_4_5_PREVIEW,
        self::MODEL_GPT_4_1,
        self::MODEL_GPT_4O,
        self::MODEL_O4_MINI,
        self::MODEL_O3,
        self::MODEL_O3_MINI,
    ];

    return $supported;
}
238 | 267 |
|
|
0 commit comments