diff --git a/README.md b/README.md index 384561c..0af81be 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ Consilium queries multiple LLMs in parallel, has each model review the others' r │ STAGE 1: QUERY │ │ │ │ ┌─────────┐ ┌──────────┐ ┌─────────────────┐ │ -│ │ GPT-5.2 │ │ Claude │ │ Gemini 3.1 Pro │ │ +│ │ GPT-5.4 │ │ Claude │ │ Gemini 3.1 Pro │ │ │ │ │ │ Sonnet │ │ │ │ │ └────┬────┘ └────┬─────┘ └────────┬────────┘ │ │ │ │ │ │ @@ -90,7 +90,7 @@ The main orchestrator class. from consilium import Council council = Council( - models=["gpt-5.2", "claude-sonnet-4-6", "gemini-3.1-pro"], + models=["gpt-5.4", "claude-sonnet-4-6", "gemini-3.1-pro"], chairman="claude-sonnet-4-6", max_workers=8, ) @@ -100,7 +100,7 @@ council = Council( | Parameter | Type | Default | Description | |-----------|------|---------|-------------| -| `models` | `list[str]` | `["gpt-5.2", "claude-sonnet-4-6", "gemini-3.1-pro"]` | Models to query in Stage 1 | +| `models` | `list[str]` | `["gpt-5.4", "claude-sonnet-4-6", "gemini-3.1-pro"]` | Models to query in Stage 1 | | `chairman` | `str` | `"claude-sonnet-4-6"` | Model for Stage 3 synthesis | | `max_workers` | `int` | `8` | Max parallel threads | @@ -138,7 +138,7 @@ from consilium import council_query result = council_query( "Analyze this screenshot and list the UI elements", images=[screenshot_bytes], - models=["gpt-5.2", "claude-sonnet-4-6"], + models=["gpt-5.4", "claude-sonnet-4-6"], budget=0.25, skip_review=True, # fast mode: Stage 1 only ) @@ -154,7 +154,7 @@ print(result["cost"]["total_usd"]) "final_answer": "...", "individual_responses": [ { - "model": "openai/gpt-5.2", + "model": "openai/gpt-5.4", "text": "...", "latency_seconds": 2.1, "input_tokens": 1500, @@ -164,7 +164,7 @@ print(result["cost"]["total_usd"]) ], "reviews": [...], "cost": { - "breakdown": {"gpt-5.2": 0.007, "claude-sonnet-4-6": 0.012}, + "breakdown": {"gpt-5.4": 0.007, "claude-sonnet-4-6": 0.012}, "total_usd": 0.019, "total_input_tokens": 3000, 
"total_output_tokens": 800 @@ -179,7 +179,7 @@ print(result["cost"]["total_usd"]) consilium "prompt" [OPTIONS] Options: - --models TEXT Comma-separated model IDs (default: gpt-5.2,claude-sonnet-4-6,gemini-3.1-pro) + --models TEXT Comma-separated model IDs (default: gpt-5.4,claude-sonnet-4-6,gemini-3.1-pro) --chairman TEXT Chairman model for synthesis (default: claude-sonnet-4-6) --image PATH Image file to include (repeatable) --budget FLOAT Max spend in USD @@ -207,7 +207,7 @@ consilium "Write a haiku about AI" --budget 0.10 consilium "List 3 colors" --json | jq '.final_answer' # Custom models -consilium "Hello" --models gpt-5.2,gemini-3.1-pro --chairman gpt-5.2 +consilium "Hello" --models gpt-5.4,gemini-3.1-pro --chairman gpt-5.4 ``` ## Model Support @@ -216,14 +216,14 @@ Consilium supports any model from these providers: | Provider | Models | Env Var | |----------|--------|---------| -| OpenAI | `gpt-5.2`, `gpt-5.2-pro`, `gpt-5`, `gpt-5-mini`, `o3`, `o4-mini` | `OPENAI_API_KEY` | +| OpenAI | `gpt-5.4`, `gpt-5.4-pro`, `gpt-5`, `gpt-5-mini`, `o3`, `o4-mini` | `OPENAI_API_KEY` | | Anthropic | `claude-opus-4-6`, `claude-sonnet-4-6`, `claude-haiku-4-5`, `claude-sonnet-4-5` | `ANTHROPIC_API_KEY` | | Google | `gemini-3.1-pro`, `gemini-3-flash`, `gemini-2.5-pro`, `gemini-2.5-flash` | `GOOGLE_API_KEY` | Use any model with the `provider/model` format: ```python -council = Council(models=["openai/gpt-5.2", "anthropic/claude-sonnet-4-6"]) +council = Council(models=["openai/gpt-5.4", "anthropic/claude-sonnet-4-6"]) ``` ## Budget Control diff --git a/src/consilium/__main__.py b/src/consilium/__main__.py index ff48bfb..2e7e85f 100644 --- a/src/consilium/__main__.py +++ b/src/consilium/__main__.py @@ -4,7 +4,7 @@ consilium "What color is the sky?" 
consilium "Describe this screenshot" --image screenshot.png - consilium "Hello" --models gpt-5.2,claude-sonnet-4-6 --no-review + consilium "Hello" --models gpt-5.4,claude-sonnet-4-6 --no-review consilium "Plan a trip" --budget 0.50 consilium "Compare approaches" --json """ diff --git a/src/consilium/cost.py b/src/consilium/cost.py index 3adcc5a..a582a47 100644 --- a/src/consilium/cost.py +++ b/src/consilium/cost.py @@ -28,6 +28,8 @@ "gemini-2.5-flash": (0.15, 0.60), "gemini-2.5-pro": (1.25, 10.0), # OpenAI — GPT-5.x series (current) + "gpt-5.4": (2.0, 8.0), + "gpt-5.4-pro": (10.0, 40.0), "gpt-5.2": (2.0, 8.0), "gpt-5.2-pro": (10.0, 40.0), "gpt-5": (2.0, 8.0), diff --git a/src/consilium/model_registry.py b/src/consilium/model_registry.py index 0d0d3fb..a50faae 100644 --- a/src/consilium/model_registry.py +++ b/src/consilium/model_registry.py @@ -49,7 +49,7 @@ def __post_init__(self) -> None: DEFAULTS: Dict[str, Dict[str, str]] = { "openai": { - "flagship": "gpt-5.4", + "flagship": "gpt-5.4", "fast": "gpt-5-mini", "reasoning": "o3", "reasoning_fast": "o4-mini",