Skip to content

Commit d97fa83

Browse files
authored
feat: display model provider and default model throughout CLI (#324)
- Add DEFAULT_MODEL_IDS constant mapping providers to model IDs - Show model in TUI provider selection (e.g., 'Anthropic (claude-sonnet-4-5-20250514)') - Show model in Review Configuration confirm screen - Show model info in success screens after create/add agent - Show provider in dev TUI, dev --logs, dev --invoke - Show provider in invoke TUI and invoke CLI - Add modelProvider field to AgentEnvSpec schema for persistence - Update OpenAI templates from gpt-4o to gpt-4.1 - Update Gemini templates from gemini-2.0-flash to gemini-2.5-flash
1 parent 3b925ed commit d97fa83

File tree

22 files changed

+123
-37
lines changed

22 files changed

+123
-37
lines changed

src/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1357,7 +1357,7 @@ def load_model() -> LLM:
     # CrewAI requires OPENAI_API_KEY env var (ignores api_key parameter)
     os.environ["OPENAI_API_KEY"] = api_key
     return LLM(
-        model="openai/gpt-4o",
+        model="openai/gpt-4.1",
         api_key=api_key
     )
 {{/if}}
@@ -1397,7 +1397,7 @@ def load_model() -> LLM:
     # CrewAI requires GEMINI_API_KEY env var (ignores api_key parameter)
     os.environ["GEMINI_API_KEY"] = api_key
     return LLM(
-        model="gemini/gemini-2.0-flash",
+        model="gemini/gemini-2.5-flash",
         api_key=api_key
     )
 {{/if}}
@@ -1954,7 +1954,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatOpenAI:
     """Get authenticated OpenAI model client."""
     return ChatOpenAI(
-        model="gpt-4o",
+        model="gpt-4.1",
         api_key=_get_api_key()
     )
 {{/if}}
@@ -1991,7 +1991,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatGoogleGenerativeAI:
     """Get authenticated Gemini model client."""
     return ChatGoogleGenerativeAI(
-        model="gemini-2.0-flash",
+        model="gemini-2.5-flash",
         api_key=_get_api_key()
     )
 {{/if}}
@@ -2155,10 +2155,11 @@ async def main(query):
     try:
         async with mcp_server as server:
             active_servers = [server] if server else []
-            # Currently defaults to GPT-4.1
-            # https://openai.github.io/openai-agents-python/models/
             agent = Agent(
-                name="{{ name }}", mcp_servers=active_servers, tools=[add_numbers]
+                name="{{ name }}",
+                model="gpt-4.1",
+                mcp_servers=active_servers,
+                tools=[add_numbers]
             )
             result = await Runner.run(agent, query)
             return result
@@ -2536,7 +2537,7 @@ def load_model() -> OpenAIModel:
     """Get authenticated OpenAI model client."""
     return OpenAIModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gpt-4o",
+        model_id="gpt-4.1",
     )
 {{/if}}
 {{#if (eq modelProvider "Gemini")}}
@@ -2574,7 +2575,7 @@ def load_model() -> GeminiModel:
     """Get authenticated Gemini model client."""
     return GeminiModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gemini-2.0-flash",
+        model_id="gemini-2.5-flash",
     )
 {{/if}}
"

src/assets/python/crewai/base/model/load.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ def load_model() -> LLM:
     # CrewAI requires OPENAI_API_KEY env var (ignores api_key parameter)
     os.environ["OPENAI_API_KEY"] = api_key
     return LLM(
-        model="openai/gpt-4o",
+        model="openai/gpt-4.1",
         api_key=api_key
     )
 {{/if}}
@@ -127,7 +127,7 @@ def load_model() -> LLM:
     # CrewAI requires GEMINI_API_KEY env var (ignores api_key parameter)
     os.environ["GEMINI_API_KEY"] = api_key
     return LLM(
-        model="gemini/gemini-2.0-flash",
+        model="gemini/gemini-2.5-flash",
         api_key=api_key
     )
 {{/if}}

src/assets/python/langchain_langgraph/base/model/load.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatOpenAI:
     """Get authenticated OpenAI model client."""
     return ChatOpenAI(
-        model="gpt-4o",
+        model="gpt-4.1",
         api_key=_get_api_key()
     )
 {{/if}}
@@ -117,7 +117,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatGoogleGenerativeAI:
     """Get authenticated Gemini model client."""
     return ChatGoogleGenerativeAI(
-        model="gemini-2.0-flash",
+        model="gemini-2.5-flash",
         api_key=_get_api_key()
     )
 {{/if}}

src/assets/python/openaiagents/base/main.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -26,10 +26,11 @@ async def main(query):
     try:
         async with mcp_server as server:
             active_servers = [server] if server else []
-            # Currently defaults to GPT-4.1
-            # https://openai.github.io/openai-agents-python/models/
             agent = Agent(
-                name="{{ name }}", mcp_servers=active_servers, tools=[add_numbers]
+                name="{{ name }}",
+                model="gpt-4.1",
+                mcp_servers=active_servers,
+                tools=[add_numbers]
             )
             result = await Runner.run(agent, query)
             return result

src/assets/python/strands/base/model/load.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ def load_model() -> OpenAIModel:
     """Get authenticated OpenAI model client."""
     return OpenAIModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gpt-4o",
+        model_id="gpt-4.1",
     )
 {{/if}}
 {{#if (eq modelProvider "Gemini")}}
@@ -118,6 +118,6 @@ def load_model() -> GeminiModel:
     """Get authenticated Gemini model client."""
     return GeminiModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gemini-2.0-flash",
+        model_id="gemini-2.5-flash",
     )
 {{/if}}

src/cli/commands/dev/command.tsx

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,15 +58,22 @@ export const registerDev = (program: Command) => {

       // Determine which agent/port to invoke
       let invokePort = port;
+      let targetAgent = invokeProject?.agents[0];
       if (opts.agent && invokeProject) {
         invokePort = getAgentPort(invokeProject, opts.agent, port);
+        targetAgent = invokeProject.agents.find(a => a.name === opts.agent);
       } else if (invokeProject && invokeProject.agents.length > 1 && !opts.agent) {
         const names = invokeProject.agents.map(a => a.name).join(', ');
         console.error(`Error: Multiple agents found. Use --agent to specify which one.`);
         console.error(`Available: ${names}`);
         process.exit(1);
       }

+      // Show model info if available
+      if (targetAgent?.modelProvider) {
+        console.log(`Provider: ${targetAgent.modelProvider}`);
+      }
+
       await invokeDevServer(invokePort, opts.invoke, opts.stream ?? false);
       return;
     }
@@ -129,8 +136,13 @@ export const registerDev = (program: Command) => {
       console.log(`Port ${basePort} in use, using ${actualPort}`);
     }

+    // Get provider info from agent config
+    const targetAgent = project.agents.find(a => a.name === config.agentName);
+    const providerInfo = targetAgent?.modelProvider ?? '(see agent code)';
+
     console.log(`Starting dev server...`);
     console.log(`Agent: ${config.agentName}`);
+    console.log(`Provider: ${providerInfo}`);
     console.log(`Server: http://localhost:${actualPort}/invocations`);
     console.log(`Log: ${logger.getRelativeLogPath()}`);
     console.log(`Press Ctrl+C to stop\n`);

src/cli/commands/invoke/action.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,9 @@ export async function handleInvoke(context: InvokeContext, options: InvokeOptions)
     return { success: false, error: 'No prompt provided. Usage: agentcore invoke "your prompt"' };
   }

+  // Get provider info if available
+  const providerInfo = agentSpec.modelProvider;
+
   // Create logger for this invocation
   const logger = new InvokeLogger({
     agentName: agentSpec.name,
@@ -113,6 +116,7 @@ export async function handleInvoke(context: InvokeContext, options: InvokeOptions)
       targetName: selectedTargetName,
       response: fullResponse,
       logFilePath: logger.logFilePath,
+      providerInfo,
     };
   } catch (err) {
     logger.logError(err, 'invoke streaming failed');
@@ -136,5 +140,6 @@ export async function handleInvoke(context: InvokeContext, options: InvokeOptions)
     targetName: selectedTargetName,
     response: response.content,
     logFilePath: logger.logFilePath,
+    providerInfo,
   };
 }

src/cli/commands/invoke/command.tsx

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,10 @@ async function handleInvokeCLI(options: InvokeOptions): Promise<void> {
         console.error(`\nLog: ${result.logFilePath}`);
       }
     } else {
-      // Non-streaming, non-json: print response or error
+      // Non-streaming, non-json: print provider info and response or error
+      if (result.providerInfo) {
+        console.error(`Provider: ${result.providerInfo}`);
+      }
       if (result.success && result.response) {
         console.log(result.response);
       } else if (!result.success && result.error) {

src/cli/commands/invoke/types.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,4 +14,6 @@ export interface InvokeResult {
   response?: string;
   error?: string;
   logFilePath?: string;
+  /** Model provider (e.g., "Anthropic", "Bedrock") */
+  providerInfo?: string;
 }

src/cli/operations/agent/generate/schema-mapper.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ export function mapGenerateConfigToAgent(config: GenerateConfig): AgentEnvSpec {
     codeLocation: codeLocation as DirectoryPath,
     runtimeVersion: DEFAULT_PYTHON_VERSION,
     networkMode: DEFAULT_NETWORK_MODE,
+    modelProvider: config.modelProvider,
   };
 }

0 commit comments

Comments (0)