Skip to content

Commit f44bb2c

Browse files
committed
refactor: extract model configuration into a separate file
The behavior is kept exactly the same, but the model used can now be changed easily by editing or replacing model.ts. This approach is very flexible, as the Vercel AI SDK supports many models out of the box, and switching between them is a difference of only a few lines of code.
1 parent aa83bf2 commit f44bb2c

2 files changed

Lines changed: 92 additions & 43 deletions

File tree

packages/cali/src/cli.ts

Lines changed: 31 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22

33
import 'dotenv/config'
44

5-
import { createOpenAI } from '@ai-sdk/openai'
65
import { confirm, outro, select, spinner, text } from '@clack/prompts'
76
import { CoreMessage, generateText } from 'ai'
87
import * as tools from 'cali-tools'
@@ -12,22 +11,20 @@ import { retro } from 'gradient-string'
1211
import { z } from 'zod'
1312

1413
import { reactNativePrompt } from './prompt.js'
15-
import { getApiKey } from './utils.js'
1614

1715
const MessageSchema = z.union([
18-
z.object({ type: z.literal('select'), content: z.string(), options: z.array(z.string()) }),
16+
z.object({
17+
type: z.literal('select'),
18+
content: z.string(),
19+
options: z.array(z.string()),
20+
}),
1921
z.object({ type: z.literal('question'), content: z.string() }),
2022
z.object({ type: z.literal('confirmation'), content: z.string() }),
21-
z.object({ type: z.literal('end') }),
23+
z.object({ type: z.literal('end'), content: z.string() }),
2224
])
2325

2426
console.clear()
2527

26-
process.on('uncaughtException', (error) => {
27-
console.error(chalk.red(error.message))
28-
console.log(chalk.gray(error.stack))
29-
})
30-
3128
console.log(
3229
retro(`
3330
██████╗ █████╗ ██╗ ██╗
@@ -49,37 +46,28 @@ console.log(
4946

5047
console.log()
5148

52-
const AI_MODEL = process.env.AI_MODEL || 'gpt-4o'
49+
import model from './model.js'
5350

54-
const openai = createOpenAI({
55-
apiKey: await getApiKey('OpenAI', 'OPENAI_API_KEY'),
51+
const question = await text({
52+
message: 'What do you want to do today?',
53+
placeholder: 'e.g. "Build the app" or "See available simulators"',
5654
})
5755

58-
async function startSession(): Promise<CoreMessage[]> {
59-
const question = await text({
60-
message: 'What do you want to do today?',
61-
placeholder: 'e.g. "Build the app" or "See available simulators"',
62-
validate: (value) => (value.length > 0 ? undefined : 'Please provide a valid answer.'),
63-
})
64-
65-
if (typeof question === 'symbol') {
66-
outro(chalk.gray('Bye!'))
67-
process.exit(0)
68-
}
69-
70-
return [
71-
{
72-
role: 'system',
73-
content: 'What do you want to do today?',
74-
},
75-
{
76-
role: 'user',
77-
content: question,
78-
},
79-
]
56+
if (typeof question === 'symbol') {
57+
outro(chalk.gray('Bye!'))
58+
process.exit(0)
8059
}
8160

82-
let messages = await startSession()
61+
const messages: CoreMessage[] = [
62+
{
63+
role: 'system',
64+
content: 'What do you want to do today?',
65+
},
66+
{
67+
role: 'user',
68+
content: question,
69+
},
70+
]
8371

8472
const s = spinner()
8573

@@ -88,7 +76,7 @@ while (true) {
8876
s.start(chalk.gray('Thinking...'))
8977

9078
const response = await generateText({
91-
model: openai(AI_MODEL),
79+
model,
9280
system: reactNativePrompt,
9381
tools,
9482
maxSteps: 10,
@@ -158,13 +146,13 @@ while (true) {
158146
case 'select':
159147
return select({
160148
message: data.content,
161-
options: data.options.map((option) => ({ value: option, label: option })),
149+
options: data.options.map((option) => ({
150+
value: option,
151+
label: option,
152+
})),
162153
})
163154
case 'question':
164-
return text({
165-
message: data.content,
166-
validate: (value) => (value.length > 0 ? undefined : 'Please provide a valid answer.'),
167-
})
155+
return text({ message: data.content })
168156
case 'confirmation': {
169157
return confirm({ message: data.content }).then((answer) => {
170158
return answer ? 'yes' : 'no'
@@ -174,8 +162,8 @@ while (true) {
174162
})()
175163

176164
if (typeof answer !== 'string') {
177-
messages = await startSession()
178-
continue
165+
outro(chalk.gray('Bye!'))
166+
break
179167
}
180168

181169
messages.push({

packages/cali/src/model.ts

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
import 'dotenv/config'
2+
3+
import { execSync } from 'node:child_process'
4+
5+
import { createOpenAI } from '@ai-sdk/openai'
6+
import { confirm, text } from '@clack/prompts'
7+
import chalk from 'chalk'
8+
import dedent from 'dedent'
9+
10+
const OPENAI_API_KEY =
11+
process.env.OPENAI_API_KEY ||
12+
(await (async () => {
13+
let apiKey: string | symbol
14+
do {
15+
apiKey = await text({
16+
message: dedent`
17+
${chalk.bold('Please provide your OpenAI API key.')}
18+
19+
To skip this message, set ${chalk.bold('OPENAI_API_KEY')} env variable, and run again.
20+
21+
You can do it in three ways:
22+
- by creating an ${chalk.bold('.env.local')} file (make sure to ${chalk.bold('.gitignore')} it)
23+
${chalk.gray(`\`\`\`
24+
OPENAI_API_KEY=<your-key>
25+
\`\`\`
26+
`)}
27+
- by passing it inline:
28+
${chalk.gray(`\`\`\`
29+
OPENAI_API_KEY=<your-key> npx cali
30+
\`\`\`
31+
`)}
32+
- by setting it as an env variable in your shell (e.g. in ~/.zshrc or ~/.bashrc):
33+
${chalk.gray(`\`\`\`
34+
export OPENAI_API_KEY=<your-key>
35+
\`\`\`
36+
`)},
37+
`,
38+
})
39+
} while (typeof apiKey !== 'string')
40+
41+
const save = await confirm({
42+
message: 'Do you want to save it for future runs in `.env.local`?',
43+
})
44+
45+
if (save) {
46+
execSync(`echo "OPENAI_API_KEY=${apiKey}" >> .env.local`)
47+
execSync(`echo ".env.local" >> .gitignore`)
48+
}
49+
50+
return apiKey
51+
})())
52+
53+
const AI_MODEL = process.env.AI_MODEL || 'gpt-4o'
54+
55+
const openai = createOpenAI({
56+
apiKey: OPENAI_API_KEY,
57+
})
58+
59+
const model = openai(AI_MODEL)
60+
61+
export default model

0 commit comments

Comments (0)