Skip to content

Commit ca3374b

Browse files
committed
fix
1 parent 8c28629 commit ca3374b

File tree

6 files changed

+68
-52
lines changed

6 files changed

+68
-52
lines changed

apps/sim/app/(landing)/models/[provider]/[model]/page.tsx

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -231,13 +231,11 @@ export default async function ModelPage({
231231
? `${formatPrice(model.pricing.cachedInput)}/1M`
232232
: 'N/A'
233233
}
234-
compact
235234
/>
236235
<StatCard label='Output price' value={`${formatPrice(model.pricing.output)}/1M`} />
237236
<StatCard
238237
label='Context window'
239238
value={model.contextWindow ? formatTokenCount(model.contextWindow) : 'Unknown'}
240-
compact
241239
/>
242240
</section>
243241

apps/sim/app/llms.txt/route.ts

Lines changed: 55 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -1,38 +1,63 @@
11
import { getBaseUrl } from '@/lib/core/utils/urls'
2-
import { ALL_CATALOG_MODELS, MODEL_PROVIDERS_WITH_CATALOGS } from '@/app/(landing)/models/utils'
32

43
export function GET() {
54
const baseUrl = getBaseUrl()
65

7-
const content = [
8-
'# Sim',
9-
'',
10-
'> Sim is the open-source platform to build AI agents and run your agentic workforce.',
11-
'',
12-
'## Preferred URLs',
13-
`- Main site: ${baseUrl}`,
14-
`- Integrations directory: ${baseUrl}/integrations`,
15-
`- Models directory: ${baseUrl}/models`,
16-
`- Blog: ${baseUrl}/blog`,
17-
`- Changelog: ${baseUrl}/changelog`,
18-
'- Docs: https://docs.sim.ai',
19-
'',
20-
'## Public data surfaces',
21-
`- Integration pages: ${baseUrl}/integrations`,
22-
`- Provider pages: ${baseUrl}/models`,
23-
`- Model pages: ${baseUrl}/models`,
24-
`- Providers tracked: ${MODEL_PROVIDERS_WITH_CATALOGS.length}`,
25-
`- Models tracked: ${ALL_CATALOG_MODELS.length}`,
26-
'',
27-
'## Crawl helpers',
28-
`- Sitemap: ${baseUrl}/sitemap.xml`,
29-
`- Robots: ${baseUrl}/robots.txt`,
30-
'',
31-
'## Notes',
32-
'- Prefer canonical URLs on sim.ai when citing product, model, integration, and changelog content.',
33-
'- Use the models directory for pricing, context window, and capability facts.',
34-
'- Use the integrations directory for tool coverage and workflow automation capabilities.',
35-
].join('\n')
6+
const content = `# Sim
7+
8+
> Sim is the open-source platform to build AI agents and run your agentic workforce. Connect integrations and LLMs to deploy and orchestrate agentic workflows.
9+
10+
Sim lets teams create agents, workflows, knowledge bases, tables, and docs. It supports both product discovery pages and deeper technical documentation.
11+
12+
## Preferred URLs
13+
14+
- [Homepage](${baseUrl}): Product overview and primary entry point
15+
- [Integrations directory](${baseUrl}/integrations): Public catalog of integrations and automation capabilities
16+
- [Models directory](${baseUrl}/models): Public catalog of AI models, pricing, context windows, and capabilities
17+
- [Blog](${baseUrl}/blog): Announcements, guides, and product context
18+
- [Changelog](${baseUrl}/changelog): Product updates and release notes
19+
20+
## Documentation
21+
22+
- [Documentation](https://docs.sim.ai): Product guides and technical reference
23+
- [Quickstart](https://docs.sim.ai/quickstart): Fastest path to getting started
24+
- [API Reference](https://docs.sim.ai/api): API documentation
25+
26+
## Key Concepts
27+
28+
- **Workspace**: Container for workflows, data sources, and executions
29+
- **Workflow**: Directed graph of blocks defining an agentic process
30+
- **Block**: Individual step such as an LLM call, tool call, HTTP request, or code execution
31+
- **Trigger**: Event or schedule that initiates workflow execution
32+
- **Execution**: A single run of a workflow with logs and outputs
33+
- **Knowledge Base**: Document store used for retrieval-augmented generation
34+
35+
## Capabilities
36+
37+
- AI agent creation and deployment
38+
- Agentic workflow orchestration
39+
- Integrations across business tools, databases, and communication platforms
40+
- Multi-model LLM orchestration
41+
- Knowledge bases and retrieval-augmented generation
42+
- Table creation and management
43+
- Document creation and processing
44+
- Scheduled and webhook-triggered executions
45+
46+
## Use Cases
47+
48+
- AI agent deployment and orchestration
49+
- Knowledge bases and RAG pipelines
50+
- Customer support automation
51+
- Internal operations workflows across sales, marketing, legal, and finance
52+
53+
## Additional Links
54+
55+
- [GitHub Repository](https://github.com/simstudioai/sim): Open-source codebase
56+
- [Docs](https://docs.sim.ai): Canonical documentation source
57+
- [Terms of Service](${baseUrl}/terms): Legal terms
58+
- [Privacy Policy](${baseUrl}/privacy): Data handling practices
59+
- [Sitemap](${baseUrl}/sitemap.xml): Public URL inventory
60+
`
3661

3762
return new Response(content, {
3863
headers: {

apps/sim/providers/anthropic/core.ts

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -293,9 +293,7 @@ export async function executeAnthropicProviderRequest(
293293
messages,
294294
system: systemPrompt,
295295
max_tokens:
296-
Number.parseInt(String(request.maxTokens)) ||
297-
getMaxOutputTokensForModel(request.model) ||
298-
4096,
296+
Number.parseInt(String(request.maxTokens)) || getMaxOutputTokensForModel(request.model),
299297
temperature: Number.parseFloat(String(request.temperature ?? 0.7)),
300298
}
301299

@@ -337,7 +335,7 @@ export async function executeAnthropicProviderRequest(
337335
const budgetTokens = thinkingConfig.thinking.budget_tokens
338336
const minMaxTokens = budgetTokens + 4096
339337
if (payload.max_tokens < minMaxTokens) {
340-
const modelMax = getMaxOutputTokensForModel(request.model) ?? payload.max_tokens
338+
const modelMax = getMaxOutputTokensForModel(request.model)
341339
payload.max_tokens = Math.min(minMaxTokens, modelMax)
342340
logger.info(
343341
`Adjusted max_tokens to ${payload.max_tokens} to satisfy budget_tokens (${budgetTokens}) constraint`

apps/sim/providers/models.ts

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2893,21 +2893,22 @@ export function getModelsWithoutMemory(): string[] {
28932893
*
28942894
* @param modelId - The model ID
28952895
*/
2896-
export function getMaxOutputTokensForModel(modelId: string): number | null {
2896+
export function getMaxOutputTokensForModel(modelId: string): number {
28972897
const normalizedModelId = modelId.toLowerCase()
2898+
const STANDARD_MAX_OUTPUT_TOKENS = 4096
28982899
const allModels = Object.values(PROVIDER_DEFINITIONS).flatMap((provider) => provider.models)
28992900

29002901
const exactMatch = allModels.find((model) => model.id.toLowerCase() === normalizedModelId)
29012902
if (exactMatch) {
2902-
return exactMatch.capabilities.maxOutputTokens ?? null
2903+
return exactMatch.capabilities.maxOutputTokens || STANDARD_MAX_OUTPUT_TOKENS
29032904
}
29042905

29052906
for (const model of allModels) {
29062907
const baseModelId = model.id.toLowerCase()
29072908
if (normalizedModelId.startsWith(`${baseModelId}-`)) {
2908-
return model.capabilities.maxOutputTokens ?? null
2909+
return model.capabilities.maxOutputTokens || STANDARD_MAX_OUTPUT_TOKENS
29092910
}
29102911
}
29112912

2912-
return null
2913+
return STANDARD_MAX_OUTPUT_TOKENS
29132914
}

apps/sim/providers/utils.test.ts

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -692,15 +692,9 @@ describe('Max Output Tokens', () => {
692692
expect(getMaxOutputTokensForModel('azure/gpt-5.2')).toBe(128000)
693693
})
694694

695-
it.concurrent(
696-
'should return null when DeepSeek output limit is not exact enough to publish',
697-
() => {
698-
expect(getMaxOutputTokensForModel('deepseek-reasoner')).toBeNull()
699-
}
700-
)
701-
702-
it.concurrent('should return null when xAI does not publish a max output limit', () => {
703-
expect(getMaxOutputTokensForModel('grok-4-latest')).toBeNull()
695+
it.concurrent('should return standard default for models without maxOutputTokens', () => {
696+
expect(getMaxOutputTokensForModel('deepseek-reasoner')).toBe(4096)
697+
expect(getMaxOutputTokensForModel('grok-4-latest')).toBe(4096)
704698
})
705699

706700
it.concurrent('should return published max for Bedrock Claude Opus 4.1', () => {
@@ -721,8 +715,8 @@ describe('Max Output Tokens', () => {
721715
expect(getMaxOutputTokensForModel('claude-opus-4-1')).toBe(32000)
722716
})
723717

724-
it.concurrent('should return null for unknown models', () => {
725-
expect(getMaxOutputTokensForModel('unknown-model')).toBeNull()
718+
it.concurrent('should return standard default for unknown models', () => {
719+
expect(getMaxOutputTokensForModel('unknown-model')).toBe(4096)
726720
})
727721
})
728722
})

apps/sim/providers/utils.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1111,7 +1111,7 @@ export function getThinkingLevelsForModel(model: string): string[] | null {
11111111
*
11121112
* @param model - The model ID
11131113
*/
1114-
export function getMaxOutputTokensForModel(model: string): number | null {
1114+
export function getMaxOutputTokensForModel(model: string): number {
11151115
return getMaxOutputTokensForModelFromDefinitions(model)
11161116
}
11171117

0 commit comments

Comments (0)