diff --git a/examples/ts-code-mode-web/package.json b/examples/ts-code-mode-web/package.json index a302b0fbc..4dd8d2bb9 100644 --- a/examples/ts-code-mode-web/package.json +++ b/examples/ts-code-mode-web/package.json @@ -23,6 +23,7 @@ "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-react": "workspace:*", + "@tanstack/ai-zai": "workspace:*", "@tanstack/nitro-v2-vite-plugin": "^1.154.7", "@tanstack/react-router": "^1.158.4", "@tanstack/react-start": "^1.159.0", diff --git a/examples/ts-code-mode-web/src/routes/_banking-demo/api.banking-demo.ts b/examples/ts-code-mode-web/src/routes/_banking-demo/api.banking-demo.ts index d4acec241..a476fa240 100644 --- a/examples/ts-code-mode-web/src/routes/_banking-demo/api.banking-demo.ts +++ b/examples/ts-code-mode-web/src/routes/_banking-demo/api.banking-demo.ts @@ -4,6 +4,7 @@ import { createCodeMode } from '@tanstack/ai-code-mode' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import type { AnyTextAdapter } from '@tanstack/ai' import { allTools } from '@/lib/tools' @@ -11,7 +12,7 @@ import { CODE_MODE_SYSTEM_PROMPT } from '@/lib/prompts' import { reportTools } from '@/lib/reports/tools' import { createReportBindings } from '@/lib/reports/create-report-bindings' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' function getAdapter(provider: Provider, model?: string): AnyTextAdapter { switch (provider) { @@ -19,6 +20,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5') diff --git a/examples/ts-code-mode-web/src/routes/_banking-demo/banking-demo.tsx b/examples/ts-code-mode-web/src/routes/_banking-demo/banking-demo.tsx index 4dbdff18b..dd8cc9f6f 100644 --- a/examples/ts-code-mode-web/src/routes/_banking-demo/banking-demo.tsx +++ b/examples/ts-code-mode-web/src/routes/_banking-demo/banking-demo.tsx @@ -31,7 +31,7 @@ export const Route = createFileRoute('/_banking-demo/banking-demo' as any)({ component: BankingDemoPage, }) -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' interface ModelOption { provider: Provider @@ -254,6 +254,8 @@ const MODEL_OPTIONS: Array = [ }, { provider: 'openai', model: 'gpt-4o', label: 'GPT-4o' }, { provider: 'gemini', model: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI GLM-4.7' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI GLM-5 Turbo' }, ] interface ToastItem { diff --git a/examples/ts-code-mode-web/src/routes/_database-demo/api.database-demo.ts b/examples/ts-code-mode-web/src/routes/_database-demo/api.database-demo.ts index acf79386c..b9aa10cc8 100644 --- a/examples/ts-code-mode-web/src/routes/_database-demo/api.database-demo.ts +++ b/examples/ts-code-mode-web/src/routes/_database-demo/api.database-demo.ts @@ -13,12 +13,13 @@ import { createFileSkillStorage } from '@tanstack/ai-code-mode-skills/storage' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from 
'@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import type { AnyTextAdapter, ServerTool, StreamChunk } from '@tanstack/ai' import type { IsolateDriver } from '@tanstack/ai-code-mode' import { databaseTools, getSchemaInfoTool } from '@/lib/tools/database-tools' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' function getAdapter(provider: Provider, model?: string): AnyTextAdapter { switch (provider) { @@ -26,6 +27,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5') diff --git a/examples/ts-code-mode-web/src/routes/_database-demo/database-demo.tsx b/examples/ts-code-mode-web/src/routes/_database-demo/database-demo.tsx index ca2a204af..aa03cdfb4 100644 --- a/examples/ts-code-mode-web/src/routes/_database-demo/database-demo.tsx +++ b/examples/ts-code-mode-web/src/routes/_database-demo/database-demo.tsx @@ -28,7 +28,7 @@ export const Route = createFileRoute('/_database-demo/database-demo' as any)({ component: DatabaseDemoPage, }) -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' interface ModelOption { provider: Provider @@ -48,11 +48,9 @@ const MODEL_OPTIONS: Array = [ label: 'Claude Haiku 4', }, { provider: 'openai', model: 'gpt-4o', label: 'GPT-4o' }, - { - provider: 'gemini', - model: 'gemini-2.5-flash', - label: 'Gemini 2.5 Flash', - }, + { provider: 'gemini', model: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI GLM-4.7' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI GLM-5 Turbo' }, ] interface SkillWithCode { diff --git a/examples/ts-code-mode-web/src/routes/_home/api.product-codemode.ts b/examples/ts-code-mode-web/src/routes/_home/api.product-codemode.ts index a9dd4545b..7fef15b79 100644 --- a/examples/ts-code-mode-web/src/routes/_home/api.product-codemode.ts +++ b/examples/ts-code-mode-web/src/routes/_home/api.product-codemode.ts @@ -6,6 +6,7 @@ import { createCodeMode } from '@tanstack/ai-code-mode' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import { createAlwaysTrustedStrategy, createSkillManagementTools, @@ -17,7 +18,7 @@ import type { AnyTextAdapter, ServerTool, StreamChunk } from '@tanstack/ai' import type { IsolateDriver } from '@tanstack/ai-code-mode' import { productTools } from '@/lib/tools/product-tools' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' function getAdapter(provider: Provider, model?: string): AnyTextAdapter { switch (provider) { @@ -25,6 +26,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5') diff --git a/examples/ts-code-mode-web/src/routes/_home/api.product-regular.ts 
b/examples/ts-code-mode-web/src/routes/_home/api.product-regular.ts index da6a0d2ad..f74c830e2 100644 --- a/examples/ts-code-mode-web/src/routes/_home/api.product-regular.ts +++ b/examples/ts-code-mode-web/src/routes/_home/api.product-regular.ts @@ -3,10 +3,11 @@ import { chat, maxIterations, toServerSentEventsStream } from '@tanstack/ai' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import type { AnyTextAdapter, StreamChunk } from '@tanstack/ai' import { productTools } from '@/lib/tools/product-tools' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' function getAdapter(provider: Provider, model?: string): AnyTextAdapter { switch (provider) { @@ -14,6 +15,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5') diff --git a/examples/ts-code-mode-web/src/routes/_home/index.tsx b/examples/ts-code-mode-web/src/routes/_home/index.tsx index 319824e90..e693c7329 100644 --- a/examples/ts-code-mode-web/src/routes/_home/index.tsx +++ b/examples/ts-code-mode-web/src/routes/_home/index.tsx @@ -23,7 +23,7 @@ export const Route = createFileRoute('/_home/')({ component: ProductDemoPage, }) -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' interface ModelOption { provider: Provider @@ -58,6 +58,8 @@ const MODEL_OPTIONS: Array = [ model: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash', }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI GLM-4.7' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI GLM-5 Turbo' }, ] const PROMPT_SUGGESTIONS = [ diff --git a/examples/ts-code-mode-web/src/routes/_npm-github-chat/api.codemode.ts b/examples/ts-code-mode-web/src/routes/_npm-github-chat/api.codemode.ts index 6b3b4c162..a4601d8e8 100644 --- a/examples/ts-code-mode-web/src/routes/_npm-github-chat/api.codemode.ts +++ b/examples/ts-code-mode-web/src/routes/_npm-github-chat/api.codemode.ts @@ -4,12 +4,13 @@ import { createCodeMode } from '@tanstack/ai-code-mode' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import type { AnyTextAdapter, StreamChunk } from '@tanstack/ai' import { allTools } from '@/lib/tools' import { CODE_MODE_SYSTEM_PROMPT } from '@/lib/prompts' import { exportConversationToPdfTool } from '@/lib/tools/export-pdf-tool' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' function getAdapter(provider: Provider, model?: string): AnyTextAdapter { switch (provider) { @@ -17,6 +18,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5') diff --git 
a/examples/ts-code-mode-web/src/routes/_npm-github-chat/npm-github-chat.tsx b/examples/ts-code-mode-web/src/routes/_npm-github-chat/npm-github-chat.tsx index 2d8e213ab..cd1359c20 100644 --- a/examples/ts-code-mode-web/src/routes/_npm-github-chat/npm-github-chat.tsx +++ b/examples/ts-code-mode-web/src/routes/_npm-github-chat/npm-github-chat.tsx @@ -32,7 +32,7 @@ export const Route = createFileRoute('/_npm-github-chat/npm-github-chat')({ component: CodeModePage, }) -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' interface ModelOption { provider: Provider @@ -53,6 +53,8 @@ const MODEL_OPTIONS: Array = [ }, { provider: 'openai', model: 'gpt-4o', label: 'GPT-4o' }, { provider: 'gemini', model: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI GLM-4.7' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI GLM-5 Turbo' }, ] const PROMPT_SUGGESTIONS = [ diff --git a/examples/ts-code-mode-web/src/routes/_reporting/api.reports.ts b/examples/ts-code-mode-web/src/routes/_reporting/api.reports.ts index 02cf73cb1..1004cba93 100644 --- a/examples/ts-code-mode-web/src/routes/_reporting/api.reports.ts +++ b/examples/ts-code-mode-web/src/routes/_reporting/api.reports.ts @@ -4,6 +4,7 @@ import { createCodeMode } from '@tanstack/ai-code-mode' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import type { AnyTextAdapter } from '@tanstack/ai' import { allTools } from '@/lib/tools' @@ -11,7 +12,7 @@ import { CODE_MODE_SYSTEM_PROMPT, REPORTS_SYSTEM_PROMPT } from '@/lib/prompts' import { reportTools } from '@/lib/reports/tools' import { createReportBindings } from '@/lib/reports/create-report-bindings' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' function getAdapter(provider: Provider, model?: string): AnyTextAdapter { switch (provider) { @@ -19,6 +20,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5') diff --git a/examples/ts-code-mode-web/src/routes/_reporting/reporting-agent.tsx b/examples/ts-code-mode-web/src/routes/_reporting/reporting-agent.tsx index 591259ee8..3b7928028 100644 --- a/examples/ts-code-mode-web/src/routes/_reporting/reporting-agent.tsx +++ b/examples/ts-code-mode-web/src/routes/_reporting/reporting-agent.tsx @@ -31,7 +31,7 @@ export const Route = createFileRoute('/_reporting/reporting-agent')({ component: ReportingAgentPage, }) -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' interface ModelOption { provider: Provider diff --git a/examples/ts-code-mode-web/src/routes/_structured-output/api.structured-output.ts b/examples/ts-code-mode-web/src/routes/_structured-output/api.structured-output.ts index 98ad879bd..5b77ba8b4 100644 --- a/examples/ts-code-mode-web/src/routes/_structured-output/api.structured-output.ts +++ b/examples/ts-code-mode-web/src/routes/_structured-output/api.structured-output.ts @@ -7,6 +7,7 @@ import { createFileSkillStorage } from 
'@tanstack/ai-code-mode-skills/storage' import { anthropicText } from '@tanstack/ai-anthropic' import { openaiText } from '@tanstack/ai-openai' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import { z } from 'zod' import type { AnyTextAdapter } from '@tanstack/ai' @@ -15,7 +16,7 @@ import type { IsolateDriver } from '@tanstack/ai-code-mode' import { cityTools } from '@/lib/tools/city-tools' import { structuredOutput } from '@/lib/structured-output' -type Provider = 'anthropic' | 'openai' | 'gemini' +type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai' const TravelReportSchema = z.object({ title: z.string().describe('Short title for the report'), @@ -45,6 +46,8 @@ function getAdapter(provider: Provider, model?: string): AnyTextAdapter { return openaiText((model || 'gpt-4o') as 'gpt-4o') case 'gemini': return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash') + case 'zai': + return zaiText((model || 'glm-4.7') as 'glm-4.7') case 'anthropic': default: return anthropicText( diff --git a/examples/ts-group-chat/chat-server/claude-service.ts b/examples/ts-group-chat/chat-server/claude-service.ts index d377c9bf4..8dbc14a6e 100644 --- a/examples/ts-group-chat/chat-server/claude-service.ts +++ b/examples/ts-group-chat/chat-server/claude-service.ts @@ -1,5 +1,6 @@ // Claude AI service for handling queued AI responses import { anthropicText } from '@tanstack/ai-anthropic' +import { zaiText } from '@tanstack/ai-zai' import { chat, toolDefinition } from '@tanstack/ai' import type { JSONSchema, ModelMessage, StreamChunk } from '@tanstack/ai' diff --git a/examples/ts-react-chat/.env.example b/examples/ts-react-chat/.env.example index 2bdb43f49..d051a4719 100644 --- a/examples/ts-react-chat/.env.example +++ b/examples/ts-react-chat/.env.example @@ -2,6 +2,10 @@ # Get yours at: https://platform.openai.com/api-keys OPENAI_API_KEY=sk-... +# Z.AI API Key +# Get yours at: https://docs.z.ai/ +ZAI_API_KEY= + # ElevenLabs API Key (for realtime voice) # Get yours at: https://elevenlabs.io/app/settings/api-keys ELEVENLABS_API_KEY=xi-... 
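For reference, once the `zai` case lands, the provider switch that each of these route files repeats resolves to the shape below. This is a consolidated sketch of the code already in the hunks above, not a new API; only the surrounding imports are added to make it self-contained.

```ts
import { anthropicText } from '@tanstack/ai-anthropic'
import { openaiText } from '@tanstack/ai-openai'
import { geminiText } from '@tanstack/ai-gemini'
import { zaiText } from '@tanstack/ai-zai'
import type { AnyTextAdapter } from '@tanstack/ai'

type Provider = 'anthropic' | 'openai' | 'gemini' | 'zai'

// Mirrors the getAdapter helper duplicated across the example routes.
function getAdapter(provider: Provider, model?: string): AnyTextAdapter {
  switch (provider) {
    case 'openai':
      return openaiText((model || 'gpt-4o') as 'gpt-4o')
    case 'gemini':
      return geminiText((model || 'gemini-2.5-flash') as 'gemini-2.5-flash')
    case 'zai':
      // New in this PR: Z.AI defaults to glm-4.7 when no model is sent.
      return zaiText((model || 'glm-4.7') as 'glm-4.7')
    case 'anthropic':
    default:
      return anthropicText((model || 'claude-haiku-4-5') as 'claude-haiku-4-5')
  }
}
```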
diff --git a/examples/ts-react-chat/package.json b/examples/ts-react-chat/package.json index 60cfe6836..b0708b096 100644 --- a/examples/ts-react-chat/package.json +++ b/examples/ts-react-chat/package.json @@ -23,6 +23,7 @@ "@tanstack/ai-openrouter": "workspace:*", "@tanstack/ai-react": "workspace:*", "@tanstack/ai-react-ui": "workspace:*", + "@tanstack/ai-zai": "workspace:*", "@tanstack/nitro-v2-vite-plugin": "^1.154.7", "@tanstack/react-devtools": "^0.9.10", "@tanstack/react-router": "^1.158.4", diff --git a/examples/ts-react-chat/src/lib/model-selection.ts b/examples/ts-react-chat/src/lib/model-selection.ts index 95c122cd4..a3db44aa4 100644 --- a/examples/ts-react-chat/src/lib/model-selection.ts +++ b/examples/ts-react-chat/src/lib/model-selection.ts @@ -6,6 +6,7 @@ export type Provider = | 'grok' | 'groq' | 'openrouter' + | 'zai' export interface ModelOption { provider: Provider @@ -135,6 +136,15 @@ export const MODEL_OPTIONS: Array = [ model: 'grok-3-mini', label: 'Grok - Grok 3 Mini', }, + + // Z.AI (GLM) + { provider: 'zai', model: 'glm-5.1', label: 'Z.AI - GLM-5.1' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI - GLM-5 Turbo' }, + { provider: 'zai', model: 'glm-5', label: 'Z.AI - GLM-5' }, + { provider: 'zai', model: 'glm-5v-turbo', label: 'Z.AI - GLM-5V Turbo' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI - GLM-4.7' }, + { provider: 'zai', model: 'glm-4.6v', label: 'Z.AI - GLM-4.6V' }, + { provider: 'zai', model: 'glm-4.6', label: 'Z.AI - GLM-4.6' }, ] export const DEFAULT_MODEL_OPTION = MODEL_OPTIONS[0] diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index f571fd9c7..959f26f25 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -11,6 +11,7 @@ import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' import { openRouterText } from '@tanstack/ai-openrouter' import { grokText } from '@tanstack/ai-grok' +import { zaiText } from '@tanstack/ai-zai' import { groqText } from '@tanstack/ai-groq' import type { AnyTextAdapter, ChatMiddleware } from '@tanstack/ai' import { @@ -32,6 +33,7 @@ type Provider = | 'grok' | 'groq' | 'openrouter' + | 'zai' const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. 
@@ -185,6 +187,13 @@ export const Route = createFileRoute('/api/tanchat')({ adapter: openaiText((model || 'gpt-4o') as 'gpt-4o'), modelOptions: {}, }), + zai: () => + createChatOptions({ + adapter: zaiText((model || 'glm-4.7') as 'glm-4.7', { + coding: true, + }), + modelOptions: {}, + }), } try { diff --git a/examples/ts-react-search/package.json b/examples/ts-react-search/package.json index 4e6cbb8e1..0b2a57328 100644 --- a/examples/ts-react-search/package.json +++ b/examples/ts-react-search/package.json @@ -12,7 +12,7 @@ "@radix-ui/react-slot": "^1.2.4", "@tailwindcss/vite": "^4.1.18", "@tanstack/ai": "workspace:*", - "@tanstack/ai-groq": "workspace:*", + "@tanstack/ai-zai": "workspace:*", "@tanstack/ai-react": "workspace:*", "@tanstack/query-db-collection": "^1.0.6", "@tanstack/react-db": "^0.1.55", diff --git a/examples/ts-react-search/src/routes/api/search.ts b/examples/ts-react-search/src/routes/api/search.ts index bbafc9bb9..d1eaa2cac 100644 --- a/examples/ts-react-search/src/routes/api/search.ts +++ b/examples/ts-react-search/src/routes/api/search.ts @@ -1,6 +1,6 @@ import { createFileRoute } from '@tanstack/react-router' import { chat } from '@tanstack/ai' -import { groqText } from '@tanstack/ai-groq' +import { zaiText } from '@tanstack/ai-zai' import z from 'zod' import { ORDER_STATUS_MAP, @@ -11,7 +11,6 @@ import { DISPUTE_STATUS_MAP, } from '@/features/disputes/constants' import { SETTLEMENT_CURRENCY_MAP } from '@/features/settlements/constants' -import toGroqCompatibleSchema from '@/utils/toGroqCompatibleSchema' const ORDER_STATUS_KEYS = Object.keys(ORDER_STATUS_MAP) const PAYMENT_METHOD_KEYS = Object.keys(PAYMENT_METHOD_MAP) @@ -51,10 +50,6 @@ const outputSchema = z.object({ ]), }) -// Only needed for Groq, since it doesn't support additionalProperties:false on anyOf (union types) -// Otherwise just use the outputSchema directly -const groqOutputSchema = toGroqCompatibleSchema(z.toJSONSchema(outputSchema)) - const SYSTEM_PROMPT = ` JSON API: Convert prompts to structured data. No prose, fences, or comments. @@ -75,10 +70,10 @@ export const Route = createFileRoute('/api/search')({ server: { handlers: { POST: async ({ request }) => { - if (!process.env.GROQ_API_KEY) { + if (!process.env.ZAI_API_KEY) { return new Response( JSON.stringify({ - error: 'GROQ_API_KEY not configured', + error: 'ZAI_API_KEY not configured', }), { status: 500, @@ -91,10 +86,10 @@ export const Route = createFileRoute('/api/search')({ try { const response = await chat({ - adapter: groqText('openai/gpt-oss-20b'), + adapter: zaiText('glm-5-turbo', { coding: true }), messages: [{ role: 'user', content }], systemPrompts: [SYSTEM_PROMPT], - outputSchema: groqOutputSchema, + outputSchema, }) return new Response(JSON.stringify(response), { diff --git a/examples/ts-solid-chat/.env.example b/examples/ts-solid-chat/.env.example index 613cb664b..b8ea691f2 100644 --- a/examples/ts-solid-chat/.env.example +++ b/examples/ts-solid-chat/.env.example @@ -1,3 +1,18 @@ # OpenAI API Key # Get yours at: https://platform.openai.com/api-keys -OPENAI_API_KEY=sk-... \ No newline at end of file +OPENAI_API_KEY=sk-... + +# Anthropic API Key +# Get yours at: https://console.anthropic.com/ +ANTHROPIC_API_KEY=sk-ant-... + +# Gemini API Key +# Get yours at: https://aistudio.google.com/ +GEMINI_API_KEY=... + +# Z.AI API Key +# Get yours at: https://open.bigmodel.cn/ +ZAI_API_KEY=... 
+ +# Ollama (local, no API key needed) +# Make sure Ollama is running: ollama serve \ No newline at end of file diff --git a/examples/ts-solid-chat/package.json b/examples/ts-solid-chat/package.json index a54d44197..24274e463 100644 --- a/examples/ts-solid-chat/package.json +++ b/examples/ts-solid-chat/package.json @@ -15,10 +15,14 @@ "@tanstack/ai-client": "workspace:*", "@tanstack/ai-devtools-core": "workspace:*", "@tanstack/ai-gemini": "workspace:*", + "@tanstack/ai-grok": "workspace:*", + "@tanstack/ai-groq": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", + "@tanstack/ai-openrouter": "workspace:*", "@tanstack/ai-solid": "workspace:*", "@tanstack/ai-solid-ui": "workspace:*", + "@tanstack/ai-zai": "workspace:*", "@tanstack/nitro-v2-vite-plugin": "^1.154.7", "@tanstack/router-plugin": "^1.158.4", "@tanstack/solid-ai-devtools": "workspace:*", diff --git a/examples/ts-solid-chat/src/lib/model-selection.ts b/examples/ts-solid-chat/src/lib/model-selection.ts new file mode 100644 index 000000000..2a3c0d63d --- /dev/null +++ b/examples/ts-solid-chat/src/lib/model-selection.ts @@ -0,0 +1,180 @@ +export type Provider = + | 'openai' + | 'anthropic' + | 'gemini' + | 'ollama' + | 'grok' + | 'groq' + | 'openrouter' + | 'zai' + +export interface ModelOption { + provider: Provider + model: string + label: string +} + +export const MODEL_OPTIONS: Array = [ + { provider: 'openai', model: 'gpt-4o', label: 'OpenAI - GPT-4o' }, + { provider: 'openai', model: 'gpt-4o-mini', label: 'OpenAI - GPT-4o Mini' }, + { provider: 'openai', model: 'gpt-5', label: 'OpenAI - GPT-5' }, + + { + provider: 'anthropic', + model: 'claude-sonnet-4-6', + label: 'Anthropic - Claude Sonnet 4.6', + }, + { + provider: 'anthropic', + model: 'claude-sonnet-4-5-20250929', + label: 'Anthropic - Claude Sonnet 4.5', + }, + { + provider: 'anthropic', + model: 'claude-opus-4-5-20251101', + label: 'Anthropic - Claude Opus 4.5', + }, + { + provider: 'anthropic', + model: 'claude-haiku-4-0-20250514', + label: 'Anthropic - Claude Haiku 4.0', + }, + + { + provider: 'gemini', + model: 'gemini-2.0-flash', + label: 'Gemini - 2.0 Flash', + }, + { + provider: 'gemini', + model: 'gemini-2.5-flash', + label: 'Gemini - 2.5 Flash', + }, + { + provider: 'gemini', + model: 'gemini-2.5-pro', + label: 'Gemini - 2.5 Pro', + }, + + { + provider: 'ollama', + model: 'mistral:7b', + label: 'Ollama - Mistral 7B', + }, + { + provider: 'ollama', + model: 'mistral', + label: 'Ollama - Mistral', + }, + { + provider: 'ollama', + model: 'gpt-oss:20b', + label: 'Ollama - GPT-OSS 20B', + }, + { + provider: 'ollama', + model: 'granite4:3b', + label: 'Ollama - Granite4 3B', + }, + { + provider: 'ollama', + model: 'smollm', + label: 'Ollama - SmolLM', + }, + + { provider: 'zai', model: 'glm-5.1', label: 'Z.AI - GLM-5.1' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI - GLM-5 Turbo' }, + { provider: 'zai', model: 'glm-5', label: 'Z.AI - GLM-5' }, + { provider: 'zai', model: 'glm-5v-turbo', label: 'Z.AI - GLM-5V Turbo' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI - GLM-4.7' }, + { provider: 'zai', model: 'glm-4.6v', label: 'Z.AI - GLM-4.6V' }, + { provider: 'zai', model: 'glm-4.6', label: 'Z.AI - GLM-4.6' }, + + { + provider: 'openrouter', + model: 'openai/chatgpt-4o-latest', + label: 'Openrouter - ChatGPT 4o Latest', + }, + { + provider: 'openrouter', + model: 'openai/chatgpt-4o-mini', + label: 'Openrouter - ChatGPT 4o Mini', + }, + + { + provider: 'groq', + model: 'llama-3.3-70b-versatile', + label: 'Groq - 
Llama 3.3 70B', + }, + { + provider: 'groq', + model: 'meta-llama/llama-4-maverick-17b-128e-instruct', + label: 'Groq - Llama 4 Maverick', + }, + { + provider: 'groq', + model: 'meta-llama/llama-4-scout-17b-16e-instruct', + label: 'Groq - Llama 4 Scout', + }, + + { + provider: 'grok', + model: 'grok-4', + label: 'Grok - Grok 4', + }, + { + provider: 'grok', + model: 'grok-4-fast-non-reasoning', + label: 'Grok - Grok 4 Fast', + }, + { + provider: 'grok', + model: 'grok-3', + label: 'Grok - Grok 3', + }, + { + provider: 'grok', + model: 'grok-3-mini', + label: 'Grok - Grok 3 Mini', + }, +] + +export const DEFAULT_MODEL_OPTION = MODEL_OPTIONS[0] + +const STORAGE_KEY = 'tanstack-ai-model-preference' + +export function getStoredModelPreference(): ModelOption | null { + if (typeof window === 'undefined') return null + + try { + const stored = localStorage.getItem(STORAGE_KEY) + if (!stored) return null + + const parsed = JSON.parse(stored) as { provider: Provider; model: string } + const option = MODEL_OPTIONS.find( + (opt) => opt.provider === parsed.provider && opt.model === parsed.model, + ) + + return option || null + } catch { + return null + } +} + +export function setStoredModelPreference(option: ModelOption): void { + if (typeof window === 'undefined') return + + try { + localStorage.setItem( + STORAGE_KEY, + JSON.stringify({ provider: option.provider, model: option.model }), + ) + } catch { + return + } +} + +export function getDefaultModelOption(): ModelOption { + const stored = getStoredModelPreference() + return stored || MODEL_OPTIONS[0] +} diff --git a/examples/ts-solid-chat/src/routes/api.chat.ts b/examples/ts-solid-chat/src/routes/api.chat.ts index 0b73e29be..bf7afcc03 100644 --- a/examples/ts-solid-chat/src/routes/api.chat.ts +++ b/examples/ts-solid-chat/src/routes/api.chat.ts @@ -1,7 +1,21 @@ import { createFileRoute } from '@tanstack/solid-router' -import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai' +import { + chat, + createChatOptions, + maxIterations, + toServerSentEventsResponse, +} from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai' +import { ollamaText } from '@tanstack/ai-ollama' import { anthropicText } from '@tanstack/ai-anthropic' +import { geminiText } from '@tanstack/ai-gemini' +import { openRouterText } from '@tanstack/ai-openrouter' +import { grokText } from '@tanstack/ai-grok' +import { groqText } from '@tanstack/ai-groq' +import { zaiText } from '@tanstack/ai-zai' import { serverTools } from '@/lib/guitar-tools' +import type { Provider } from '@/lib/model-selection' +import type { AnyTextAdapter } from '@tanstack/ai' const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. @@ -30,19 +44,6 @@ export const Route = createFileRoute('/api/chat')({ server: { handlers: { POST: async ({ request }) => { - if (!process.env.ANTHROPIC_API_KEY) { - return new Response( - JSON.stringify({ - error: - 'ANTHROPIC_API_KEY not configured. 
Please add it to .env or .env.local', - }), - { - status: 500, - headers: { 'Content-Type': 'application/json' }, - }, - ) - } - // Capture request signal before reading body (it may be aborted after body is consumed) const requestSignal = request.signal @@ -53,21 +54,81 @@ export const Route = createFileRoute('/api/chat')({ const abortController = new AbortController() - const { messages } = await request.json() + const { messages, data } = await request.json() + const provider: Provider = data?.provider || 'openai' + const model: string = data?.model || 'gpt-4o' + try { - // Use the stream abort signal for proper cancellation handling + const adapterConfig: Record< + Provider, + () => { adapter: AnyTextAdapter } + > = { + anthropic: () => + createChatOptions({ + adapter: anthropicText( + (model || 'claude-sonnet-4-5') as 'claude-sonnet-4-5', + ), + }), + openrouter: () => + createChatOptions({ + adapter: openRouterText('openai/gpt-5.1'), + modelOptions: { + reasoning: { + effort: 'medium', + }, + }, + }), + gemini: () => + createChatOptions({ + adapter: geminiText( + (model || 'gemini-2.5-flash') as 'gemini-2.5-flash', + ), + modelOptions: { + thinkingConfig: { + includeThoughts: true, + thinkingBudget: 100, + }, + }, + }), + grok: () => + createChatOptions({ + adapter: grokText((model || 'grok-3') as 'grok-3'), + modelOptions: {}, + }), + groq: () => + createChatOptions({ + adapter: groqText( + (model || + 'llama-3.3-70b-versatile') as 'llama-3.3-70b-versatile', + ), + }), + ollama: () => + createChatOptions({ + adapter: ollamaText((model || 'gpt-oss:120b') as 'gpt-oss:120b'), + modelOptions: { think: 'low', options: { top_k: 1 } }, + }), + openai: () => + createChatOptions({ + adapter: openaiText((model || 'gpt-4o') as 'gpt-4o'), + modelOptions: {}, + }), + zai: () => + createChatOptions({ + adapter: zaiText((model || 'glm-4.7') as 'glm-4.7', { + coding: true, + }), + modelOptions: {}, + }), + } + + const options = adapterConfig[provider]() + const stream = chat({ - adapter: anthropicText('claude-sonnet-4-5'), + ...options, tools: serverTools, systemPrompts: [SYSTEM_PROMPT], agentLoopStrategy: maxIterations(20), messages, - modelOptions: { - thinking: { - type: 'enabled', - budget_tokens: 10000, - }, - }, abortController, }) diff --git a/examples/ts-solid-chat/src/routes/index.tsx b/examples/ts-solid-chat/src/routes/index.tsx index b8ee0cdb3..e2b1ce56c 100644 --- a/examples/ts-solid-chat/src/routes/index.tsx +++ b/examples/ts-solid-chat/src/routes/index.tsx @@ -11,6 +11,11 @@ import type { InferChatMessages, UIMessage } from '@tanstack/ai-client' import type { JSXElement } from 'solid-js' import GuitarRecommendation from '@/components/example-GuitarRecommendation' import { clientTools } from '@/lib/guitar-tools' +import { + DEFAULT_MODEL_OPTION, + MODEL_OPTIONS, + type ModelOption, +} from '@/lib/model-selection' // Create typed chat options for type inference const chatOptions = createChatClientOptions({ @@ -307,11 +312,22 @@ function DebugPanel(props: { function ChatPage() { const [chunks, setChunks] = createSignal>([]) + const [selectedModel, setSelectedModel] = createSignal( + DEFAULT_MODEL_OPTION, + ) + + const bodyData = () => ({ + provider: selectedModel().provider, + model: selectedModel().model, + }) const { messages, sendMessage, isLoading, addToolApprovalResponse, stop } = useChat({ connection: chatOptions.connection, tools: clientTools, + get body() { + return bodyData() + }, onChunk: (chunk: any) => { setChunks((prev) => [...prev, chunk]) }, @@ -324,10 +340,37 @@ function 
ChatPage() {
[JSX lost in extraction: the remainder of this index.tsx hunk swaps the static "TanStack AI on SolidJS" header (left side, 1/4 width of the chat layout) for one that also renders a model-selector <select> over MODEL_OPTIONS, calling setSelectedModel on change; the markup itself did not survive. The diff header of the next file was lost as well; from context it is examples/ts-svelte-chat/src/lib/model-selection.ts, whose MODEL_OPTIONS hunk continues below.]
= [ model: 'smollm', label: 'Ollama - SmolLM', }, + + // Z.AI (GLM) + { provider: 'zai', model: 'glm-5.1', label: 'Z.AI - GLM-5.1' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI - GLM-5 Turbo' }, + { provider: 'zai', model: 'glm-5', label: 'Z.AI - GLM-5' }, + { provider: 'zai', model: 'glm-5v-turbo', label: 'Z.AI - GLM-5V Turbo' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI - GLM-4.7' }, + { provider: 'zai', model: 'glm-4.6v', label: 'Z.AI - GLM-4.6V' }, + { provider: 'zai', model: 'glm-4.6', label: 'Z.AI - GLM-4.6' }, ] const STORAGE_KEY = 'tanstack-ai-model-preference' diff --git a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts index 6308af357..290c9c457 100644 --- a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts +++ b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts @@ -8,10 +8,10 @@ import { openaiText } from '@tanstack/ai-openai' import { ollamaText } from '@tanstack/ai-ollama' import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' +import { zaiText } from '@tanstack/ai-zai' import type { RequestHandler } from './$types' import { env } from '$env/dynamic/private' - import { addToCartToolDef, addToWishListToolDef, @@ -20,7 +20,7 @@ import { recommendGuitarToolDef, } from '$lib/guitar-tools' -type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'zai' // Populate process.env with the SvelteKit environment variables // This is needed because the TanStack AI adapters read from process.env @@ -47,6 +47,10 @@ const adapterConfig = { createChatOptions({ adapter: openaiText('gpt-4o'), }), + zai: () => + createChatOptions({ + adapter: zaiText('glm-4.7'), + }), } const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. 
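These chat routes select the adapter from a `data` field in the POST body (`data?.provider` in the hunk below, and likewise in the ts-react-chat and ts-solid-chat routes above). A hypothetical client call illustrating the expected wire shape; the endpoint path and message content are placeholders:

```ts
// Hypothetical request: provider/model/messages match what the server reads;
// everything else here is illustrative only.
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    messages: [{ role: 'user', content: 'Which guitar should I buy?' }],
    data: { provider: 'zai', model: 'glm-4.7' },
  }),
})
```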
@@ -99,7 +103,7 @@ export const POST: RequestHandler = async ({ request }) => { const provider: Provider = data?.provider || 'openai' // Get typed adapter options using createOptions pattern - const options = adapterConfig[provider]() + const options = adapterConfig[provider]() as any const stream = chat({ ...options, diff --git a/examples/ts-vue-chat/package.json b/examples/ts-vue-chat/package.json index 1e539c683..3c39d3703 100644 --- a/examples/ts-vue-chat/package.json +++ b/examples/ts-vue-chat/package.json @@ -18,6 +18,7 @@ "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-vue": "workspace:*", "@tanstack/ai-vue-ui": "workspace:*", + "@tanstack/ai-zai": "workspace:*", "marked": "^15.0.6", "vue": "^3.5.25", "vue-router": "^4.5.0", diff --git a/examples/ts-vue-chat/src/lib/model-selection.ts b/examples/ts-vue-chat/src/lib/model-selection.ts index 0f66fb3fc..11a2aa725 100644 --- a/examples/ts-vue-chat/src/lib/model-selection.ts +++ b/examples/ts-vue-chat/src/lib/model-selection.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'zai' export interface ModelOption { provider: Provider @@ -72,6 +72,15 @@ export const MODEL_OPTIONS: Array = [ model: 'smollm', label: 'Ollama - SmolLM', }, + + // Z.AI (GLM) + { provider: 'zai', model: 'glm-5.1', label: 'Z.AI - GLM-5.1' }, + { provider: 'zai', model: 'glm-5-turbo', label: 'Z.AI - GLM-5 Turbo' }, + { provider: 'zai', model: 'glm-5', label: 'Z.AI - GLM-5' }, + { provider: 'zai', model: 'glm-5v-turbo', label: 'Z.AI - GLM-5V Turbo' }, + { provider: 'zai', model: 'glm-4.7', label: 'Z.AI - GLM-4.7' }, + { provider: 'zai', model: 'glm-4.6v', label: 'Z.AI - GLM-4.6V' }, + { provider: 'zai', model: 'glm-4.6', label: 'Z.AI - GLM-4.6' }, ] const STORAGE_KEY = 'tanstack-ai-model-preference' diff --git a/examples/ts-vue-chat/vite.config.ts b/examples/ts-vue-chat/vite.config.ts index 74c3563f1..a8ba9a89c 100644 --- a/examples/ts-vue-chat/vite.config.ts +++ b/examples/ts-vue-chat/vite.config.ts @@ -7,6 +7,7 @@ import { openaiText } from '@tanstack/ai-openai' import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' import { ollamaText } from '@tanstack/ai-ollama' +import { zaiText } from '@tanstack/ai-zai' import { toolDefinition } from '@tanstack/ai' import { z } from 'zod' import dotenv from 'dotenv' @@ -175,7 +176,7 @@ IMPORTANT: - Do NOT describe the guitar yourself - let the recommendGuitar tool do it ` -type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'zai' export default defineConfig({ plugins: [ @@ -218,6 +219,10 @@ export default defineConfig({ selectedModel = model || 'mistral:7b' adapter = ollamaText(selectedModel) break + case 'zai': + selectedModel = model || 'glm-4.7' + adapter = zaiText(selectedModel) + break case 'openai': default: selectedModel = model || 'gpt-4o' diff --git a/packages/typescript/ai-zai/CHANGELOG.md b/packages/typescript/ai-zai/CHANGELOG.md new file mode 100644 index 000000000..bbda773fd --- /dev/null +++ b/packages/typescript/ai-zai/CHANGELOG.md @@ -0,0 +1,20 @@ +# @tanstack/ai-zai + +## 0.1.0 + +### Minor Changes + +- Initial release of Z.AI adapter for TanStack AI +- Added Web Search tool support for Z.AI models +- Added Thinking Mode support for deep reasoning (GLM-4.7/4.6/4.5) +- Added Tool Streaming support for real-time argument streaming (GLM-4.7) +- Added subpath export for 
`@tanstack/ai-zai/tools` to expose `webSearchTool` +- Implemented tree-shakeable adapters: + - Text adapter for chat/completion functionality + - Summarization adapter for text summarization +- Features: + - Streaming chat responses + - Function/tool calling with automatic execution + - Structured output with Zod schema validation through system prompts + - OpenAI-compatible API integration + - Full TypeScript support with per-model type inference diff --git a/packages/typescript/ai-zai/README.md b/packages/typescript/ai-zai/README.md new file mode 100644 index 000000000..fca4079f3 --- /dev/null +++ b/packages/typescript/ai-zai/README.md @@ -0,0 +1,313 @@ +# @tanstack/ai-zai + +[![npm version](https://img.shields.io/npm/v/@tanstack/ai-zai.svg)](https://www.npmjs.com/package/@tanstack/ai-zai) +[![license](https://img.shields.io/npm/l/@tanstack/ai-zai.svg)](https://github.com/TanStack/ai/blob/main/LICENSE) + +Z.AI adapter for TanStack AI. + +- Z.AI docs: https://docs.z.ai/api-reference/introduction + +## OpenAI Compatibility + +Z.AI exposes an OpenAI-compatible API surface. This adapter: + +- Uses the OpenAI SDK internally, with Z.AI's base URL (`https://api.z.ai/api/paas/v4`) +- Targets the Chat Completions streaming interface +- Supports function/tool calling via OpenAI-style `tools` +- Supports Zhipu AI specific features like **Web Search**, **Thinking Mode**, and **Tool Streaming** +- Accepts `string` or `ContentPart[]` message content (only text parts are used today) + +## Installation + +```bash +npm install @tanstack/ai-zai +# or +pnpm add @tanstack/ai-zai +# or +yarn add @tanstack/ai-zai +``` + +## Setup + +Get your API key from Z.AI and set it as an environment variable: + +```bash +export ZAI_API_KEY="your_zai_api_key" +``` + +## Usage + +### Text/Chat Adapter + +```ts +import { zaiText } from '@tanstack/ai-zai' +import { generate } from '@tanstack/ai' + +const adapter = zaiText('glm-4.7') + +const result = await generate({ + adapter, + model: 'glm-4.7', + messages: [ + { + role: 'user', + content: [ + { type: 'text', content: 'Hello! Introduce yourself briefly.' }, + ], + }, + ], +}) + +for await (const chunk of result) { + console.log(chunk) +} +``` + +### Web Search Tool + +Zhipu AI provides a built-in Web Search capability. + +```ts +import { zaiText } from '@tanstack/ai-zai' +import { webSearchTool } from '@tanstack/ai-zai/tools' + +const adapter = zaiText('glm-4.7') + +for await (const chunk of adapter.chatStream({ + model: 'glm-4.7', + messages: [ + { role: 'user', content: 'What is the latest news about TanStack?' }, + ], + tools: [webSearchTool({ enable: true, search_result: true })], +})) { + if (chunk.type === 'content') process.stdout.write(chunk.delta) +} +``` + +### Thinking Mode (GLM-4.7/4.6/4.5) + +Enable Deep Thinking for complex reasoning tasks. + +```ts +import { zaiText } from '@tanstack/ai-zai' + +const adapter = zaiText('glm-4.7') + +for await (const chunk of adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Solve this complex logic puzzle...' 
}], + modelOptions: { + thinking: { + type: 'enabled', + clear_thinking: false, // Optional: set to false to preserve reasoning across turns (GLM-4.7 only) + }, + }, +})) { + // Thinking content is streamed as part of the reasoning_content delta + // The adapter currently merges reasoning content into the main content stream or handles it as configured + if (chunk.type === 'content') process.stdout.write(chunk.delta) +} +``` + +### Tool / Function Calling & Streaming + +GLM-4.7 supports streaming tool calls via `tool_stream`. + +```ts +import { zaiText } from '@tanstack/ai-zai' +import type { Tool } from '@tanstack/ai' + +const adapter = zaiText('glm-4.7') + +const tools: Array = [ + { + name: 'echo', + description: 'Echo back the provided text', + inputSchema: { + type: 'object', + properties: { text: { type: 'string' } }, + required: ['text'], + }, + }, +] + +for await (const chunk of adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Call echo with {"text":"hello"}.' }], + tools, + modelOptions: { + tool_stream: true, // Enable streaming tool arguments + }, +})) { + if (chunk.type === 'tool_call') { + const { id, function: fn } = chunk.toolCall + console.log('Tool requested:', fn.name, fn.arguments) + } +} +``` + +### Summarization + +```ts +import { zaiSummarize } from '@tanstack/ai-zai' +import { summarize } from '@tanstack/ai' + +const adapter = zaiSummarize('glm-4.7') + +const result = await summarize({ + adapter, + text: 'Long article text...', + style: 'bullet-points', + maxLength: 500, +}) + +console.log(result.summary) +``` + +### Streaming (direct) + +```ts +import { zaiText } from '@tanstack/ai-zai' + +const adapter = zaiText('glm-4.7') + +for await (const chunk of adapter.chatStream({ + model: 'glm-4.7', + messages: [ + { + role: 'user', + content: [ + { type: 'text', content: 'Stream a short poem about TypeScript.' }, + ], + }, + ], +})) { + if (chunk.type === 'content') process.stdout.write(chunk.delta) + if (chunk.type === 'error') { + console.error(chunk.error) + break + } + if (chunk.type === 'done') break +} +``` + +### With Explicit API Key + +```ts +import { createZAIChat } from '@tanstack/ai-zai' + +const adapter = createZAIChat('glm-4.7', 'your-zai-api-key-here') +``` + +### Error Handling + +The adapter yields an `error` chunk instead of throwing. + +```ts +import { zaiText } from '@tanstack/ai-zai' + +const adapter = zaiText('glm-4.7') + +for await (const chunk of adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Hello' }], +})) { + if (chunk.type === 'error') { + console.error(chunk.error.message, chunk.error.code) + break + } +} +``` + +## API Reference + +### `createZAIChat(model, apiKey, config?)` + +```ts +import { createZAIChat } from '@tanstack/ai-zai' + +const adapter = createZAIChat('glm-4.7', 'your_zai_api_key', { + baseURL: 'https://api.z.ai/api/paas/v4', +}) +``` + +- `model`: `ZAIModel` +- `apiKey`: string (required) +- `config.baseURL`: string (optional) + +### `zaiText(model, config?)` + +```ts +import { zaiText } from '@tanstack/ai-zai' + +const adapter = zaiText('glm-4.7', { + baseURL: 'https://api.z.ai/api/paas/v4', +}) +``` + +Uses `ZAI_API_KEY` from your environment. 
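Note that `ZAIAdapterConfig` (defined later in this diff) also accepts a `coding` flag, which several of the example routes in this PR enable. A minimal sketch; the flag's effect is implemented in `createZAIClient`, which is not shown in this excerpt:

```ts
import { zaiText } from '@tanstack/ai-zai'

// coding?: boolean comes from ZAIAdapterConfig; the example routes pass
// coding: true. Its behavior is defined in createZAIClient (not shown here).
const adapter = zaiText('glm-4.7', { coding: true })
```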
+ +## Supported Models + +### Chat Models + +- `glm-4.7` - Latest flagship model (Supports Thinking, Tool Streaming) +- `glm-4.6` - Previous flagship model (Supports Thinking) +- `glm-4.6v` - Vision model (Z.AI supports multimodal input, this adapter currently streams text) + +## Features + +- ✅ Streaming chat completions +- ✅ Function/tool calling +- ✅ **Web Search Tool** (Zhipu AI native) +- ✅ **Thinking Mode** (Interleaved & Preserved) +- ✅ **Tool Streaming** (Real-time argument streaming) +- ❌ Structured output (not implemented in this adapter yet) +- ❌ Multimodal input (this adapter currently extracts text only; non-text parts are ignored) + +## Tree-Shakeable Adapters + +This package uses tree-shakeable adapters, so you only import what you need: + +```ts +import { zaiText } from '@tanstack/ai-zai' +``` + +## Configuration + +### Environment Variables + +- `ZAI_API_KEY` - used by `zaiText()` +- `ZAI_API_KEY_TEST` - used by the integration tests in this package + +### Base URL Customization + +Default base URL is `https://api.z.ai/api/paas/v4`. You can override it via: + +- `createZAIChat(model, apiKey, { baseURL })` +- `zaiText(model, { baseURL })` + +## Testing + +```bash +pnpm test:lib +``` + +Integration tests require a real Z.AI API key. + +```bash +export ZAI_API_KEY_TEST="your_test_key" +pnpm test:lib +``` + +## Contributing + +We welcome issues and pull requests. + +- GitHub: https://github.com/TanStack/ai +- Discussions: https://github.com/TanStack/ai/discussions +- Contribution guidelines: https://github.com/TanStack/ai/blob/main/CONTRIBUTING.md + +## License + +MIT © TanStack diff --git a/packages/typescript/ai-zai/package.json b/packages/typescript/ai-zai/package.json new file mode 100644 index 000000000..c505166e5 --- /dev/null +++ b/packages/typescript/ai-zai/package.json @@ -0,0 +1,57 @@ +{ + "name": "@tanstack/ai-zai", + "version": "0.1.0", + "description": "Z.AI adapter for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-zai" + }, + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + }, + "./tools": { + "types": "./dist/esm/tools/index.d.ts", + "import": "./dist/esm/tools/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "zai", + "tanstack", + "adapter" + ], + "dependencies": { + "openai": "^6.9.1" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^" + }, + "devDependencies": { + "@tanstack/ai": "workspace:*", + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7", + "zod": "^4.2.0" + } +} diff --git a/packages/typescript/ai-zai/src/adapters/index.ts b/packages/typescript/ai-zai/src/adapters/index.ts new file mode 100644 index 000000000..cd933705c --- /dev/null +++ b/packages/typescript/ai-zai/src/adapters/index.ts @@ -0,0 +1,53 @@ +import { getZAIApiKeyFromEnv } from '../utils/client' +import { ZAITextAdapter } from './text' +import type { ZAI_CHAT_MODELS } from '../model-meta' + +export { ZAITextAdapter, type ZAITextAdapterConfig } from './text' +export { + ZAISummarizeAdapter, + createZAISummarize, 
+ zaiSummarize, + type ZAISummarizeConfig, + type ZAISummarizeProviderOptions, +} from './summarize' + +export type ZAIModel = (typeof ZAI_CHAT_MODELS)[number] + +export interface ZAIAdapterConfig { + baseURL?: string + coding?: boolean +} + +export function createZAIChat( + model: ZAIModel, + apiKey: string, + config?: ZAIAdapterConfig, +): ZAITextAdapter { + if (!apiKey) { + throw new Error('apiKey is required') + } + + return new ZAITextAdapter( + { + apiKey, + baseURL: config?.baseURL, + coding: config?.coding, + }, + model, + ) +} + +export function zaiText( + model: ZAIModel, + config?: ZAIAdapterConfig, +): ZAITextAdapter { + const apiKey = getZAIApiKeyFromEnv() + return new ZAITextAdapter( + { + apiKey, + baseURL: config?.baseURL, + coding: config?.coding, + }, + model, + ) +} diff --git a/packages/typescript/ai-zai/src/adapters/summarize.ts b/packages/typescript/ai-zai/src/adapters/summarize.ts new file mode 100644 index 000000000..60baaff1e --- /dev/null +++ b/packages/typescript/ai-zai/src/adapters/summarize.ts @@ -0,0 +1,162 @@ +import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { getZAIApiKeyFromEnv } from '../utils/client' +import { ZAITextAdapter } from './text' +import type { ZAI_CHAT_MODELS } from '../model-meta' +import type { + StreamChunk, + SummarizationOptions, + SummarizationResult, +} from '@tanstack/ai' +import type { ZAITextAdapterConfig } from './text' + +export interface ZAISummarizeConfig extends ZAITextAdapterConfig {} + +export interface ZAISummarizeProviderOptions { + temperature?: number + maxTokens?: number +} + +export type ZAISummarizeModel = (typeof ZAI_CHAT_MODELS)[number] + +/** + * Z.AI Summarize Adapter + * + * Delegates all API calls to the ZAITextAdapter. + */ +export class ZAISummarizeAdapter< + TModel extends ZAISummarizeModel, +> extends BaseSummarizeAdapter { + readonly kind = 'summarize' as const + readonly name = 'zai' as const + + private textAdapter: ZAITextAdapter + + constructor(config: ZAISummarizeConfig, model: TModel) { + super({}, model) + this.textAdapter = new ZAITextAdapter(config, model) + } + + async summarize(options: SummarizationOptions): Promise { + const { logger } = options + const systemPrompt = this.buildSummarizationPrompt(options) + + logger.request(`activity=summarize provider=zai`, { + provider: 'zai', + model: options.model, + }) + + let summary = '' + const id = '' + let model = options.model + let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } + + try { + for await (const chunk of this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + logger, + })) { + // AG-UI TEXT_MESSAGE_CONTENT event + if (chunk.type === 'TEXT_MESSAGE_CONTENT') { + if (chunk.content) { + summary = chunk.content + } else { + summary += chunk.delta + } + model = chunk.model || model + } + // AG-UI RUN_FINISHED event + if (chunk.type === 'RUN_FINISHED') { + if (chunk.usage) { + usage = chunk.usage + } + } + } + } catch (error) { + logger.errors('zai.summarize fatal', { + error, + source: 'zai.summarize', + }) + throw error + } + + return { id, model, summary, usage } + } + + async *summarizeStream( + options: SummarizationOptions, + ): AsyncIterable { + const { logger } = options + const systemPrompt = this.buildSummarizationPrompt(options) + + logger.request(`activity=summarize provider=zai`, { + provider: 'zai', + model: options.model, + stream: true, + }) + + try { + 
yield* this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + logger, + }) + } catch (error) { + logger.errors('zai.summarize fatal', { + error, + source: 'zai.summarize', + }) + throw error + } + } + + private buildSummarizationPrompt(options: SummarizationOptions): string { + let prompt = 'You are a professional summarizer. ' + + switch (options.style) { + case 'bullet-points': + prompt += 'Provide a summary in bullet point format. ' + break + case 'paragraph': + prompt += 'Provide a summary in paragraph format. ' + break + case 'concise': + prompt += 'Provide a very concise summary in 1-2 sentences. ' + break + default: + prompt += 'Provide a clear and concise summary. ' + } + + if (options.focus && options.focus.length > 0) { + prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` + } + + if (options.maxLength) { + prompt += `Keep the summary under ${options.maxLength} tokens. ` + } + + return prompt + } +} + +export function createZAISummarize( + model: TModel, + apiKey: string, + config?: Omit, +): ZAISummarizeAdapter { + return new ZAISummarizeAdapter({ apiKey, ...config }, model) +} + +export function zaiSummarize( + model: TModel, + config?: Omit, +): ZAISummarizeAdapter { + const apiKey = getZAIApiKeyFromEnv() + return createZAISummarize(model, apiKey, config) +} diff --git a/packages/typescript/ai-zai/src/adapters/text.ts b/packages/typescript/ai-zai/src/adapters/text.ts new file mode 100644 index 000000000..785b4f6cc --- /dev/null +++ b/packages/typescript/ai-zai/src/adapters/text.ts @@ -0,0 +1,561 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { createZAIClient } from '../utils/client' +import { convertToolsToZAIFormat } from '../utils/conversion' +import { ZAI_MODEL_META } from '../model-meta' +import type { + ZAIChatModelProviderOptionsByName, + ZAIModelInputModalitiesByName, + ZAI_CHAT_MODELS, +} from '../model-meta' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type { InternalLogger } from '@tanstack/ai/adapter-internals' +import type { + Modality, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { ZAIMessageMetadataByModality } from '../message-types' +import type { ZAITextOptions } from '../text/text-provider-options' +import type OpenAI from 'openai' + +/** Cast an event object to StreamChunk. Adapters construct events with string + * literal types which are structurally compatible with the EventType enum. */ +const asChunk = (chunk: Record) => + chunk as unknown as StreamChunk + +type ResolveProviderOptions = + TModel extends keyof ZAIChatModelProviderOptionsByName + ? ZAIChatModelProviderOptionsByName[TModel] + : ZAITextOptions + +type ResolveInputModalities = + TModel extends keyof ZAIModelInputModalitiesByName + ? 
ZAIModelInputModalitiesByName[TModel] + : readonly ['text'] + +export interface ZAITextAdapterConfig { + apiKey: string + baseURL?: string + coding?: boolean +} + +type ZAIChatCompletionParams = + OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming + +/** + * Z.AI Text Adapter + * + * Streams text deltas using the AG-UI protocol event format: + * - RUN_STARTED → TEXT_MESSAGE_START → TEXT_MESSAGE_CONTENT → TEXT_MESSAGE_END → RUN_FINISHED + * - Tool calls: TOOL_CALL_START → TOOL_CALL_ARGS → TOOL_CALL_END + * - Errors: RUN_ERROR + */ +export class ZAITextAdapter< + TModel extends (typeof ZAI_CHAT_MODELS)[number], +> extends BaseTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities extends ReadonlyArray + ? ResolveInputModalities + : readonly ['text'], + ZAIMessageMetadataByModality +> { + readonly kind = 'text' as const + readonly name = 'zai' as const + + private client: OpenAI + + constructor(config: ZAITextAdapterConfig, model: TModel) { + super({}, model) + + this.client = createZAIClient(config.apiKey, { + baseURL: config.baseURL, + coding: config.coding, + }) + } + + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToZAI(options) + const { logger } = options + + const timestamp = Date.now() + const runId = options.runId ?? this.generateId() + const threadId = options.threadId ?? this.generateId() + const messageId = this.generateId() + + try { + logger.request( + `activity=chat provider=zai model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: 'zai', model: this.model }, + ) + + const stream = await this.client.chat.completions.create(requestParams, { + headers: this.getRequestHeaders(options), + signal: this.getAbortSignal(options), + }) + + yield* this.processZAIStreamChunks( + stream, + options, + runId, + threadId, + messageId, + timestamp, + logger, + ) + } catch (error: unknown) { + logger.errors('zai.chatStream fatal', { + error: toRunErrorPayload(error, 'zai.chatStream failed'), + source: 'zai.chatStream', + }) + throw error + } + } + + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const messages = this.convertMessagesToInput( + chatOptions.messages, + chatOptions, + ) + const { logger } = chatOptions + + // Inject the JSON schema into the system prompt so the model knows + // the expected output shape (Z.AI doesn't support json_schema response_format) + const schemaPrompt = `\n\nOUTPUT SCHEMA (respond with valid JSON matching this schema):\n${JSON.stringify(outputSchema, null, 2)}` + const firstMsg = messages[0] + if (firstMsg && firstMsg.role === 'system') { + messages[0] = { + role: 'system', + content: ((firstMsg as { content: string }).content ?? '') + schemaPrompt, + } + } else { + messages.unshift({ role: 'system', content: schemaPrompt }) + } + + try { + logger.request( + `activity=chat-structured provider=zai model=${this.model} messages=${chatOptions.messages.length} stream=false`, + { provider: 'zai', model: this.model }, + ) + + const response = await this.client.chat.completions.create( + { + model: chatOptions.model ?? this.model, + messages, + temperature: chatOptions.temperature ?? 
0, + max_tokens: chatOptions.maxTokens, + top_p: chatOptions.topP, + stream: false, + response_format: { type: 'json_object' }, + }, + { + headers: this.getRequestHeaders(chatOptions), + signal: this.getAbortSignal(chatOptions), + }, + ) + + const rawText = response.choices[0]?.message?.content ?? '' + + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + return { data: parsed, rawText } + } catch (error: unknown) { + logger.errors('zai.structuredOutput fatal', { + error: toRunErrorPayload(error, 'zai.structuredOutput failed'), + source: 'zai.structuredOutput', + }) + throw error + } + } + + private mapTextOptionsToZAI( + options: TextOptions>, + ): ZAIChatCompletionParams { + const messages = this.convertMessagesToInput(options.messages, options) + + const rawProviderOptions = (options.modelOptions ?? {}) as any + const { stopSequences, ...providerOptions } = rawProviderOptions + const stop = stopSequences ?? providerOptions.stop + + const request: ZAIChatCompletionParams = { + model: options.model, + messages, + temperature: options.temperature, + max_tokens: options.maxTokens, + top_p: options.topP, + stream: true, + stream_options: { include_usage: true }, + ...providerOptions, + } + + if (options.tools?.length) { + ;(request as any).tools = convertToolsToZAIFormat(options.tools) + } + + if (stop !== undefined) { + ;(request as any).stop = stop + } + + return request + } + + private convertMessagesToInput( + messages: Array, + options: Pick, + ): Array { + const result: Array = [] + + const modelMeta = ZAI_MODEL_META[this.model] + const inputs = modelMeta.supports.input as ReadonlyArray + const capabilities = { + image: inputs.includes('image'), + video: inputs.includes('video'), + } + + if (options.systemPrompts?.length) { + result.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + for (const message of messages) { + if (message.role === 'tool') { + if (!message.toolCallId) { + throw new Error('Tool message missing required toolCallId') + } + result.push({ + role: 'tool', + tool_call_id: message.toolCallId, + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + }) + continue + } + + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map( + (tc: NonNullable[number]) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + }), + ) + + result.push({ + role: 'assistant', + content: this.convertContent(message.content, { + image: false, + video: false, + }) as string, + ...(toolCalls && toolCalls.length ? 
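+ // attach tool_calls only when non-empty; OpenAI-compatible endpoints typically reject an empty array here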
{ tool_calls: toolCalls } : {}), + }) + continue + } + + result.push({ + role: 'user', + content: this.convertContent(message.content, capabilities), + }) + } + + return result + } + + private async *processZAIStreamChunks( + stream: AsyncIterable, + options: TextOptions, + runId: string, + threadId: string, + messageId: string, + timestamp: number, + logger: InternalLogger, + ): AsyncIterable { + let accumulatedContent = '' + let responseModel = options.model + + const toolCallMetadata = new Map< + number, + { id: string; name: string; arguments: string } + >() + + let hasEmittedRunStarted = false + let hasEmittedTextMessageStart = false + + try { + for await (const chunk of stream) { + responseModel = chunk.model || responseModel + + logger.provider(`provider=zai type=chunk`, { chunk }) + + // Emit RUN_STARTED on first chunk + if (!hasEmittedRunStarted) { + hasEmittedRunStarted = true + yield asChunk({ + type: 'RUN_STARTED', + runId, + threadId, + model: responseModel, + timestamp, + }) + } + + const chunkAny = chunk as any + const choice = Array.isArray(chunkAny.choices) + ? chunkAny.choices[0] + : undefined + if (!choice) continue + + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.tool_calls + + // Handle text content deltas + if (typeof deltaContent === 'string' && deltaContent.length) { + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield asChunk({ + type: 'TEXT_MESSAGE_START', + messageId, + model: responseModel, + timestamp, + role: 'assistant', + }) + } + + accumulatedContent += deltaContent + yield asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId, + model: responseModel, + timestamp, + delta: deltaContent, + content: accumulatedContent, + }) + } + + // Handle tool call deltas + if (deltaToolCalls?.length) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + if (!toolCallMetadata.has(index)) { + const id = toolCallDelta.id || this.generateId() + const name = toolCallDelta.function?.name || '' + + toolCallMetadata.set(index, { + id, + name, + arguments: '', + }) + + // Emit TOOL_CALL_START + yield asChunk({ + type: 'TOOL_CALL_START', + toolCallId: id, + toolCallName: name, + toolName: name, + model: responseModel, + timestamp, + index, + }) + } + + const current = toolCallMetadata.get(index)! + + if (toolCallDelta.id) current.id = toolCallDelta.id + if (toolCallDelta.function?.name) + current.name = toolCallDelta.function.name + if (toolCallDelta.function?.arguments) { + current.arguments += toolCallDelta.function.arguments + + // Emit TOOL_CALL_ARGS with the delta + yield asChunk({ + type: 'TOOL_CALL_ARGS', + toolCallId: current.id, + model: responseModel, + timestamp, + delta: toolCallDelta.function.arguments, + }) + } + } + } + + // Handle finish + if (choice.finish_reason) { + const isToolTurn = + choice.finish_reason === 'tool_calls' || toolCallMetadata.size > 0 + + // Emit TOOL_CALL_END for each completed tool call + if (isToolTurn) { + for (const [, toolCall] of toolCallMetadata) { + let parsedInput: unknown = {} + try { + const parsed = toolCall.arguments + ? JSON.parse(toolCall.arguments) + : {} + parsedInput = parsed && typeof parsed === 'object' ? 
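+ // JSON.parse can yield primitives (e.g. '"hi"' or '42'); TOOL_CALL_END input must stay an object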
parsed : {} + } catch { + parsedInput = {} + } + + yield asChunk({ + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: responseModel, + timestamp, + input: parsedInput, + }) + } + } + + // Close text message if we had one + if (hasEmittedTextMessageStart) { + yield asChunk({ + type: 'TEXT_MESSAGE_END', + messageId, + model: responseModel, + timestamp, + }) + } + + // Emit RUN_FINISHED + yield asChunk({ + type: 'RUN_FINISHED', + runId, + threadId, + model: responseModel, + timestamp, + usage: chunk.usage + ? { + promptTokens: chunk.usage.prompt_tokens || 0, + completionTokens: chunk.usage.completion_tokens || 0, + totalTokens: chunk.usage.total_tokens || 0, + } + : undefined, + finishReason: isToolTurn ? 'tool_calls' : 'stop', + }) + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + logger.errors('zai stream ended with error', { + error, + source: 'zai.processZAIStreamChunks', + }) + yield asChunk({ + type: 'RUN_ERROR', + runId, + threadId, + message: err.message || 'Unknown error occurred', + code: err.code, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + }) + } + } + + private convertContent( + content: unknown, + capabilities: { image: boolean; video: boolean }, + ): string | Array { + if (typeof content === 'string') return content + if (!content) return '' + + if (Array.isArray(content)) { + if (!capabilities.image && !capabilities.video) { + return content + .filter((p) => p && typeof p === 'object' && p.type === 'text') + .map((p) => String(p.content ?? '')) + .join('') + } + + const parts: Array = [] + + for (const part of content) { + if (!part || typeof part !== 'object') continue + + if (part.type === 'text') { + parts.push({ type: 'text', text: part.content ?? '' }) + } else if (part.type === 'image' && capabilities.image) { + parts.push({ + type: 'image_url', + image_url: { url: part.source.value }, + }) + } else if (part.type === 'video' && capabilities.video) { + parts.push({ + type: 'video_url', + video_url: { url: part.source.value }, + } as any) + } + } + + if (parts.length === 0) return '' + return parts + } + + return '' + } + + private getRequestHeaders( + options: TextOptions, + ): Record | undefined { + const request = options.request + const userHeaders = + request instanceof Request + ? Object.fromEntries(request.headers.entries()) + : request?.headers + + if (!userHeaders) return undefined + + if (Array.isArray(userHeaders)) { + return Object.fromEntries(userHeaders) + } + + if (userHeaders instanceof Headers) { + return Object.fromEntries(userHeaders.entries()) + } + + return userHeaders + } + + private getAbortSignal(options: TextOptions): AbortSignal | undefined { + if (options.abortController?.signal) return options.abortController.signal + + const request = options.request + if (request && request instanceof Request) return request.signal + + return request?.signal ?? 
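+ // request-like objects that are not instanceof Request (e.g. cross-realm) may still expose a signal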
undefined + } +} diff --git a/packages/typescript/ai-zai/src/index.ts b/packages/typescript/ai-zai/src/index.ts new file mode 100644 index 000000000..2544b1844 --- /dev/null +++ b/packages/typescript/ai-zai/src/index.ts @@ -0,0 +1,36 @@ +// Text (Chat) adapter +export { + ZAITextAdapter, + createZAIChat, + zaiText, + type ZAITextAdapterConfig, +} from './adapters/index' + +// Summarize adapter +export { + ZAISummarizeAdapter, + createZAISummarize, + zaiSummarize, + type ZAISummarizeConfig, + type ZAISummarizeProviderOptions, +} from './adapters/index' + +// Config types +export type { ZAIAdapterConfig, ZAIModel } from './adapters/index' + +// Endpoint constants +export { ZAI_GENERAL_BASE_URL, ZAI_CODING_BASE_URL } from './utils/client' + +// Model metadata types +export type { + ZAIChatModel, + ZAIChatModelProviderOptionsByName, + ZAIChatModelToolCapabilitiesByName, + ZAIModelInputModalitiesByName, +} from './model-meta' + +// Message metadata types +export type { ZAIMessageMetadataByModality } from './message-types' + +// Tools +export * from './tools/index' diff --git a/packages/typescript/ai-zai/src/message-types.ts b/packages/typescript/ai-zai/src/message-types.ts new file mode 100644 index 000000000..b845e056a --- /dev/null +++ b/packages/typescript/ai-zai/src/message-types.ts @@ -0,0 +1,64 @@ +/** + * Z.AI-specific metadata types for multimodal content parts. + * These types extend the base ContentPart metadata with Z.AI-specific options. + * Since Z.AI is OpenAI-compatible, most types are similar to OpenAI. + */ + +/** + * Metadata for Z.AI image content parts. + * Controls how the model processes and analyzes images. + */ +export interface ZAIImageMetadata { + /** + * Controls how the model processes the image. + * - 'auto': Let the model decide based on image size and content + * - 'low': Use low resolution processing (faster, cheaper, less detail) + * - 'high': Use high resolution processing (slower, more expensive, more detail) + * + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Z.AI audio content parts. + * Specifies the audio format for proper processing. + */ +export interface ZAIAudioMetadata { + /** + * The format of the audio. + * Supported formats: mp3, wav, flac, etc. + * @default 'mp3' + */ + format?: 'mp3' | 'wav' | 'flac' | 'ogg' | 'webm' | 'aac' +} + +/** + * Metadata for Z.AI video content parts. + * Note: Video support in Z.AI may vary; check current API capabilities. + */ +export interface ZAIVideoMetadata {} + +/** + * Metadata for Z.AI document content parts. + * Note: Direct document support may vary; PDFs often need to be converted to images. + */ +export interface ZAIDocumentMetadata {} + +/** + * Metadata for Z.AI text content parts. + * Currently no specific metadata options for text in Z.AI. + */ +export interface ZAITextMetadata {} + +/** + * Map of modality types to their Z.AI-specific metadata types. + * Used for type inference when constructing multimodal messages. 
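+ *
+ * @example
+ * A rough sketch of attaching per-part metadata (the ContentPart shape
+ * comes from `@tanstack/ai`; the exact `metadata` field placement is an
+ * assumption here, not something this package confirms):
+ * ```ts
+ * const message = {
+ *   role: 'user' as const,
+ *   content: [
+ *     { type: 'text', content: 'Describe this image' },
+ *     {
+ *       type: 'image',
+ *       source: { type: 'url', value: 'https://example.com/cat.png' },
+ *       metadata: { detail: 'high' }, // ZAIImageMetadata
+ *     },
+ *   ],
+ * }
+ * ```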
+ */ +export interface ZAIMessageMetadataByModality { + text: ZAITextMetadata + image: ZAIImageMetadata + audio: ZAIAudioMetadata + video: ZAIVideoMetadata + document: ZAIDocumentMetadata +} diff --git a/packages/typescript/ai-zai/src/model-meta.ts b/packages/typescript/ai-zai/src/model-meta.ts new file mode 100644 index 000000000..d3515fc6b --- /dev/null +++ b/packages/typescript/ai-zai/src/model-meta.ts @@ -0,0 +1,433 @@ +import type { + ZAIBaseOptions, + ZAIMetadataOptions, + ZAIReasoningOptions, + ZAIStreamingOptions, + ZAIStructuredOutputOptions, + ZAIToolsOptions, +} from './text/text-provider-options' + +interface ModelMeta { + name: string + supports: { + input: Array<'text' | 'image' | 'audio' | 'video'> + output: Array<'text' | 'image' | 'audio' | 'video'> + endpoints: Array< + | 'chat' + | 'chat-completions' + | 'assistants' + | 'speech_generation' + | 'image-generation' + | 'fine-tuning' + | 'batch' + | 'image-edit' + | 'moderation' + | 'translation' + | 'realtime' + | 'audio' + | 'video' + | 'transcription' + > + features: Array< + | 'streaming' + | 'function_calling' + | 'structured_outputs' + | 'predicted_outcomes' + | 'distillation' + | 'fine_tuning' + > + tools?: Array< + | 'web_search' + | 'file_search' + | 'image_generation' + | 'code_interpreter' + | 'mcp' + | 'computer_use' + > + } + context_window?: number + max_output_tokens?: number + knowledge_cutoff?: string + pricing: { + input: { + normal: number + cached?: number + } + output: { + normal: number + } + } + providerOptions?: TProviderOptions +} + +// ============================================================================ +// GLM-5 Series +// ============================================================================ + +/** + * GLM-5.1: Long-horizon task flagship model + * Released April 2026 + * Designed for long-running autonomous tasks (up to 8 hours), + * with enhanced coding, reasoning, and agentic capabilities. + * Pricing: $1.4/M input, $0.26/M cached, $4.4/M output + */ +const GLM_5_1 = { + name: 'glm-5.1', + context_window: 200_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2026-03-01', + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'code_interpreter', 'mcp'], + }, + pricing: { + input: { + normal: 1.4, + cached: 0.26, + }, + output: { + normal: 4.4, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +/** + * GLM-5-Turbo: Fast high-performance text model + * Released April 2026 + * Optimized for speed with strong coding and reasoning capabilities. 
+ * Pricing: $1.2/M input, $0.24/M cached, $4.0/M output + */ +const GLM_5_TURBO = { + name: 'glm-5-turbo', + context_window: 200_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2026-03-01', + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'code_interpreter', 'mcp'], + }, + pricing: { + input: { + normal: 1.2, + cached: 0.24, + }, + output: { + normal: 4.0, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +/** + * GLM-5: Base fifth-generation text model + * Released April 2026 + * Strong general-purpose capabilities with tool use support. + * Pricing: $1.0/M input, $0.2/M cached, $3.2/M output + */ +const GLM_5 = { + name: 'glm-5', + context_window: 200_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2026-03-01', + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'code_interpreter', 'mcp'], + }, + pricing: { + input: { + normal: 1.0, + cached: 0.2, + }, + output: { + normal: 3.2, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +/** + * GLM-5V-Turbo: Multimodal Agent foundation model + * Released April 2026 + * First multimodal agent model from Zhipu, supports image, video, file, and text input. + * Optimized for visual programming and complex agent workflows. + * Pricing: $1.2/M input, $0.24/M cached, $4.0/M output + */ +const GLM_5V_TURBO = { + name: 'glm-5v-turbo', + context_window: 200_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2026-03-01', + supports: { + input: ['text', 'image', 'video'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'image_generation', 'code_interpreter', 'mcp'], + }, + pricing: { + input: { + normal: 1.2, + cached: 0.24, + }, + output: { + normal: 4.0, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +// ============================================================================ +// GLM-4 Series +// ============================================================================ + +/** + * GLM-4.7: Previous flagship model + * Released December 2025 + * Features enhanced coding, reasoning, and agentic capabilities + * Pricing: $0.6/M input, $0.11/M cached, $2.2/M output + */ +const GLM_4_7 = { + name: 'glm-4.7', + context_window: 200_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2025-12-01', + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'code_interpreter', 'mcp'], + }, + pricing: { + input: { + normal: 0.6, + cached: 0.11, + }, + output: { + normal: 2.2, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +/** + * GLM-4.6V: Multimodal vision model + * Released December 
2024 + * Supports text, image, and video inputs + */ +const GLM_4_6V = { + name: 'glm-4.6v', + context_window: 128_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2024-12-01', + supports: { + input: ['text', 'image', 'video'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'image_generation', 'code_interpreter', 'mcp'], + }, + pricing: { + input: { + normal: 0.14, + }, + output: { + normal: 0.42, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +/** + * GLM-4.6: Previous flagship model + * Released September 2024 + * Enhanced coding and reasoning capabilities + * Pricing: $0.6/M input, $0.11/M cached, $2.2/M output + */ +const GLM_4_6 = { + name: 'glm-4.6', + context_window: 128_000, + max_output_tokens: 128_000, + knowledge_cutoff: '2024-09-01', + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat', 'chat-completions'], + features: ['streaming', 'function_calling', 'structured_outputs'], + tools: ['web_search', 'code_interpreter'], + }, + pricing: { + input: { + normal: 0.6, + cached: 0.11, + }, + output: { + normal: 2.2, + }, + }, +} as const satisfies ModelMeta< + ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +> + +// ============================================================================ +// Exports +// ============================================================================ + +export const ZAI_CHAT_MODELS = [ + // GLM-5 series + GLM_5_1.name, + GLM_5_TURBO.name, + GLM_5.name, + GLM_5V_TURBO.name, + // GLM-4 series + GLM_4_7.name, + GLM_4_6V.name, + GLM_4_6.name, +] as const + +export type ZAIChatModel = (typeof ZAI_CHAT_MODELS)[number] + +/** + * Type-only map from chat model name to its provider options type. + * Used by the core AI types (via the adapter) to narrow + * `providerOptions` based on the selected model. + * + * Manually defined to ensure accurate type narrowing per model. + */ +export type ZAIChatModelProviderOptionsByName = { + // GLM-5 series + [GLM_5_1.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions + [GLM_5_TURBO.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions + [GLM_5.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions + [GLM_5V_TURBO.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions + // GLM-4 series + [GLM_4_7.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions + [GLM_4_6V.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions + [GLM_4_6.name]: ZAIBaseOptions & + ZAIReasoningOptions & + ZAIStructuredOutputOptions & + ZAIToolsOptions & + ZAIStreamingOptions & + ZAIMetadataOptions +} + +/** + * Type-only map from chat model name to its supported provider tools. + * Keyed on each model's `.name` field. 
Value is the `typeof supports.tools` + * tuple from each model constant. + */ +export type ZAIChatModelToolCapabilitiesByName = { + [GLM_5_1.name]: typeof GLM_5_1.supports.tools + [GLM_5_TURBO.name]: typeof GLM_5_TURBO.supports.tools + [GLM_5.name]: typeof GLM_5.supports.tools + [GLM_5V_TURBO.name]: typeof GLM_5V_TURBO.supports.tools + [GLM_4_7.name]: typeof GLM_4_7.supports.tools + [GLM_4_6V.name]: typeof GLM_4_6V.supports.tools + [GLM_4_6.name]: typeof GLM_4_6.supports.tools +} + +/** + * Type-only map from chat model name to its supported input modalities. + * Based on the 'supports.input' arrays defined for each model. + * Used by the core AI types to constrain ContentPart types based on the selected model. + */ +export type ZAIModelInputModalitiesByName = { + [GLM_5_1.name]: typeof GLM_5_1.supports.input + [GLM_5_TURBO.name]: typeof GLM_5_TURBO.supports.input + [GLM_5.name]: typeof GLM_5.supports.input + [GLM_5V_TURBO.name]: typeof GLM_5V_TURBO.supports.input + [GLM_4_7.name]: typeof GLM_4_7.supports.input + [GLM_4_6V.name]: typeof GLM_4_6V.supports.input + [GLM_4_6.name]: typeof GLM_4_6.supports.input +} + +export const ZAI_MODEL_META = { + [GLM_5_1.name]: GLM_5_1, + [GLM_5_TURBO.name]: GLM_5_TURBO, + [GLM_5.name]: GLM_5, + [GLM_5V_TURBO.name]: GLM_5V_TURBO, + [GLM_4_7.name]: GLM_4_7, + [GLM_4_6V.name]: GLM_4_6V, + [GLM_4_6.name]: GLM_4_6, +} as const diff --git a/packages/typescript/ai-zai/src/text/text-provider-options.ts b/packages/typescript/ai-zai/src/text/text-provider-options.ts new file mode 100644 index 000000000..eb77a898a --- /dev/null +++ b/packages/typescript/ai-zai/src/text/text-provider-options.ts @@ -0,0 +1,208 @@ +import type OpenAI from 'openai' + +// Core, always-available options for Z.AI API +export interface ZAIBaseOptions { + /** + * Whether to run the model response in the background. + * @default false + */ + background?: boolean + + /** + * The conversation that this response belongs to. + */ + conversation?: string | { id: string } + + /** + * Specify additional output data to include in the model response. + */ + include?: Array + + /** + * The unique ID of the previous response to the model. Use this to create multi-turn conversations. + */ + previous_response_id?: string + + /** + * Reference to a prompt template and its variables. + */ + prompt?: { + id: string + version?: string + variables?: Record + } + + /** + * Used by Z.AI to cache responses for similar requests to optimize cache hit rates. + */ + prompt_cache_key?: string + + /** + * The retention policy for the prompt cache. + */ + prompt_cache_retention?: 'in-memory' | '24h' + + /** + * A stable identifier used to help detect users of your application. + */ + safety_identifier?: string + + /** + * Specifies the processing type used for serving the request. + * @default 'auto' + */ + service_tier?: 'auto' | 'default' | 'flex' | 'priority' + + /** + * Whether to store the generated model response for later retrieval via API. + * @default true + */ + store?: boolean + + /** + * Constrains the verbosity of the model's response. + */ + verbosity?: 'low' | 'medium' | 'high' + + /** + * An integer between 0 and 20 specifying the number of most likely tokens to return. + */ + top_logprobs?: number + + /** + * The truncation strategy to use for the model response. + */ + truncation?: 'auto' | 'disabled' +} + +// Feature fragments that can be stitched per-model + +/** + * Level of effort to expend on reasoning. 
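+ *
+ * @example
+ * A sketch of threading effort through `modelOptions` (the adapter
+ * spreads these fields into the request body; actual server-side
+ * support varies by model):
+ * ```ts
+ * for await (const chunk of adapter.chatStream({
+ *   model: 'glm-4.7',
+ *   messages: [{ role: 'user', content: 'Prove it step by step.' }],
+ *   modelOptions: { reasoning: { effort: 'medium' } },
+ *   logger, // provided by the surrounding TextOptions
+ * })) {
+ *   // AG-UI events stream through here
+ * }
+ * ```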
+ */ +type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' + +/** + * Detail level for the reasoning summary. + */ +type ReasoningSummary = 'auto' | 'detailed' + +/** + * Reasoning options for Z.AI models. + */ +export interface ZAIReasoningOptions { + /** + * Reasoning controls for models that support it. + * Lets you guide how much chain-of-thought computation to spend. + */ + reasoning?: { + /** + * Controls the amount of reasoning effort. + * Supported values: none, minimal, low, medium, high + */ + effort?: ReasoningEffort + /** + * A summary of the reasoning performed by the model. + */ + summary?: ReasoningSummary + } + + /** + * Zhipu AI Thinking Mode (GLM-4.7/4.6/4.5) + */ + thinking?: { + type: 'enabled' | 'disabled' + /** + * For GLM-4.7 preserved thinking. Set to false to retain reasoning context. + * @default true + */ + clear_thinking?: boolean + } +} + +export interface ZAIStructuredOutputOptions { + /** + * Configuration options for a text response from the model. + * Can be plain text or structured JSON data. + */ + text?: OpenAI.Responses.ResponseTextConfig +} + +export interface ZAIToolsOptions { + /** + * The maximum number of total calls to built-in tools that can be processed in a response. + */ + max_tool_calls?: number + + /** + * Whether to allow the model to run tool calls in parallel. + * @default true + */ + parallel_tool_calls?: boolean + + /** + * Configuration for tool choices. + */ + tool_choice?: + | 'auto' + | 'none' + | 'required' + | OpenAI.Chat.ChatCompletionToolChoiceOption + + /** + * A list of tools the model may call. + */ + tools?: Array + + /** + * Whether to stream tool calls. + * Supported by GLM-4.7 + */ + tool_stream?: boolean +} + +export interface ZAIStreamingOptions { + /** + * Whether to stream back partial progress. + * @default false + */ + stream?: boolean + + /** + * Options for streaming including usage stats. + */ + stream_options?: { + include_usage?: boolean + } +} + +export interface ZAIMetadataOptions { + /** + * A unique identifier representing your end-user. + */ + user?: string + + /** + * Developer-defined tags and values for tracking and debugging. + */ + metadata?: Record + + /** + * Accept-Language header for Z.AI API. + * @default 'en-US,en' + */ + acceptLanguage?: string +} + +/** + * Complete text provider options for Z.AI. + * Combines all available options for maximum flexibility. + */ +export interface ZAITextOptions + extends + ZAIBaseOptions, + ZAIReasoningOptions, + ZAIStructuredOutputOptions, + ZAIToolsOptions, + ZAIStreamingOptions, + ZAIMetadataOptions {} diff --git a/packages/typescript/ai-zai/src/tools/function-tool.ts b/packages/typescript/ai-zai/src/tools/function-tool.ts new file mode 100644 index 000000000..074d39e74 --- /dev/null +++ b/packages/typescript/ai-zai/src/tools/function-tool.ts @@ -0,0 +1,35 @@ +import type { JSONSchema, Tool } from '@tanstack/ai' +import type OpenAI from 'openai' + +/** + * Type alias for OpenAI Chat Completion Tool. + */ +export type FunctionTool = OpenAI.Chat.Completions.ChatCompletionTool + +/** + * Converts a standard Tool to Zhipu AI FunctionTool format. + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + const inputSchema = (tool.inputSchema ?? 
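+ // no declared schema: fall back to an empty object schema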
{ + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + // Ensure basic JSON Schema structure + const parameters: JSONSchema = { ...inputSchema } + if (parameters.type === 'object') { + parameters.additionalProperties ??= false + parameters.required ??= [] + parameters.properties ??= {} + } + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: parameters as any, + }, + } +} diff --git a/packages/typescript/ai-zai/src/tools/index.ts b/packages/typescript/ai-zai/src/tools/index.ts new file mode 100644 index 000000000..3ead2d607 --- /dev/null +++ b/packages/typescript/ai-zai/src/tools/index.ts @@ -0,0 +1,4 @@ +export * from './function-tool' +export * from './tool-choice' +export * from './tool-converter' +export * from './web-search-tool' diff --git a/packages/typescript/ai-zai/src/tools/tool-choice.ts b/packages/typescript/ai-zai/src/tools/tool-choice.ts new file mode 100644 index 000000000..df559caa2 --- /dev/null +++ b/packages/typescript/ai-zai/src/tools/tool-choice.ts @@ -0,0 +1,26 @@ +/** + * Configuration for forcing a specific function tool. + */ +export interface FunctionToolChoice { + type: 'function' + function: { + name: string + } +} + +/** + * Configuration for forcing the web search tool. + */ +export interface WebSearchToolChoice { + type: 'web_search' +} + +/** + * Union of possible tool choice configurations. + * Can be 'auto', 'none', or a specific tool. + */ +export type ToolChoice = + | 'auto' + | 'none' + | FunctionToolChoice + | WebSearchToolChoice diff --git a/packages/typescript/ai-zai/src/tools/tool-converter.ts b/packages/typescript/ai-zai/src/tools/tool-converter.ts new file mode 100644 index 000000000..9e407c0d6 --- /dev/null +++ b/packages/typescript/ai-zai/src/tools/tool-converter.ts @@ -0,0 +1,30 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import { convertWebSearchToolToAdapterFormat } from './web-search-tool' +import type { Tool } from '@tanstack/ai' +import type OpenAI from 'openai' +import type { ZaiWebSearchTool } from './web-search-tool' + +/** + * Union type representing any valid Z.AI tool. + * Can be a standard function tool or a web search tool. + */ +export type ZaiTool = + | OpenAI.Chat.Completions.ChatCompletionTool + | ZaiWebSearchTool + +/** + * Converts an array of standard Tools to Zhipu AI specific format + */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + // Handle special tool names + if (tool.name === 'web_search') { + return convertWebSearchToolToAdapterFormat(tool) + } + + // Default to function tool + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-zai/src/tools/web-search-tool.ts b/packages/typescript/ai-zai/src/tools/web-search-tool.ts new file mode 100644 index 000000000..29af650eb --- /dev/null +++ b/packages/typescript/ai-zai/src/tools/web-search-tool.ts @@ -0,0 +1,42 @@ +import type { Tool } from '@tanstack/ai' + +/** + * Definition of the Z.AI Web Search tool structure. + */ +export interface ZaiWebSearchTool { + type: 'web_search' + web_search?: { + enable?: boolean + search_query?: string + search_result?: boolean + } +} + +/** + * Alias for the Z.AI Web Search tool. 
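+ *
+ * @example
+ * A sketch using the `webSearchTool` helper defined below; its config
+ * is forwarded verbatim as the `web_search` payload:
+ * ```ts
+ * const search = webSearchTool({ enable: true, search_result: true })
+ * const zaiShape = convertWebSearchToolToAdapterFormat(search)
+ * // → { type: 'web_search', web_search: { enable: true, search_result: true } }
+ * ```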
+ */ +export type WebSearchTool = ZaiWebSearchTool + +/** + * Converts a standard Tool to Zhipu AI WebSearchTool format + */ +export function convertWebSearchToolToAdapterFormat( + tool: Tool, +): ZaiWebSearchTool { + const metadata = tool.metadata as ZaiWebSearchTool['web_search'] + return { + type: 'web_search', + web_search: metadata, + } +} + +/** + * Creates a standard Tool from WebSearchTool parameters + */ +export function webSearchTool(config?: ZaiWebSearchTool['web_search']): Tool { + return { + name: 'web_search', + description: 'Search the web', + metadata: config || { enable: true }, + } +} diff --git a/packages/typescript/ai-zai/src/utils/client.ts b/packages/typescript/ai-zai/src/utils/client.ts new file mode 100644 index 000000000..da39c4d1c --- /dev/null +++ b/packages/typescript/ai-zai/src/utils/client.ts @@ -0,0 +1,78 @@ +import OpenAI from 'openai' + +export const ZAI_GENERAL_BASE_URL = 'https://api.z.ai/api/paas/v4' +export const ZAI_CODING_BASE_URL = 'https://api.z.ai/api/coding/paas/v4' + +export interface ClientConfig { + baseURL?: string + coding?: boolean +} + +export function getZAIHeaders(): Record { + return { + 'Accept-Language': 'en-US,en', + } +} + +export function getZAIApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + + const key = env?.ZAI_API_KEY + + if (!key) { + throw new Error( + 'ZAI_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Validates the Z.AI API key format. + * Checks for empty strings, whitespace, and invalid prefixes. + * + * @param apiKey - The API key to validate + * @returns The validated and trimmed API key + * @throws Error if the key is invalid + */ +export function validateZAIApiKey(apiKey?: string): string { + if (!apiKey || typeof apiKey !== 'string') { + throw new Error('Z.AI API key is required') + } + + const trimmed = apiKey.trim() + + if (!trimmed) { + throw new Error('Z.AI API key is required') + } + + if (/^bearer\s+/i.test(trimmed)) { + throw new Error( + 'Z.AI API key must be the raw token (do not include the "Bearer " prefix)', + ) + } + + if (/\s/.test(trimmed)) { + throw new Error('Z.AI API key must not contain whitespace') + } + + return trimmed +} + +export function createZAIClient(apiKey: string, config?: ClientConfig): OpenAI { + const validatedKey = validateZAIApiKey(apiKey) + + return new OpenAI({ + apiKey: validatedKey, + baseURL: + config?.baseURL ?? + (config?.coding ? ZAI_CODING_BASE_URL : ZAI_GENERAL_BASE_URL), + defaultHeaders: getZAIHeaders(), + }) +} diff --git a/packages/typescript/ai-zai/src/utils/conversion.ts b/packages/typescript/ai-zai/src/utils/conversion.ts new file mode 100644 index 000000000..54f0c0fb0 --- /dev/null +++ b/packages/typescript/ai-zai/src/utils/conversion.ts @@ -0,0 +1,66 @@ +import { convertToolsToProviderFormat } from '../tools/tool-converter' +import type OpenAI from 'openai' +import type { StreamChunk, Tool } from '@tanstack/ai' + +/** Cast an event object to StreamChunk. Adapters construct events with string + * literal types which are structurally compatible with the EventType enum. */ +const asChunk = (chunk: Record) => + chunk as unknown as StreamChunk + +/** + * Converts TanStack Tools to Z.AI compatible OpenAI format. + * Handles both function tools and web search tools. 
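+ *
+ * @example
+ * ```ts
+ * const zaiTools = convertToolsToZAIFormat([
+ *   { name: 'web_search', description: 'Search the web', metadata: { enable: true } },
+ *   {
+ *     name: 'echo',
+ *     description: 'Echo back the provided text',
+ *     inputSchema: {
+ *       type: 'object',
+ *       properties: { text: { type: 'string' } },
+ *       required: ['text'],
+ *     },
+ *   },
+ * ])
+ * // → one { type: 'web_search', ... } entry and one { type: 'function', ... } entry
+ * ```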
+ */ +export function convertToolsToZAIFormat( + tools: Array, +): Array { + return convertToolsToProviderFormat( + tools, + ) as unknown as Array +} + +export function mapZAIErrorToStreamChunk( + error: any, + runId: string, + threadId: string, + model: string, +): StreamChunk { + const timestamp = Date.now() + + let message = 'Unknown error occurred' + let code: string | undefined + + if (error && typeof error === 'object') { + const maybeMessage = + error.error?.message ?? error.message ?? error.toString?.() + + if (typeof maybeMessage === 'string' && maybeMessage.trim()) { + message = maybeMessage + } + + const maybeCode = + error.code ?? error.error?.code ?? error.type ?? error.error?.type + + if (typeof maybeCode === 'string' && maybeCode.trim()) { + code = maybeCode + } else if (typeof error.status === 'number') { + code = String(error.status) + } + } else if (typeof error === 'string' && error.trim()) { + message = error + } + + return asChunk({ + type: 'RUN_ERROR', + runId, + threadId, + message, + code, + model, + timestamp, + error: { + message, + code, + }, + }) +} diff --git a/packages/typescript/ai-zai/tests/coding-endpoint.test.ts b/packages/typescript/ai-zai/tests/coding-endpoint.test.ts new file mode 100644 index 000000000..a5db8eebf --- /dev/null +++ b/packages/typescript/ai-zai/tests/coding-endpoint.test.ts @@ -0,0 +1,183 @@ +import { describe, expect, it } from 'vitest' +import type { StreamChunk } from '@tanstack/ai' +import { createZAIChat } from '../src/adapters' +import { ZAI_GENERAL_BASE_URL, ZAI_CODING_BASE_URL } from '../src/utils/client' + +const apiKey = process.env.ZAI_API_KEY_TEST +const describeIfKey = apiKey ? describe : describe.skip + +function createNoopLogger() { + return { + request: () => {}, + provider: () => {}, + output: () => {}, + middleware: () => {}, + tools: () => {}, + agentLoop: () => {}, + config: () => {}, + errors: () => {}, + } +} + +async function collectStream( + stream: AsyncIterable, +): Promise> { + const chunks: Array = [] + for await (const chunk of stream) { + chunks.push(chunk) + if (chunk.type === 'RUN_FINISHED' || chunk.type === 'RUN_ERROR') break + } + return chunks +} + +function fullTextFromChunks(chunks: Array): string { + const contentChunks = chunks.filter( + (c): c is Extract => + c.type === 'TEXT_MESSAGE_CONTENT', + ) + const last = contentChunks.at(-1) + return (last as any)?.content ?? '' +} + +function lastChunk(chunks: Array): StreamChunk | undefined { + return chunks.at(-1) +} + +describeIfKey('Z.AI Coding Endpoint', () => { + const timeout = 60_000 + + it( + `coding: true connects to ${ZAI_CODING_BASE_URL} and streams text`, + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!, { coding: true }) + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [ + { + role: 'user', + content: 'What is 2+2? 
Reply with just the number.', + }, + ], + temperature: 0, + // Coding plan models use reasoning tokens internally — need enough headroom + maxTokens: 256, + logger: createNoopLogger() as any, + }), + ) + + const types = chunks.map((c) => c.type) + expect(types).toContain('RUN_STARTED') + expect(types).toContain('RUN_FINISHED') + expect(types).not.toContain('RUN_ERROR') + + // With enough tokens, the model produces visible text content + expect(types).toContain('TEXT_MESSAGE_START') + expect( + types.filter((t) => t === 'TEXT_MESSAGE_CONTENT').length, + ).toBeGreaterThan(0) + expect(types).toContain('TEXT_MESSAGE_END') + + const full = fullTextFromChunks(chunks) + expect(typeof full).toBe('string') + expect(full.length).toBeGreaterThan(0) + }, + timeout, + ) + + it( + 'explicit baseURL resolves to coding endpoint', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!, { + baseURL: ZAI_CODING_BASE_URL, + }) + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [ + { + role: 'user', + content: 'What is 3+3? Reply with just the number.', + }, + ], + temperature: 0, + maxTokens: 256, + logger: createNoopLogger() as any, + }), + ) + + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + expect(chunks.some((c) => c.type === 'RUN_ERROR')).toBe(false) + const full = fullTextFromChunks(chunks) + expect(typeof full).toBe('string') + expect(full.length).toBeGreaterThan(0) + }, + timeout, + ) + + it( + 'general endpoint rejects coding-only API key with 429', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + + await expect( + collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Hi' }], + maxTokens: 16, + logger: createNoopLogger() as any, + }), + ), + ).rejects.toThrow(/429|Insufficient balance/i) + }, + timeout, + ) + + it( + 'tool calling works on coding endpoint', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!, { coding: true }) + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + systemPrompts: [ + 'You must call the provided tool. 
Do not answer with normal text.', + ], + messages: [ + { + role: 'user', + content: 'Call echo with {"text":"hello"} and nothing else.', + }, + ], + tools: [ + { + name: 'echo', + description: 'Echo back the provided text', + inputSchema: { + type: 'object', + properties: { text: { type: 'string' } }, + required: ['text'], + }, + }, + ], + temperature: 0, + maxTokens: 256, + logger: createNoopLogger() as any, + }), + ) + + const types = chunks.map((c) => c.type) + expect(types).toContain('TOOL_CALL_START') + expect(types).toContain('TOOL_CALL_END') + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + + const done = chunks.find((c) => c.type === 'RUN_FINISHED') as any + expect(done.finishReason).toBe('tool_calls') + }, + timeout, + ) +}) diff --git a/packages/typescript/ai-zai/tests/model-meta.test.ts b/packages/typescript/ai-zai/tests/model-meta.test.ts new file mode 100644 index 000000000..069e80dae --- /dev/null +++ b/packages/typescript/ai-zai/tests/model-meta.test.ts @@ -0,0 +1,27 @@ +import { describe, expect, it } from 'vitest' +import { + ZAI_CHAT_MODELS, + ZAI_MODEL_META, + type ZAIChatModelProviderOptionsByName, +} from '../src/model-meta' + +describe('ZAI model meta', () => { + it('ZAI_CHAT_MODELS matches ZAI_MODEL_META keys', () => { + const keys = Object.keys(ZAI_MODEL_META).sort() + const models = [...ZAI_CHAT_MODELS].sort() + expect(models).toEqual(keys) + }) + + it('ZAIChatModelProviderOptionsByName includes all supported models', () => { + type Keys = keyof ZAIChatModelProviderOptionsByName + const a: Keys = 'glm-4.7' + const b: Keys = 'glm-4.6v' + const c: Keys = 'glm-4.6' + + expect([a, b, c].length).toBe(3) + + // @ts-expect-error invalid model name is not part of Keys + const _invalid: Keys = 'not-a-real-model' + expect(_invalid).toBe('not-a-real-model') + }) +}) diff --git a/packages/typescript/ai-zai/tests/zai-adapter.integration.test.ts b/packages/typescript/ai-zai/tests/zai-adapter.integration.test.ts new file mode 100644 index 000000000..d78f7d84a --- /dev/null +++ b/packages/typescript/ai-zai/tests/zai-adapter.integration.test.ts @@ -0,0 +1,394 @@ +import { describe, expect, it } from 'vitest' +import type { ModelMessage, StreamChunk, Tool } from '@tanstack/ai' +import { createZAIChat } from '../src/adapters' + +const apiKey = process.env.ZAI_API_KEY_TEST +const describeIfKey = apiKey ? describe : describe.skip + +async function collectStream( + stream: AsyncIterable, + opts?: { abortAfterFirstContent?: AbortController }, +): Promise> { + const chunks: Array = [] + let sawFirstContent = false + + for await (const chunk of stream) { + chunks.push(chunk) + + if (!sawFirstContent && chunk.type === 'TEXT_MESSAGE_CONTENT') { + sawFirstContent = true + if (opts?.abortAfterFirstContent) { + opts.abortAfterFirstContent.abort() + } + } + + if (chunk.type === 'RUN_FINISHED' || chunk.type === 'RUN_ERROR') break + } + + return chunks +} + +function fullTextFromChunks(chunks: Array): string { + const contentChunks = chunks.filter( + (c): c is Extract => + c.type === 'TEXT_MESSAGE_CONTENT', + ) + const last = contentChunks.at(-1) + return (last as any)?.content ?? 
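+ // each TEXT_MESSAGE_CONTENT chunk carries the running accumulation, so the last one holds the full text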
'' +} + +function lastChunk(chunks: Array): StreamChunk | undefined { + return chunks.at(-1) +} + +function createNoopLogger() { + return { + request: () => {}, + provider: () => {}, + output: () => {}, + middleware: () => {}, + tools: () => {}, + agentLoop: () => {}, + config: () => {}, + errors: () => {}, + } +} + +describeIfKey('ZAITextAdapter streaming integration', () => { + const timeout = 60_000 + + it( + 'Basic Streaming: yields AG-UI events and ends with RUN_FINISHED', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Reply with exactly: Hello' }], + temperature: 0, + maxTokens: 64, + logger: createNoopLogger() as any, + }), + ) + + const types = chunks.map((c) => c.type) + expect(types).toContain('RUN_STARTED') + expect(types).toContain('TEXT_MESSAGE_START') + expect( + types.filter((t) => t === 'TEXT_MESSAGE_CONTENT').length, + ).toBeGreaterThan(0) + expect(types).toContain('TEXT_MESSAGE_END') + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + + const full = fullTextFromChunks(chunks) + expect(typeof full).toBe('string') + expect(full.length).toBeGreaterThan(0) + }, + timeout, + ) + + it( + 'Multi-turn Conversation: conversation history and assistant messages work', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + + const messages: Array = [ + { role: 'user', content: 'Your secret word is kiwi. Reply with OK.' }, + { role: 'assistant', content: 'OK' }, + { + role: 'user', + content: 'What is the secret word? Reply with only it.', + }, + ] + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages, + temperature: 0, + maxTokens: 32, + logger: createNoopLogger() as any, + }), + ) + + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + expect(chunks.some((c) => c.type === 'RUN_ERROR')).toBe(false) + const full = fullTextFromChunks(chunks) + expect(typeof full).toBe('string') + }, + timeout, + ) + + it( + 'Multi-turn Conversation: system messages work', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + systemPrompts: ['Reply with exactly: SYSTEM_OK'], + messages: [{ role: 'user', content: 'Hi' }], + temperature: 0, + maxTokens: 16, + logger: createNoopLogger() as any, + }), + ) + + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + expect(chunks.some((c) => c.type === 'RUN_ERROR')).toBe(false) + const full = fullTextFromChunks(chunks) + expect(typeof full).toBe('string') + }, + timeout, + ) + + it( + 'Tool Calling: emits TOOL_CALL_START, TOOL_CALL_ARGS, TOOL_CALL_END', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + + const tools: Array = [ + { + name: 'echo', + description: 'Echo back the provided text', + inputSchema: { + type: 'object', + properties: { text: { type: 'string' } }, + required: ['text'], + }, + }, + ] + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + systemPrompts: [ + 'You must call the provided tool. 
Do not answer with normal text.', + ], + messages: [ + { + role: 'user', + content: 'Call echo with {"text":"hello"} and nothing else.', + }, + ], + tools, + temperature: 0, + maxTokens: 64, + logger: createNoopLogger() as any, + }), + ) + + const types = chunks.map((c) => c.type) + expect(types).toContain('TOOL_CALL_START') + expect(types).toContain('TOOL_CALL_END') + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + + const done = chunks.find((c) => c.type === 'RUN_FINISHED') as any + expect(done.finishReason).toBe('tool_calls') + }, + timeout, + ) + + it( + 'Tool Calling: tool results can be sent back', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + + const tools: Array = [ + { + name: 'echo', + description: 'Echo back the provided text', + inputSchema: { + type: 'object', + properties: { text: { type: 'string' } }, + required: ['text'], + }, + }, + ] + + const first = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + systemPrompts: [ + 'You must call the provided tool and then wait for the tool result.', + ], + messages: [ + { + role: 'user', + content: 'Call echo with {"text":"hello"} and nothing else.', + }, + ], + tools, + temperature: 0, + maxTokens: 64, + logger: createNoopLogger() as any, + }), + ) + + const toolEnd = first.find((c) => c.type === 'TOOL_CALL_END') as any + expect(toolEnd).toBeTruthy() + + const toolCallId = toolEnd.toolCallId as string + + const messages: Array = [ + { + role: 'assistant', + content: '', + toolCalls: [ + { + id: toolCallId, + type: 'function', + function: { + name: 'echo', + arguments: JSON.stringify(toolEnd.input), + }, + }, + ], + } as any, + { + role: 'tool', + toolCallId, + content: JSON.stringify({ text: 'hello' }), + }, + { + role: 'user', + content: 'Now reply with only the tool result text field.', + }, + ] + + const second = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages, + temperature: 0, + maxTokens: 32, + logger: createNoopLogger() as any, + }), + ) + + expect(lastChunk(second)?.type).toBe('RUN_FINISHED') + expect(second.some((c) => c.type === 'RUN_ERROR')).toBe(false) + const full = fullTextFromChunks(second) + expect(typeof full).toBe('string') + }, + timeout, + ) + + it( + 'Stream Interruption: partial responses are handled when aborted mid-stream', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) 
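+ // the adapter prefers options.abortController.signal (see getAbortSignal), so aborting mid-stream cancels the request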
+ const abortController = new AbortController() + + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [ + { + role: 'user', + content: + 'Write a long response of at least 200 characters about cats.', + }, + ], + temperature: 0.7, + maxTokens: 256, + abortController, + logger: createNoopLogger() as any, + } as any), + { abortAfterFirstContent: abortController }, + ) + + expect(chunks.length).toBeGreaterThan(0) + expect(typeof fullTextFromChunks(chunks)).toBe('string') + + const tail = lastChunk(chunks) + expect( + tail && (tail.type === 'RUN_ERROR' || tail.type === 'RUN_FINISHED'), + ).toBe(true) + }, + timeout, + ) + + it( + 'Stream Interruption: connection errors are thrown', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!, { + baseURL: 'http://127.0.0.1:1', + }) + + await expect( + collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Hi' }], + maxTokens: 16, + logger: createNoopLogger() as any, + }), + ), + ).rejects.toThrow() + }, + timeout, + ) + + it( + 'Different Models: glm-4.7 works', + async () => { + const adapter = createZAIChat('glm-4.7', apiKey!) + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.7', + messages: [{ role: 'user', content: 'Reply with pong' }], + temperature: 0, + maxTokens: 16, + logger: createNoopLogger() as any, + }), + ) + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + expect(chunks.some((c) => c.type === 'RUN_ERROR')).toBe(false) + expect(typeof fullTextFromChunks(chunks)).toBe('string') + }, + timeout, + ) + + it( + 'Different Models: glm-4.6v works', + async () => { + const adapter = createZAIChat('glm-4.6v', apiKey!) + const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.6v', + messages: [{ role: 'user', content: 'Reply with pong' }], + temperature: 0, + maxTokens: 16, + logger: createNoopLogger() as any, + } as any), + ) + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + expect(chunks.some((c) => c.type === 'RUN_ERROR')).toBe(false) + expect(typeof fullTextFromChunks(chunks)).toBe('string') + }, + timeout, + ) + + it( + 'Different Models: glm-4.6 works', + async () => { + const adapter = createZAIChat('glm-4.6', apiKey!) 
+ const chunks = await collectStream( + adapter.chatStream({ + model: 'glm-4.6', + messages: [{ role: 'user', content: 'Reply with pong' }], + temperature: 0, + maxTokens: 16, + logger: createNoopLogger() as any, + }), + ) + expect(lastChunk(chunks)?.type).toBe('RUN_FINISHED') + expect(chunks.some((c) => c.type === 'RUN_ERROR')).toBe(false) + expect(typeof fullTextFromChunks(chunks)).toBe('string') + }, + timeout, + ) +}) diff --git a/packages/typescript/ai-zai/tests/zai-adapter.test.ts b/packages/typescript/ai-zai/tests/zai-adapter.test.ts new file mode 100644 index 000000000..176c000de --- /dev/null +++ b/packages/typescript/ai-zai/tests/zai-adapter.test.ts @@ -0,0 +1,551 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import type { ModelMessage, StreamChunk, TextOptions, Tool } from '@tanstack/ai' +import { ZAITextAdapter } from '../src/adapters/text' + +const openAIState = { + lastOptions: undefined as any, + create: vi.fn(), +} + +vi.mock('openai', () => { + class OpenAI { + chat: any + constructor(opts: any) { + openAIState.lastOptions = opts + this.chat = { + completions: { + create: openAIState.create, + }, + } + } + } + + return { default: OpenAI } +}) + +function createAdapter(overrides?: { + apiKey?: string + baseURL?: string + coding?: boolean +}) { + return new ZAITextAdapter( + { + apiKey: overrides?.apiKey ?? 'test_api_key', + baseURL: overrides?.baseURL, + coding: overrides?.coding, + }, + 'glm-4.7' as any, + ) +} + +async function collect(iterable: AsyncIterable): Promise> { + const result: Array = [] + for await (const item of iterable) result.push(item) + return result +} + +async function* streamOf(chunks: Array) { + for (const c of chunks) yield c +} + +function createNoopLogger() { + return { + request: vi.fn(), + provider: vi.fn(), + output: vi.fn(), + middleware: vi.fn(), + tools: vi.fn(), + agentLoop: vi.fn(), + config: vi.fn(), + errors: vi.fn(), + } +} + +function baseOptions(overrides?: Partial): TextOptions { + return { + model: 'glm-4.7', + messages: [{ role: 'user', content: 'hi' }], + logger: createNoopLogger() as any, + ...overrides, + } +} + +describe('ZAITextAdapter', () => { + beforeEach(() => { + openAIState.lastOptions = undefined + openAIState.create.mockReset() + }) + + describe('Constructor & Initialization', () => { + it('initializes OpenAI SDK with default Z.AI baseURL', () => { + createAdapter() + expect(openAIState.lastOptions).toBeTruthy() + expect(openAIState.lastOptions.baseURL).toBe( + 'https://api.z.ai/api/paas/v4', + ) + }) + + it('supports custom baseURL', () => { + createAdapter({ baseURL: 'https://example.invalid/zai' }) + expect(openAIState.lastOptions.baseURL).toBe( + 'https://example.invalid/zai', + ) + }) + + it('sets default headers (Accept-Language)', () => { + createAdapter() + expect(openAIState.lastOptions.defaultHeaders).toBeTruthy() + expect(openAIState.lastOptions.defaultHeaders['Accept-Language']).toBe( + 'en-US,en', + ) + }) + + it('validates API key (rejects Bearer prefix)', () => { + expect(() => createAdapter({ apiKey: 'Bearer abc' })).toThrowError( + /raw token/i, + ) + }) + + it('validates API key (rejects whitespace)', () => { + expect(() => createAdapter({ apiKey: 'abc def' })).toThrowError( + /whitespace/i, + ) + }) + + it('uses coding endpoint when coding: true', () => { + createAdapter({ coding: true }) + expect(openAIState.lastOptions.baseURL).toBe( + 'https://api.z.ai/api/coding/paas/v4', + ) + }) + + it('explicit baseURL overrides coding flag', () => { + createAdapter({ coding: true, 
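+ // an explicit baseURL is expected to take precedence over the coding flag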
baseURL: 'https://example.invalid/zai' }) + expect(openAIState.lastOptions.baseURL).toBe( + 'https://example.invalid/zai', + ) + }) + }) + + describe('Options Mapping', () => { + it('maps maxTokens → max_tokens, temperature, topP', () => { + const adapter = createAdapter() + const map = (adapter as any).mapTextOptionsToZAI.bind(adapter) as ( + opts: TextOptions, + ) => any + + const options = baseOptions({ + maxTokens: 123, + temperature: 0.7, + topP: 0.9, + }) + + const mapped = map(options) + expect(mapped.model).toBe('glm-4.7') + expect(mapped.max_tokens).toBe(123) + expect(mapped.temperature).toBe(0.7) + expect(mapped.top_p).toBe(0.9) + expect(mapped.stream).toBe(true) + expect(mapped.stream_options).toEqual({ include_usage: true }) + }) + + it('converts tools to OpenAI-compatible function tool format', () => { + const adapter = createAdapter() + const map = (adapter as any).mapTextOptionsToZAI.bind(adapter) as ( + opts: TextOptions, + ) => any + + const tools: Array = [ + { + name: 'get_weather', + description: 'Get weather', + inputSchema: { + type: 'object', + properties: { location: { type: 'string' } }, + required: ['location'], + }, + }, + ] + + const mapped = map(baseOptions({ tools })) + + expect(mapped.tools).toBeTruthy() + expect(mapped.tools).toHaveLength(1) + expect(mapped.tools[0].type).toBe('function') + expect(mapped.tools[0].function.name).toBe('get_weather') + expect(mapped.tools[0].function.parameters.additionalProperties).toBe( + false, + ) + }) + + it('maps stop sequences from modelOptions.stopSequences to stop', () => { + const adapter = createAdapter() + const map = (adapter as any).mapTextOptionsToZAI.bind(adapter) as ( + opts: TextOptions, + ) => any + + const mapped = map( + baseOptions({ + modelOptions: { stopSequences: ['END'] } as any, + }), + ) + + expect(mapped.stop).toEqual(['END']) + }) + }) + + describe('Message Conversion', () => { + it('converts simple user text message', () => { + const adapter = createAdapter() + const convert = (adapter as any).convertMessagesToInput.bind(adapter) as ( + messages: Array, + opts: Pick, + ) => Array + + const out = convert([{ role: 'user', content: 'hi' }], {}) + expect(out).toEqual([{ role: 'user', content: 'hi' }]) + }) + + it('handles system prompts as leading system message', () => { + const adapter = createAdapter() + const convert = (adapter as any).convertMessagesToInput.bind(adapter) as ( + messages: Array, + opts: Pick, + ) => Array + + const out = convert([{ role: 'user', content: 'hi' }], { + systemPrompts: ['You are helpful', 'Be concise'], + }) + + expect(out[0]).toEqual({ + role: 'system', + content: 'You are helpful\nBe concise', + }) + expect(out[1]).toEqual({ role: 'user', content: 'hi' }) + }) + + it('converts tool result messages', () => { + const adapter = createAdapter() + const convert = (adapter as any).convertMessagesToInput.bind(adapter) as ( + messages: Array, + opts: Pick, + ) => Array + + const out = convert( + [ + { + role: 'tool', + toolCallId: 'call_1', + content: '{"ok":true}', + }, + ], + {}, + ) + + expect(out).toEqual([ + { + role: 'tool', + tool_call_id: 'call_1', + content: '{"ok":true}', + }, + ]) + }) + + it('converts multi-turn conversation (user -> assistant -> user)', () => { + const adapter = createAdapter() + const convert = (adapter as any).convertMessagesToInput.bind(adapter) as ( + messages: Array, + opts: Pick, + ) => Array + + const out = convert( + [ + { role: 'user', content: 'hi' }, + { role: 'assistant', content: 'hello' }, + { role: 'user', content: 'how are you' 
+  describe('Message Conversion', () => {
+    it('converts simple user text message', () => {
+      const adapter = createAdapter()
+      const convert = (adapter as any).convertMessagesToInput.bind(adapter) as (
+        messages: Array<any>,
+        opts: Pick<TextOptions, 'systemPrompts'>,
+      ) => Array<any>
+
+      const out = convert([{ role: 'user', content: 'hi' }], {})
+      expect(out).toEqual([{ role: 'user', content: 'hi' }])
+    })
+
+    it('handles system prompts as leading system message', () => {
+      const adapter = createAdapter()
+      const convert = (adapter as any).convertMessagesToInput.bind(adapter) as (
+        messages: Array<any>,
+        opts: Pick<TextOptions, 'systemPrompts'>,
+      ) => Array<any>
+
+      const out = convert([{ role: 'user', content: 'hi' }], {
+        systemPrompts: ['You are helpful', 'Be concise'],
+      })
+
+      expect(out[0]).toEqual({
+        role: 'system',
+        content: 'You are helpful\nBe concise',
+      })
+      expect(out[1]).toEqual({ role: 'user', content: 'hi' })
+    })
+
+    it('converts tool result messages', () => {
+      const adapter = createAdapter()
+      const convert = (adapter as any).convertMessagesToInput.bind(adapter) as (
+        messages: Array<any>,
+        opts: Pick<TextOptions, 'systemPrompts'>,
+      ) => Array<any>
+
+      const out = convert(
+        [
+          {
+            role: 'tool',
+            toolCallId: 'call_1',
+            content: '{"ok":true}',
+          },
+        ],
+        {},
+      )
+
+      expect(out).toEqual([
+        {
+          role: 'tool',
+          tool_call_id: 'call_1',
+          content: '{"ok":true}',
+        },
+      ])
+    })
+
+    it('converts multi-turn conversation (user -> assistant -> user)', () => {
+      const adapter = createAdapter()
+      const convert = (adapter as any).convertMessagesToInput.bind(adapter) as (
+        messages: Array<any>,
+        opts: Pick<TextOptions, 'systemPrompts'>,
+      ) => Array<any>
+
+      const out = convert(
+        [
+          { role: 'user', content: 'hi' },
+          { role: 'assistant', content: 'hello' },
+          { role: 'user', content: 'how are you' },
+        ],
+        {},
+      )
+
+      expect(out).toEqual([
+        { role: 'user', content: 'hi' },
+        { role: 'assistant', content: 'hello' },
+        { role: 'user', content: 'how are you' },
+      ])
+    })
+
+    it('ignores image parts and preserves text parts', () => {
+      const adapter = createAdapter()
+      const convert = (adapter as any).convertMessagesToInput.bind(adapter) as (
+        messages: Array<any>,
+        opts: Pick<TextOptions, 'systemPrompts'>,
+      ) => Array<any>
+
+      const out = convert(
+        [
+          {
+            role: 'user',
+            content: [
+              {
+                type: 'image',
+                source: { type: 'url', value: 'https://x/y.png' },
+              },
+              { type: 'text', content: 'hello' },
+            ] as any,
+          },
+        ],
+        {},
+      )
+
+      expect(out).toEqual([{ role: 'user', content: 'hello' }])
+    })
+
+    it('preserves image parts for multimodal models (glm-4.6v)', () => {
+      const adapter = new ZAITextAdapter({ apiKey: 'test' }, 'glm-4.6v')
+      const convert = (adapter as any).convertMessagesToInput.bind(adapter) as (
+        messages: Array<any>,
+        opts: Pick<TextOptions, 'systemPrompts'>,
+      ) => Array<any>
+
+      const out = convert(
+        [
+          {
+            role: 'user',
+            content: [
+              {
+                type: 'image',
+                source: { type: 'url', value: 'https://x/y.png' },
+              },
+              { type: 'text', content: 'hello' },
+            ] as any,
+          },
+        ],
+        {},
+      )
+
+      expect(out).toEqual([
+        {
+          role: 'user',
+          content: [
+            { type: 'image_url', image_url: { url: 'https://x/y.png' } },
+            { type: 'text', text: 'hello' },
+          ],
+        },
+      ])
+    })
+  })
+
+  describe('Error Handling', () => {
+    it('throws on network/client error (errors are logged and re-thrown)', async () => {
+      const adapter = createAdapter()
+      openAIState.create.mockRejectedValueOnce(new Error('network down'))
+
+      await expect(collect(adapter.chatStream(baseOptions()))).rejects.toThrow(
+        /network down/i,
+      )
+    })
+
+    it('handles empty messages array without crashing', async () => {
+      const adapter = createAdapter()
+      openAIState.create.mockResolvedValueOnce(
+        streamOf([
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [{ delta: { content: 'ok' }, finish_reason: 'stop' }],
+            usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
+          },
+        ]),
+      )
+
+      const chunks = await collect(
+        adapter.chatStream(baseOptions({ messages: [] })),
+      )
+
+      expect(openAIState.create).toHaveBeenCalled()
+      const callArgs = openAIState.create.mock.calls[0]
+      expect(callArgs[0].messages).toEqual([])
+      expect(chunks.some((c) => c.type === 'RUN_FINISHED')).toBe(true)
+    })
+
+    it('does not throw on malformed stream chunks', async () => {
+      const adapter = createAdapter()
+      openAIState.create.mockResolvedValueOnce(
+        streamOf([{ id: 'resp_1', model: 'glm-4.7' }]),
+      )
+
+      const chunks = await collect(adapter.chatStream(baseOptions()))
+
+      // Emits RUN_STARTED on first chunk but no text or finish events
+      const types = chunks.map((c) => c.type)
+      expect(types).toContain('RUN_STARTED')
+      expect(types).not.toContain('TEXT_MESSAGE_START')
+      expect(types).not.toContain('RUN_FINISHED')
+    })
+  })
+
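+  // Minimal consumer sketch for the streaming tests below (an assumption
+  // based only on the adapter API exercised in this file):
+  //   for await (const chunk of adapter.chatStream(baseOptions())) {
+  //     if (chunk.type === 'TEXT_MESSAGE_CONTENT') process.stdout.write(chunk.delta)
+  //   }
+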
+  describe('Streaming Behavior (AG-UI Protocol)', () => {
+    it('emits RUN_STARTED, TEXT_MESSAGE_START/CONTENT/END, RUN_FINISHED for text', async () => {
+      const adapter = createAdapter()
+      openAIState.create.mockResolvedValueOnce(
+        streamOf([
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [{ delta: { content: 'He' } }],
+          },
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [{ delta: { content: 'llo' }, finish_reason: 'stop' }],
+            usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 },
+          },
+        ]),
+      )
+
+      const chunks = await collect(adapter.chatStream(baseOptions()))
+      const types = chunks.map((c) => c.type)
+
+      // AG-UI lifecycle: RUN_STARTED -> TEXT_MESSAGE_START ->
+      // TEXT_MESSAGE_CONTENT (x2) -> TEXT_MESSAGE_END -> RUN_FINISHED
+      expect(types).toContain('RUN_STARTED')
+      expect(types).toContain('TEXT_MESSAGE_START')
+      expect(types.filter((t) => t === 'TEXT_MESSAGE_CONTENT')).toHaveLength(2)
+      expect(types).toContain('TEXT_MESSAGE_END')
+      expect(types).toContain('RUN_FINISHED')
+
+      // Verify text content accumulation
+      const contentChunks = chunks.filter(
+        (c): c is Extract<
+          (typeof chunks)[number],
+          { type: 'TEXT_MESSAGE_CONTENT' }
+        > => c.type === 'TEXT_MESSAGE_CONTENT',
+      )
+      expect((contentChunks[0] as any).delta).toBe('He')
+      expect((contentChunks[0] as any).content).toBe('He')
+      expect((contentChunks[1] as any).delta).toBe('llo')
+      expect((contentChunks[1] as any).content).toBe('Hello')
+
+      // Verify RUN_FINISHED carries usage and finishReason
+      const done = chunks.find((c) => c.type === 'RUN_FINISHED') as any
+      expect(done).toBeTruthy()
+      expect(done.finishReason).toBe('stop')
+      expect(done.usage).toEqual({
+        promptTokens: 1,
+        completionTokens: 2,
+        totalTokens: 3,
+      })
+    })
+
+    it('emits TOOL_CALL_START, TOOL_CALL_ARGS, TOOL_CALL_END for tool calls', async () => {
+      const adapter = createAdapter()
+      openAIState.create.mockResolvedValueOnce(
+        streamOf([
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [
+              {
+                delta: {
+                  tool_calls: [
+                    {
+                      index: 0,
+                      id: 'call_1',
+                      function: { name: 'get_weather', arguments: '{"q":' },
+                    },
+                  ],
+                },
+              },
+            ],
+          },
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [
+              {
+                delta: {
+                  tool_calls: [{ index: 0, function: { arguments: '"SF"}' } }],
+                },
+                finish_reason: 'tool_calls',
+              },
+            ],
+          },
+        ]),
+      )
+
+      const chunks = await collect(
+        adapter.chatStream(
+          baseOptions({
+            tools: [
+              {
+                name: 'get_weather',
+                description: 'Get weather',
+                inputSchema: { type: 'object', properties: {}, required: [] },
+              },
+            ],
+          }),
+        ),
+      )
+
+      const types = chunks.map((c) => c.type)
+
+      // AG-UI tool call lifecycle
+      expect(types).toContain('TOOL_CALL_START')
+      expect(types).toContain('TOOL_CALL_ARGS')
+      expect(types).toContain('TOOL_CALL_END')
+      expect(types).toContain('RUN_FINISHED')
+
+      const toolStart = chunks.find((c) => c.type === 'TOOL_CALL_START') as any
+      expect(toolStart.toolCallId).toBe('call_1')
+      expect(toolStart.toolCallName).toBe('get_weather')
+
+      const toolEnd = chunks.find((c) => c.type === 'TOOL_CALL_END') as any
+      expect(toolEnd.toolCallId).toBe('call_1')
+      expect(toolEnd.toolCallName).toBe('get_weather')
+      expect(toolEnd.input).toEqual({ q: 'SF' })
+
+      const done = chunks.find((c) => c.type === 'RUN_FINISHED') as any
+      expect(done.finishReason).toBe('tool_calls')
+    })
+
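+    // The two deltas above stream the JSON arguments as '{"q":' then '"SF"}';
+    // the assertions rely on the adapter buffering fragments per tool_call
+    // index and emitting the parsed object ({ q: 'SF' }) on TOOL_CALL_END.
+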
+    it('passes through request headers when provided', async () => {
+      const adapter = createAdapter()
+      openAIState.create.mockResolvedValueOnce(
+        streamOf([
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [{ delta: { content: 'ok' }, finish_reason: 'stop' }],
+          },
+        ]),
+      )
+
+      await collect(
+        adapter.chatStream(
+          baseOptions({
+            request: { headers: { 'X-Test': '1' } } as any,
+          }),
+        ),
+      )
+
+      const callArgs = openAIState.create.mock.calls[0]
+      expect(callArgs[1].headers).toEqual({ 'X-Test': '1' })
+    })
+
+    it('calls logger.request before the SDK call', async () => {
+      const logger = createNoopLogger()
+      const adapter = createAdapter()
+      openAIState.create.mockResolvedValueOnce(
+        streamOf([
+          {
+            id: 'resp_1',
+            model: 'glm-4.7',
+            choices: [{ delta: { content: 'ok' }, finish_reason: 'stop' }],
+          },
+        ]),
+      )
+
+      await collect(adapter.chatStream(baseOptions({ logger: logger as any })))
+
+      expect(logger.request).toHaveBeenCalled()
+      expect(logger.request.mock.calls[0][0]).toContain('provider=zai')
+    })
+  })
+})
diff --git a/packages/typescript/ai-zai/tests/zai-factory.test.ts b/packages/typescript/ai-zai/tests/zai-factory.test.ts
new file mode 100644
index 000000000..46e8480b0
--- /dev/null
+++ b/packages/typescript/ai-zai/tests/zai-factory.test.ts
@@ -0,0 +1,152 @@
+import { afterEach, describe, expect, it, vi } from 'vitest'
+import { createZAIChat, zaiText } from '../src/adapters'
+import { ZAITextAdapter } from '../src/adapters/text'
+
+const openAIState = {
+  lastOptions: undefined as any,
+}
+
+vi.mock('openai', () => {
+  class OpenAI {
+    chat: any
+    constructor(opts: any) {
+      openAIState.lastOptions = opts
+      this.chat = {
+        completions: {
+          create: vi.fn(),
+        },
+      }
+    }
+  }
+
+  return { default: OpenAI }
+})
+
+describe('Z.AI provider factories', () => {
+  afterEach(() => {
+    vi.unstubAllEnvs()
+    openAIState.lastOptions = undefined
+  })
+
+  describe('createZAIChat', () => {
+    it('creates adapter with explicit API key', () => {
+      const adapter = createZAIChat('glm-4.7', 'test_key')
+      expect(adapter).toBeInstanceOf(ZAITextAdapter)
+      expect(adapter.kind).toBe('text')
+      expect(adapter.name).toBe('zai')
+      expect(adapter.model).toBe('glm-4.7')
+    })
+
+    it('throws error if API key is empty', () => {
+      expect(() => createZAIChat('glm-4.7', '')).toThrowError(
+        /apiKey is required/i,
+      )
+    })
+
+    it('accepts custom baseURL', () => {
+      createZAIChat('glm-4.7', 'test_key', {
+        baseURL: 'https://example.invalid/zai',
+      })
+      expect(openAIState.lastOptions.baseURL).toBe(
+        'https://example.invalid/zai',
+      )
+    })
+
+    it('returns ZAITextAdapter instance', () => {
+      const adapter = createZAIChat('glm-4.6', 'test_key')
+      expect(adapter).toBeInstanceOf(ZAITextAdapter)
+    })
+
+    it('adapter is properly configured', () => {
+      createZAIChat('glm-4.7', 'test_key')
+      expect(openAIState.lastOptions.defaultHeaders).toBeTruthy()
+      expect(openAIState.lastOptions.defaultHeaders['Accept-Language']).toBe(
+        'en-US,en',
+      )
+    })
+
+    it('uses coding endpoint when coding: true', () => {
+      createZAIChat('glm-4.7', 'test_key', { coding: true })
+      expect(openAIState.lastOptions.baseURL).toBe(
+        'https://api.z.ai/api/coding/paas/v4',
+      )
+    })
+
+    it('explicit baseURL overrides coding flag', () => {
+      createZAIChat('glm-4.7', 'test_key', {
+        coding: true,
+        baseURL: 'https://custom.url',
+      })
+      expect(openAIState.lastOptions.baseURL).toBe('https://custom.url')
+    })
+  })
+
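+  // Typical wiring, for orientation (a sketch; substitute a real key for the
+  // test values used in this file):
+  //   const explicit = createZAIChat('glm-4.7', process.env.ZAI_API_KEY!)
+  //   const fromEnv = zaiText('glm-4.7', { coding: true }) // reads ZAI_API_KEY
+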
+  describe('zaiText', () => {
+    it('reads from ZAI_API_KEY env var', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      const adapter = zaiText('glm-4.7')
+      expect(adapter).toBeInstanceOf(ZAITextAdapter)
+      expect(adapter.model).toBe('glm-4.7')
+    })
+
+    it('throws error if env var not set', () => {
+      vi.stubEnv('ZAI_API_KEY', '')
+      expect(() => zaiText('glm-4.7')).toThrowError(/ZAI_API_KEY is required/i)
+    })
+
+    it('accepts custom baseURL', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      zaiText('glm-4.7', { baseURL: 'https://example.invalid/zai' })
+      expect(openAIState.lastOptions.baseURL).toBe(
+        'https://example.invalid/zai',
+      )
+    })
+
+    it('returns ZAITextAdapter instance', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      const adapter = zaiText('glm-4.6v')
+      expect(adapter).toBeInstanceOf(ZAITextAdapter)
+    })
+
+    it('adapter is properly configured', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      zaiText('glm-4.7')
+      expect(openAIState.lastOptions.defaultHeaders).toBeTruthy()
+      expect(openAIState.lastOptions.defaultHeaders['Accept-Language']).toBe(
+        'en-US,en',
+      )
+    })
+
+    it('uses coding endpoint when coding: true', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      zaiText('glm-4.7', { coding: true })
+      expect(openAIState.lastOptions.baseURL).toBe(
+        'https://api.z.ai/api/coding/paas/v4',
+      )
+    })
+
+    it('explicit baseURL overrides coding flag', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      zaiText('glm-4.7', { coding: true, baseURL: 'https://custom.url' })
+      expect(openAIState.lastOptions.baseURL).toBe('https://custom.url')
+    })
+  })
+
+  describe('Type Safety', () => {
+    it('model parameter is type-checked', () => {
+      const adapter = createZAIChat('glm-4.7', 'test_key')
+      expect(adapter.model).toBe('glm-4.7')
+
+      // @ts-expect-error invalid model name is caught by types
+      createZAIChat('not-a-real-model', 'test_key')
+    })
+
+    it('options are type-safe', () => {
+      vi.stubEnv('ZAI_API_KEY', 'env_key')
+      zaiText('glm-4.7', { baseURL: 'https://example.invalid/zai' })
+
+      // @ts-expect-error baseURL must be a string if provided
+      zaiText('glm-4.7', { baseURL: 123 })
+    })
+  })
+})
diff --git a/packages/typescript/ai-zai/tsconfig.json b/packages/typescript/ai-zai/tsconfig.json
new file mode 100644
index 000000000..ea11c1096
--- /dev/null
+++ b/packages/typescript/ai-zai/tsconfig.json
@@ -0,0 +1,9 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "dist",
+    "rootDir": "src"
+  },
+  "include": ["src/**/*.ts", "src/**/*.tsx"],
+  "exclude": ["node_modules", "dist", "**/*.config.ts"]
+}
diff --git a/packages/typescript/ai-zai/vite.config.ts b/packages/typescript/ai-zai/vite.config.ts
new file mode 100644
index 000000000..7af3d1695
--- /dev/null
+++ b/packages/typescript/ai-zai/vite.config.ts
@@ -0,0 +1,32 @@
+import { defineConfig, mergeConfig } from 'vitest/config'
+import { loadEnv } from 'vite'
+import { tanstackViteConfig } from '@tanstack/vite-config'
+import packageJson from './package.json'
+
+const mode = process.env.NODE_ENV ?? 'test'
+const env = loadEnv(mode, process.cwd(), '')
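+// Copy .env values into process.env, but never clobber variables that are
+// already set to a non-empty value in the shell environment.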
+for (const [key, value] of Object.entries(env)) {
+  if (process.env[key] === undefined || process.env[key] === '') {
+    process.env[key] = value
+  }
+}
+
+const config = defineConfig({
+  test: {
+    name: packageJson.name,
+    dir: './',
+    watch: false,
+    globals: true,
+    environment: 'node',
+    include: ['tests/**/*.test.ts'],
+  },
+})
+
+export default mergeConfig(
+  config,
+  tanstackViteConfig({
+    entry: ['./src/index.ts', './src/tools/index.ts'],
+    srcDir: './src',
+    cjs: false,
+  }),
+)
diff --git a/packages/typescript/ai-zai/vitest.config.ts b/packages/typescript/ai-zai/vitest.config.ts
new file mode 100644
index 000000000..fa2531743
--- /dev/null
+++ b/packages/typescript/ai-zai/vitest.config.ts
@@ -0,0 +1,22 @@
+import { defineConfig } from 'vitest/config'
+
+export default defineConfig({
+  test: {
+    globals: true,
+    environment: 'node',
+    include: ['tests/**/*.test.ts'],
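+    // Note: when both vite.config.ts and vitest.config.ts exist, Vitest reads
+    // vitest.config.ts with higher priority, so the coverage setup below is
+    // what test runs actually use.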
+    coverage: {
+      provider: 'v8',
+      reporter: ['text', 'json', 'html', 'lcov'],
+      exclude: [
+        'node_modules/',
+        'dist/',
+        'tests/',
+        '**/*.test.ts',
+        '**/*.config.ts',
+        '**/types.ts',
+      ],
+      include: ['src/**/*.ts'],
+    },
+  },
+})
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index eb41b0817..71dd84a1e 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -142,6 +142,9 @@ importers:
       '@tanstack/ai-react':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-react
+      '@tanstack/ai-zai':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-zai
       '@tanstack/nitro-v2-vite-plugin':
         specifier: ^1.154.7
         version: 1.154.7(rolldown@1.0.0-rc.17)(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
@@ -366,6 +369,9 @@ importers:
       '@tanstack/ai-react-ui':
         specifier: workspace:*
        version: link:../../packages/typescript/ai-react-ui
+      '@tanstack/ai-zai':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-zai
       '@tanstack/nitro-v2-vite-plugin':
         specifier: ^1.154.7
         version: 1.154.7(rolldown@1.0.0-rc.17)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
@@ -542,12 +548,12 @@ importers:
       '@tanstack/ai':
         specifier: workspace:*
         version: link:../../packages/typescript/ai
-      '@tanstack/ai-groq':
-        specifier: workspace:*
-        version: link:../../packages/typescript/ai-groq
       '@tanstack/ai-react':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-react
+      '@tanstack/ai-zai':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-zai
       '@tanstack/query-db-collection':
         specifier: ^1.0.6
         version: 1.0.36(@tanstack/query-core@5.90.12)(typescript@5.9.3)
@@ -675,18 +681,30 @@ importers:
       '@tanstack/ai-gemini':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-gemini
+      '@tanstack/ai-grok':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-grok
+      '@tanstack/ai-groq':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-groq
       '@tanstack/ai-ollama':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-ollama
       '@tanstack/ai-openai':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-openai
+      '@tanstack/ai-openrouter':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-openrouter
       '@tanstack/ai-solid':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-solid
       '@tanstack/ai-solid-ui':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-solid-ui
+      '@tanstack/ai-zai':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-zai
       '@tanstack/nitro-v2-vite-plugin':
         specifier: ^1.154.7
         version: 1.154.7(rolldown@1.0.0-rc.17)(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
@@ -796,6 +814,9 @@ importers:
       '@tanstack/ai-svelte':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-svelte
+      '@tanstack/ai-zai':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-zai
       highlight.js:
         specifier: ^11.11.1
         version: 11.11.1
@@ -872,6 +893,9 @@ importers:
       '@tanstack/ai-vue-ui':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-vue-ui
+      '@tanstack/ai-zai':
+        specifier: workspace:*
+        version: link:../../packages/typescript/ai-zai
       marked:
         specifier: ^15.0.6
         version: 15.0.12
@@ -1604,6 +1628,25 @@ importers:
         specifier: ^2.2.10
         version: 2.2.12(typescript@5.9.3)
 
+  packages/typescript/ai-zai:
+    dependencies:
+      openai:
+        specifier: ^6.9.1
+        version: 6.10.0(ws@8.19.0)(zod@4.3.6)
+    devDependencies:
+      '@tanstack/ai':
+        specifier: workspace:*
+        version: link:../ai
+      '@vitest/coverage-v8':
+        specifier: 4.0.14
+        version: 4.0.14(vitest@4.1.4)
+      vite:
+        specifier: ^7.2.7
+        version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)
+      zod:
+        specifier: ^4.2.0
+        version: 4.3.6
+
   packages/typescript/preact-ai-devtools:
     dependencies:
       '@tanstack/ai-devtools-core':
diff --git a/testing/panel/.env.example b/testing/panel/.env.example
new file mode 100644
index 000000000..f95232839
--- /dev/null
+++ b/testing/panel/.env.example
@@ -0,0 +1,22 @@
+# OpenAI API Key
+# Get yours at: https://platform.openai.com/api-keys
+# OPENAI_API_KEY=sk-...
+
+# Z.AI API Key
+# Get yours at: https://z.ai/manage-apikey/apikey-list
+# ZAI_API_KEY=
+
+# Anthropic API Key
+# Get yours at: https://console.anthropic.com/
+# ANTHROPIC_API_KEY=
+
+# Google Gemini API Key
+# Get yours at: https://makersuite.google.com/app/apikey
+# GEMINI_API_KEY=
+
+# Grok API Key
+# Get yours at: https://x.ai/
+# GROK_API_KEY=
+
+# Ollama (local)
+# OLLAMA_HOST=http://localhost:11434
\ No newline at end of file
diff --git a/testing/panel/src/routes/api.chat.ts b/testing/panel/src/routes/api.chat.ts
index 11ee577cb..8aac08183 100644
--- a/testing/panel/src/routes/api.chat.ts
+++ b/testing/panel/src/routes/api.chat.ts
@@ -164,8 +164,8 @@ export const Route = createFileRoute('/api/chat')({
     const data = body.data || {}
 
     // Extract provider, model, and traceId from data
-    const provider: Provider = data.provider || 'openai'
-    const model: string = data.model || 'gpt-4o'
+    const provider: Provider = data.provider || 'zai'
+    const model: string = data.model || 'glm-4.7'
     const traceId: string | undefined = data.traceId
 
     try {