-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathmodel.ts
More file actions
347 lines (292 loc) · 9.58 KB
/
model.ts
File metadata and controls
347 lines (292 loc) · 9.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
/**
* Model Example / 模型示例
*
* 此示例展示如何使用 AgentRun SDK 管理 ModelService 和 ModelProxy。
* This example demonstrates how to manage ModelService and ModelProxy using AgentRun SDK.
*
* 运行前请确保设置了环境变量 / Ensure environment variables are set:
* - AGENTRUN_ACCESS_KEY_ID
* - AGENTRUN_ACCESS_KEY_SECRET
* - AGENTRUN_ACCOUNT_ID
*
* 运行方式 / Run with:
* npm run example:model
*/
import type {
ModelProxyCreateInput,
ModelServiceCreateInput,
ProviderSettings,
ProxyConfig,
} from '../src/index';
import {
ModelClient,
ModelProxy,
ModelService,
ModelType,
ResourceAlreadyExistError,
ResourceNotExistError,
Status,
} from '../src/index';
import { Config } from '../src/utils/config';
import { logger } from '../src/utils/log';
/**
 * Logging helper: prefixes every message with an ISO-8601 timestamp
 * and forwards it (plus any extra args) to the shared logger.
 */
function log(message: string, ...args: unknown[]) {
  const stamp = new Date().toISOString();
  logger.info(`[${stamp}] ${message}`, ...args);
}
// Read configuration from environment variables.
// NOTE: `||` (not `??`) means an *empty-string* env var also falls back to the default.
const BASE_URL = process.env.BASE_URL || 'https://dashscope.aliyuncs.com/compatible-mode/v1';
// Placeholder key is used when API_KEY is unset; real calls will fail with it.
const API_KEY = process.env.API_KEY || 'sk-xxxxx';
// Accepts comma- and/or whitespace-separated model names; empty entries are dropped.
const MODEL_NAMES = (process.env.MODEL_NAMES || 'qwen-max').split(/[\s,]+/).filter(Boolean);
// NOTE(review): `client` is never referenced anywhere below — presumably kept for
// constructor side effects or as demo scaffolding; confirm before removing.
const client = new ModelClient();
// Fixed resource names reused by the create/get/update/delete flows below.
const modelServiceName = 'sdk-test-model-service-nodejs';
const modelProxyName = 'sdk-test-model-proxy-nodejs';
/**
 * Create the test ModelService, or fetch the existing one if creation
 * reports a name collision, then block until it reaches a terminal
 * state and verify it is READY.
 *
 * @returns the READY ModelService handle
 * @throws Error when the service settles in a non-READY state
 */
async function createOrGetModelService(): Promise<ModelService> {
  log('创建或获取已有的资源 / Creating or getting existing resource');
  let service: ModelService;
  try {
    const settings: ProviderSettings = {
      apiKey: API_KEY,
      baseUrl: BASE_URL,
      modelNames: MODEL_NAMES,
    };
    const createInput: ModelServiceCreateInput = {
      modelServiceName,
      description: '测试模型服务 / Test Model Service',
      modelType: ModelType.LLM,
      provider: 'openai',
      providerSettings: settings,
    };
    service = await ModelService.create({ input: createInput });
    log(`创建成功 / Created successfully: ${service.modelServiceId}`);
  } catch (error) {
    // Only a name collision is recoverable — anything else propagates.
    if (!(error instanceof ResourceAlreadyExistError)) {
      throw error;
    }
    log('已存在,获取已有资源 / Already exists, getting existing resource');
    service = await ModelService.get({ name: modelServiceName });
  }
  // Poll until the service leaves its transitional states.
  await service.waitUntilReadyOrFailed({
    callback: s => log(` 当前状态 / Current status: ${s.status}`),
  });
  if (service.status !== Status.READY) {
    throw new Error(`状态异常 / Unexpected status: ${service.status}`);
  }
  log('已就绪状态,当前信息 / Ready state, current info:');
  log(` - Name: ${service.modelServiceName}`);
  log(` - ID: ${service.modelServiceId}`);
  log(` - Status: ${service.status}`);
  return service;
}
/**
 * Patch the service description with the current timestamp, wait for
 * the update to settle, and verify the service is READY again.
 *
 * @param ms - service handle to update in place
 * @throws Error when the service settles in a non-READY state
 */
async function updateModelService(ms: ModelService): Promise<void> {
  log('更新描述为当前时间 / Updating description to current time');
  const description = `当前时间戳 / Current timestamp: ${Date.now()}`;
  await ms.update({ input: { description } });
  await ms.waitUntilReadyOrFailed();
  if (ms.status !== Status.READY) {
    throw new Error(`状态异常 / Unexpected status: ${ms.status}`);
  }
  log('更新成功,当前信息 / Update successful, current info:');
  log(` - Description: ${ms.description}`);
}
/**
 * Enumerate all LLM-type ModelServices and log their count and names.
 */
async function listModelServices(): Promise<void> {
  log('枚举资源列表 / Listing resources');
  const input = { modelType: ModelType.LLM };
  const services = await ModelService.list({ input });
  const names = services.map(s => s.modelServiceName);
  log(`共有 ${services.length} 个资源,分别为 / Total ${services.length} resources:`, names);
}
/**
 * Run a streaming chat completion against the service and echo the
 * streamed text chunks to stdout, ending with a newline.
 *
 * @param ms - READY service handle to invoke
 */
async function invokeModelService(ms: ModelService): Promise<void> {
  log('调用模型服务进行推理 / Invoking model service for inference');
  const result = await ms.completion({
    messages: [{ role: 'user', content: '你好,请介绍一下你自己' }],
    stream: true,
  });
  // Echo streamed chunks when the result exposes a text stream.
  const stream = 'textStream' in result ? result.textStream : undefined;
  if (stream) {
    for await (const piece of stream) {
      process.stdout.write(piece);
    }
  }
  logger.info(''); // newline after the streamed text
}
/**
 * Delete the service, then confirm removal by refreshing the handle
 * and treating a not-found error as success.
 *
 * @param ms - service handle to delete
 */
async function deleteModelService(ms: ModelService): Promise<void> {
  log('开始清理资源 / Starting cleanup');
  await ms.delete();
  log('删除请求已发送 / Delete request sent');
  // Verify deletion by attempting to re-fetch the resource.
  log('再次尝试获取 / Trying to get again');
  try {
    await ms.refresh();
    log('资源仍然存在 / Resource still exists');
  } catch (error) {
    if (!(error instanceof ResourceNotExistError)) {
      throw error;
    }
    log('得到资源不存在报错,删除成功 / Resource not found, deletion successful');
  }
}
/**
 * Create the test ModelProxy, or fetch the existing one if creation
 * reports a name collision, then block until it reaches a terminal
 * state and verify it is READY.
 *
 * @returns the READY ModelProxy handle
 * @throws Error when the proxy settles in a non-READY state
 */
async function createOrGetModelProxy(): Promise<ModelProxy> {
  log('创建或获取已有的资源 / Creating or getting existing resource');
  let proxy: ModelProxy;
  try {
    // Route every configured model name through the shared test service.
    const routing: ProxyConfig = {
      endpoints: MODEL_NAMES.map(name => ({ modelNames: [name], modelServiceName })),
    };
    const createInput: ModelProxyCreateInput = {
      modelProxyName,
      description: '测试模型治理 / Test Model Proxy',
      modelType: ModelType.LLM,
      executionRoleArn: `acs:ram::${new Config().accountId}:role/aliyunagentrundefaultrole`,
      proxyConfig: routing,
    };
    proxy = await ModelProxy.create({ input: createInput });
    log(`创建成功 / Created successfully: ${proxy.modelProxyId}`);
  } catch (error) {
    // Only a name collision is recoverable — anything else propagates.
    if (!(error instanceof ResourceAlreadyExistError)) {
      throw error;
    }
    log('已存在,获取已有资源 / Already exists, getting existing resource');
    proxy = await ModelProxy.get({ name: modelProxyName });
  }
  // Poll until the proxy leaves its transitional states.
  await proxy.waitUntilReadyOrFailed({
    callback: p => log(` 当前状态 / Current status: ${p.status}`),
  });
  if (proxy.status !== Status.READY) {
    throw new Error(`状态异常 / Unexpected status: ${proxy.status}`);
  }
  log('已就绪状态,当前信息 / Ready state, current info:');
  log(` - Name: ${proxy.modelProxyName}`);
  log(` - ID: ${proxy.modelProxyId}`);
  log(` - Status: ${proxy.status}`);
  return proxy;
}
/**
 * Patch the proxy description with the current timestamp (re-sending
 * the execution role ARN), wait for the update to settle, and verify
 * the proxy is READY again.
 *
 * @param mp - proxy handle to update in place
 * @throws Error when the proxy settles in a non-READY state
 */
async function updateModelProxy(mp: ModelProxy): Promise<void> {
  log('更新描述为当前时间 / Updating description to current time');
  const { accountId } = new Config();
  await mp.update({
    input: {
      executionRoleArn: `acs:ram::${accountId}:role/aliyunagentrundefaultrole`,
      description: `当前时间戳 / Current timestamp: ${Date.now()}`,
    },
  });
  await mp.waitUntilReadyOrFailed();
  if (mp.status !== Status.READY) {
    throw new Error(`状态异常 / Unexpected status: ${mp.status}`);
  }
  log('更新成功,当前信息 / Update successful, current info:');
  log(` - Description: ${mp.description}`);
}
/**
 * Enumerate all ModelProxies and log their count and names.
 */
async function listModelProxies(): Promise<void> {
  log('枚举资源列表 / Listing resources');
  const proxies = await ModelProxy.list({});
  const names = proxies.map(p => p.modelProxyName);
  log(`共有 ${proxies.length} 个资源,分别为 / Total ${proxies.length} resources:`, names);
}
/**
 * Run a streaming chat completion through the proxy and echo the
 * streamed text chunks to stdout, ending with a newline.
 *
 * @param mp - READY proxy handle to invoke
 */
async function invokeModelProxy(mp: ModelProxy): Promise<void> {
  log('调用模型代理进行推理 / Invoking model proxy for inference');
  const result = await mp.completion({
    messages: [{ role: 'user', content: '你好,请介绍一下你自己' }],
    stream: true,
  });
  // Echo streamed chunks when the result exposes a text stream.
  const stream = 'textStream' in result ? result.textStream : undefined;
  if (stream) {
    for await (const piece of stream) {
      process.stdout.write(piece);
    }
  }
  logger.info(''); // newline after the streamed text
}
/**
 * Delete the proxy, then confirm removal by refreshing the handle and
 * treating a not-found error as success.
 *
 * @param mp - proxy handle to delete
 */
async function deleteModelProxy(mp: ModelProxy): Promise<void> {
  log('开始清理资源 / Starting cleanup');
  await mp.delete();
  log('删除请求已发送 / Delete request sent');
  // Verify deletion by attempting to re-fetch the resource.
  log('再次尝试获取 / Trying to get again');
  try {
    await mp.refresh();
    log('资源仍然存在 / Resource still exists');
  } catch (error) {
    if (!(error instanceof ResourceNotExistError)) {
      throw error;
    }
    log('得到资源不存在报错,删除成功 / Resource not found, deletion successful');
  }
}
/**
 * Entry point: walks the full ModelService / ModelProxy lifecycle
 * (list → create-or-get → update → invoke → delete) and exits with a
 * non-zero code on any failure inside the lifecycle.
 */
async function main(): Promise<void> {
  log('==== 模型模块基本功能示例 / Model Module Example ====');
  log(` base_url=${BASE_URL}`);
  // Mask the key while keeping its length visible for debugging.
  log(` api_key=${'*'.repeat(API_KEY.length)}`);
  log(` model_names=${MODEL_NAMES}`);
  try {
    await listModelServices();
    const ms = await createOrGetModelService();
    await updateModelService(ms);
    await invokeModelService(ms);
    await listModelProxies();
    const mp = await createOrGetModelProxy();
    await updateModelProxy(mp);
    // await invokeModelProxy(mp); // proxy invocation intentionally disabled
    await deleteModelProxy(mp);
    await listModelProxies();
    await deleteModelService(ms);
    await listModelServices();
    log('==== 示例完成 / Example Complete ====');
  } catch (error) {
    logger.error('Error:', error);
    process.exit(1);
  }
}
// Fix: `main();` was a floating promise — the banner log calls above the
// try block are not covered by main's catch, so a rejection there would
// surface as an unhandled promise rejection. Attach a catch at the call site.
main().catch((error: unknown) => {
  logger.error('Error:', error);
  process.exit(1);
});