1+ #!/usr/bin/env python3
2+ """
3+ Test the conversion between scalingConfig and provisionConfig, as well as the conversion between GPUs of the same type and different types.
4+ """
5+ import subprocess
6+
7+ import yaml
8+
# Generated config file used by every deploy/remove step below.
# Plain string: the original used an f-string with no placeholders (F541).
s_yaml_file = "s_149.yaml"
10+
11+
def deploy_function(scalingConfig, provisionConfig, gpuConfig=None, memorySize=None):
    """
    Deploy the test function with the given configuration.

    Reads the current config file as the base, overwrites its scaling and
    provision sections (plus optional GPU/memory settings), writes the file
    back, then runs ``s deploy``.

    :param scalingConfig: dict stored at props.scalingConfig
    :param provisionConfig: dict stored at props.provisionConfig
    :param gpuConfig: optional dict stored at props.gpuConfig; skipped when falsy
    :param memorySize: optional value stored at props.memorySize; skipped when falsy
    :raises subprocess.CalledProcessError: if the ``s deploy`` command fails
    """
    print(f"scalingConfig: {scalingConfig} provisionConfig: {provisionConfig} ")

    # Use the current config file in this folder as the base configuration.
    # NOTE(review): this is the same file we write below, so settings from a
    # previous call (e.g. gpuConfig) persist unless explicitly overwritten —
    # confirm this carry-over is intended by the transition tests.
    with open(s_yaml_file, "r", encoding="utf-8") as f:
        s_yaml = yaml.safe_load(f)

    props = s_yaml["resources"]["fcDemo"]["props"]
    props["scalingConfig"] = scalingConfig
    props["provisionConfig"] = provisionConfig
    if gpuConfig:
        props["gpuConfig"] = gpuConfig
    if memorySize:
        props["memorySize"] = memorySize

    with open(s_yaml_file, "w", encoding="utf-8") as f:
        yaml.dump(s_yaml, f, default_flow_style=False, allow_unicode=True)

    print(f"使用配置文件部署: {s_yaml_file} ")

    # Run the deployment command (trusted, locally-built command string).
    print("正在部署...")
    subprocess.check_call(f"s deploy -y -t {s_yaml_file} --skip-push", shell=True)
38+
39+
def cleanup_deployment():
    """
    Remove the resources deployed from the generated config file.

    :raises subprocess.CalledProcessError: if the ``s remove`` command fails
    """
    print(f"正在清理部署: {s_yaml_file} ")
    subprocess.check_call(f"s remove -y -t {s_yaml_file} ", shell=True)
46+
47+
def main():
    """
    Drive the scalingConfig/provisionConfig conversion test matrix.

    Deploys a baseline, then exercises every transition between elastic
    mode (``minInstances``) and resident mode (``residentPoolId``), first
    with the GPU configuration left unchanged and then with the GPU
    type/memory varied at each step. Cleanup now runs in ``finally`` so
    deployed resources are removed even when a transition step fails.
    """

    # Baseline deployment from the unmodified template.
    subprocess.check_call(f"s deploy -y -t {s_yaml_file} --skip-push", shell=True)

    # Pass 1: GPU configuration unchanged; only the scaling mode varies.
    try:
        # elastic ----> elastic
        deploy_function({"minInstances": 1}, {})

        # elastic ----> resident
        deploy_function(
            {
                "residentPoolId": "fc-pool-5f044a31f87171jkwaraws",
                "maxInstances": 1,
            },
            {},
        )

        # resident ----> resident (pool id deliberately omitted, matching the
        # original's commented-out residentPoolId)
        deploy_function({"maxInstances": 1}, {})

        # resident ----> elastic
        deploy_function({"minInstances": 1}, {})
    finally:
        cleanup_deployment()

    # Pass 2: GPU type/memory change together with the scaling mode.
    # Each entry: (scalingConfig, gpuConfig, memorySize).
    gpu_cases = [
        # elastic ----> elastic
        (
            {"minInstances": 1},
            {"gpuType": "fc.gpu.tesla.1", "gpuMemorySize": 1},
            32768,
        ),
        # elastic ----> resident
        (
            {"residentPoolId": "fc-pool-16bedd56db9626uva1it08", "maxInstances": 1},
            {"gpuType": "fc.gpu.ada.1", "gpuMemorySize": 1},
            65536,
        ),
        # resident ----> resident
        (
            {"residentPoolId": "fc-pool-16bedd56db96260yid15cs", "maxInstances": 1},
            {"gpuType": "fc.gpu.ada.2", "gpuMemorySize": 1},
            32768,
        ),
        # resident ----> elastic
        (
            {"minInstances": 1},
            {"gpuType": "fc.gpu.tesla.1", "gpuMemorySize": 1},
            32768,
        ),
    ]
    try:
        for scaling_config, gpu_config, memory_size in gpu_cases:
            deploy_function(scaling_config, {}, gpu_config, memory_size)
    finally:
        cleanup_deployment()