@@ -20,11 +20,15 @@ def deploy_function(scalingConfig, provisionConfig, gpuConfig=None, memorySize=N
2020 s_yaml = yaml .safe_load (f )
2121
2222 s_yaml ["resources" ]["fcDemo" ]["props" ]["scalingConfig" ] = scalingConfig
23- s_yaml ["resources" ]["fcDemo" ]["props" ]["provisionConfig" ] = provisionConfig
24- if gpuConfig :
23+ if provisionConfig is not None :
24+ s_yaml ["resources" ]["fcDemo" ]["props" ]["provisionConfig" ] = provisionConfig
25+ elif "provisionConfig" in s_yaml ["resources" ]["fcDemo" ]["props" ]:
26+ del s_yaml ["resources" ]["fcDemo" ]["props" ]["provisionConfig" ]
27+
28+ if gpuConfig is not None :
2529 s_yaml ["resources" ]["fcDemo" ]["props" ]["gpuConfig" ] = gpuConfig
2630
27- if memorySize :
31+ if memorySize is not None :
2832 s_yaml ["resources" ]["fcDemo" ]["props" ]["memorySize" ] = memorySize
2933
3034 with open (s_yaml_file , "w" , encoding = "utf-8" ) as f :
@@ -34,15 +38,15 @@ def deploy_function(scalingConfig, provisionConfig, gpuConfig=None, memorySize=N
3438
3539 # 执行部署命令
3640 print ("正在部署..." )
37- subprocess .check_call (f"s deploy -y -t { s_yaml_file } --skip-push" , shell = True )
41+ subprocess .check_call (f"echo 123456 | sudo -S s deploy -y -t { s_yaml_file } --skip-push" , shell = True )
3842
3943
def cleanup_deployment():
    """
    Tear down the deployed test resources via the Serverless Devs CLI.

    Runs `sudo -S s remove -y -t <s_yaml_file>`, supplying the sudo
    password on stdin (mirrors the deploy path).

    Raises:
        subprocess.CalledProcessError: if the `s remove` command fails.
    """
    # FIXME(security): the sudo password is hardcoded in source. Read it
    # from an environment variable, or configure passwordless sudo for
    # the `s` CLI, before this leaves a throwaway test environment.
    print(f"正在清理部署: {s_yaml_file}")
    # Argument list + stdin instead of an `echo ... | sudo -S ...` shell
    # pipeline: s_yaml_file can no longer be mangled by shell
    # word-splitting or injected metacharacters.
    subprocess.run(
        ["sudo", "-S", "s", "remove", "-y", "-t", s_yaml_file],
        input="123456\n",
        text=True,
        check=True,
    )
4650
4751
4852def main ():
@@ -51,37 +55,37 @@ def main():
5155 修改 scalingConfig 和 provisionConfig,并测试转换
5256 """
5357
54- subprocess .check_call (f"s deploy -y -t { s_yaml_file } --skip-push" , shell = True )
58+ subprocess .check_call (f"echo 123456 | sudo -S s deploy -y -t { s_yaml_file } --skip-push" , shell = True )
5559
5660 # GPU remains unchanged
5761 # 弹性 ----> 弹性
5862 scalingConfig = {
5963 "minInstances" : 1 ,
6064 }
61- provisionConfig = {}
65+ provisionConfig = None
6266 deploy_function (scalingConfig , provisionConfig )
6367
6468 # 弹性 ----> 常驻
6569 scalingConfig = {
6670 "residentPoolId" : "fc-pool-5f044a31f87171jkwaraws" ,
6771 "maxInstances" : 1 ,
6872 }
69- provisionConfig = {}
73+ provisionConfig = None
7074 deploy_function (scalingConfig , provisionConfig )
7175
7276 # 常驻 ----> 常驻
7377 scalingConfig = {
7478 "residentPoolId" : "fc-pool-16bedd56db9626uva1it08" ,
7579 "maxInstances" : 1 ,
7680 }
77- provisionConfig = {}
81+ provisionConfig = None
7882 deploy_function (scalingConfig , provisionConfig )
7983
8084 # 常驻 ----> 弹性
8185 scalingConfig = {
8286 "minInstances" : 1 ,
8387 }
84- provisionConfig = {}
88+ provisionConfig = None
8589 deploy_function (scalingConfig , provisionConfig )
8690
8791 cleanup_deployment ()
@@ -95,7 +99,7 @@ def main():
9599 "gpuType" : "fc.gpu.tesla.1" ,
96100 "gpuMemorySize" : 1 ,
97101 }
98- provisionConfig = {}
102+ provisionConfig = None
99103 memorySize = 32768
100104 deploy_function (scalingConfig , provisionConfig , gpuConfig , memorySize )
101105
@@ -108,7 +112,7 @@ def main():
108112 "gpuType" : "fc.gpu.ada.1" ,
109113 "gpuMemorySize" : 1 ,
110114 }
111- provisionConfig = {}
115+ provisionConfig = None
112116 memorySize = 65536
113117 deploy_function (scalingConfig , provisionConfig , gpuConfig , memorySize )
114118
@@ -121,7 +125,7 @@ def main():
121125 "gpuType" : "fc.gpu.ada.2" ,
122126 "gpuMemorySize" : 1 ,
123127 }
124- provisionConfig = {}
128+ provisionConfig = None
125129 memorySize = 32768
126130 deploy_function (scalingConfig , provisionConfig , gpuConfig , memorySize )
127131
@@ -133,8 +137,11 @@ def main():
133137 "gpuType" : "fc.gpu.tesla.1" ,
134138 "gpuMemorySize" : 1 ,
135139 }
136- provisionConfig = {}
140+ provisionConfig = None
137141 memorySize = 32768
138142 deploy_function (scalingConfig , provisionConfig , gpuConfig , memorySize )
139143
140144 cleanup_deployment ()
145+
# Script entry point: run the full scaling/provision conversion test flow.
if __name__ == "__main__":
    main()
0 commit comments