Skip to content

Commit 084fd64

Browse files
rsonghuster高魏洪
authored and committed
bump 0.1.8
1 parent 6c02961 commit 084fd64

4 files changed

Lines changed: 25 additions & 18 deletions

File tree

__tests__/e2e/scaling/s_149.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ resources:
1919
cpu: 8
2020
customContainerConfig:
2121
image: >-
22-
registry.cn-shanghai.aliyuncs.com/serverless_devs/custom-container-http-examples:springboot
22+
registry.cn-hangzhou.aliyuncs.com/serverless_devs/custom-container-http-examples:springboot
2323
port: 9000
2424
instanceConcurrency: 20
2525
memorySize: 65536

__tests__/e2e/scaling/scalingConfig_provisionConfig.py

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,15 @@ def deploy_function(scalingConfig, provisionConfig, gpuConfig=None, memorySize=N
2020
s_yaml = yaml.safe_load(f)
2121

2222
s_yaml["resources"]["fcDemo"]["props"]["scalingConfig"] = scalingConfig
23-
s_yaml["resources"]["fcDemo"]["props"]["provisionConfig"] = provisionConfig
24-
if gpuConfig:
23+
if provisionConfig is not None:
24+
s_yaml["resources"]["fcDemo"]["props"]["provisionConfig"] = provisionConfig
25+
elif "provisionConfig" in s_yaml["resources"]["fcDemo"]["props"]:
26+
del s_yaml["resources"]["fcDemo"]["props"]["provisionConfig"]
27+
28+
if gpuConfig is not None:
2529
s_yaml["resources"]["fcDemo"]["props"]["gpuConfig"] = gpuConfig
2630

27-
if memorySize:
31+
if memorySize is not None:
2832
s_yaml["resources"]["fcDemo"]["props"]["memorySize"] = memorySize
2933

3034
with open(s_yaml_file, "w", encoding="utf-8") as f:
@@ -34,15 +38,15 @@ def deploy_function(scalingConfig, provisionConfig, gpuConfig=None, memorySize=N
3438

3539
# 执行部署命令
3640
print("正在部署...")
37-
subprocess.check_call(f"s deploy -y -t {s_yaml_file} --skip-push", shell=True)
41+
subprocess.check_call(f"echo 123456 | sudo -S s deploy -y -t {s_yaml_file} --skip-push", shell=True)
3842

3943

4044
def cleanup_deployment():
4145
"""
4246
清理部署
4347
"""
4448
print(f"正在清理部署: {s_yaml_file}")
45-
subprocess.check_call(f"s remove -y -t {s_yaml_file}", shell=True)
49+
subprocess.check_call(f"echo 123456 | sudo -S s remove -y -t {s_yaml_file}", shell=True)
4650

4751

4852
def main():
@@ -51,37 +55,37 @@ def main():
5155
修改 scalingConfig 和 provisionConfig,并测试转换
5256
"""
5357

54-
subprocess.check_call(f"s deploy -y -t {s_yaml_file} --skip-push", shell=True)
58+
subprocess.check_call(f"echo 123456 | sudo -S s deploy -y -t {s_yaml_file} --skip-push", shell=True)
5559

5660
# GPU remains unchanged
5761
# 弹性 ----> 弹性
5862
scalingConfig = {
5963
"minInstances": 1,
6064
}
61-
provisionConfig = {}
65+
provisionConfig = None
6266
deploy_function(scalingConfig, provisionConfig)
6367

6468
# 弹性 ----> 常驻
6569
scalingConfig = {
6670
"residentPoolId": "fc-pool-5f044a31f87171jkwaraws",
6771
"maxInstances": 1,
6872
}
69-
provisionConfig = {}
73+
provisionConfig = None
7074
deploy_function(scalingConfig, provisionConfig)
7175

7276
# 常驻 ----> 常驻
7377
scalingConfig = {
7478
"residentPoolId": "fc-pool-16bedd56db9626uva1it08",
7579
"maxInstances": 1,
7680
}
77-
provisionConfig = {}
81+
provisionConfig = None
7882
deploy_function(scalingConfig, provisionConfig)
7983

8084
# 常驻 ----> 弹性
8185
scalingConfig = {
8286
"minInstances": 1,
8387
}
84-
provisionConfig = {}
88+
provisionConfig = None
8589
deploy_function(scalingConfig, provisionConfig)
8690

8791
cleanup_deployment()
@@ -95,7 +99,7 @@ def main():
9599
"gpuType": "fc.gpu.tesla.1",
96100
"gpuMemorySize": 1,
97101
}
98-
provisionConfig = {}
102+
provisionConfig = None
99103
memorySize = 32768
100104
deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)
101105

@@ -108,7 +112,7 @@ def main():
108112
"gpuType": "fc.gpu.ada.1",
109113
"gpuMemorySize": 1,
110114
}
111-
provisionConfig = {}
115+
provisionConfig = None
112116
memorySize = 65536
113117
deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)
114118

@@ -121,7 +125,7 @@ def main():
121125
"gpuType": "fc.gpu.ada.2",
122126
"gpuMemorySize": 1,
123127
}
124-
provisionConfig = {}
128+
provisionConfig = None
125129
memorySize = 32768
126130
deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)
127131

@@ -133,8 +137,11 @@ def main():
133137
"gpuType": "fc.gpu.tesla.1",
134138
"gpuMemorySize": 1,
135139
}
136-
provisionConfig = {}
140+
provisionConfig = None
137141
memorySize = 32768
138142
deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)
139143

140144
cleanup_deployment()
145+
146+
if __name__ == "__main__":
147+
main()

publish.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ Type: Component
33
Name: fc3
44
Provider:
55
- 阿里云
6-
Version: 0.1.7
6+
Version: dev
77
Description: 阿里云函数计算全生命周期管理
88
HomePage: https://github.com/devsapp/fc3
99
Organization: 阿里云函数计算(FC)

src/resources/fc/index.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -304,8 +304,8 @@ export default class FC extends FC_Client {
304304
*/
305305
const { project, logstore } = (config.logConfig || {}) as ILogConfig;
306306
const retrySls = slsAuto && isSlsNotExistException(project, logstore, ex);
307-
const localGPUType = config.gpuConfig?.gpuType;
308-
const remoteGPUType = remoteConfig.gpuConfig?.gpuType;
307+
const localGPUType = config?.gpuConfig?.gpuType;
308+
const remoteGPUType = remoteConfig?.gpuConfig?.gpuType;
309309
if (retrySls) {
310310
if (calculateRetryTime(3)) {
311311
throw ex;

0 commit comments

Comments (0)