Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions __tests__/e2e/scaling/s_149.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# E2E fixture for scalingConfig <-> provisionConfig conversion tests.
# NOTE(review): indentation reconstructed from the flattened scrape; nesting
# follows how scalingConfig_provisionConfig.py accesses resources.fcDemo.props.*
edition: 3.0.0
name: test-scaling-config
access: quanxi_149

resources:
  fcDemo:
    component: ${env('fc_component_version', path('../../../'))}
    props:
      region: cn-hangzhou
      functionName: fc3-scaling-pool-149-${env('fc_component_function_name', 'scaling')}
      code: ./code
      handler: index.handler
      timeout: 60
      logConfig: auto
      gpuConfig:
        gpuMemorySize: 49152
        gpuType: fc.gpu.ada.1
      runtime: custom-container
      cpu: 8
      customContainerConfig:
        image: >-
          registry.cn-shanghai.aliyuncs.com/serverless_devs/custom-container-http-examples:springboot
        port: 9000
      instanceConcurrency: 20
      memorySize: 65536
      diskSize: 10240

      provisionConfig:
        defaultTarget: 1
        alwaysAllocateCPU: false
        alwaysAllocateGPU: false

      # scalingConfig:
      #   # residentPoolId: fc-pool-5f044a31f87171jkwaraws
      #   minInstances: 1

140 changes: 140 additions & 0 deletions __tests__/e2e/scaling/scalingConfig_provisionConfig.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
#!/usr/bin/env python3
"""
Test the conversion between scalingConfig and provisionConfig, as well as the conversion between GPUs of the same type and different types.
"""
import subprocess

import yaml

s_yaml_file = f"s_149.yaml"


def deploy_function(scalingConfig, provisionConfig, gpuConfig=None, memorySize=None):
    """Deploy the function with the given scaling/provision configuration.

    Reads the current YAML config as the base, patches in ``scalingConfig``
    and ``provisionConfig`` (and optionally ``gpuConfig``/``memorySize``),
    writes it back to the same file, then runs ``s deploy``.

    NOTE(review): the base config is re-read from the very file this function
    overwrites, so a gpuConfig/memorySize written by a previous call persists
    when the corresponding argument is omitted — confirm this carry-over is
    intended.

    :param scalingConfig: dict written to props.scalingConfig (may be empty)
    :param provisionConfig: dict written to props.provisionConfig (may be empty)
    :param gpuConfig: optional dict written to props.gpuConfig
    :param memorySize: optional int written to props.memorySize
    :raises subprocess.CalledProcessError: if the deploy command fails
    """
    print(f"scalingConfig: {scalingConfig} provisionConfig: {provisionConfig}")
    # Use the module-level config path consistently instead of a second
    # hard-coded "s_149.yaml" literal (same file, single source of truth).
    with open(s_yaml_file, "r", encoding="utf-8") as f:
        s_yaml = yaml.safe_load(f)

    props = s_yaml["resources"]["fcDemo"]["props"]
    props["scalingConfig"] = scalingConfig
    props["provisionConfig"] = provisionConfig
    if gpuConfig:
        props["gpuConfig"] = gpuConfig

    if memorySize:
        props["memorySize"] = memorySize

    with open(s_yaml_file, "w", encoding="utf-8") as f:
        yaml.dump(s_yaml, f, default_flow_style=False, allow_unicode=True)

    print(f"使用配置文件部署: {s_yaml_file}")

    # Run the deploy command. Argument-list form (no shell=True) avoids
    # shell quoting/injection issues with the interpolated file name.
    print("正在部署...")
    subprocess.check_call(["s", "deploy", "-y", "-t", s_yaml_file, "--skip-push"])


def cleanup_deployment():
    """Remove the resources deployed from the current config file.

    :raises subprocess.CalledProcessError: if the remove command fails
    """
    print(f"正在清理部署: {s_yaml_file}")
    # Argument-list form (no shell=True) avoids shell quoting/injection issues.
    subprocess.check_call(["s", "remove", "-y", "-t", s_yaml_file])


def main():
    """Drive the scalingConfig/provisionConfig conversion test.

    Performs an initial deploy, then re-deploys with successive scalingConfig
    values to exercise every transition between elastic (弹性, minInstances)
    and resident (常驻, residentPoolId) modes — first with the GPU config
    unchanged, then with the GPU type changing between deployments — and
    cleans up after each sequence.
    """

    # Initial deployment with the base configuration.
    subprocess.check_call(["s", "deploy", "-y", "-t", s_yaml_file, "--skip-push"])

    # --- GPU remains unchanged ---
    # elastic ----> elastic
    scalingConfig = {
        "minInstances": 1,
    }
    provisionConfig = {}
    deploy_function(scalingConfig, provisionConfig)

    # elastic ----> resident
    scalingConfig = {
        "residentPoolId": "fc-pool-5f044a31f87171jkwaraws",
        "maxInstances": 1,
    }
    provisionConfig = {}
    deploy_function(scalingConfig, provisionConfig)

    # resident ----> resident (switching to a different resident pool)
    scalingConfig = {
        "residentPoolId": "fc-pool-16bedd56db9626uva1it08",
        "maxInstances": 1,
    }
    provisionConfig = {}
    deploy_function(scalingConfig, provisionConfig)

    # resident ----> elastic
    scalingConfig = {
        "minInstances": 1,
    }
    provisionConfig = {}
    deploy_function(scalingConfig, provisionConfig)

    cleanup_deployment()

    # --- GPU type changes between deployments ---
    # elastic ----> elastic
    scalingConfig = {
        "minInstances": 1,
    }
    gpuConfig = {
        "gpuType": "fc.gpu.tesla.1",
        "gpuMemorySize": 1,
    }
    provisionConfig = {}
    memorySize = 32768
    deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)

    # elastic ----> resident
    scalingConfig = {
        "residentPoolId": "fc-pool-16bedd56db9626uva1it08",
        "maxInstances": 1,
    }
    gpuConfig = {
        "gpuType": "fc.gpu.ada.1",
        "gpuMemorySize": 1,
    }
    provisionConfig = {}
    memorySize = 65536
    deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)

    # resident ----> resident (different pool, different GPU type)
    scalingConfig = {
        "residentPoolId": "fc-pool-16bedd56db96260yid15cs",
        "maxInstances": 1,
    }
    gpuConfig = {
        "gpuType": "fc.gpu.ada.2",
        "gpuMemorySize": 1,
    }
    provisionConfig = {}
    memorySize = 32768
    deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)

    # resident ----> elastic
    scalingConfig = {
        "minInstances": 1,
    }
    gpuConfig = {
        "gpuType": "fc.gpu.tesla.1",
        "gpuMemorySize": 1,
    }
    provisionConfig = {}
    memorySize = 32768
    deploy_function(scalingConfig, provisionConfig, gpuConfig, memorySize)

    cleanup_deployment()


# Bug fix: the original file defined main() but never invoked it, so running
# the script did nothing. Guarded entry point added.
if __name__ == "__main__":
    main()
75 changes: 48 additions & 27 deletions __tests__/ut/commands/deploy/impl/provision_config_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -137,20 +137,27 @@ describe('ProvisionConfig', () => {
writable: true,
});

// Mock scalingConfig.provisionConfigErrorRetry
// Mock provisionConfigErrorRetry
const utils = require('../../../../../src/subCommands/deploy/utils');
const provisionConfigErrorRetrySpy = jest
.spyOn(provisionConfig.scalingConfig, 'provisionConfigErrorRetry')
.spyOn(utils, 'provisionConfigErrorRetry')
.mockResolvedValue(undefined);

const result = await provisionConfig.run();

expect(provisionConfigErrorRetrySpy).toHaveBeenCalledWith('ProvisionConfig', 'LATEST', {
defaultTarget: 10,
alwaysAllocateCPU: false,
alwaysAllocateGPU: false,
scheduledActions: [],
targetTrackingPolicies: [],
});
expect(provisionConfigErrorRetrySpy).toHaveBeenCalledWith(
provisionConfig.fcSdk,
'ProvisionConfig',
'test-function',
'LATEST',
{
defaultTarget: 10,
alwaysAllocateCPU: false,
alwaysAllocateGPU: false,
scheduledActions: [],
targetTrackingPolicies: [],
},
);
expect(result).toBe(true);
});

Expand All @@ -171,9 +178,10 @@ describe('ProvisionConfig', () => {
writable: true,
});

// Mock scalingConfig.provisionConfigErrorRetry
// Mock provisionConfigErrorRetry
const utils = require('../../../../../src/subCommands/deploy/utils');
const provisionConfigErrorRetrySpy = jest
.spyOn(provisionConfig.scalingConfig, 'provisionConfigErrorRetry')
.spyOn(utils, 'provisionConfigErrorRetry')
.mockResolvedValue(undefined);

const waitForProvisionReadySpy = jest
Expand All @@ -182,13 +190,19 @@ describe('ProvisionConfig', () => {

await provisionConfig.run();

expect(provisionConfigErrorRetrySpy).toHaveBeenCalledWith('ProvisionConfig', 'LATEST', {
defaultTarget: 10,
alwaysAllocateCPU: false,
alwaysAllocateGPU: false,
scheduledActions: [],
targetTrackingPolicies: [],
});
expect(provisionConfigErrorRetrySpy).toHaveBeenCalledWith(
provisionConfig.fcSdk,
'ProvisionConfig',
'test-function',
'LATEST',
{
defaultTarget: 10,
alwaysAllocateCPU: false,
alwaysAllocateGPU: false,
scheduledActions: [],
targetTrackingPolicies: [],
},
);
expect(waitForProvisionReadySpy).toHaveBeenCalledWith('LATEST', {
defaultTarget: 10,
alwaysAllocateCPU: false,
Expand All @@ -215,22 +229,29 @@ describe('ProvisionConfig', () => {
writable: true,
});

// Mock scalingConfig.provisionConfigErrorRetry
// Mock provisionConfigErrorRetry
const utils = require('../../../../../src/subCommands/deploy/utils');
const provisionConfigErrorRetrySpy = jest
.spyOn(provisionConfig.scalingConfig, 'provisionConfigErrorRetry')
.spyOn(utils, 'provisionConfigErrorRetry')
.mockResolvedValue(undefined);

const waitForProvisionReadySpy = jest.spyOn(provisionConfig as any, 'waitForProvisionReady');

await provisionConfig.run();

expect(provisionConfigErrorRetrySpy).toHaveBeenCalledWith('ProvisionConfig', 'LATEST', {
defaultTarget: 10,
alwaysAllocateCPU: false,
alwaysAllocateGPU: false,
scheduledActions: [],
targetTrackingPolicies: [],
});
expect(provisionConfigErrorRetrySpy).toHaveBeenCalledWith(
provisionConfig.fcSdk,
'ProvisionConfig',
'test-function',
'LATEST',
{
defaultTarget: 10,
alwaysAllocateCPU: false,
alwaysAllocateGPU: false,
scheduledActions: [],
targetTrackingPolicies: [],
},
);
expect(waitForProvisionReadySpy).not.toHaveBeenCalled();
expect(logger.info).toHaveBeenCalledWith(
`Skip wait provisionConfig of ${provisionConfig.functionName}/LATEST to instance up`,
Expand Down
Loading
Loading