diff --git a/src/aks-preview/HISTORY.rst b/src/aks-preview/HISTORY.rst
index 00ad96dd87f..e46422f00d2 100644
--- a/src/aks-preview/HISTORY.rst
+++ b/src/aks-preview/HISTORY.rst
@@ -12,6 +12,10 @@ To release a new version, please select a new version number (usually plus 1 to
 Pending
 +++++++
 
+17.0.0b2
+++++++++
+* Add option `--migrate-vmas-to-vms` to `az aks update`
+
 17.0.0b1
 +++++++
 * [BREAKING CHANGE]: `az aks create`: Change default value of option `--node-vm-size` to ""
diff --git a/src/aks-preview/azext_aks_preview/_help.py b/src/aks-preview/azext_aks_preview/_help.py
index c63e17995a6..c8332ec1276 100644
--- a/src/aks-preview/azext_aks_preview/_help.py
+++ b/src/aks-preview/azext_aks_preview/_help.py
@@ -1267,6 +1267,9 @@
         - name: --disable-imds-restriction
           type: bool
           short-summary: Disable IMDS restriction in the cluster. All Pods in the cluster will be able to access IMDS.
+        - name: --migrate-vmas-to-vms
+          type: bool
+          short-summary: Migrate a cluster with a VMAS node pool to a VMS node pool.
     examples:
         - name: Reconcile the cluster back to its current state.
           text: az aks update -g MyResourceGroup -n MyManagedCluster
diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py
index f717f587741..1e9adf722ee 100644
--- a/src/aks-preview/azext_aks_preview/_params.py
+++ b/src/aks-preview/azext_aks_preview/_params.py
@@ -1409,6 +1409,8 @@ def load_arguments(self, _):
             arg_type=get_enum_type(health_probe_modes),
         )
 
+        c.argument('migrate_vmas_to_vms', is_preview=True, action='store_true')
+
     with self.argument_context("aks upgrade") as c:
         c.argument("kubernetes_version", completer=get_k8s_upgrades_completion_list)
         c.argument(
diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py
index 357ce198a50..6b5957e5bc0 100644
--- a/src/aks-preview/azext_aks_preview/custom.py
+++ b/src/aks-preview/azext_aks_preview/custom.py
@@ -761,6 +761,7 @@ def aks_update(
     # IMDS restriction
     enable_imds_restriction=False,
     disable_imds_restriction=False,
+    migrate_vmas_to_vms=False,
 ):
     # DO NOT MOVE: get all the original parameters and save them as a dictionary
     raw_parameters = locals()
diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py
index 81cef62c3db..3db1d120dd6 100644
--- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py
+++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py
@@ -20,6 +20,7 @@
     CONST_AZURE_SERVICE_MESH_UPGRADE_COMMAND_START,
     CONST_AZURE_SERVICE_MESH_DEFAULT_EGRESS_NAMESPACE,
     CONST_LOAD_BALANCER_SKU_BASIC,
+    CONST_LOAD_BALANCER_SKU_STANDARD,
     CONST_MANAGED_CLUSTER_SKU_NAME_BASE,
     CONST_MANAGED_CLUSTER_SKU_NAME_AUTOMATIC,
     CONST_MANAGED_CLUSTER_SKU_TIER_FREE,
@@ -40,6 +41,8 @@
     CONST_OUTBOUND_TYPE_BLOCK,
     CONST_IMDS_RESTRICTION_ENABLED,
     CONST_IMDS_RESTRICTION_DISABLED,
+    CONST_AVAILABILITY_SET,
+    CONST_VIRTUAL_MACHINES,
 )
 from azext_aks_preview._helpers import (
     check_is_apiserver_vnet_integration_cluster,
@@ -2857,6 +2860,13 @@ def get_disable_imds_restriction(self) -> bool:
         """
         return self.raw_param.get("disable_imds_restriction")
 
+    def get_migrate_vmas_to_vms(self) -> bool:
+        """Obtain the value of migrate_vmas_to_vms.
+
+        :return: bool
+        """
+        return self.raw_param.get("migrate_vmas_to_vms")
+
 
 # pylint: disable=too-many-public-methods
 class AKSPreviewManagedClusterCreateDecorator(AKSManagedClusterCreateDecorator):
@@ -5335,6 +5345,31 @@ def update_imds_restriction(self, mc: ManagedCluster) -> ManagedCluster:
             raise DecoratorEarlyExitException()
         return mc
 
+    def update_vmas_to_vms(self, mc: ManagedCluster) -> ManagedCluster:
+        """Update the agent pool profile type from VMAS to VMS and the load balancer SKU from Basic to Standard.
+
+        :return: the ManagedCluster object
+        """
+        self._ensure_mc(mc)
+
+        if self.context.get_migrate_vmas_to_vms():
+            msg = (
+                "\nWARNING: This operation will be disruptive to your workload while underway. "
+                "Do you wish to continue?"
+            )
+            if not self.context.get_yes() and not prompt_y_n(msg, default="n"):
+                raise DecoratorEarlyExitException()
+            # Ensure the cluster has exactly one agent pool and that it is a valid VMAS pool
+            if len(mc.agent_pool_profiles) == 1 and mc.agent_pool_profiles[0].type == CONST_AVAILABILITY_SET:
+                mc.agent_pool_profiles[0].type = CONST_VIRTUAL_MACHINES
+            else:
+                raise CLIError('This is not a valid VMAS cluster; the migration cannot proceed.')
+
+            if mc.network_profile.load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:
+                mc.network_profile.load_balancer_sku = CONST_LOAD_BALANCER_SKU_STANDARD
+
+        return mc
+
     def update_mc_profile_preview(self) -> ManagedCluster:
         """The overall controller used to update the preview ManagedCluster profile.
 
@@ -5410,6 +5445,8 @@ def update_mc_profile_preview(self) -> ManagedCluster:
         mc = self.update_static_egress_gateway(mc)
         # update imds restriction
         mc = self.update_imds_restriction(mc)
+        # update VMAS to VMS
+        mc = self.update_vmas_to_vms(mc)
 
         return mc
 
diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py
index 9d4474b02bf..079f3b63d85 100644
--- a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py
+++ b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py
@@ -16079,3 +16079,59 @@ def test_aks_loadbalancer_commands(
             "aks delete -g {resource_group} -n {name} --yes --no-wait",
             checks=[self.is_empty()],
         )
+
+
+    # The tests below are commented out because the migration is only allowed in certain regions.
+    # @AllowLargeResponse()
+    # @AKSCustomResourceGroupPreparer(
+    #     random_name_length=17,
+    #     name_prefix="clitest",
+    #     location="centraluseuap",
+    # )
+    # def test_aks_migrate_vmas_to_vms(
+    #     self, resource_group, resource_group_location
+    # ):
+    #     _, create_version = self._get_versions(resource_group_location)
+    #     aks_name = self.create_random_name("cliakstest", 16)
+    #     self.kwargs.update(
+    #         {
+    #             "resource_group": resource_group,
+    #             "name": aks_name,
+    #             "location": resource_group_location,
+    #             "k8s_version": create_version,
+    #             "ssh_key_value": self.generate_ssh_keys(),
+    #         }
+    #     )
+
+    #     # create
+    #     create_cmd = (
+    #         "aks create --resource-group={resource_group} --name={name} --location={location} "
+    #         "--ssh-key-value={ssh_key_value} "
+    #         "--vm-set-type AvailabilitySet "
+    #         "--load-balancer-sku Basic "
+    #     )
+    #     self.cmd(
+    #         create_cmd,
+    #         checks=[
+    #             self.check('provisioningState', 'Succeeded'),
+    #             self.check("agentPoolProfiles[0].type", "AvailabilitySet"),
+    #             self.check("networkProfile.loadBalancerSku", "basic"),
+    #         ],
+    #     )
+
+    #     # update -- migrate vmas to vms
+    #     update_cmd = (
+    #         "aks update --resource-group {resource_group} --name {name} "
+    #         "--migrate-vmas-to-vms --yes "
+    #     )
+    #     self.cmd(update_cmd, checks=[
+    #         self.check('provisioningState', 'Succeeded'),
+    #         self.check("agentPoolProfiles[0].type", "VirtualMachines"),
+    #         self.check("networkProfile.loadBalancerSku", "standard"),
+    #     ])
+
+    #     # delete
+    #     self.cmd(
+    #         "aks delete -g {resource_group} -n {name} --yes --no-wait",
+    #         checks=[self.is_empty()],
+    #     )
\ No newline at end of file
diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py
index 93fc9e56e5c..a691104e0c8 100644
--- a/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py
+++ b/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py
@@ -37,6 +37,7 @@
     CONST_KUBE_DASHBOARD_ADDON_NAME,
     CONST_LOAD_BALANCER_BACKEND_POOL_TYPE_NODE_IP,
     CONST_LOAD_BALANCER_SKU_STANDARD,
+    CONST_LOAD_BALANCER_SKU_BASIC,
     CONST_MONITORING_ADDON_NAME,
     CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
     CONST_MONITORING_USING_AAD_MSI_AUTH,
@@ -47,6 +48,8 @@
     CONST_ROTATION_POLL_INTERVAL,
     CONST_SECRET_ROTATION_ENABLED,
     CONST_VIRTUAL_MACHINE_SCALE_SETS,
+    CONST_AVAILABILITY_SET,
+    CONST_VIRTUAL_MACHINES,
     CONST_VIRTUAL_NODE_ADDON_NAME,
     CONST_VIRTUAL_NODE_SUBNET_NAME,
     CONST_WORKLOAD_RUNTIME_OCI_CONTAINER,
@@ -85,6 +88,7 @@
     MutuallyExclusiveArgumentError,
     RequiredArgumentMissingError,
     UnknownError,
+    CLIError,
 )
 from azure.cli.command_modules.acs._consts import (
     CONST_OUTBOUND_TYPE_LOAD_BALANCER,
@@ -9078,6 +9082,113 @@ def test_update_acns_in_network_profile(self):
             ),
         )
         self.assertEqual(dec_mc_1, ground_truth_mc_1)
+
+    def test_update_vmas_to_vms(self):
+        # Should not update mc if unset
+        dec_0 = AKSPreviewManagedClusterUpdateDecorator(
+            self.cmd,
+            self.client,
+            {},
+            CUSTOM_MGMT_AKS_PREVIEW,
+        )
+        mc_0 = self.models.ManagedCluster(
+            location="test_location",
+        )
+        dec_0.context.attach_mc(mc_0)
+        dec_mc_0 = dec_0.update_vmas_to_vms(mc_0)
+        ground_truth_mc_0 = self.models.ManagedCluster(
+            location="test_location",
+        )
+        self.assertEqual(dec_mc_0, ground_truth_mc_0)
+
+        # Should raise error if trying to migrate non-vmas cluster to vms
+        dec_1 = AKSPreviewManagedClusterUpdateDecorator(
+            self.cmd,
+            self.client,
+            {
+                "migrate_vmas_to_vms": True,
+                "yes": True,
+            },
+            CUSTOM_MGMT_AKS_PREVIEW,
+        )
+        mc_1 = self.models.ManagedCluster(
+            location="test_location",
+        )
+        ap_1 = self.models.ManagedClusterAgentPoolProfile(
+            name="test_np_name",
+            type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
+        )
+        mc_1.agent_pool_profiles = [ap_1]
+        dec_1.context.attach_mc(mc_1)
+        with self.assertRaises(CLIError):
+            dec_1.update_vmas_to_vms(mc_1)
+
+        # Should raise error if cluster has more than 1 AP
+        dec_2 = AKSPreviewManagedClusterUpdateDecorator(
+            self.cmd,
+            self.client,
+            {
+                "migrate_vmas_to_vms": True,
+                "yes": True,
+            },
+            CUSTOM_MGMT_AKS_PREVIEW,
+        )
+        mc_2 = self.models.ManagedCluster(
+            location="test_location",
+        )
+        ap_2_1 = self.models.ManagedClusterAgentPoolProfile(
+            name="test_np_name_1",
+            type=CONST_AVAILABILITY_SET,
+        )
+        ap_2_2 = self.models.ManagedClusterAgentPoolProfile(
+            name="test_np_name_2",
+            type=CONST_AVAILABILITY_SET,
+        )
+        mc_2.agent_pool_profiles = [ap_2_1, ap_2_2]
+        dec_2.context.attach_mc(mc_2)
+        with self.assertRaises(CLIError):
+            dec_2.update_vmas_to_vms(mc_2)
+
+        # Should migrate vmas-blb to vms-slb
+        dec_3 = AKSPreviewManagedClusterUpdateDecorator(
+            self.cmd,
+            self.client,
+            {
+                "migrate_vmas_to_vms": True,
+                "yes": True,
+            },
+            CUSTOM_MGMT_AKS_PREVIEW,
+        )
+        mc_3 = self.models.ManagedCluster(
+            location="test_location",
+        )
+        ap_3 = self.models.ManagedClusterAgentPoolProfile(
+            name="test_np_name",
+            type=CONST_AVAILABILITY_SET,
+        )
+        network_profile_3 = self.models.ContainerServiceNetworkProfile(
+            load_balancer_sku=CONST_LOAD_BALANCER_SKU_BASIC,
+        )
+        mc_3.agent_pool_profiles = [ap_3]
+        mc_3.network_profile = network_profile_3
+        dec_3.context.attach_mc(mc_3)
+        dec_mc_3 = dec_3.update_vmas_to_vms(mc_3)
+
+        ground_truth_mc_3 = self.models.ManagedCluster(
+            location="test_location",
+        )
+        ground_truth_ap_3 = self.models.ManagedClusterAgentPoolProfile(
+            name="test_np_name",
+            type=CONST_VIRTUAL_MACHINES,
+        )
+        ground_truth_network_profile_3 = self.models.ContainerServiceNetworkProfile(
+            load_balancer_sku=CONST_LOAD_BALANCER_SKU_STANDARD,
+        )
+        ground_truth_mc_3.agent_pool_profiles = [ground_truth_ap_3]
+        ground_truth_mc_3.network_profile = ground_truth_network_profile_3
+        self.assertEqual(dec_mc_3, ground_truth_mc_3)
+
+
 
     def test_enable_retina_network_flow_logs(self):
         # Case 1: enable_acns, enable monitoring addons_profile, enable retina_network_flow_logs
diff --git a/src/aks-preview/setup.py b/src/aks-preview/setup.py
index aac89ad5596..f616649c417 100644
--- a/src/aks-preview/setup.py
+++ b/src/aks-preview/setup.py
@@ -9,7 +9,7 @@
 
 from setuptools import setup, find_packages
 
-VERSION = "17.0.0b1"
+VERSION = "17.0.0b2"
 
 CLASSIFIERS = [
     "Development Status :: 4 - Beta",
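
A minimal usage sketch for the new flag (resource group and cluster names are illustrative):

    az aks update -g MyResourceGroup -n MyManagedCluster --migrate-vmas-to-vms --yes

Per update_vmas_to_vms above, the migration proceeds only when the cluster has exactly one agent pool and its type is AvailabilitySet; the pool type is switched to VirtualMachines, and a Basic load balancer, if present, is upgraded to Standard in the same operation. Without --yes, the command prompts for confirmation, since the operation is disruptive to running workloads.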