diff --git a/src/aks-preview/HISTORY.rst b/src/aks-preview/HISTORY.rst index 16968a8a68b..01e34dfc072 100644 --- a/src/aks-preview/HISTORY.rst +++ b/src/aks-preview/HISTORY.rst @@ -12,6 +12,11 @@ To release a new version, please select a new version number (usually plus 1 to Pending +++++++ +20.0.0b3 ++++++++ +* `az aks create/update`: Fix DCR not being created or updated when `--enable-container-network-logs`, `--enable-retina-flow-logs`, or `--enable-high-log-scale-mode` flags are used, ensuring the Data Collection Rule streams (e.g. `Microsoft-ContainerLogV2-HighScale`) are kept in sync. +* `az aks update`: Add validation for `--enable-high-log-scale-mode` on the update path requiring the monitoring addon with MSI authentication to be enabled + 20.0.0b2 +++++++ * `az aks nodepool update`: clean up some useless code in the update managed gpu function. diff --git a/src/aks-preview/azext_aks_preview/_helpers.py b/src/aks-preview/azext_aks_preview/_helpers.py index 1f035643969..e1c31d582e9 100644 --- a/src/aks-preview/azext_aks_preview/_helpers.py +++ b/src/aks-preview/azext_aks_preview/_helpers.py @@ -389,6 +389,27 @@ def check_is_azure_cli_core_editable_installed(): return False +def get_monitoring_addon_key(addon_profiles, monitoring_addon_name): + """Return the canonical key for the monitoring addon, normalizing non-standard casing. + + The API response may return the monitoring addon key in any casing (e.g. + "omsagent", "omsAgent", "oMSaGent"). This helper performs a + case-insensitive lookup and, when a non-standard key is found, re-keys + addon_profiles in-place so that subsequent code always uses the canonical + monitoring_addon_name (lowercase) form. 
+ """ + if addon_profiles is None: + return monitoring_addon_name + if monitoring_addon_name in addon_profiles: + return monitoring_addon_name + target_lower = monitoring_addon_name.lower() + for key in list(addon_profiles): + if key.lower() == target_lower: + addon_profiles[monitoring_addon_name] = addon_profiles.pop(key) + return monitoring_addon_name + return monitoring_addon_name + + def check_is_monitoring_addon_enabled(addons, instance): is_monitoring_addon_enabled = False is_monitoring_addon = False @@ -401,10 +422,11 @@ def check_is_monitoring_addon_enabled(addons, instance): is_monitoring_addon = True break addon_profiles = instance.addon_profiles or {} + monitoring_addon_key = get_monitoring_addon_key(addon_profiles, CONST_MONITORING_ADDON_NAME) is_monitoring_addon_enabled = ( is_monitoring_addon - and CONST_MONITORING_ADDON_NAME in addon_profiles - and addon_profiles[CONST_MONITORING_ADDON_NAME].enabled + and monitoring_addon_key in addon_profiles + and addon_profiles[monitoring_addon_key].enabled ) except Exception as ex: # pylint: disable=broad-except logger.debug("failed to check monitoring addon enabled: %s", ex) diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py index cf7c7197235..de9ae1a3ab8 100644 --- a/src/aks-preview/azext_aks_preview/custom.py +++ b/src/aks-preview/azext_aks_preview/custom.py @@ -66,6 +66,7 @@ check_is_private_link_cluster, get_cluster_snapshot_by_snapshot_id, get_k8s_extension_module, + get_monitoring_addon_key, get_nodepool_snapshot_by_snapshot_id, print_or_merge_credentials, process_message_for_run_command, @@ -568,7 +569,7 @@ def __init__(self, location, resource_id): ) error = None break - except CLIError as e: + except (CLIError, HttpResponseError) as e: error = e else: raise error @@ -3045,14 +3046,16 @@ def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=F subscription_id = get_subscription_id(cmd.cli_ctx) try: + addon_profiles = 
instance.addon_profiles or {} + monitoring_addon_key = get_monitoring_addon_key(addon_profiles, CONST_MONITORING_ADDON_NAME) if ( addons == "monitoring" and - CONST_MONITORING_ADDON_NAME in instance.addon_profiles and - instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and + monitoring_addon_key in addon_profiles and + addon_profiles[monitoring_addon_key].enabled and CONST_MONITORING_USING_AAD_MSI_AUTH in - instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and + addon_profiles[monitoring_addon_key].config and str( - instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[ + addon_profiles[monitoring_addon_key].config[ CONST_MONITORING_USING_AAD_MSI_AUTH ] ).lower() == "true" @@ -3060,7 +3063,7 @@ def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=F # remove the DCR association because otherwise the DCR can't be deleted ensure_container_insights_for_monitoring( cmd, - instance.addon_profiles[CONST_MONITORING_ADDON_NAME], + addon_profiles[monitoring_addon_key], subscription_id, resource_group_name, name, @@ -3163,11 +3166,13 @@ def aks_enable_addons( if ( is_monitoring_addon_enabled ): + addon_profiles = instance.addon_profiles or {} + monitoring_addon_key = get_monitoring_addon_key(addon_profiles, CONST_MONITORING_ADDON_NAME) if ( CONST_MONITORING_USING_AAD_MSI_AUTH in - instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and + addon_profiles[monitoring_addon_key].config and str( - instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[ + addon_profiles[monitoring_addon_key].config[ CONST_MONITORING_USING_AAD_MSI_AUTH ] ).lower() == "true" @@ -3175,10 +3180,15 @@ def aks_enable_addons( if not msi_auth: raise ArgumentUsageError( "--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.") + # Auto-enable HLSM when CNL is active and HLSM wasn't explicitly set + if enable_high_log_scale_mode is None and \ + (addon_profiles[monitoring_addon_key].config or {}).get( + 
"enableRetinaNetworkFlags", "").lower() == "true": + enable_high_log_scale_mode = True # create a Data Collection Rule (DCR) and associate it with the cluster ensure_container_insights_for_monitoring( cmd, - instance.addon_profiles[CONST_MONITORING_ADDON_NAME], + addon_profiles[monitoring_addon_key], subscription_id, resource_group_name, name, @@ -3206,7 +3216,7 @@ def aks_enable_addons( raise ArgumentUsageError("--ampls-resource-id can not be used without MSI auth.") ensure_container_insights_for_monitoring( cmd, - instance.addon_profiles[CONST_MONITORING_ADDON_NAME], + addon_profiles[monitoring_addon_key], subscription_id, resource_group_name, name, diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index d0ccbfb0389..9a140ecb664 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -27,7 +27,6 @@ CONST_MANAGED_CLUSTER_SKU_TIER_FREE, CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM, CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD, - CONST_MONITORING_ADDON_NAME_CAMELCASE, CONST_NETWORK_DATAPLANE_CILIUM, CONST_NETWORK_PLUGIN_AZURE, CONST_NETWORK_PLUGIN_MODE_OVERLAY, @@ -55,6 +54,7 @@ CONST_ACNS_DATAPATH_ACCELERATION_MODE_NONE, CONST_TRANSIT_ENCRYPTION_TYPE_MTLS, CONST_ADVANCED_NETWORKPOLICIES_L7, + CONST_MONITORING_ADDON_NAME_CAMELCASE, ) from azext_aks_preview.azurecontainerstorage._consts import ( CONST_ACSTOR_EXT_INSTALLATION_NAME, @@ -66,6 +66,7 @@ check_is_azure_cli_core_editable_installed, check_is_private_cluster, get_cluster_snapshot_by_snapshot_id, + get_monitoring_addon_key, filter_hard_taints, ) from azext_aks_preview._loadbalancer import create_load_balancer_profile @@ -172,6 +173,14 @@ ResourceReference = TypeVar("ResourceReference") +def _get_monitoring_addon_key_from_consts(addon_profiles, addon_consts): + """Thin wrapper around get_monitoring_addon_key that unpacks addon_consts dict.""" 
+ return get_monitoring_addon_key( + addon_profiles, + addon_consts.get("CONST_MONITORING_ADDON_NAME"), + ) + + # pylint: disable=too-few-public-methods class AKSPreviewManagedClusterModels(AKSManagedClusterModels): """Store the models used in aks series of commands. @@ -428,7 +437,22 @@ def get_enable_msi_auth_for_monitoring(self) -> Union[bool, None]: elif enable_azure_monitor_logs: result = True elif enable_msi_auth_for_monitoring is False: - result = False + # The base class returns False when service_principal_profile.client_id is not None, + # but MSI-based clusters set client_id to "msi". Check if the monitoring addon + # already has useAADAuth=true, which indicates MSI auth is actually in use. + addon_consts = self.get_addon_consts() + CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + if self.mc and self.mc.addon_profiles: + monitoring_addon_key = _get_monitoring_addon_key_from_consts( + self.mc.addon_profiles, addon_consts) + monitoring_profile = self.mc.addon_profiles.get(monitoring_addon_key) + result = bool( + monitoring_profile and monitoring_profile.config and + str(monitoring_profile.config.get( + CONST_MONITORING_USING_AAD_MSI_AUTH, "")).lower() == "true" + ) + else: + result = False elif enable_msi_auth_for_monitoring is None and not disable_msi_auth and not enable_msi_auth: result = True else: @@ -1038,11 +1062,13 @@ def get_container_network_logs(self, mc: ManagedCluster) -> Union[bool, None]: "monitoring" in enable_addons or bool(self.raw_param.get("enable_azure_monitor_logs")) ) - monitoring_already_enabled = ( - mc.addon_profiles and - mc.addon_profiles.get("omsagent") and - mc.addon_profiles["omsagent"].enabled - ) + monitoring_already_enabled = False + if mc.addon_profiles: + addon_consts = self.get_addon_consts() + mk = _get_monitoring_addon_key_from_consts(mc.addon_profiles, addon_consts) + monitoring_already_enabled = bool( + mc.addon_profiles.get(mk) and mc.addon_profiles[mk].enabled + ) 
monitoring_enabled = monitoring_being_enabled or monitoring_already_enabled if not acns_enabled or not monitoring_enabled: raise InvalidArgumentValueError( @@ -2831,7 +2857,6 @@ def _get_enable_opentelemetry_logs(self, enable_validation: bool = False) -> boo # 1. New API: azureMonitorProfile.containerInsights.enabled # 2. Legacy: addonProfiles.omsagent.enabled (or omsAgent with camelCase) addon_consts = self.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") # Check new API location container_insights_enabled = ( @@ -2840,15 +2865,13 @@ def _get_enable_opentelemetry_logs(self, enable_validation: bool = False) -> boo self.mc.azure_monitor_profile.container_insights.enabled ) - # Check legacy addon location (try both lowercase and camelCase) + # Check legacy addon location monitoring_addon_enabled = False if self.mc.addon_profiles: - # Try lowercase first (constant value) - if CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles: - monitoring_addon_enabled = self.mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled - # Try camelCase variant (what Azure actually returns) - elif CONST_MONITORING_ADDON_NAME_CAMELCASE in self.mc.addon_profiles: - monitoring_addon_enabled = self.mc.addon_profiles[CONST_MONITORING_ADDON_NAME_CAMELCASE].enabled + addon_consts = self.get_addon_consts() + mk = _get_monitoring_addon_key_from_consts(self.mc.addon_profiles, addon_consts) + if mk in self.mc.addon_profiles: + monitoring_addon_enabled = self.mc.addon_profiles[mk].enabled monitoring_addon_currently_enabled = container_insights_enabled or monitoring_addon_enabled @@ -2985,7 +3008,6 @@ def get_enable_high_log_scale_mode(self) -> Union[bool, None]: # Validate that monitoring addon is enabled (either being enabled now or already enabled in cluster) addon_consts = self.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") # Check if monitoring is being enabled in the command enable_addons = 
self.raw_param.get("enable_addons") @@ -2997,10 +3019,9 @@ def get_enable_high_log_scale_mode(self) -> Union[bool, None]: # Check if monitoring addon is already enabled in the cluster monitoring_addon_enabled = False if self.mc and self.mc.addon_profiles: - if CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles: - monitoring_addon_enabled = self.mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled - elif CONST_MONITORING_ADDON_NAME_CAMELCASE in self.mc.addon_profiles: - monitoring_addon_enabled = self.mc.addon_profiles[CONST_MONITORING_ADDON_NAME_CAMELCASE].enabled + mk = _get_monitoring_addon_key_from_consts(self.mc.addon_profiles, addon_consts) + if mk in self.mc.addon_profiles: + monitoring_addon_enabled = self.mc.addon_profiles[mk].enabled if not monitoring_being_enabled and not enable_azure_monitor_logs and not monitoring_addon_enabled: raise RequiredArgumentMissingError( @@ -3011,10 +3032,25 @@ def get_enable_high_log_scale_mode(self) -> Union[bool, None]: # Auto-enable high log scale mode return True + # If user explicitly disables HLSM, check if CNL is already enabled on the cluster + if enable_high_log_scale_mode is False: + cnl_already_enabled = False + if self.mc and self.mc.addon_profiles: + addon_consts = self.get_addon_consts() + mk = _get_monitoring_addon_key_from_consts(self.mc.addon_profiles, addon_consts) + monitoring_profile = self.mc.addon_profiles.get(mk) + if monitoring_profile and monitoring_profile.config: + cnl_already_enabled = str( + monitoring_profile.config.get("enableRetinaNetworkFlags", "") + ).lower() == "true" + if cnl_already_enabled: + raise MutuallyExclusiveArgumentError( + "Cannot explicitly disable --enable-high-log-scale-mode while " + "container network logs are enabled on the cluster. " + "Please disable container network logs first with --disable-container-network-logs." 
+ ) + # If container network logs are not being enabled, return the original value - # Return False if not explicitly set to maintain backward compatibility with base class - if enable_high_log_scale_mode is None: - return False return enable_high_log_scale_mode def _get_enable_vpa(self, enable_validation: bool = False) -> bool: @@ -4642,17 +4678,11 @@ def _setup_azure_monitor_logs(self, mc: ManagedCluster) -> None: } mc.addon_profiles[CONST_MONITORING_ADDON_NAME] = addon_profile + # DCR and DCRA creation is deferred to postprocessing_after_mc_created + # so that all flags are finalized and the cluster exists. + # Only MSI clusters need a DCR. self.context.set_intermediate("monitoring_addon_enabled", True, overwrite_exists=True) - # Call ensure_container_insights_for_monitoring with all parameters (similar to postprocessing) - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") - if (mc.addon_profiles and - CONST_MONITORING_ADDON_NAME in mc.addon_profiles and - mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled): - - # Set intermediate value to trigger postprocessing - self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) - def _setup_opentelemetry_metrics(self, mc: ManagedCluster) -> None: """Set up OpenTelemetry metrics configuration.""" self._ensure_app_monitoring_profile(mc) @@ -5304,21 +5334,23 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: self.context.external_functions.add_monitoring_role_assignment( cluster, cluster_resource_id, self.cmd ) - elif (self.context.raw_param.get("enable_addons") is not None or - self.context.raw_param.get("enable-azure-monitor-logs") is not None): - # Create the DCR Association here + elif self._should_create_dcra(): addon_consts = self.context.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + monitoring_addon_key = ( + 
_get_monitoring_addon_key_from_consts(cluster.addon_profiles, addon_consts) + if cluster.addon_profiles + else addon_consts.get("CONST_MONITORING_ADDON_NAME") + ) self.context.external_functions.ensure_container_insights_for_monitoring( self.cmd, - cluster.addon_profiles[CONST_MONITORING_ADDON_NAME], + cluster.addon_profiles[monitoring_addon_key], self.context.get_subscription_id(), self.context.get_resource_group_name(), self.context.get_name(), self.context.get_location(), remove_monitoring=False, aad_route=self.context.get_enable_msi_auth_for_monitoring(), - create_dcr=False, + create_dcr=True, create_dcra=True, enable_syslog=self.context.get_enable_syslog(), data_collection_settings=self.context.get_data_collection_settings(), @@ -5342,10 +5374,8 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: if (current_cluster.addon_profiles and CONST_MONITORING_ADDON_NAME in current_cluster.addon_profiles): - # Use the current cluster addon profile for cleanup addon_profile = current_cluster.addon_profiles[CONST_MONITORING_ADDON_NAME] - # Call ensure_container_insights_for_monitoring with remove_monitoring=True (same as aks_disable_addons) try: self.context.external_functions.ensure_container_insights_for_monitoring( self.cmd, @@ -5364,7 +5394,6 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: enable_high_log_scale_mode=False ) except TypeError: - # Ignore TypeError just like aks_disable_addons does pass # ingress appgw addon @@ -5535,6 +5564,24 @@ def put_mc(self, mc: ManagedCluster) -> ManagedCluster: ) return cluster + def _should_create_dcra(self) -> bool: + """Return True if any flag that triggers a DCRA/DCR create or update was provided.""" + params = self.context.raw_param + return ( + params.get("enable_addons") is not None or + params.get("enable_azure_monitor_logs") is not None or + self._is_cnl_or_hlsm_changing() + ) + + def _is_cnl_or_hlsm_changing(self) -> bool: + """Return True if any CNL or High Log 
Scale Mode enable flag was provided.""" + params = self.context.raw_param + return ( + params.get("enable_container_network_logs") is not None or + params.get("enable_retina_flow_logs") is not None or + params.get("enable_high_log_scale_mode") is not None + ) + class AKSPreviewManagedClusterUpdateDecorator(AKSManagedClusterUpdateDecorator): def __init__( @@ -5608,6 +5655,7 @@ def get_special_parameter_default_value_pairs_list(self) -> List[Tuple[Any, Any] (self.context.get_nat_gateway_managed_outbound_ipv6_count(), None), (self.context.get_nat_gateway_outbound_ip_ids(), None), (self.context.get_nat_gateway_outbound_ip_prefix_ids(), None), + (self.context.raw_param.get("enable_high_log_scale_mode"), None), ] def check_raw_parameters(self): @@ -5790,6 +5838,9 @@ def update_monitoring_profile_flow_logs(self, mc: ManagedCluster) -> ManagedClus """ self._ensure_mc(mc) + # Do not call super() — the base class HLSM validation does not handle + # preview-specific flags (--enable-azure-monitor-logs, --enable-addons monitoring). + # Trigger validation for high log scale mode when container network logs are enabled. # This ensures proper error messages are raised before cluster update if the user # explicitly disables high log scale mode while enabling container network logs. 
@@ -5801,12 +5852,81 @@ def update_monitoring_profile_flow_logs(self, mc: ManagedCluster) -> ManagedClus if container_network_logs_enabled is not None: if mc.addon_profiles: addon_consts = self.context.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") - monitoring_addon_profile = mc.addon_profiles.get(CONST_MONITORING_ADDON_NAME) + monitoring_addon_key = _get_monitoring_addon_key_from_consts(mc.addon_profiles, addon_consts) + monitoring_addon_profile = mc.addon_profiles.get(monitoring_addon_key) if monitoring_addon_profile: config = monitoring_addon_profile.config or {} config["enableRetinaNetworkFlags"] = str(container_network_logs_enabled) - mc.addon_profiles[CONST_MONITORING_ADDON_NAME].config = config + mc.addon_profiles[monitoring_addon_key].config = config + + # When enabling CNL, the DCR must be updated to add the high-scale stream. + # Set the postprocessing intermediate so that the update path calls ensure_container_insights. + if self.context.raw_param.get("enable_container_network_logs") or \ + self.context.raw_param.get("enable_retina_flow_logs"): + self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) + + # When --enable-high-log-scale-mode is passed standalone on the update path, validate that + # monitoring with MSI auth is already enabled, then trigger the DCR update via postprocessing. 
+ enable_high_log_scale_mode = self.context.raw_param.get("enable_high_log_scale_mode") + if enable_high_log_scale_mode is True: + # Check if monitoring is being enabled in the same command + enable_azure_monitor_logs = self.context.raw_param.get("enable_azure_monitor_logs") + enable_addons = self.context.raw_param.get("enable_addons") + monitoring_being_enabled = ( + enable_azure_monitor_logs or + (enable_addons and "monitoring" in enable_addons) + ) + + if not monitoring_being_enabled: + # Only validate existing addon state when not enabling monitoring simultaneously + addon_consts = self.context.get_addon_consts() + CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + + # Resolve the addon profile, normalizing non-standard key casing. + monitoring_addon_profile = None + if mc.addon_profiles: + mk = _get_monitoring_addon_key_from_consts(mc.addon_profiles, addon_consts) + monitoring_addon_profile = mc.addon_profiles.get(mk) + + if not monitoring_addon_profile or not monitoring_addon_profile.enabled: + raise RequiredArgumentMissingError( + "--enable-high-log-scale-mode requires the Azure Monitor logs addon (omsagent) " + "to be enabled on the cluster. Please enable it first with " + "--enable-addons monitoring or --enable-azure-monitor-logs." + ) + + addon_config = monitoring_addon_profile.config or {} + msi_auth_enabled = ( + CONST_MONITORING_USING_AAD_MSI_AUTH in addon_config and + str(addon_config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == "true" + ) + if not msi_auth_enabled: + raise RequiredArgumentMissingError( + "--enable-high-log-scale-mode requires MSI authentication to be enabled " + "for the monitoring addon. Please enable it with --enable-msi-auth-for-monitoring." 
+ ) + + self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) + + elif enable_high_log_scale_mode is False: + # Check if CNL is already enabled on the cluster — cannot disable HLSM while CNL is on + cnl_already_enabled = False + if mc.addon_profiles: + addon_consts = self.context.get_addon_consts() + mk = _get_monitoring_addon_key_from_consts(mc.addon_profiles, addon_consts) + monitoring_profile = mc.addon_profiles.get(mk) + if monitoring_profile and monitoring_profile.config: + cnl_already_enabled = str( + monitoring_profile.config.get("enableRetinaNetworkFlags", "") + ).lower() == "true" + if cnl_already_enabled: + raise MutuallyExclusiveArgumentError( + "Cannot explicitly disable --enable-high-log-scale-mode while " + "container network logs are enabled on the cluster. " + "Please disable container network logs first with --disable-container-network-logs." + ) + self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) + return mc # pylint: disable=too-many-statements,too-many-locals,too-many-branches @@ -7716,6 +7836,13 @@ def _setup_azure_monitor_logs(self, mc: ManagedCluster) -> None: if existing_key: addon_profile = mc.addon_profiles[existing_key] + # Detect workspace change: if the workspace is different from the existing one, + # trigger DCR postprocessing so the DCR destination gets updated. 
+ old_config = addon_profile.config or {} + old_workspace = old_config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, "") + if old_workspace and old_workspace.lower() != workspace_resource_id.lower(): + self.context.set_intermediate( + "monitoring_addon_postprocessing_required", True, overwrite_exists=True) else: addon_profile = self.models.ManagedClusterAddonProfile(enabled=False) existing_key = CONST_MONITORING_ADDON_NAME @@ -7727,32 +7854,30 @@ def _setup_azure_monitor_logs(self, mc: ManagedCluster) -> None: CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth } + # Also set enableRetinaNetworkFlags if container network logs are being enabled + # in the same command. This must be done here because update_monitoring_profile_flow_logs + # may run before update_addon_profiles when the base class calls it first. + container_network_logs_enabled = self.context.get_container_network_logs(mc) + if container_network_logs_enabled is not None: + new_config["enableRetinaNetworkFlags"] = str(container_network_logs_enabled) + # Replace the entire config, not just individual keys addon_profile.config = new_config mc.addon_profiles[existing_key] = addon_profile self.context.set_intermediate("monitoring_addon_enabled", True, overwrite_exists=True) - # Call ensure_container_insights_for_monitoring with all parameters (similar to postprocessing) - if (mc.addon_profiles and - existing_key in mc.addon_profiles and - mc.addon_profiles[existing_key].enabled): - - # Set intermediate value to trigger postprocessing - self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) def _disable_azure_monitor_logs(self, mc: ManagedCluster) -> None: """Disable Azure Monitor logs configuration.""" addon_consts = self.context.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") - # Check if the addon 
profile exists (check both lowercase and camelCase) + # Normalize the addon key (handles any casing variant) addon_key = None if mc.addon_profiles: - if CONST_MONITORING_ADDON_NAME in mc.addon_profiles: - addon_key = CONST_MONITORING_ADDON_NAME - elif CONST_MONITORING_ADDON_NAME_CAMELCASE in mc.addon_profiles: - addon_key = CONST_MONITORING_ADDON_NAME_CAMELCASE + addon_key = _get_monitoring_addon_key_from_consts(mc.addon_profiles, addon_consts) + if addon_key not in mc.addon_profiles: + addon_key = None # If the addon profile doesn't exist at all, there's nothing to disable if not addon_key: @@ -7788,15 +7913,12 @@ def _disable_azure_monitor_logs(self, mc: ManagedCluster) -> None: # Fetch the current cluster state from Azure (same as aks_disable_addons line 2791) current_cluster = self.client.get(self.context.get_resource_group_name(), self.context.get_name()) - # Find the addon key in current_cluster (it may have different casing) - current_addon_key = None - if current_cluster.addon_profiles: - if CONST_MONITORING_ADDON_NAME in current_cluster.addon_profiles: - current_addon_key = CONST_MONITORING_ADDON_NAME - elif CONST_MONITORING_ADDON_NAME_CAMELCASE in current_cluster.addon_profiles: - current_addon_key = CONST_MONITORING_ADDON_NAME_CAMELCASE + # Find the addon key in current_cluster (normalize casing) + current_addon_key = _get_monitoring_addon_key_from_consts( + current_cluster.addon_profiles, addon_consts) if current_cluster.addon_profiles else None + has_addon = current_addon_key and current_addon_key in (current_cluster.addon_profiles or {}) - if current_addon_key: + if has_addon: try: # Use the current cluster's addon profile for cleanup (not the modified mc object) self.context.external_functions.ensure_container_insights_for_monitoring( @@ -7821,10 +7943,15 @@ def _disable_azure_monitor_logs(self, mc: ManagedCluster) -> None: # Now disable the addon and clear configuration mc.addon_profiles[addon_key].enabled = False - - # Clear the config to remove 
old workspace resource ID and other settings mc.addon_profiles[addon_key].config = None + # Also disable azureMonitorProfile.containerInsights (the new API surface) + # The RP uses containerInsights.enabled as the source of truth; if it remains + # true while the legacy addon is disabled, the RP re-enables the addon. + if (mc.azure_monitor_profile and + mc.azure_monitor_profile.container_insights): + mc.azure_monitor_profile.container_insights.enabled = False + # Also disable OpenTelemetry logs when disabling Azure Monitor logs if opentelemetry_logs_enabled: mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled = False @@ -7962,7 +8089,10 @@ def update_mc_profile_preview(self) -> ManagedCluster: # update acns in network_profile mc = self.update_acns_in_network_profile(mc) # update update_monitoring_profile_flow_logs - mc = self.update_monitoring_profile_flow_logs(mc) + # Only call here if the base class doesn't already call it in update_mc_profile_default + # (CLI >= 2.84.0 added this call to the base class) + if not hasattr(super(), 'update_monitoring_profile_flow_logs'): + mc = self.update_monitoring_profile_flow_logs(mc) # update kubernetes support plan mc = self.update_k8s_support_plan(mc) # update AI toolchain operator @@ -8032,17 +8162,22 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: ) if monitoring_addon_postprocessing_required: addon_consts = self.context.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + monitoring_addon_key = ( + _get_monitoring_addon_key_from_consts(cluster.addon_profiles, addon_consts) + if cluster.addon_profiles + else addon_consts.get("CONST_MONITORING_ADDON_NAME") + ) + if (cluster.addon_profiles and - CONST_MONITORING_ADDON_NAME in cluster.addon_profiles and - cluster.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled): + monitoring_addon_key 
in cluster.addon_profiles and + cluster.addon_profiles[monitoring_addon_key].enabled): # Check if MSI auth is enabled if (CONST_MONITORING_USING_AAD_MSI_AUTH in - cluster.addon_profiles[CONST_MONITORING_ADDON_NAME].config and - str(cluster.addon_profiles[CONST_MONITORING_ADDON_NAME].config[ + cluster.addon_profiles[monitoring_addon_key].config and + str(cluster.addon_profiles[monitoring_addon_key].config[ CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == "true"): # Check parameter sizes to identify what might be causing large headers @@ -8057,7 +8192,7 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: self.context.external_functions.ensure_container_insights_for_monitoring( self.cmd, - cluster.addon_profiles[CONST_MONITORING_ADDON_NAME], + cluster.addon_profiles[monitoring_addon_key], self.context.get_subscription_id(), self.context.get_resource_group_name(), self.context.get_name(), diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py index cfa13cb033e..38f2dcc7a06 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py @@ -19322,6 +19322,519 @@ def test_aks_create_acns_with_flow_logs( checks=[self.is_empty()], ) + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="eastus2euap", + ) + def test_aks_create_with_azuremonitorlogs_and_cnl( + self, resource_group, resource_group_location + ): + """Test that --enable-azure-monitor-logs with --enable-container-network-logs creates DCR/DCRA correctly. + + This covers the scenario where monitoring is enabled via --enable-azure-monitor-logs (not --enable-addons monitoring) + combined with --enable-container-network-logs. 
The DCRA postprocessing must detect enable_azure_monitor_logs + to trigger ensure_container_insights_for_monitoring and create the DCR with ContainerNetworkLogs stream. + """ + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "ssh_key_value": self.generate_ssh_keys(), + "location": resource_group_location, + } + ) + + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-count=1 --tier standard " + "--network-plugin azure --network-dataplane=cilium --network-plugin-mode overlay " + "--enable-acns --enable-container-network-logs " + "--enable-azure-monitor-logs --enable-high-log-scale-mode " + "--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AdvancedNetworkingFlowLogsPreview " + ) + + response = self.cmd( + create_cmd, + checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + self.check("addonProfiles.omsagent.config.useAADAuth", "true"), + self.check("addonProfiles.omsagent.config.enableRetinaNetworkFlags", "True"), + ], + ).get_output_in_json() + + cluster_resource_id = response["id"] + subscription = cluster_resource_id.split("/")[2] + + # Verify DCR was created with ContainerNetworkLogs stream + location = resource_group_location + dataCollectionRuleName = f"MSCI-{location}-{aks_name}" + dataCollectionRuleName = dataCollectionRuleName[0:64] + dcr_resource_id = f"/subscriptions/{subscription}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}" + + get_cmd = f'rest --method get --url https://management.azure.com{dcr_resource_id}?api-version=2022-06-01' + self.cmd(get_cmd, checks=[ + self.check('properties.dataFlows[0].streams[-1]', 'Microsoft-ContainerNetworkLogs'), + ]) + + # Verify DCRA was created + dcra_resource_id = 
f"{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/ContainerInsightsExtension" + get_cmd = f'rest --method get --url https://management.azure.com{dcra_resource_id}?api-version=2022-06-01' + self.cmd(get_cmd, checks=[ + self.check('properties.dataCollectionRuleId', f'{dcr_resource_id}') + ]) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="westus2", + ) + def test_aks_update_enable_azuremonitorlogs_with_hlsm( + self, resource_group, resource_group_location + ): + """Test that --enable-azure-monitor-logs with --enable-high-log-scale-mode on update creates DCR with HighScale stream. + + Creates a plain cluster, then updates it with --enable-azure-monitor-logs --enable-high-log-scale-mode, + and verifies that the DCR is created with the Microsoft-ContainerLogV2-HighScale stream. 
+ """ + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + node_vm_size = "standard_d2s_v3" + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "location": resource_group_location, + "ssh_key_value": self.generate_ssh_keys(), + "node_vm_size": node_vm_size, + } + ) + + # Create a plain cluster without monitoring + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} " + "--enable-managed-identity --output=json" + ) + self.cmd(create_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + ]) + + # Wait for any in-progress addon operations to complete before next update + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[self.is_empty()]) + + # Update: enable monitoring with high log scale mode + update_cmd = ( + "aks update --resource-group={resource_group} --name={name} --yes " + "--enable-azure-monitor-logs --enable-high-log-scale-mode --output=json" + ) + self.cmd(update_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + self.check("addonProfiles.omsagent.config.useAADAuth", "true"), + ]) + + # Verify aks show reflects the update + show_cmd = "aks show --resource-group={resource_group} --name={name} --output=json" + response = self.cmd(show_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + ]).get_output_in_json() + + cluster_resource_id = response["id"] + subscription = cluster_resource_id.split("/")[2] + + # Verify DCR was created with HighScale stream + location = resource_group_location + dataCollectionRuleName = f"MSCI-{location}-{aks_name}" + dataCollectionRuleName = dataCollectionRuleName[0:64] + dcr_resource_id = 
f"/subscriptions/{subscription}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}" + + get_cmd = f'rest --method get --url https://management.azure.com{dcr_resource_id}?api-version=2022-06-01' + self.cmd(get_cmd, checks=[ + self.check('properties.dataFlows[0].streams[1]', 'Microsoft-ContainerLogV2-HighScale'), + ]) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="eastus2euap", + ) + def test_aks_create_with_retina_flow_logs_alias( + self, resource_group, resource_group_location + ): + """Test that --enable-retina-flow-logs works as an alias for --enable-container-network-logs. + + This verifies the alias parameter path: enable_retina_flow_logs is treated identically to + enable_container_network_logs in get_container_network_logs() and _is_cnl_or_hlsm_changing(). 
+ """ + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "ssh_key_value": self.generate_ssh_keys(), + "location": resource_group_location, + } + ) + + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-count=1 --tier standard " + "--network-plugin azure --network-dataplane=cilium --network-plugin-mode overlay " + "--enable-acns --enable-retina-flow-logs " + "--enable-addons monitoring --enable-high-log-scale-mode " + "--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AdvancedNetworkingFlowLogsPreview " + ) + + response = self.cmd( + create_cmd, + checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + self.check("addonProfiles.omsagent.config.enableRetinaNetworkFlags", "True"), + ], + ).get_output_in_json() + + cluster_resource_id = response["id"] + subscription = cluster_resource_id.split("/")[2] + + # Verify DCR was created with ContainerNetworkLogs stream + location = resource_group_location + dataCollectionRuleName = f"MSCI-{location}-{aks_name}" + dataCollectionRuleName = dataCollectionRuleName[0:64] + dcr_resource_id = f"/subscriptions/{subscription}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}" + + get_cmd = f'rest --method get --url https://management.azure.com{dcr_resource_id}?api-version=2022-06-01' + self.cmd(get_cmd, checks=[ + self.check('properties.dataFlows[0].streams[-1]', 'Microsoft-ContainerNetworkLogs'), + ]) + + # Wait for any in-progress addon operations to complete before next update + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[self.is_empty()]) + + # Disable via the alias — run twice like 
test_aks_create_acns_with_flow_logs + # (first run applies the change, second run verifies the result) + disable_cmd = "aks update --resource-group={resource_group} --name={name} --disable-retina-flow-logs -o json" + self.cmd(disable_cmd, checks=[self.check("provisioningState", "Succeeded")]) + self.cmd( + disable_cmd, + checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.config.enableRetinaNetworkFlags", "False"), + ], + ) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="eastus2euap", + ) + def test_aks_update_enable_cnl_via_azuremonitorlogs( + self, resource_group, resource_group_location + ): + """Test enabling CNL on an existing ACNS cluster by adding --enable-azure-monitor-logs on update. + + Creates an ACNS cluster without monitoring, then updates with --enable-azure-monitor-logs + --enable-container-network-logs to cover the update decorator path for enabling both + monitoring and CNL simultaneously. 
+ """ + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "ssh_key_value": self.generate_ssh_keys(), + "location": resource_group_location, + } + ) + + # Create an ACNS cluster without monitoring + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-count=1 --tier standard " + "--network-plugin azure --network-dataplane=cilium --network-plugin-mode overlay " + "--enable-acns --enable-managed-identity --output=json " + ) + self.cmd(create_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("networkProfile.advancedNetworking.enabled", True), + ]) + + # Wait for any in-progress addon operations to complete before next update + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[self.is_empty()]) + + # Update: enable monitoring + CNL together + update_cmd = ( + "aks update --resource-group={resource_group} --name={name} --yes " + "--enable-azure-monitor-logs --enable-container-network-logs --enable-high-log-scale-mode " + "--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AdvancedNetworkingFlowLogsPreview " + "--output=json" + ) + self.cmd(update_cmd) + + # Wait for the update to fully complete, then verify via aks show + self.cmd(wait_cmd, checks=[self.is_empty()]) + show_cmd = "aks show --resource-group={resource_group} --name={name} --output=json" + self.cmd(show_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + self.check("addonProfiles.omsagent.config.enableRetinaNetworkFlags", "True"), + ]) + + # Verify DCR was created with ContainerNetworkLogs stream + response = self.cmd(show_cmd).get_output_in_json() + + cluster_resource_id = response["id"] + subscription = 
cluster_resource_id.split("/")[2] + + location = resource_group_location + dataCollectionRuleName = f"MSCI-{location}-{aks_name}" + dataCollectionRuleName = dataCollectionRuleName[0:64] + dcr_resource_id = f"/subscriptions/{subscription}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}" + + get_cmd = f'rest --method get --url https://management.azure.com{dcr_resource_id}?api-version=2022-06-01' + self.cmd(get_cmd, checks=[ + self.check('properties.dataFlows[0].streams[-1]', 'Microsoft-ContainerNetworkLogs'), + ]) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="westus2", + ) + def test_aks_update_disable_azuremonitorlogs( + self, resource_group, resource_group_location + ): + """Test disabling Azure Monitor Logs via --disable-azure-monitor-logs on update. + + Creates a cluster with --enable-azure-monitor-logs, then disables monitoring + with --disable-azure-monitor-logs. Verifies the _disable_azure_monitor_logs code path + which performs DCR/DCRA cleanup before disabling the addon. 
+ """ + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "ssh_key_value": self.generate_ssh_keys(), + "location": resource_group_location, + } + ) + + # Create cluster with Azure Monitor Logs enabled + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-count=1 " + "--enable-azure-monitor-logs --enable-managed-identity --output=json" + ) + self.cmd(create_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + ]) + + # Disable Azure Monitor Logs + # Note: provisioningState may be "Updating" here due to a race between + # DCRA deletion (fire-and-forget LRO) and the subsequent PUT request. + # The aks show below verifies the final Succeeded state. + disable_cmd = ( + "aks update --resource-group={resource_group} --name={name} --yes " + "--disable-azure-monitor-logs --output=json" + ) + self.cmd(disable_cmd, checks=[ + self.check("addonProfiles.omsagent.enabled", False), + ]) + + # Verify aks show confirms monitoring is disabled + show_cmd = "aks show --resource-group={resource_group} --name={name} --output=json" + self.cmd(show_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", False), + ]) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="westus2", + ) + def test_aks_update_standalone_enable_high_log_scale_mode( + self, resource_group, resource_group_location + ): + """Test standalone --enable-high-log-scale-mode on update path. 
+ + Creates a cluster with monitoring enabled, then updates with just + --enable-high-log-scale-mode to verify the DCR is updated with + the Microsoft-ContainerLogV2-HighScale stream. + """ + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "ssh_key_value": self.generate_ssh_keys(), + "location": resource_group_location, + } + ) + + # Create cluster with Azure Monitor Logs enabled + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-count=1 " + "--enable-azure-monitor-logs --enable-managed-identity --output=json" + ) + self.cmd(create_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("addonProfiles.omsagent.enabled", True), + ]) + + # Wait for any in-progress addon operations to complete before update + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[self.is_empty()]) + + # Update: enable high log scale mode standalone + update_cmd = ( + "aks update --resource-group={resource_group} --name={name} --yes " + "--enable-high-log-scale-mode --output=json" + ) + response = self.cmd(update_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + ]).get_output_in_json() + + cluster_resource_id = response["id"] + subscription = cluster_resource_id.split("/")[2] + location = resource_group_location + dataCollectionRuleName = f"MSCI-{location}-{aks_name}" + dataCollectionRuleName = dataCollectionRuleName[0:64] + dcr_resource_id = ( + f"/subscriptions/{subscription}/resourceGroups/{resource_group}" + f"/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}" + ) + + # Verify DCR contains the HighScale stream + get_cmd = f'rest --method get --url https://management.azure.com{dcr_resource_id}?api-version=2022-06-01' + self.cmd(get_cmd, checks=[ + 
self.check("contains(properties.dataFlows[0].streams, 'Microsoft-ContainerLogV2-HighScale')", True), + ]) + + # Wait for any in-progress addon operations to complete before next update + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[self.is_empty()]) + + # Now disable high log scale mode + disable_cmd = ( + "aks update --resource-group={resource_group} --name={name} --yes " + "--enable-high-log-scale-mode false --output=json" + ) + self.cmd(disable_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + ]) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer( + random_name_length=17, + name_prefix="clitest", + location="westus2", + ) + def test_aks_update_disable_hlsm_error_when_cnl_enabled( + self, resource_group, resource_group_location + ): + """Test that disabling --enable-high-log-scale-mode raises an error + when container network logs are already enabled on the cluster.""" + self.test_resources_count = 0 + aks_name = self.create_random_name("cliakstest", 16) + self.kwargs.update( + { + "resource_group": resource_group, + "name": aks_name, + "ssh_key_value": self.generate_ssh_keys(), + "location": resource_group_location, + } + ) + + # Create cluster with monitoring + CNL + HLSM + create_cmd = ( + "aks create --resource-group={resource_group} --name={name} --location={location} " + "--ssh-key-value={ssh_key_value} --node-count=1 " + "--enable-azure-monitor-logs --enable-managed-identity " + "--enable-acns --enable-container-network-logs --output=json" + ) + self.cmd(create_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + ]) + + # Attempt to disable HLSM while CNL is still enabled — should fail + disable_cmd = ( + "aks update --resource-group={resource_group} --name={name} --yes " + "--enable-high-log-scale-mode false 
--output=json" + ) + with self.assertRaisesRegex(Exception, "container network logs"): + self.cmd(disable_cmd) + + # delete + self.cmd( + "aks delete -g {resource_group} -n {name} --yes --no-wait", + checks=[self.is_empty()], + ) + @AllowLargeResponse() @AKSCustomResourceGroupPreparer( random_name_length=17, diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_helpers.py b/src/aks-preview/azext_aks_preview/tests/latest/test_helpers.py index 19b3146fae8..bfa1f5fc7e5 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_helpers.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_helpers.py @@ -11,6 +11,7 @@ check_is_private_link_cluster, get_cluster_snapshot, get_cluster_snapshot_by_snapshot_id, + get_monitoring_addon_key, get_nodepool_snapshot, get_nodepool_snapshot_by_snapshot_id, process_message_for_run_command, @@ -211,5 +212,53 @@ def test_filter_hard_taints_mixed_effects(self): expected_filtered_taints = ["key3=value3:PreferNoSchedule", "key5:PreferNoSchedule"] self.assertEqual(filter_hard_taints(input_taints), expected_filtered_taints) + +class TestGetMonitoringAddonKey(unittest.TestCase): + """Tests for the get_monitoring_addon_key helper.""" + + def test_returns_canonical_when_present(self): + addon_profiles = {"omsagent": Mock(enabled=True)} + result = get_monitoring_addon_key(addon_profiles, "omsagent") + self.assertEqual(result, "omsagent") + # dict should be unchanged + self.assertIn("omsagent", addon_profiles) + + def test_normalizes_camelcase_key(self): + addon_profiles = {"omsAgent": Mock(enabled=True)} + result = get_monitoring_addon_key(addon_profiles, "omsagent") + self.assertEqual(result, "omsagent") + # dict should have been re-keyed + self.assertIn("omsagent", addon_profiles) + self.assertNotIn("omsAgent", addon_profiles) + + def test_normalizes_arbitrary_casing(self): + addon_profiles = {"OMSagent": Mock(enabled=True)} + result = get_monitoring_addon_key(addon_profiles, "omsagent") + self.assertEqual(result, 
"omsagent") + self.assertIn("omsagent", addon_profiles) + self.assertNotIn("OMSagent", addon_profiles) + + def test_returns_canonical_when_none_profiles(self): + result = get_monitoring_addon_key(None, "omsagent") + self.assertEqual(result, "omsagent") + + def test_returns_canonical_when_key_not_present(self): + addon_profiles = {"some_other_addon": Mock(enabled=True)} + result = get_monitoring_addon_key(addon_profiles, "omsagent") + self.assertEqual(result, "omsagent") + + def test_prefers_exact_match_over_case_insensitive(self): + # If both canonical and variant exist, canonical wins (no re-keying) + addon_profiles = { + "omsagent": Mock(enabled=True), + "omsAgent": Mock(enabled=False), + } + result = get_monitoring_addon_key(addon_profiles, "omsagent") + self.assertEqual(result, "omsagent") + # Both keys should still be present (no re-keying needed) + self.assertIn("omsagent", addon_profiles) + self.assertIn("omsAgent", addon_profiles) + + if __name__ == "__main__": unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py index f84639fab49..3f8cd77ae37 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py @@ -43,6 +43,7 @@ CONST_APP_ROUTING_ISTIO_MODE_ENABLED, CONST_APP_ROUTING_ISTIO_MODE_DISABLED, CONST_MONITORING_ADDON_NAME, + CONST_MONITORING_ADDON_NAME_CAMELCASE, CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, CONST_MONITORING_USING_AAD_MSI_AUTH, CONST_NODEPOOL_MODE_SYSTEM, @@ -72,6 +73,7 @@ AKSPreviewManagedClusterCreateDecorator, AKSPreviewManagedClusterModels, AKSPreviewManagedClusterUpdateDecorator, + _get_monitoring_addon_key_from_consts, ) from azext_aks_preview.tests.latest.utils import get_test_data_file_path from azure.cli.command_modules.acs._consts import ( @@ -133,6 +135,56 @@ def 
test_models(self): ) +class AKSPreviewGetMonitoringAddonKeyTestCase(unittest.TestCase): + """Tests for the _get_monitoring_addon_key_from_consts helper function.""" + + def setUp(self): + register_aks_preview_resource_type() + self.cli_ctx = MockCLI() + self.cmd = MockCmd(self.cli_ctx) + self.models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) + self.addon_consts = { + "CONST_MONITORING_ADDON_NAME": CONST_MONITORING_ADDON_NAME, + } + + def test_returns_lowercase_key_when_present(self): + addon_profiles = { + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(enabled=True), + } + result = _get_monitoring_addon_key_from_consts(addon_profiles, self.addon_consts) + self.assertEqual(result, CONST_MONITORING_ADDON_NAME) + + def test_normalizes_camelcase_key(self): + addon_profiles = { + CONST_MONITORING_ADDON_NAME_CAMELCASE: self.models.ManagedClusterAddonProfile(enabled=True), + } + result = _get_monitoring_addon_key_from_consts(addon_profiles, self.addon_consts) + # After normalization, the canonical key should always be returned + self.assertEqual(result, CONST_MONITORING_ADDON_NAME) + # Dict should have been re-keyed in place + self.assertIn(CONST_MONITORING_ADDON_NAME, addon_profiles) + self.assertNotIn(CONST_MONITORING_ADDON_NAME_CAMELCASE, addon_profiles) + + def test_prefers_lowercase_when_both_present(self): + addon_profiles = { + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(enabled=True), + CONST_MONITORING_ADDON_NAME_CAMELCASE: self.models.ManagedClusterAddonProfile(enabled=True), + } + result = _get_monitoring_addon_key_from_consts(addon_profiles, self.addon_consts) + self.assertEqual(result, CONST_MONITORING_ADDON_NAME) + + def test_returns_default_when_no_addon_profiles(self): + result = _get_monitoring_addon_key_from_consts(None, self.addon_consts) + self.assertEqual(result, CONST_MONITORING_ADDON_NAME) + + def test_returns_default_when_neither_key_present(self): + addon_profiles = { + 
"some_other_addon": self.models.ManagedClusterAddonProfile(enabled=True), + } + result = _get_monitoring_addon_key_from_consts(addon_profiles, self.addon_consts) + self.assertEqual(result, CONST_MONITORING_ADDON_NAME) + + class AKSPreviewManagedClusterContextTestCase(unittest.TestCase): def setUp(self): # manually register CUSTOM_MGMT_AKS_PREVIEW @@ -4836,7 +4888,7 @@ def test_get_enable_high_log_scale_mode_default(self): """Test default behavior when no container network logs or high log scale mode is specified. When enable_high_log_scale_mode is not explicitly set and container network logs are not enabled, - the method should return False (not None) to maintain backward compatibility with the base class. + the method should return None to align with the base class behavior. """ ctx = AKSPreviewManagedClusterContext( self.cmd, @@ -4845,7 +4897,7 @@ def test_get_enable_high_log_scale_mode_default(self): decorator_mode=DecoratorMode.CREATE, ) result = ctx.get_enable_high_log_scale_mode() - self.assertFalse(result) + self.assertIsNone(result) def test_get_enable_high_log_scale_mode_explicit_false_without_cnl(self): """Test when user explicitly sets enable_high_log_scale_mode to False without container network logs. @@ -4872,6 +4924,23 @@ def test_get_enable_high_log_scale_mode_explicit_true(self): result = ctx.get_enable_high_log_scale_mode() self.assertTrue(result) + def test_get_enable_high_log_scale_mode_flag_without_value_create(self): + """Test passing --enable-high-log-scale-mode without an explicit boolean value. + + When using get_three_state_flag() and the user passes the flag without a value + (e.g. --enable-high-log-scale-mode), argparse sets it to True via nargs='?'. + This should behave identically to passing --enable-high-log-scale-mode true. 
+ """ + # get_three_state_flag uses nargs='?'; no value => True + ctx = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_high_log_scale_mode": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + result = ctx.get_enable_high_log_scale_mode() + self.assertTrue(result) + def test_get_enable_high_log_scale_mode_auto_enable_with_container_network_logs(self): """Test auto-enable when container network logs are enabled with proper prerequisites.""" ctx = AKSPreviewManagedClusterContext( @@ -5035,6 +5104,31 @@ def test_get_enable_high_log_scale_mode_update_error_without_existing_monitoring with self.assertRaises(RequiredArgumentMissingError): ctx.get_enable_high_log_scale_mode() + def test_get_enable_high_log_scale_mode_flag_without_value_update(self): + """Test passing --enable-high-log-scale-mode without an explicit boolean in update mode. + + When using get_three_state_flag() and the user passes the flag without a value + (e.g. --enable-high-log-scale-mode), argparse sets it to True via nargs='?'. + In update mode this should enable HLSM on the existing cluster. 
+ """ + ctx = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_high_log_scale_mode": True}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + ) + ctx.attach_mc(mc) + result = ctx.get_enable_high_log_scale_mode() + self.assertTrue(result) + def test_get_container_network_logs_returns_none_when_not_specified(self): """Test get_container_network_logs returns None when neither enable nor disable is specified.""" ctx = AKSPreviewManagedClusterContext( @@ -5117,6 +5211,33 @@ def test_get_container_network_logs_with_monitoring_already_on_mc(self): result = ctx.get_container_network_logs(mc) self.assertTrue(result) + def test_get_container_network_logs_with_monitoring_camelcase_key_on_mc(self): + """Test get_container_network_logs succeeds when monitoring uses omsAgent (camelCase) key on mc.""" + ctx = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({ + "enable_container_network_logs": True, + }), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + ) + ctx.attach_mc(mc) + result = ctx.get_container_network_logs(mc) + self.assertTrue(result) + def test_get_container_network_logs_error_without_acns(self): """Test get_container_network_logs raises error when ACNS is not enabled.""" ctx = AKSPreviewManagedClusterContext( @@ -5195,116 +5316,352 @@ def test_get_container_network_logs_legacy_retina_flow_logs_param(self): result = ctx.get_container_network_logs(mc) self.assertTrue(result) - def test_get_enable_default_domain(self): - # default 
value - ctx_1 = AKSPreviewManagedClusterContext( - self.cmd, - AKSManagedClusterParamDict({}), - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - enable_default_domain_1 = ctx_1.get_enable_default_domain() - self.assertEqual(enable_default_domain_1, None) - - # custom value - True - ctx_2 = AKSPreviewManagedClusterContext( + def test_get_container_network_logs_with_azure_monitor_logs(self): + """Test get_container_network_logs succeeds when monitoring is enabled via enable_azure_monitor_logs param.""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({"enable_default_domain": True}), + AKSManagedClusterParamDict({ + "enable_container_network_logs": True, + "enable_acns": True, + "enable_azure_monitor_logs": True, + }), self.models, decorator_mode=DecoratorMode.CREATE, ) - enable_default_domain_2 = ctx_2.get_enable_default_domain() - self.assertEqual(enable_default_domain_2, True) - - # custom value - False - ctx_3 = AKSPreviewManagedClusterContext( - self.cmd, - AKSManagedClusterParamDict({"enable_default_domain": False}), - self.models, - decorator_mode=DecoratorMode.CREATE, + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), ) - enable_default_domain_3 = ctx_3.get_enable_default_domain() - self.assertEqual(enable_default_domain_3, False) + ctx.attach_mc(mc) + result = ctx.get_container_network_logs(mc) + self.assertTrue(result) - def test_get_disable_default_domain(self): - # default value - ctx_1 = AKSPreviewManagedClusterContext( + def test_get_container_network_logs_legacy_disable_retina_flow_logs(self): + """Test get_container_network_logs returns False when legacy disable_retina_flow_logs is specified.""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({}), + AKSManagedClusterParamDict({ + "disable_retina_flow_logs": True, + }), 
self.models, decorator_mode=DecoratorMode.UPDATE, ) - disable_default_domain_1 = ctx_1.get_disable_default_domain() - self.assertEqual(disable_default_domain_1, None) + mc = self.models.ManagedCluster(location="test_location") + ctx.attach_mc(mc) + result = ctx.get_container_network_logs(mc) + self.assertFalse(result) - # custom value - True - ctx_2 = AKSPreviewManagedClusterContext( + def test_get_container_network_logs_with_acns_already_on_mc(self): + """Test get_container_network_logs succeeds when ACNS is already enabled on mc (not via raw param).""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({"disable_default_domain": True}), + AKSManagedClusterParamDict({ + "enable_container_network_logs": True, + "enable_addons": "monitoring", + }), self.models, decorator_mode=DecoratorMode.UPDATE, ) - disable_default_domain_2 = ctx_2.get_disable_default_domain() - self.assertEqual(disable_default_domain_2, True) - - # custom value - False - ctx_3 = AKSPreviewManagedClusterContext( - self.cmd, - AKSManagedClusterParamDict({"disable_default_domain": False}), - self.models, - decorator_mode=DecoratorMode.UPDATE, + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), ) - disable_default_domain_3 = ctx_3.get_disable_default_domain() - self.assertEqual(disable_default_domain_3, False) + ctx.attach_mc(mc) + result = ctx.get_container_network_logs(mc) + self.assertTrue(result) - def test_get_enable_continuous_control_plane_and_addon_monitor(self): - # default value - ctx_0 = AKSPreviewManagedClusterContext( + def test_get_enable_high_log_scale_mode_cnl_with_explicit_true(self): + """Test when user enables both CNL and HLSM=True explicitly. 
Should succeed and return True.""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({}), + AKSManagedClusterParamDict({ + "enable_container_network_logs": True, + "enable_high_log_scale_mode": True, + "enable_acns": True, + "enable_addons": "monitoring", + }), self.models, decorator_mode=DecoratorMode.CREATE, ) - self.assertEqual(ctx_0.get_enable_continuous_control_plane_and_addon_monitor(), None) + result = ctx.get_enable_high_log_scale_mode() + self.assertTrue(result) - # custom value - True - ctx_1 = AKSPreviewManagedClusterContext( + def test_get_enable_high_log_scale_mode_update_explicit_false_without_cnl(self): + """Test that HLSM=False without CNL returns False in update mode without error.""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({"enable_continuous_control_plane_and_addon_monitor": True}), + AKSManagedClusterParamDict({ + "enable_high_log_scale_mode": False, + }), self.models, - decorator_mode=DecoratorMode.CREATE, + decorator_mode=DecoratorMode.UPDATE, ) - self.assertEqual(ctx_1.get_enable_continuous_control_plane_and_addon_monitor(), True) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + ) + ctx.attach_mc(mc) + result = ctx.get_enable_high_log_scale_mode() + self.assertFalse(result) - # custom value - False - ctx_2 = AKSPreviewManagedClusterContext( + def test_get_enable_high_log_scale_mode_update_error_explicit_false_with_cnl(self): + """Test error when user explicitly disables HLSM with CNL enabled in update mode.""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({"enable_continuous_control_plane_and_addon_monitor": False}), + AKSManagedClusterParamDict({ + "enable_container_network_logs": True, + "enable_high_log_scale_mode": False, + }), self.models, - decorator_mode=DecoratorMode.CREATE, + decorator_mode=DecoratorMode.UPDATE, ) - 
self.assertEqual(ctx_2.get_enable_continuous_control_plane_and_addon_monitor(), False) + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + ) + ctx.attach_mc(mc) + with self.assertRaises(MutuallyExclusiveArgumentError): + ctx.get_enable_high_log_scale_mode() - def test_get_disable_continuous_control_plane_and_addon_monitor(self): - # default value - ctx_0 = AKSPreviewManagedClusterContext( + def test_get_enable_high_log_scale_mode_update_error_disable_hlsm_with_existing_cnl(self): + """Test error when user disables HLSM while CNL is already enabled on the cluster. + + When CNL (enableRetinaNetworkFlags) is already set to 'True' on the existing cluster + and the user passes --enable-high-log-scale-mode false without --enable-container-network-logs, + the method should raise a MutuallyExclusiveArgumentError. 
+ """ + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({}), + AKSManagedClusterParamDict({ + "enable_high_log_scale_mode": False, + }), self.models, decorator_mode=DecoratorMode.UPDATE, ) - self.assertEqual(ctx_0.get_disable_continuous_control_plane_and_addon_monitor(), None) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "True"}, + ) + }, + ) + ctx.attach_mc(mc) + with self.assertRaises(MutuallyExclusiveArgumentError): + ctx.get_enable_high_log_scale_mode() - # custom value - True - ctx_1 = AKSPreviewManagedClusterContext( + def test_get_enable_high_log_scale_mode_update_error_disable_hlsm_with_existing_cnl_camelcase(self): + """Test error when user disables HLSM while CNL is already enabled (omsAgent camelCase key).""" + ctx = AKSPreviewManagedClusterContext( self.cmd, - AKSManagedClusterParamDict({"disable_continuous_control_plane_and_addon_monitor": True}), + AKSManagedClusterParamDict({ + "enable_high_log_scale_mode": False, + }), self.models, decorator_mode=DecoratorMode.UPDATE, ) - self.assertEqual(ctx_1.get_disable_continuous_control_plane_and_addon_monitor(), True) - - # custom value - False + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "True"}, + ) + }, + ) + ctx.attach_mc(mc) + with self.assertRaises(MutuallyExclusiveArgumentError): + ctx.get_enable_high_log_scale_mode() + + def test_get_enable_high_log_scale_mode_update_monitoring_camelcase_key(self): + """Test auto-enable HLSM in update mode when monitoring uses camelCase 'omsAgent' key.""" + ctx = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({ + "enable_container_network_logs": True, + }), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + mc = 
self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + ) + ctx.attach_mc(mc) + result = ctx.get_enable_high_log_scale_mode() + self.assertTrue(result) + + def test_get_enable_msi_auth_for_monitoring_with_msi_service_principal(self): + """Test that MSI auth is correctly detected when service_principal_profile.client_id='msi'. + + The base class returns False when client_id is not None, but MSI-based clusters set + client_id to 'msi'. The preview override should check the addon config for useAADAuth. + """ + ctx = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + mc = self.models.ManagedCluster( + location="test_location", + service_principal_profile=self.models.ManagedClusterServicePrincipalProfile( + client_id="msi", + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } + ) + }, + ) + ctx.attach_mc(mc) + result = ctx.get_enable_msi_auth_for_monitoring() + self.assertTrue(result) + + def test_get_enable_default_domain(self): + # default value + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + enable_default_domain_1 = ctx_1.get_enable_default_domain() + self.assertEqual(enable_default_domain_1, None) + + # custom value - True + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_default_domain": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + enable_default_domain_2 = ctx_2.get_enable_default_domain() + self.assertEqual(enable_default_domain_2, True) + + # custom value - False + ctx_3 = 
AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_default_domain": False}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + enable_default_domain_3 = ctx_3.get_enable_default_domain() + self.assertEqual(enable_default_domain_3, False) + + def test_get_disable_default_domain(self): + # default value + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + disable_default_domain_1 = ctx_1.get_disable_default_domain() + self.assertEqual(disable_default_domain_1, None) + + # custom value - True + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"disable_default_domain": True}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + disable_default_domain_2 = ctx_2.get_disable_default_domain() + self.assertEqual(disable_default_domain_2, True) + + # custom value - False + ctx_3 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"disable_default_domain": False}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + disable_default_domain_3 = ctx_3.get_disable_default_domain() + self.assertEqual(disable_default_domain_3, False) + + def test_get_enable_continuous_control_plane_and_addon_monitor(self): + # default value + ctx_0 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_0.get_enable_continuous_control_plane_and_addon_monitor(), None) + + # custom value - True + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_continuous_control_plane_and_addon_monitor": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_1.get_enable_continuous_control_plane_and_addon_monitor(), True) + + # custom value - False + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + 
AKSManagedClusterParamDict({"enable_continuous_control_plane_and_addon_monitor": False}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_2.get_enable_continuous_control_plane_and_addon_monitor(), False) + + def test_get_disable_continuous_control_plane_and_addon_monitor(self): + # default value + ctx_0 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + self.assertEqual(ctx_0.get_disable_continuous_control_plane_and_addon_monitor(), None) + + # custom value - True + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"disable_continuous_control_plane_and_addon_monitor": True}), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + self.assertEqual(ctx_1.get_disable_continuous_control_plane_and_addon_monitor(), True) + + # custom value - False ctx_2 = AKSPreviewManagedClusterContext( self.cmd, AKSManagedClusterParamDict({"disable_continuous_control_plane_and_addon_monitor": False}), @@ -5879,6 +6236,65 @@ def test_set_up_addon_profiles_auto_enables_high_log_scale_mode_with_cnl(self): # Verify high log scale mode is auto-enabled self.assertTrue(dec.context.get_enable_high_log_scale_mode()) + def test_set_up_addon_profiles_cnl_and_hlsm_flag_without_value(self): + """Regression test: CREATE with --enable-container-network-logs --enable-acns + --enable-addons monitoring --enable-high-log-scale-mode (flag without boolean value). + + When --enable-high-log-scale-mode is passed without a value, get_three_state_flag() + sets it to True via nargs='?'. This must NOT trigger the 'Container network logs + requires --enable-acns...' error when ACNS and monitoring are both provided. 
+ """ + dec = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_addons": "monitoring", + "enable_container_network_logs": True, + "enable_high_log_scale_mode": True, # simulates --enable-high-log-scale-mode without value + "enable_acns": True, + "workspace_resource_id": "test_workspace_resource_id", + "enable_msi_auth_for_monitoring": True, + "enable_syslog": False, + "data_collection_settings": None, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + network_profile = self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking(enabled=True), + ) + mc = self.models.ManagedCluster(location="test_location", network_profile=network_profile) + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test_subscription_id") + external_functions = dec.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + # Should NOT raise InvalidArgumentValueError + dec_mc = dec.set_up_addon_profiles(mc) + self.assertTrue(dec.context.get_enable_high_log_scale_mode()) + + def test_set_up_addon_profiles_hlsm_only_no_cnl(self): + """Test that enabling HLSM without CNL does not set enableRetinaNetworkFlags in config.""" + dec = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_addons": "monitoring", + "enable_high_log_scale_mode": True, + "workspace_resource_id": "test_workspace_resource_id", + "enable_msi_auth_for_monitoring": True, + "enable_syslog": False, + "data_collection_settings": None, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test_subscription_id") + external_functions = dec.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc = dec.set_up_addon_profiles(mc) + # 
enableRetinaNetworkFlags should NOT be set when CNL is not enabled + omsagent_config = dec_mc.addon_profiles[CONST_MONITORING_ADDON_NAME].config + self.assertNotIn("enableRetinaNetworkFlags", omsagent_config) def test_set_up_http_proxy_config(self): dec_1 = AKSPreviewManagedClusterCreateDecorator( @@ -7909,47 +8325,441 @@ def test_get_enable_azure_monitor_logs_create_mode_succeeds(self): result = ctx_1.get_enable_azure_monitor_logs() self.assertTrue(result) - - def test_set_up_health_monitor_profile(self): - # no flag - no change - dec_0 = AKSPreviewManagedClusterCreateDecorator( + # ------------------------------------------------------------------ + # Tests for postprocessing_after_mc_created: + # cnl_or_hlsm_changing flag logic → create_dcr passed to + # ensure_container_insights_for_monitoring + # ------------------------------------------------------------------ + def _make_postprocessing_decorator(self, extra_raw_params): + """Helper: build a minimal Create decorator ready for postprocessing tests.""" + raw_params = { + "name": "test_name", + "resource_group_name": "test_rg_name", + "location": "test_location", + "enable_msi_auth_for_monitoring": True, + "enable_syslog": False, + "data_collection_settings": None, + } + raw_params.update(extra_raw_params) + dec = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, - {}, + raw_params, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_0 = self.models.ManagedCluster(location="test_location") - dec_0.context.attach_mc(mc_0) - dec_mc_0 = dec_0.set_up_health_monitor_profile(mc_0) - ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_0, ground_truth_mc_0) + mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test_subscription_id") + dec.context.set_intermediate("monitoring_addon_enabled", True) + return dec - # enable flag set - dec_1 = AKSPreviewManagedClusterCreateDecorator( - self.cmd, - 
self.client, - { - "enable_continuous_control_plane_and_addon_monitor": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.set_up_health_monitor_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( + def _make_cluster_with_monitoring(self): + """Helper: build a minimal cluster object that has the monitoring addon profile.""" + return self.models.ManagedCluster( location="test_location", - health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( - enable_continuous_control_plane_and_addon_monitor=True, - ), + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(enabled=True) + }, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + def test_postprocessing_create_dcr_true_when_only_enable_addons(self): + """create_dcr=True when enable_addons triggers ensure_container_insights_for_monitoring. -class AKSPreviewManagedClusterUpdateDecoratorTestCase(unittest.TestCase): - def setUp(self): - # manually register CUSTOM_MGMT_AKS_PREVIEW - register_aks_preview_resource_type() - self.cli_ctx = MockCLI() + DCR creation is now always requested during postprocessing so the + DCR is created alongside the DCRA after the cluster exists. + """ + dec = self._make_postprocessing_decorator({"enable_addons": "monitoring"}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_ensure_container_insights_not_called_without_relevant_flags(self): + """ensure_container_insights_for_monitoring is NOT called when no relevant flags are set. 
+ + When neither enable_addons, enable_azure_monitor_logs, nor any CNL/HLSM flag is + present in raw_params, the outer elif is not entered. + """ + dec = self._make_postprocessing_decorator({}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_not_called() + + def test_postprocessing_create_dcr_true_when_enable_container_network_logs(self): + """create_dcr=True when enable_container_network_logs is set. + + enable_container_network_logs is in cnl_or_hlsm_changing, so create_dcr must be True. + get_enable_high_log_scale_mode is mocked to bypass ACNS/monitoring validation. + """ + dec = self._make_postprocessing_decorator({"enable_container_network_logs": True}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm, patch.object( + dec.context, "get_enable_high_log_scale_mode", return_value=True + ): + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_create_dcr_true_when_enable_retina_flow_logs(self): + """create_dcr=True when the deprecated enable_retina_flow_logs flag is set. + + enable_retina_flow_logs is in cnl_or_hlsm_changing, so create_dcr must be True. 
+ """ + dec = self._make_postprocessing_decorator({"enable_retina_flow_logs": True}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm, patch.object( + dec.context, "get_enable_high_log_scale_mode", return_value=True + ): + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_no_dcr_when_disable_container_network_logs(self): + """ensure_container_insights is NOT called when only disable_container_network_logs is set. + + Disable flags should not trigger DCR/DCRA creation. + """ + dec = self._make_postprocessing_decorator({"disable_container_network_logs": True}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_not_called() + + def test_postprocessing_no_dcr_when_disable_retina_flow_logs(self): + """ensure_container_insights is NOT called when only disable_retina_flow_logs is set. + + Disable flags should not trigger DCR/DCRA creation. + """ + dec = self._make_postprocessing_decorator({"disable_retina_flow_logs": True}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_not_called() + + def test_postprocessing_create_dcr_true_when_enable_high_log_scale_mode_true(self): + """create_dcr=True when enable_high_log_scale_mode=True is set. 
+ + enable_high_log_scale_mode is in cnl_or_hlsm_changing (any non-None value counts), + so create_dcr must be True. + """ + dec = self._make_postprocessing_decorator({"enable_high_log_scale_mode": True}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_create_dcr_true_when_enable_high_log_scale_mode_false(self): + """create_dcr=True when enable_high_log_scale_mode=False is explicitly set. + + The cnl_or_hlsm_changing check uses `is not None`, so even an explicit False + value means the DCR must be updated. + """ + dec = self._make_postprocessing_decorator({"enable_high_log_scale_mode": False}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_create_dcr_true_when_cnl_and_hlsm_both_set(self): + """create_dcr=True when both enable_container_network_logs and enable_high_log_scale_mode are set. + + Both flags are individually sufficient to set cnl_or_hlsm_changing=True. 
+ """ + dec = self._make_postprocessing_decorator( + {"enable_container_network_logs": True, "enable_high_log_scale_mode": True} + ) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm, patch.object( + dec.context, "get_enable_high_log_scale_mode", return_value=True + ): + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_create_dcr_true_with_camelcase_addon_key(self): + """create_dcr=True when the API response uses the camelCase 'omsAgent' addon key. + + The API may return 'omsAgent' instead of 'omsagent'. The postprocessing must + handle both key variants to ensure the DCR is updated. + """ + dec = self._make_postprocessing_decorator( + {"enable_container_network_logs": True, "enable_high_log_scale_mode": True} + ) + # Build cluster with camelCase addon key (as the API might return) + cluster = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME_CAMELCASE: self.models.ManagedClusterAddonProfile(enabled=True) + }, + ) + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm, patch.object( + dec.context, "get_enable_high_log_scale_mode", return_value=True + ): + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + def test_postprocessing_create_dcr_true_when_enable_azure_monitor_logs(self): + """create_dcr=True when enable_azure_monitor_logs triggers ensure_container_insights_for_monitoring. 
+ + DCR creation is now always requested during postprocessing so the + DCR is created alongside the DCRA after the cluster exists. + """ + dec = self._make_postprocessing_decorator({"enable_azure_monitor_logs": True}) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + + # ------------------------------------------------------------------ + # Tests for _should_create_dcra and _is_cnl_or_hlsm_changing helpers + # ------------------------------------------------------------------ + def test_is_cnl_or_hlsm_changing_true_for_cnl_flags(self): + """_is_cnl_or_hlsm_changing returns True when any CNL/HLSM enable flag is set.""" + for param_name in [ + "enable_container_network_logs", + "enable_retina_flow_logs", + ]: + dec = self._make_postprocessing_decorator({param_name: True}) + self.assertTrue(dec._is_cnl_or_hlsm_changing(), f"Expected True for {param_name}") + + def test_is_cnl_or_hlsm_changing_false_for_disable_flags(self): + """_is_cnl_or_hlsm_changing returns False when only disable flags are set.""" + for param_name in [ + "disable_container_network_logs", + "disable_retina_flow_logs", + ]: + dec = self._make_postprocessing_decorator({param_name: True}) + self.assertFalse(dec._is_cnl_or_hlsm_changing(), f"Expected False for {param_name}") + + def test_is_cnl_or_hlsm_changing_true_for_hlsm_flag(self): + dec = self._make_postprocessing_decorator({"enable_high_log_scale_mode": True}) + self.assertTrue(dec._is_cnl_or_hlsm_changing()) + + def test_is_cnl_or_hlsm_changing_false_when_no_flags(self): + dec = self._make_postprocessing_decorator({}) + self.assertFalse(dec._is_cnl_or_hlsm_changing()) + + def 
test_should_create_dcra_true_for_enable_addons(self): + dec = self._make_postprocessing_decorator({"enable_addons": "monitoring"}) + self.assertTrue(dec._should_create_dcra()) + + def test_should_create_dcra_true_for_enable_azure_monitor_logs(self): + dec = self._make_postprocessing_decorator({"enable_azure_monitor_logs": True}) + self.assertTrue(dec._should_create_dcra()) + + def test_should_create_dcra_true_for_cnl_flag(self): + dec = self._make_postprocessing_decorator({"enable_container_network_logs": True}) + self.assertTrue(dec._should_create_dcra()) + + def test_should_create_dcra_false_when_no_relevant_flags(self): + dec = self._make_postprocessing_decorator({}) + self.assertFalse(dec._should_create_dcra()) + + # ------------------------------------------------------------------ + # Tests for monitoring disable postprocessing (inlined in postprocessing_after_mc_created) + # ------------------------------------------------------------------ + def test_postprocessing_monitoring_disable_calls_ensure_container_insights(self): + """postprocessing_after_mc_created calls ensure_container_insights with remove_monitoring=True for disable.""" + dec = self._make_postprocessing_decorator({}) + # Disable the enable path so only the disable path runs + dec.context.set_intermediate("monitoring_addon_enabled", False, overwrite_exists=True) + dec.context.set_intermediate("monitoring_addon_disable_postprocessing_required", True, overwrite_exists=True) + # Mock client.get to return cluster with monitoring addon + cluster_with_monitoring = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(enabled=True) + }, + ) + dec.client = Mock() + dec.client.get = Mock(return_value=cluster_with_monitoring) + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + 
dec.postprocessing_after_mc_created(cluster_with_monitoring) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["remove_monitoring"]) + + def test_postprocessing_monitoring_disable_no_addon_is_noop(self): + """postprocessing_after_mc_created disable path is a no-op when no monitoring addon on current cluster.""" + dec = self._make_postprocessing_decorator({}) + dec.context.set_intermediate("monitoring_addon_enabled", False, overwrite_exists=True) + dec.context.set_intermediate("monitoring_addon_disable_postprocessing_required", True, overwrite_exists=True) + cluster_without_monitoring = self.models.ManagedCluster(location="test_location") + dec.client = Mock() + dec.client.get = Mock(return_value=cluster_without_monitoring) + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster_without_monitoring) + mock_ecifm.assert_not_called() + + def test_postprocessing_monitoring_disable_swallows_type_error(self): + """postprocessing_after_mc_created disable path should not raise on TypeError.""" + dec = self._make_postprocessing_decorator({}) + dec.context.set_intermediate("monitoring_addon_enabled", False, overwrite_exists=True) + dec.context.set_intermediate("monitoring_addon_disable_postprocessing_required", True, overwrite_exists=True) + cluster_with_monitoring = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(enabled=True) + }, + ) + dec.client = Mock() + dec.client.get = Mock(return_value=cluster_with_monitoring) + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", side_effect=TypeError("test") + ): + # Should not raise + 
dec.postprocessing_after_mc_created(cluster_with_monitoring) + + # ------------------------------------------------------------------ + # Tests for put_mc conditional postprocessing + # ------------------------------------------------------------------ + def test_put_mc_with_postprocessing(self): + """put_mc waits for the operation and calls postprocessing when needed.""" + dec = self._make_postprocessing_decorator({"enable_addons": "monitoring"}) + mc = dec.context.mc + dec.client = Mock() + mock_poller = Mock() + dec.client.begin_create_or_update = Mock(return_value=mock_poller) + returned_cluster = self._make_cluster_with_monitoring() + with patch( + "azext_aks_preview.managed_cluster_decorator.LongRunningOperation", + return_value=Mock(return_value=returned_cluster), + ), patch.object(dec, "postprocessing_after_mc_created") as mock_post, \ + patch.object(dec, "immediate_processing_after_request"): + result = dec.put_mc(mc) + mock_post.assert_called_once_with(returned_cluster) + self.assertEqual(result, returned_cluster) + + def test_put_mc_without_postprocessing_uses_sdk_no_wait(self): + """put_mc uses sdk_no_wait when no postprocessing is required.""" + dec = self._make_postprocessing_decorator({}) + mc = dec.context.mc + dec.client = Mock() + expected_result = Mock() + with patch.object(dec, "check_is_postprocessing_required", return_value=False), \ + patch( + "azext_aks_preview.managed_cluster_decorator.sdk_no_wait", + return_value=expected_result, + ) as mock_sdk_no_wait: + result = dec.put_mc(mc) + mock_sdk_no_wait.assert_called_once() + self.assertEqual(result, expected_result) + + # ------------------------------------------------------------------ + # Test for postprocessing_after_mc_created with monitoring_being_enabled bypass + # ------------------------------------------------------------------ + def test_postprocessing_enable_hlsm_with_monitoring_being_enabled_simultaneously(self): + """When enable_high_log_scale_mode=True and monitoring is being 
enabled in same command, + the standalone HLSM validation should be skipped (monitoring_being_enabled=True path).""" + dec = self._make_postprocessing_decorator({ + "enable_addons": "monitoring", + "enable_high_log_scale_mode": True, + }) + cluster = self._make_cluster_with_monitoring() + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + self.assertTrue(kwargs["enable_high_log_scale_mode"]) + + def test_set_up_health_monitor_profile(self): + # no flag - no change + dec_0 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_0 = self.models.ManagedCluster(location="test_location") + dec_0.context.attach_mc(mc_0) + dec_mc_0 = dec_0.set_up_health_monitor_profile(mc_0) + ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_0, ground_truth_mc_0) + + # enable flag set + dec_1 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_continuous_control_plane_and_addon_monitor": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.set_up_health_monitor_profile(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( + enable_continuous_control_plane_and_addon_monitor=True, + ), + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + +class AKSPreviewManagedClusterUpdateDecoratorTestCase(unittest.TestCase): + def setUp(self): + # manually register CUSTOM_MGMT_AKS_PREVIEW + register_aks_preview_resource_type() + self.cli_ctx = MockCLI() self.cmd = MockCmd(self.cli_ctx) 
self.models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) self.client = MockClient() @@ -10941,13 +11751,14 @@ def test_setup_azure_monitor_logs_with_omsagent_camelcase(self): # Call _setup_azure_monitor_logs dec_1._setup_azure_monitor_logs(mc_1) - # Verify: The parent class normalizes addon keys to lowercase in-place, - # so "omsAgent" becomes "omsagent". The key point is no duplicate is created. - self.assertIn("omsagent", mc_1.addon_profiles) + # Verify: The existing key is preserved (no duplicate created). + # The implementation keeps the original casing ("omsAgent") found in addon_profiles. self.assertEqual(len([k for k in mc_1.addon_profiles if k.lower() == "omsagent"]), 1) # No duplicate - self.assertTrue(mc_1.addon_profiles["omsagent"].enabled) + # Find the actual key used (could be normalized or preserved depending on parent behavior) + actual_key = next(k for k in mc_1.addon_profiles if k.lower() == "omsagent") + self.assertTrue(mc_1.addon_profiles[actual_key].enabled) self.assertEqual( - mc_1.addon_profiles["omsagent"].config["logAnalyticsWorkspaceResourceID"], + mc_1.addon_profiles[actual_key].config["logAnalyticsWorkspaceResourceID"], "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" ) @@ -11018,10 +11829,10 @@ def test_disable_azure_monitor_logs_with_omsagent_camelcase(self): # Call _disable_azure_monitor_logs dec_1._disable_azure_monitor_logs(mc_1) - # Verify: omsAgent should be disabled - self.assertIn("omsAgent", mc_1.addon_profiles) - self.assertFalse(mc_1.addon_profiles["omsAgent"].enabled) - self.assertIsNone(mc_1.addon_profiles["omsAgent"].config) + # After normalization, the camelCase key is re-keyed to canonical lowercase + self.assertIn("omsagent", mc_1.addon_profiles) + self.assertFalse(mc_1.addon_profiles["omsagent"].enabled) + self.assertIsNone(mc_1.addon_profiles["omsagent"].config) def test_disable_azure_monitor_logs_with_omsagent_lowercase(self): # Test 
that _disable_azure_monitor_logs handles omsagent (lowercase) correctly @@ -11059,6 +11870,124 @@ def test_disable_azure_monitor_logs_with_omsagent_lowercase(self): self.assertFalse(mc_1.addon_profiles["omsagent"].enabled) self.assertIsNone(mc_1.addon_profiles["omsagent"].config) + def test_disable_azure_monitor_logs_disables_container_insights(self): + # Test that _disable_azure_monitor_logs disables both addon profile AND + # azureMonitorProfile.containerInsights (the new API surface) + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_monitor_logs": True, + "yes": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_1 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + "useAADAuth": "false" + } + ) + }, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True, + log_analytics_workspace_resource_id="/subscriptions/test/workspace" + ) + ), + ) + dec_1.context.attach_mc(mc_1) + + dec_1._disable_azure_monitor_logs(mc_1) + + # Verify addon profile is disabled + self.assertFalse(mc_1.addon_profiles["omsagent"].enabled) + self.assertIsNone(mc_1.addon_profiles["omsagent"].config) + + # Verify container_insights is also disabled + self.assertFalse(mc_1.azure_monitor_profile.container_insights.enabled) + + def test_disable_azure_monitor_logs_disables_container_insights_camelcase(self): + # Same test but with omsAgent (camelCase) key + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_monitor_logs": True, + "yes": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_1 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + 
enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + "useAADAuth": "false" + } + ) + }, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True, + log_analytics_workspace_resource_id="/subscriptions/test/workspace" + ) + ), + ) + dec_1.context.attach_mc(mc_1) + + dec_1._disable_azure_monitor_logs(mc_1) + + # After normalization, the camelCase key is re-keyed to the canonical lowercase form + self.assertFalse(mc_1.addon_profiles["omsagent"].enabled) + self.assertIsNone(mc_1.addon_profiles["omsagent"].config) + + # Verify container_insights is also disabled + self.assertFalse(mc_1.azure_monitor_profile.container_insights.enabled) + + def test_disable_azure_monitor_logs_without_container_insights(self): + # Test that disable works when container_insights is not set (addon-only cluster) + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_monitor_logs": True, + "yes": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_1 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + "useAADAuth": "false" + } + ) + }, + ) + dec_1.context.attach_mc(mc_1) + + dec_1._disable_azure_monitor_logs(mc_1) + + # Verify addon profile is disabled + self.assertFalse(mc_1.addon_profiles["omsagent"].enabled) + self.assertIsNone(mc_1.addon_profiles["omsagent"].config) + + # container_insights was never set, should not error + self.assertIsNone(mc_1.azure_monitor_profile) + def test_get_enable_opentelemetry_logs_validation_with_omsagent_camelcase(self): # Test that OpenTelemetry logs validation recognizes omsAgent (camelCase) as enabled ctx_1 = AKSPreviewManagedClusterContext( @@ -12899,6 +13828,8 @@ def 
test_enable_container_network_logs(self): }, ) self.assertEqual(dec_mc_1, ground_truth_mc_1) + # Verify HLSM is auto-enabled when CNL is enabled + self.assertTrue(dec_1.context.get_enable_high_log_scale_mode()) # Case 2: acns is enabled, monitoring is enabled, disable retina network flow logs dec_2 = AKSPreviewManagedClusterUpdateDecorator( @@ -13143,6 +14074,8 @@ def test_enable_container_network_logs(self): }, ) self.assertEqual(dec_mc_7, ground_truth_mc_7) + # Verify HLSM is auto-enabled when using deprecated flag + self.assertTrue(dec_7.context.get_enable_high_log_scale_mode()) # Case 8: Error when explicitly disabling high log scale mode with container network logs enabled dec_8 = AKSPreviewManagedClusterUpdateDecorator( @@ -13272,1375 +14205,2556 @@ def test_enable_container_network_logs(self): } self.assertEqual(dec_mc_11.addon_profiles["omsagent"], ground_truth_mc_11["omsagent"]) - def test_update_node_provisioning_profile(self): - dec_0 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # Not specified case - mc_0 = self.models.ManagedCluster(location="test_location") - dec_0.context.attach_mc(mc_0) - dec_mc_0 = dec_0.update_node_provisioning_profile(mc_0) - ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_0, ground_truth_mc_0) - - # Set Mode to Auto - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # Case 12: Verify monitoring_addon_postprocessing_required is set when CNL is enabled (update path) + # This test verifies the fix for the bug where DCR is not updated when enabling CNL on update + dec_12 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "node_provisioning_mode": "Auto", + "enable_container_network_logs": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_node_provisioning_profile(mc_1) - ground_truth_mc_1 
= self.models.ManagedCluster( + mc_12 = self.models.ManagedCluster( location="test_location", - node_provisioning_profile=self.models.ManagedClusterNodeProvisioningProfile( - mode="Auto", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # Set Mode to Auto and DefaultPools to None - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + dec_12.context.attach_mc(mc_12) + dec_mc_12 = dec_12.update_monitoring_profile_flow_logs(mc_12) + # Verify the intermediate is set to trigger DCR update in postprocessing + self.assertTrue(dec_12.context.get_intermediate("monitoring_addon_postprocessing_required")) + # Verify HLSM is auto-enabled when CNL is enabled + self.assertTrue(dec_12.context.get_enable_high_log_scale_mode()) + ground_truth_mc_12 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "True"} + ) + }, + ) + self.assertEqual(dec_mc_12, ground_truth_mc_12) + + # Case 13: Verify monitoring_addon_postprocessing_required is NOT set when CNL is disabled (update path) + # Disabling CNL should not trigger DCR update + dec_13 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "node_provisioning_mode": "Auto", - "node_provisioning_default_pools": "None", - + 
"disable_container_network_logs": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster( + mc_13 = self.models.ManagedCluster( location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "True"} + ) + }, ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_node_provisioning_profile(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( + dec_13.context.attach_mc(mc_13) + dec_mc_13 = dec_13.update_monitoring_profile_flow_logs(mc_13) + # Disabling CNL should not set the postprocessing intermediate + self.assertFalse(dec_13.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False)) + ground_truth_mc_13 = self.models.ManagedCluster( location="test_location", - node_provisioning_profile=self.models.ManagedClusterNodeProvisioningProfile( - mode="Auto", - default_node_pools="None", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "False"} + ) + }, ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + self.assertEqual(dec_mc_13, ground_truth_mc_13) - def test_update_managed_system_pools(self): - """Test update_managed_system_pools method functionality.""" - # Test with no agent pools - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # Case 13b: Disable CNL with omsAgent 
(camelCase key) - verifies the fix + # for the bug where disable-container-network-logs didn't work when Azure API + # returned the addon profile key as "omsAgent" instead of "omsagent" + dec_13b = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "disable_container_network_logs": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster( + mc_13b = self.models.ManagedCluster( location="test_location", - agent_pool_profiles=None, + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "True"} + ) + }, ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_managed_system_pools(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( + dec_13b.context.attach_mc(mc_13b) + dec_mc_13b = dec_13b.update_monitoring_profile_flow_logs(mc_13b) + ground_truth_mc_13b = self.models.ManagedCluster( location="test_location", - agent_pool_profiles=None, + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "False"} + ) + }, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + self.assertEqual(dec_mc_13b, ground_truth_mc_13b) - # Test with empty agent pool profiles - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + # Case 14: Verify monitoring_addon_postprocessing_required is set when using deprecated flag (update path) 
+ dec_14 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_retina_flow_logs": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster( + mc_14 = self.models.ManagedCluster( location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, ) - mc_2.agent_pool_profiles = [] - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_managed_system_pools(mc_2) + dec_14.context.attach_mc(mc_14) + dec_mc_14 = dec_14.update_monitoring_profile_flow_logs(mc_14) + # Verify the intermediate is set to trigger DCR update in postprocessing + self.assertTrue(dec_14.context.get_intermediate("monitoring_addon_postprocessing_required")) + # Verify HLSM is auto-enabled when using deprecated flag + self.assertTrue(dec_14.context.get_enable_high_log_scale_mode()) - ground_truth_mc_2 = self.models.ManagedCluster( + # Case 15: Standalone HLSM enable with monitoring addon enabled and MSI auth + # This test verifies the fix for the bug where standalone --enable-high-log-scale-mode was silently ignored + dec_15 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_high_log_scale_mode": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_15 = self.models.ManagedCluster( location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } + ) + }, ) - ground_truth_mc_2.agent_pool_profiles = [] - self.assertEqual(dec_mc_2, ground_truth_mc_2) + dec_15.context.attach_mc(mc_15) + 
dec_mc_15 = dec_15.update_monitoring_profile_flow_logs(mc_15) + # Verify the intermediate is set to trigger DCR update in postprocessing + self.assertTrue(dec_15.context.get_intermediate("monitoring_addon_postprocessing_required")) - # Test with regular System mode agent pool (should remain unchanged) - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + # Case 16: Standalone HLSM enable without monitoring addon (should error) + dec_16 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_high_log_scale_mode": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - - regular_agentpool = self.models.ManagedClusterAgentPoolProfile( - name="nodepool1", - mode=CONST_NODEPOOL_MODE_SYSTEM, - vm_size="Standard_DS2_v2", - count=3, - max_pods=110, + mc_16 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + ), ) + dec_16.context.attach_mc(mc_16) + with self.assertRaises(RequiredArgumentMissingError): + dec_16.update_monitoring_profile_flow_logs(mc_16) - mc_3 = self.models.ManagedCluster(location="test_location") - mc_3.agent_pool_profiles = [regular_agentpool] - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_managed_system_pools(mc_3) - - # Regular system pool should remain unchanged - self.assertEqual(dec_mc_3.agent_pool_profiles[0].name, "nodepool1") - self.assertEqual(dec_mc_3.agent_pool_profiles[0].mode, CONST_NODEPOOL_MODE_SYSTEM) - self.assertEqual(dec_mc_3.agent_pool_profiles[0].vm_size, "Standard_DS2_v2") - self.assertEqual(dec_mc_3.agent_pool_profiles[0].count, 3) - self.assertEqual(dec_mc_3.agent_pool_profiles[0].max_pods, 110) - - # Test with ManagedSystem mode agent pool (should clean all other attributes) - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + # Case 17: Standalone HLSM enable without MSI auth (should error) + dec_17 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_high_log_scale_mode": True, + 
 }, CUSTOM_MGMT_AKS_PREVIEW, ) - - managed_system_agentpool = self.models.ManagedClusterAgentPoolProfile( + mc_17 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={} + ) + }, ) + dec_17.context.attach_mc(mc_17) + with self.assertRaises(RequiredArgumentMissingError): + dec_17.update_monitoring_profile_flow_logs(mc_17) - mc_4 = self.models.ManagedCluster(location="test_location") - mc_4.agent_pool_profiles = [managed_system_agentpool] - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_managed_system_pools(mc_4) - - # ManagedSystem pool should have all attributes except name and mode set to None - result_agentpool = dec_mc_4.agent_pool_profiles[0] - self.assertEqual(result_agentpool.name, "managedsystem1") - self.assertEqual(result_agentpool.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) - self.assertIsNone(result_agentpool.vm_size) - self.assertIsNone(result_agentpool.count) - self.assertIsNone(result_agentpool.max_pods) - self.assertIsNone(result_agentpool.os_disk_size_gb) - self.assertIsNone(result_agentpool.vnet_subnet_id) - - # Test with mixed agent pools (System and ManagedSystem) - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + # Case 18: HLSM explicitly set to False still triggers postprocessing (DCR must be updated to drop the high-scale stream) + dec_18 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_high_log_scale_mode": False, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - - system_agentpool = self.models.ManagedClusterAgentPoolProfile( - name="systempool", - mode=CONST_NODEPOOL_MODE_SYSTEM, - vm_size="Standard_DS2_v2", 
- count=3, - max_pods=110, + mc_18 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } + ) + }, ) + dec_18.context.attach_mc(mc_18) + dec_mc_18 = dec_18.update_monitoring_profile_flow_logs(mc_18) + # HLSM=false should trigger postprocessing to update DCR (remove high-scale stream) + self.assertTrue(dec_18.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False)) - managed_system_agentpool = self.models.ManagedClusterAgentPoolProfile( - name="managedsystempool", - mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, - vm_size="Standard_DS4_v2", - count=5, - max_pods=250, - os_disk_size_gb=200, + # Case 19: Standalone HLSM enable with omsAgent (camelCase key) + dec_19 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_high_log_scale_mode": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, ) + mc_19 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + ), + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } + ) + }, + ) + dec_19.context.attach_mc(mc_19) + dec_mc_19 = dec_19.update_monitoring_profile_flow_logs(mc_19) + # Verify the intermediate is set to trigger DCR update in postprocessing + self.assertTrue(dec_19.context.get_intermediate("monitoring_addon_postprocessing_required")) - mc_5 = self.models.ManagedCluster(location="test_location") - mc_5.agent_pool_profiles = [system_agentpool, managed_system_agentpool] - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_managed_system_pools(mc_5) - - # System pool should remain unchanged - system_result = dec_mc_5.agent_pool_profiles[0] - 
self.assertEqual(system_result.name, "systempool") - self.assertEqual(system_result.mode, CONST_NODEPOOL_MODE_SYSTEM) - self.assertEqual(system_result.vm_size, "Standard_DS2_v2") - self.assertEqual(system_result.count, 3) - self.assertEqual(system_result.max_pods, 110) - - # ManagedSystem pool should be cleaned - managed_result = dec_mc_5.agent_pool_profiles[1] - self.assertEqual(managed_result.name, "managedsystempool") - self.assertEqual(managed_result.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) - self.assertIsNone(managed_result.vm_size) - self.assertIsNone(managed_result.count) - self.assertIsNone(managed_result.max_pods) - self.assertIsNone(managed_result.os_disk_size_gb) + def test_update_standalone_high_log_scale_mode(self): + """Tests for Bug 1 fix: --enable-high-log-scale-mode standalone on update path. - # Test with multiple ManagedSystem pools - dec_6 = AKSPreviewManagedClusterUpdateDecorator( + Before this fix, --enable-high-log-scale-mode alone on an existing cluster was silently + ignored because monitoring_addon_postprocessing_required was never set, so the DCR was + never updated. 
+ """ + # Case 1: Happy path - monitoring enabled with MSI auth → sets postprocessing intermediate + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + {"enable_high_log_scale_mode": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - - managed_system_agentpool_1 = self.models.ManagedClusterAgentPoolProfile( - name="managedsystem1", - mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, - vm_size="Standard_DS2_v2", - count=3, - enable_auto_scaling=True, - min_count=1, - max_count=5, - ) - - managed_system_agentpool_2 = self.models.ManagedClusterAgentPoolProfile( - name="managedsystem2", - mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, - vm_size="Standard_DS4_v2", - count=2, - os_type="Linux", + mc_1 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, + ) + }, ) + dec_1.context.attach_mc(mc_1) + dec_1.update_monitoring_profile_flow_logs(mc_1) + self.assertTrue(dec_1.context.get_intermediate("monitoring_addon_postprocessing_required")) - mc_6 = self.models.ManagedCluster(location="test_location") - mc_6.agent_pool_profiles = [managed_system_agentpool_1, managed_system_agentpool_2] - dec_6.context.attach_mc(mc_6) - dec_mc_6 = dec_6.update_managed_system_pools(mc_6) - - # Both ManagedSystem pools should be cleaned - result_1 = dec_mc_6.agent_pool_profiles[0] - self.assertEqual(result_1.name, "managedsystem1") - self.assertEqual(result_1.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) - self.assertIsNone(result_1.vm_size) - self.assertIsNone(result_1.count) - self.assertIsNone(result_1.enable_auto_scaling) - self.assertIsNone(result_1.min_count) - self.assertIsNone(result_1.max_count) - - result_2 = dec_mc_6.agent_pool_profiles[1] - self.assertEqual(result_2.name, "managedsystem2") - self.assertEqual(result_2.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) - self.assertIsNone(result_2.vm_size) - self.assertIsNone(result_2.count) - 
self.assertIsNone(result_2.os_type) - - # Test error handling: fail on passing wrong mc object - dec_7 = AKSPreviewManagedClusterUpdateDecorator( + # Case 2: Monitoring addon present but not enabled → RequiredArgumentMissingError + dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + {"enable_high_log_scale_mode": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_7 = self.models.ManagedCluster(location="test_location") - dec_7.context.attach_mc(mc_7) - - with self.assertRaises(CLIInternalError): - dec_7.update_managed_system_pools(None) + mc_2 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=False, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, + ) + }, + ) + dec_2.context.attach_mc(mc_2) + with self.assertRaises(RequiredArgumentMissingError): + dec_2.update_monitoring_profile_flow_logs(mc_2) - def test_set_up_upstream_kubescheduler_user_configuration(self): - # Test default behavior - no configuration - dec_0 = AKSPreviewManagedClusterCreateDecorator( + # Case 3: No monitoring addon at all → RequiredArgumentMissingError + dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + {"enable_high_log_scale_mode": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_0 = self.models.ManagedCluster(location="test_location") - dec_0.context.attach_mc(mc_0) - dec_mc_0 = dec_0.set_up_upstream_kubescheduler_user_configuration(mc_0) - ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_0, ground_truth_mc_0) + mc_3 = self.models.ManagedCluster(location="test_location") + dec_3.context.attach_mc(mc_3) + with self.assertRaises(RequiredArgumentMissingError): + dec_3.update_monitoring_profile_flow_logs(mc_3) - # Test enabling upstream kubescheduler user configuration - dec_1 = AKSPreviewManagedClusterCreateDecorator( + # Case 4: Monitoring enabled but MSI auth missing → RequiredArgumentMissingError + dec_4 = 
AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_upstream_kubescheduler_user_configuration": True, - }, + {"enable_high_log_scale_mode": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.set_up_upstream_kubescheduler_user_configuration(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( + mc_4 = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD - ) + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={}, ) - ), + }, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + dec_4.context.attach_mc(mc_4) + with self.assertRaises(RequiredArgumentMissingError): + dec_4.update_monitoring_profile_flow_logs(mc_4) - # Test with existing scheduler profile - dec_2 = AKSPreviewManagedClusterCreateDecorator( + # Case 5: Monitoring enabled but MSI auth set to "false" → RequiredArgumentMissingError + dec_5 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_upstream_kubescheduler_user_configuration": True, - }, + {"enable_high_log_scale_mode": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster( + mc_5 = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT - ) + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "false"}, ) - ), + }, ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = 
dec_2.set_up_upstream_kubescheduler_user_configuration(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( + dec_5.context.attach_mc(mc_5) + with self.assertRaises(RequiredArgumentMissingError): + dec_5.update_monitoring_profile_flow_logs(mc_5) + + # Case 6: enable_high_log_scale_mode=False (no CNL) → postprocessing triggered to update DCR + dec_6 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"enable_high_log_scale_mode": False}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_6 = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD - ) + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, ) - ), + }, + ) + dec_6.context.attach_mc(mc_6) + dec_6.update_monitoring_profile_flow_logs(mc_6) + self.assertTrue( + dec_6.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - def test_update_upstream_kubescheduler_user_configuration(self): - # Test default behavior - no configuration change - dec_0 = AKSPreviewManagedClusterUpdateDecorator( + # Case 7: Not specified (None) → no postprocessing triggered + dec_7 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_0 = self.models.ManagedCluster(location="test_location") - dec_0.context.attach_mc(mc_0) - dec_mc_0 = dec_0.update_upstream_kubescheduler_user_configuration(mc_0) - ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_0, ground_truth_mc_0) + mc_7 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + 
enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, + ) + }, + ) + dec_7.context.attach_mc(mc_7) + dec_7.update_monitoring_profile_flow_logs(mc_7) + self.assertFalse( + dec_7.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) + ) - # Test enabling upstream kubescheduler user configuration - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # Case 8: camelCase "omsAgent" key is also recognised + dec_8 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_upstream_kubescheduler_user_configuration": True, - }, + {"enable_high_log_scale_mode": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_upstream_kubescheduler_user_configuration(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( + mc_8 = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD - ) + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, ) - ), + }, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + dec_8.context.attach_mc(mc_8) + dec_8.update_monitoring_profile_flow_logs(mc_8) + self.assertTrue(dec_8.context.get_intermediate("monitoring_addon_postprocessing_required")) - # Test disabling upstream kubescheduler user configuration - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_monitoring_profile_flow_logs_no_flags_noop(self): + """Test that update_monitoring_profile_flow_logs is a no-op when no CNL/HLSM flags are specified.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - 
"disable_upstream_kubescheduler_user_configuration": True, - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_upstream_kubescheduler_user_configuration(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT - ) + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"enableRetinaNetworkFlags": "True"}, ) - ), + }, + ) + dec.context.attach_mc(mc) + dec_mc = dec.update_monitoring_profile_flow_logs(mc) + # Existing config should remain unchanged + self.assertEqual( + dec_mc.addon_profiles["omsagent"].config["enableRetinaNetworkFlags"], + "True", + ) + self.assertFalse( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - # Test mutual exclusivity - should raise exception when both enable and disable are specified - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_enable_cnl_with_azure_monitor_logs_on_cluster(self): + """Test enabling CNL on update when monitoring was enabled via enable_azure_monitor_logs on existing cluster.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_upstream_kubescheduler_user_configuration": True, - "disable_upstream_kubescheduler_user_configuration": True, + "enable_container_network_logs": True, + "enable_azure_monitor_logs": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster(location="test_location") - dec_3.context.attach_mc(mc_3) - with self.assertRaises(MutuallyExclusiveArgumentError): - 
dec_3.update_upstream_kubescheduler_user_configuration(mc_3) + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), + ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + ) + dec.context.attach_mc(mc) + dec_mc = dec.update_monitoring_profile_flow_logs(mc) + self.assertEqual( + dec_mc.addon_profiles["omsagent"].config["enableRetinaNetworkFlags"], + "True", + ) + self.assertTrue(dec.context.get_intermediate("monitoring_addon_postprocessing_required")) - # Test enabling with existing scheduler profile being updated - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_cnl_explicit_true_hlsm_with_prerequisites(self): + """Test enabling CNL + HLSM=True explicitly on update with all prerequisites met.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_upstream_kubescheduler_user_configuration": True, + "enable_container_network_logs": True, + "enable_high_log_scale_mode": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_4 = self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT - ) - ) + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } + ) + }, ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_upstream_kubescheduler_user_configuration(mc_4) - ground_truth_mc_4 = self.models.ManagedCluster( + 
dec.context.attach_mc(mc) + dec_mc = dec.update_monitoring_profile_flow_logs(mc) + self.assertEqual( + dec_mc.addon_profiles["omsagent"].config["enableRetinaNetworkFlags"], + "True", + ) + self.assertTrue(dec.context.get_enable_high_log_scale_mode()) + self.assertTrue(dec.context.get_intermediate("monitoring_addon_postprocessing_required")) + + def test_update_disable_hlsm_standalone_triggers_postprocessing(self): + """Test that disabling HLSM standalone (no CNL flag) triggers postprocessing to update DCR.""" + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_high_log_scale_mode": False, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD - ) + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } ) - ), + }, + ) + dec.context.attach_mc(mc) + dec.update_monitoring_profile_flow_logs(mc) + self.assertTrue( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - # Test disabling with existing scheduler profile being updated - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_disable_hlsm_error_when_cnl_already_enabled(self): + """Test that disabling HLSM raises error when CNL is already enabled on the cluster.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_upstream_kubescheduler_user_configuration": True, + "enable_high_log_scale_mode": False, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_5 = self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - 
scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD - ) + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "enableRetinaNetworkFlags": "True", + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } ) + }, + ) + dec.context.attach_mc(mc) + with self.assertRaises(MutuallyExclusiveArgumentError): + dec.update_monitoring_profile_flow_logs(mc) + + def test_update_postprocessing_with_camelcase_addon_key(self): + """Test that update postprocessing works when the API response uses 'omsAgent' (camelCase). + + The API may return the monitoring addon as 'omsAgent' instead of 'omsagent'. + The postprocessing must handle both key variants so the DCR gets updated. + """ + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_container_network_logs": True, + "enable_high_log_scale_mode": True, + "name": "test_name", + "resource_group_name": "test_rg_name", + "location": "test_location", + "enable_msi_auth_for_monitoring": True, + "enable_syslog": False, + "data_collection_settings": None, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking( + enabled=True, + ), ), + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } + ) + }, ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_upstream_kubescheduler_user_configuration(mc_5) - ground_truth_mc_5 = self.models.ManagedCluster( + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test_subscription_id") + # Simulate profile update setting the postprocessing flag + 
dec.update_monitoring_profile_flow_logs(mc) + self.assertTrue(dec.context.get_intermediate("monitoring_addon_postprocessing_required")) + + # Build API response cluster with camelCase addon key + cluster = self.models.ManagedCluster( location="test_location", - scheduler_profile=self.models.SchedulerProfile( - scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( - upstream=self.models.SchedulerInstanceProfile( - scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT - ) + addon_profiles={ + CONST_MONITORING_ADDON_NAME_CAMELCASE: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + } ) - ), + }, ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) + external_functions = dec.context.external_functions + with patch.object( + external_functions, "ensure_container_insights_for_monitoring", return_value=None + ) as mock_ecifm: + dec.postprocessing_after_mc_created(cluster) + mock_ecifm.assert_called_once() + _, kwargs = mock_ecifm.call_args + self.assertTrue(kwargs["create_dcr"]) + self.assertTrue(kwargs["enable_high_log_scale_mode"]) - def test_update_ingress_profile_gateway_api(self): - # Test enabling Gateway API - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_node_provisioning_profile(self): + dec_0 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"enable_gateway_api": True}, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_ingress_profile_gateway_api(mc_1) + # Not specified case + mc_0 = self.models.ManagedCluster(location="test_location") + dec_0.context.attach_mc(mc_0) + dec_mc_0 = dec_0.update_node_provisioning_profile(mc_0) + ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_0, ground_truth_mc_0) - ground_truth_ingress_profile_1 = self.models.ManagedClusterIngressProfile( - 
gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( - installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD - ) + # Set Mode to Auto + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "node_provisioning_mode": "Auto", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster( + location="test_location", ) + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_node_provisioning_profile(mc_1) ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", ingress_profile=ground_truth_ingress_profile_1 + location="test_location", + node_provisioning_profile=self.models.ManagedClusterNodeProvisioningProfile( + mode="Auto", + ), ) self.assertEqual(dec_mc_1, ground_truth_mc_1) - # Test with existing ingress profile and web_app_routing enabled + # Set Mode to Auto and DefaultPools to None dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"enable_gateway_api": True}, + { + "node_provisioning_mode": "Auto", + "node_provisioning_default_pools": "None", + + }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_2 = self.models.ManagedCluster( location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting(enabled=True) - ) ) dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_ingress_profile_gateway_api(mc_2) - - ground_truth_ingress_profile_2 = self.models.ManagedClusterIngressProfile( - web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting(enabled=True), - gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( - installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD - ) - ) + dec_mc_2 = dec_2.update_node_provisioning_profile(mc_2) ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", ingress_profile=ground_truth_ingress_profile_2 + location="test_location", + 
node_provisioning_profile=self.models.ManagedClusterNodeProvisioningProfile( + mode="Auto", + default_node_pools="None", + ), ) self.assertEqual(dec_mc_2, ground_truth_mc_2) - # Test disable_gateway_api parameter - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_managed_system_pools(self): + """Test update_managed_system_pools method functionality.""" + # Test with no agent pools + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"disable_gateway_api": True}, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( - installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD - ) - ) + agent_pool_profiles=None, ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_ingress_profile_gateway_api(mc_3) + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_managed_system_pools(mc_1) - ground_truth_ingress_profile_3 = self.models.ManagedClusterIngressProfile( - gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( - installation=CONST_MANAGED_GATEWAY_INSTALLATION_DISABLED - ) - ) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", ingress_profile=ground_truth_ingress_profile_3 + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + agent_pool_profiles=None, ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - # Test mutual exclusion - both enable and disable should raise exception - with self.assertRaises(MutuallyExclusiveArgumentError): - dec = AKSPreviewManagedClusterUpdateDecorator( + # Test with empty agent pool profiles + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster( + 
location="test_location", + ) + mc_2.agent_pool_profiles = [] + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_managed_system_pools(mc_2) + + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + ) + ground_truth_mc_2.agent_pool_profiles = [] + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # Test with regular System mode agent pool (should remain unchanged) + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + regular_agentpool = self.models.ManagedClusterAgentPoolProfile( + name="nodepool1", + mode=CONST_NODEPOOL_MODE_SYSTEM, + vm_size="Standard_DS2_v2", + count=3, + max_pods=110, + ) + + mc_3 = self.models.ManagedCluster(location="test_location") + mc_3.agent_pool_profiles = [regular_agentpool] + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_managed_system_pools(mc_3) + + # Regular system pool should remain unchanged + self.assertEqual(dec_mc_3.agent_pool_profiles[0].name, "nodepool1") + self.assertEqual(dec_mc_3.agent_pool_profiles[0].mode, CONST_NODEPOOL_MODE_SYSTEM) + self.assertEqual(dec_mc_3.agent_pool_profiles[0].vm_size, "Standard_DS2_v2") + self.assertEqual(dec_mc_3.agent_pool_profiles[0].count, 3) + self.assertEqual(dec_mc_3.agent_pool_profiles[0].max_pods, 110) + + # Test with ManagedSystem mode agent pool (should clean all other attributes) + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + managed_system_agentpool = self.models.ManagedClusterAgentPoolProfile( + name="managedsystem1", + mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, + vm_size="Standard_DS2_v2", + count=3, + max_pods=110, + os_disk_size_gb=100, + vnet_subnet_id="/subscriptions/test/resourceGroups/test/providers/Microsoft.Network/virtualNetworks/test/subnets/test", + ) + + mc_4 = self.models.ManagedCluster(location="test_location") + mc_4.agent_pool_profiles = [managed_system_agentpool] + dec_4.context.attach_mc(mc_4) 
+ dec_mc_4 = dec_4.update_managed_system_pools(mc_4) + + # ManagedSystem pool should have all attributes except name and mode set to None + result_agentpool = dec_mc_4.agent_pool_profiles[0] + self.assertEqual(result_agentpool.name, "managedsystem1") + self.assertEqual(result_agentpool.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) + self.assertIsNone(result_agentpool.vm_size) + self.assertIsNone(result_agentpool.count) + self.assertIsNone(result_agentpool.max_pods) + self.assertIsNone(result_agentpool.os_disk_size_gb) + self.assertIsNone(result_agentpool.vnet_subnet_id) + + # Test with mixed agent pools (System and ManagedSystem) + dec_5 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + system_agentpool = self.models.ManagedClusterAgentPoolProfile( + name="systempool", + mode=CONST_NODEPOOL_MODE_SYSTEM, + vm_size="Standard_DS2_v2", + count=3, + max_pods=110, + ) + + managed_system_agentpool = self.models.ManagedClusterAgentPoolProfile( + name="managedsystempool", + mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, + vm_size="Standard_DS4_v2", + count=5, + max_pods=250, + os_disk_size_gb=200, + ) + + mc_5 = self.models.ManagedCluster(location="test_location") + mc_5.agent_pool_profiles = [system_agentpool, managed_system_agentpool] + dec_5.context.attach_mc(mc_5) + dec_mc_5 = dec_5.update_managed_system_pools(mc_5) + + # System pool should remain unchanged + system_result = dec_mc_5.agent_pool_profiles[0] + self.assertEqual(system_result.name, "systempool") + self.assertEqual(system_result.mode, CONST_NODEPOOL_MODE_SYSTEM) + self.assertEqual(system_result.vm_size, "Standard_DS2_v2") + self.assertEqual(system_result.count, 3) + self.assertEqual(system_result.max_pods, 110) + + # ManagedSystem pool should be cleaned + managed_result = dec_mc_5.agent_pool_profiles[1] + self.assertEqual(managed_result.name, "managedsystempool") + self.assertEqual(managed_result.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) + 
self.assertIsNone(managed_result.vm_size) + self.assertIsNone(managed_result.count) + self.assertIsNone(managed_result.max_pods) + self.assertIsNone(managed_result.os_disk_size_gb) + + # Test with multiple ManagedSystem pools + dec_6 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + managed_system_agentpool_1 = self.models.ManagedClusterAgentPoolProfile( + name="managedsystem1", + mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, + vm_size="Standard_DS2_v2", + count=3, + enable_auto_scaling=True, + min_count=1, + max_count=5, + ) + + managed_system_agentpool_2 = self.models.ManagedClusterAgentPoolProfile( + name="managedsystem2", + mode=CONST_NODEPOOL_MODE_MANAGEDSYSTEM, + vm_size="Standard_DS4_v2", + count=2, + os_type="Linux", + ) + + mc_6 = self.models.ManagedCluster(location="test_location") + mc_6.agent_pool_profiles = [managed_system_agentpool_1, managed_system_agentpool_2] + dec_6.context.attach_mc(mc_6) + dec_mc_6 = dec_6.update_managed_system_pools(mc_6) + + # Both ManagedSystem pools should be cleaned + result_1 = dec_mc_6.agent_pool_profiles[0] + self.assertEqual(result_1.name, "managedsystem1") + self.assertEqual(result_1.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) + self.assertIsNone(result_1.vm_size) + self.assertIsNone(result_1.count) + self.assertIsNone(result_1.enable_auto_scaling) + self.assertIsNone(result_1.min_count) + self.assertIsNone(result_1.max_count) + + result_2 = dec_mc_6.agent_pool_profiles[1] + self.assertEqual(result_2.name, "managedsystem2") + self.assertEqual(result_2.mode, CONST_NODEPOOL_MODE_MANAGEDSYSTEM) + self.assertIsNone(result_2.vm_size) + self.assertIsNone(result_2.count) + self.assertIsNone(result_2.os_type) + + # Test error handling: fail on passing wrong mc object + dec_7 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_7 = self.models.ManagedCluster(location="test_location") + dec_7.context.attach_mc(mc_7) + + 
with self.assertRaises(CLIInternalError): + dec_7.update_managed_system_pools(None) + + def test_set_up_upstream_kubescheduler_user_configuration(self): + # Test default behavior - no configuration + dec_0 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_0 = self.models.ManagedCluster(location="test_location") + dec_0.context.attach_mc(mc_0) + dec_mc_0 = dec_0.set_up_upstream_kubescheduler_user_configuration(mc_0) + ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_0, ground_truth_mc_0) + + # Test enabling upstream kubescheduler user configuration + dec_1 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.set_up_upstream_kubescheduler_user_configuration(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD + ) + ) + ), + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + # Test with existing scheduler profile + dec_2 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT + ) + ) + ), + ) + dec_2.context.attach_mc(mc_2) + 
dec_mc_2 = dec_2.set_up_upstream_kubescheduler_user_configuration(mc_2) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD + ) + ) + ), + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + def test_update_upstream_kubescheduler_user_configuration(self): + # Test default behavior - no configuration change + dec_0 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_0 = self.models.ManagedCluster(location="test_location") + dec_0.context.attach_mc(mc_0) + dec_mc_0 = dec_0.update_upstream_kubescheduler_user_configuration(mc_0) + ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_0, ground_truth_mc_0) + + # Test enabling upstream kubescheduler user configuration + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_upstream_kubescheduler_user_configuration(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD + ) + ) + ), + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + # Test disabling upstream kubescheduler user configuration + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + 
"disable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster(location="test_location") + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_upstream_kubescheduler_user_configuration(mc_2) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT + ) + ) + ), + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # Test mutual exclusivity - should raise exception when both enable and disable are specified + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_upstream_kubescheduler_user_configuration": True, + "disable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_3 = self.models.ManagedCluster(location="test_location") + dec_3.context.attach_mc(mc_3) + with self.assertRaises(MutuallyExclusiveArgumentError): + dec_3.update_upstream_kubescheduler_user_configuration(mc_3) + + # Test enabling with existing scheduler profile being updated + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_4 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT + ) + ) + ), + ) + dec_4.context.attach_mc(mc_4) + dec_mc_4 = dec_4.update_upstream_kubescheduler_user_configuration(mc_4) + ground_truth_mc_4 = self.models.ManagedCluster( + location="test_location", + 
scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD + ) + ) + ), + ) + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + # Test disabling with existing scheduler profile being updated + dec_5 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_upstream_kubescheduler_user_configuration": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_5 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.MANAGED_BY_CRD + ) + ) + ), + ) + dec_5.context.attach_mc(mc_5) + dec_mc_5 = dec_5.update_upstream_kubescheduler_user_configuration(mc_5) + ground_truth_mc_5 = self.models.ManagedCluster( + location="test_location", + scheduler_profile=self.models.SchedulerProfile( + scheduler_instance_profiles=self.models.SchedulerProfileSchedulerInstanceProfiles( + upstream=self.models.SchedulerInstanceProfile( + scheduler_config_mode=self.models.SchedulerConfigMode.DEFAULT + ) + ) + ), + ) + self.assertEqual(dec_mc_5, ground_truth_mc_5) + + def test_update_ingress_profile_gateway_api(self): + # Test enabling Gateway API + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"enable_gateway_api": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_ingress_profile_gateway_api(mc_1) + + ground_truth_ingress_profile_1 = self.models.ManagedClusterIngressProfile( + gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( + installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD + ) + ) 
+ ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", ingress_profile=ground_truth_ingress_profile_1 + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + # Test with existing ingress profile and web_app_routing enabled + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"enable_gateway_api": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting(enabled=True) + ) + ) + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_ingress_profile_gateway_api(mc_2) + + ground_truth_ingress_profile_2 = self.models.ManagedClusterIngressProfile( + web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting(enabled=True), + gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( + installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD + ) + ) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", ingress_profile=ground_truth_ingress_profile_2 + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # Test disable_gateway_api parameter + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"disable_gateway_api": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_3 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( + installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD + ) + ) + ) + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_ingress_profile_gateway_api(mc_3) + + ground_truth_ingress_profile_3 = self.models.ManagedClusterIngressProfile( + gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( + installation=CONST_MANAGED_GATEWAY_INSTALLATION_DISABLED + ) + ) + ground_truth_mc_3 = 
self.models.ManagedCluster( + location="test_location", ingress_profile=ground_truth_ingress_profile_3 + ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) + + # Test mutual exclusion - both enable and disable should raise exception + with self.assertRaises(MutuallyExclusiveArgumentError): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"enable_gateway_api": True, "disable_gateway_api": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster( + location="test_location", + ) + dec.context.attach_mc(mc) + dec_mc = dec.update_ingress_profile_gateway_api(mc) + + # Test without any gateway_api parameters (should not modify anything) + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_4 = self.models.ManagedCluster(location="test_location") + dec_4.context.attach_mc(mc_4) + dec_mc_4 = dec_4.update_ingress_profile_gateway_api(mc_4) + + ground_truth_mc_4 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + # Test without any gateway_api parameters but with existing gateway_api configuration (should not modify) + dec_5 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_5 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( + installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD + ) + ) + ) + dec_5.context.attach_mc(mc_5) + dec_mc_5 = dec_5.update_ingress_profile_gateway_api(mc_5) + + # Should remain unchanged + ground_truth_mc_5 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( + installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD + ) + ) + ) + 
self.assertEqual(dec_mc_5, ground_truth_mc_5) + + def test_update_ingress_profile_app_routing_istio(self): + # Test enabling app routing istio on fresh cluster + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"enable_app_routing_istio": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_ingress_profile_app_routing_istio(mc_1) + + ground_truth_ingress_profile_1 = self.models.ManagedClusterIngressProfile( + web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( + gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( + app_routing_istio=self.models.ManagedClusterAppRoutingIstio( + mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED + ) + ) + ) + ) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", ingress_profile=ground_truth_ingress_profile_1 + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + # Test disabling app routing istio + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"disable_app_routing_istio": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( + enabled=True, + gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( + app_routing_istio=self.models.ManagedClusterAppRoutingIstio( + mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED + ) + ) + ) + ) + ) + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_ingress_profile_app_routing_istio(mc_2) + + ground_truth_ingress_profile_2 = self.models.ManagedClusterIngressProfile( + web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( + enabled=True, + 
gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( + app_routing_istio=self.models.ManagedClusterAppRoutingIstio( + mode=CONST_APP_ROUTING_ISTIO_MODE_DISABLED + ) + ) + ) + ) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", ingress_profile=ground_truth_ingress_profile_2 + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # Test mutual exclusion - both enable and disable should raise exception + with self.assertRaises(MutuallyExclusiveArgumentError): + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"enable_gateway_api": True, "disable_gateway_api": True}, + {"enable_app_routing_istio": True, "disable_app_routing_istio": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc = self.models.ManagedCluster( - location="test_location", - ) + mc = self.models.ManagedCluster(location="test_location") dec.context.attach_mc(mc) - dec_mc = dec.update_ingress_profile_gateway_api(mc) + dec.update_ingress_profile_app_routing_istio(mc) - # Test without any gateway_api parameters (should not modify anything) + # Test without any app_routing_istio parameters (should not modify anything) + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_3 = self.models.ManagedCluster(location="test_location") + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_ingress_profile_app_routing_istio(mc_3) + + ground_truth_mc_3 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_3, ground_truth_mc_3) + + # Test without any app_routing_istio parameters but with existing config (should not modify) dec_4 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_4 = self.models.ManagedCluster(location="test_location") + mc_4 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + 
web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( + enabled=True, + gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( + app_routing_istio=self.models.ManagedClusterAppRoutingIstio( + mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED + ) + ) + ) + ) + ) dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_ingress_profile_gateway_api(mc_4) + dec_mc_4 = dec_4.update_ingress_profile_app_routing_istio(mc_4) + + # Should remain unchanged + ground_truth_mc_4 = self.models.ManagedCluster( + location="test_location", + ingress_profile=self.models.ManagedClusterIngressProfile( + web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( + enabled=True, + gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( + app_routing_istio=self.models.ManagedClusterAppRoutingIstio( + mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED + ) + ) + ) + ) + ) + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + def test_update_azure_monitor_profile_with_opentelemetry_metrics(self): + # Test enabling OpenTelemetry metrics on update + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_opentelemetry_metrics": True, + "opentelemetry_metrics_port": 8080, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with Azure Monitor metrics enabled + mc_1 = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True + ) + ) + ) + dec_1.context.attach_mc(mc_1) + + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_1.context, 
'get_name', return_value='test-cluster'), \ + patch.object(dec_1.context, 'get_location', return_value='test-location'): + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + + # Verify OpenTelemetry metrics configuration is updated + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 8080) + + # Test disabling OpenTelemetry metrics on update + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_opentelemetry_metrics": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry metrics enabled + mc_2 = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080 + ) + ) + ) + ) + dec_2.context.attach_mc(mc_2) + + # Mock authentication-related functions for second test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_2.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_2.context, 'get_location', return_value='test-location'): + dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) + + # Verify OpenTelemetry metrics is disabled + 
self.assertIsNotNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertFalse(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertIsNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port) + + # Test standalone port update for OpenTelemetry metrics (without enable/disable flags) + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "opentelemetry_metrics_port": 9090, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry metrics already enabled + mc_3 = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080 # Original port + ) + ) + ) + ) + dec_3.context.attach_mc(mc_3) + # Mock authentication-related functions for third test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_3.context, 'get_location', return_value='test-location'): + dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) + + # Verify OpenTelemetry metrics port is updated while remaining enabled + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 
9090) + + def test_update_azure_monitor_profile_with_opentelemetry_logs(self): + # Test enabling OpenTelemetry logs on update + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_opentelemetry_logs": True, + "opentelemetry_logs_port": 8081, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with Azure Monitor logs enabled (monitoring addon enabled) + addon_profiles = { + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" + } + ) + } + mc_1 = self.models.ManagedCluster( + location="test_location", + addon_profiles=addon_profiles, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True + ) + ) + ) + dec_1.context.attach_mc(mc_1) + + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_1.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_1.context, 'get_location', return_value='test-location'): + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + + # Verify OpenTelemetry logs configuration is updated + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8081) + + # Test disabling OpenTelemetry logs on update + dec_2 = 
AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_opentelemetry_logs": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry logs enabled + mc_2 = self.models.ManagedCluster( + location="test_location", + addon_profiles=addon_profiles, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8081 + ) + ) + ) + ) + dec_2.context.attach_mc(mc_2) + + # Mock authentication-related functions for second test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_2.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_2.context, 'get_location', return_value='test-location'): + dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) - ground_truth_mc_4 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_4, ground_truth_mc_4) + # Verify OpenTelemetry logs is disabled + self.assertIsNotNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertFalse(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertIsNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs.port) - # Test without any gateway_api parameters but with existing gateway_api configuration (should not modify) - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + # Test standalone port update for OpenTelemetry logs (without enable/disable flags) + dec_3 = 
AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "opentelemetry_logs_port": 9091, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_5 = self.models.ManagedCluster( - location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( - installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD - ) - ) - ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_ingress_profile_gateway_api(mc_5) - # Should remain unchanged - ground_truth_mc_5 = self.models.ManagedCluster( + # Mock existing cluster with OpenTelemetry logs already enabled + mc_3 = self.models.ManagedCluster( location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - gateway_api=self.models.ManagedClusterIngressProfileGatewayConfiguration( - installation=CONST_MANAGED_GATEWAY_INSTALLATION_STANDARD + addon_profiles=addon_profiles, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8081 # Original port + ) ) ) ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) + dec_3.context.attach_mc(mc_3) - def test_update_ingress_profile_app_routing_istio(self): - # Test enabling app routing istio on fresh cluster - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # Mock authentication-related functions for third test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ 
+ patch.object(dec_3.context, 'get_location', return_value='test-location'): + dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) + + # Verify OpenTelemetry logs port is updated while remaining enabled + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 9091) + + def test_disable_azure_monitor_app_monitoring_preserves_opentelemetry(self): + # Test that disabling Azure Monitor app monitoring preserves existing OpenTelemetry configuration + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"enable_app_routing_istio": True}, + { + "disable_azure_monitor_app_monitoring": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_ingress_profile_app_routing_istio(mc_1) - ground_truth_ingress_profile_1 = self.models.ManagedClusterIngressProfile( - web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( - gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( - app_routing_istio=self.models.ManagedClusterAppRoutingIstio( - mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED + # Mock existing cluster with both Azure Monitor app monitoring and OpenTelemetry metrics enabled + mc = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + auto_instrumentation=self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation( + enabled=True + ), + open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080 ) ) ) ) - ground_truth_mc_1 = self.models.ManagedCluster( - 
location="test_location", ingress_profile=ground_truth_ingress_profile_1 - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + dec.context.attach_mc(mc) - # Test disabling app routing istio - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec.context, 'get_location', return_value='test-location'): + dec_mc = dec.update_azure_monitor_profile(mc) + + # Verify Azure Monitor app monitoring auto instrumentation is disabled + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation) + self.assertFalse(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation.enabled) + + # Verify OpenTelemetry metrics configuration is preserved + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertEqual(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 8080) + + def test_enable_azure_monitor_app_monitoring_preserves_opentelemetry(self): + # Test that enabling Azure Monitor app monitoring preserves existing OpenTelemetry configuration + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"disable_app_routing_istio": True}, + { + "enable_azure_monitor_app_monitoring": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster( + + # Mock existing cluster with OpenTelemetry logs enabled but Azure Monitor app monitoring disabled + mc = self.models.ManagedCluster( location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - 
web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( - enabled=True, - gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( - app_routing_istio=self.models.ManagedClusterAppRoutingIstio( - mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED - ) + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + auto_instrumentation=self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation( + enabled=False + ), + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8081 ) ) ) ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_ingress_profile_app_routing_istio(mc_2) + dec.context.attach_mc(mc) - ground_truth_ingress_profile_2 = self.models.ManagedClusterIngressProfile( - web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( - enabled=True, - gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( - app_routing_istio=self.models.ManagedClusterAppRoutingIstio( - mode=CONST_APP_ROUTING_ISTIO_MODE_DISABLED - ) - ) - ) + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec.context, 'get_location', return_value='test-location'): + dec_mc = dec.update_azure_monitor_profile(mc) + + # Verify Azure Monitor app monitoring auto instrumentation is enabled + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation) + self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation.enabled) + + # Verify 
OpenTelemetry logs configuration is preserved + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8081) + + def test_azure_keyvault_kms_network_access_parameter_fix(self): + """Test that azure_keyvault_kms_key_vault_network_access parameter is correctly passed through. + + This test verifies the fix for the issue where --azure-keyvault-kms-key-vault-network-access + was always being set to "Public" regardless of user input. + """ + key_id = "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" + + # Test CREATE scenario with Private network access + dec_create_private = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_azure_keyvault_kms": True, + "azure_keyvault_kms_key_id": key_id, + "azure_keyvault_kms_key_vault_network_access": "Private", + "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.KeyVault/vaults/test", + }, + CUSTOM_MGMT_AKS_PREVIEW, ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", ingress_profile=ground_truth_ingress_profile_2 + mc_create_private = self.models.ManagedCluster(location="test_location") + dec_create_private.context.attach_mc(mc_create_private) + result_create_private = dec_create_private.set_up_azure_keyvault_kms(mc_create_private) + + # Verify Private network access is correctly set during CREATE + self.assertEqual( + result_create_private.security_profile.azure_key_vault_kms.key_vault_network_access, + "Private" ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - # Test mutual exclusion - both enable and disable should raise exception - with self.assertRaises(MutuallyExclusiveArgumentError): - dec = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, - self.client, - 
{"enable_app_routing_istio": True, "disable_app_routing_istio": True}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - dec.update_ingress_profile_app_routing_istio(mc) + # Test CREATE scenario with Public network access + dec_create_public = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_azure_keyvault_kms": True, + "azure_keyvault_kms_key_id": key_id, + "azure_keyvault_kms_key_vault_network_access": "Public", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_create_public = self.models.ManagedCluster(location="test_location") + dec_create_public.context.attach_mc(mc_create_public) + result_create_public = dec_create_public.set_up_azure_keyvault_kms(mc_create_public) - # Test without any app_routing_istio parameters (should not modify anything) - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + # Verify Public network access is correctly set during CREATE + self.assertEqual( + result_create_public.security_profile.azure_key_vault_kms.key_vault_network_access, + "Public" + ) + + # Test UPDATE scenario - changing from Public to Private + dec_update_to_private = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_azure_keyvault_kms": True, + "azure_keyvault_kms_key_id": key_id, + "azure_keyvault_kms_key_vault_network_access": "Private", + "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.KeyVault/vaults/test", + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster(location="test_location") - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_ingress_profile_app_routing_istio(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_3, ground_truth_mc_3) + # Start with existing cluster that has Public access + existing_kms_profile = self.models.AzureKeyVaultKms( + enabled=True, + key_id=key_id, + 
key_vault_network_access="Public", + ) + existing_security_profile = self.models.ManagedClusterSecurityProfile( + azure_key_vault_kms=existing_kms_profile + ) + mc_update_to_private = self.models.ManagedCluster( + location="test_location", + security_profile=existing_security_profile, + ) + dec_update_to_private.context.attach_mc(mc_update_to_private) + result_update_to_private = dec_update_to_private.update_azure_keyvault_kms(mc_update_to_private) + + # Verify network access was updated from Public to Private + self.assertEqual( + result_update_to_private.security_profile.azure_key_vault_kms.key_vault_network_access, + "Private" + ) - # Test without any app_routing_istio parameters but with existing config (should not modify) - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + # Test UPDATE scenario - changing from Private to Public + dec_update_to_public = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_azure_keyvault_kms": True, + "azure_keyvault_kms_key_id": key_id, + "azure_keyvault_kms_key_vault_network_access": "Public", + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_4 = self.models.ManagedCluster( + + # Start with existing cluster that has Private access + existing_kms_profile_private = self.models.AzureKeyVaultKms( + enabled=True, + key_id=key_id, + key_vault_network_access="Private", + key_vault_resource_id="/subscriptions/test/resourceGroups/test/providers/Microsoft.KeyVault/vaults/test", + ) + existing_security_profile_private = self.models.ManagedClusterSecurityProfile( + azure_key_vault_kms=existing_kms_profile_private + ) + mc_update_to_public = self.models.ManagedCluster( location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( - enabled=True, - gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( - app_routing_istio=self.models.ManagedClusterAppRoutingIstio( - 
mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED - ) - ) - ) - ) + security_profile=existing_security_profile_private, ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_ingress_profile_app_routing_istio(mc_4) + dec_update_to_public.context.attach_mc(mc_update_to_public) + result_update_to_public = dec_update_to_public.update_azure_keyvault_kms(mc_update_to_public) - # Should remain unchanged - ground_truth_mc_4 = self.models.ManagedCluster( - location="test_location", - ingress_profile=self.models.ManagedClusterIngressProfile( - web_app_routing=self.models.ManagedClusterIngressProfileWebAppRouting( - enabled=True, - gateway_api_implementations=self.models.ManagedClusterWebAppRoutingGatewayAPIImplementations( - app_routing_istio=self.models.ManagedClusterAppRoutingIstio( - mode=CONST_APP_ROUTING_ISTIO_MODE_ENABLED - ) - ) - ) - ) + # Verify network access was updated from Private to Public + self.assertEqual( + result_update_to_public.security_profile.azure_key_vault_kms.key_vault_network_access, + "Public" ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - def test_update_azure_monitor_profile_with_opentelemetry_metrics(self): - # Test enabling OpenTelemetry metrics on update + def test_update_agentpool_profile_with_none_agent_pool_profiles(self): + """Test update_agentpool_profile handles None agent_pool_profiles with hosted system components""" + # Test case 1: None agent_pool_profiles with hosted system components enabled (should succeed) dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_opentelemetry_metrics": True, - "opentelemetry_metrics_port": 8080, - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - # Mock existing cluster with Azure Monitor metrics enabled + # Create a managed cluster with None agent_pool_profiles but hosted system components enabled + hosted_system_profile = self.models.ManagedClusterHostedSystemProfile(enabled=True) mc_1 = self.models.ManagedCluster( location="test_location", - 
azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( - enabled=True - ) - ) + agent_pool_profiles=None, # This is the key scenario + hosted_system_profile=hosted_system_profile ) dec_1.context.attach_mc(mc_1) - # Mock authentication-related functions - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec_1.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec_1.context, 'get_location', return_value='test-location'): - dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) - - # Verify OpenTelemetry metrics configuration is updated - self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) - self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics) - self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) - self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 8080) + # Should return the MC unchanged without raising an error + result_1 = dec_1.update_agentpool_profile(mc_1) + self.assertEqual(result_1, mc_1) + self.assertIsNone(result_1.agent_pool_profiles) + self.assertTrue(result_1.hosted_system_profile.enabled) - # Test disabling OpenTelemetry metrics on update + def test_update_agentpool_profile_with_none_agent_pool_profiles_no_hosted_system(self): + """Test update_agentpool_profile raises UnknownError for None agent_pool_profiles without hosted system components""" + # Test case 2: None agent_pool_profiles without hosted system components (should raise UnknownError) dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "disable_opentelemetry_metrics": True, - }, + {}, 
CUSTOM_MGMT_AKS_PREVIEW, ) - # Mock existing cluster with OpenTelemetry metrics enabled + # Create a managed cluster with None agent_pool_profiles and no hosted system components mc_2 = self.models.ManagedCluster( location="test_location", - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( - enabled=True - ), - app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( - open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( - enabled=True, - port=8080 - ) - ) - ) + agent_pool_profiles=None, # This is the key scenario + hosted_system_profile=None ) dec_2.context.attach_mc(mc_2) - # Mock authentication-related functions for second test - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec_2.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec_2.context, 'get_location', return_value='test-location'): - dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) - - # Verify OpenTelemetry metrics is disabled - self.assertIsNotNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics) - self.assertFalse(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) - self.assertIsNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port) + # Should raise UnknownError + with self.assertRaises(UnknownError): + dec_2.update_agentpool_profile(mc_2) - # Test standalone port update for OpenTelemetry metrics (without enable/disable flags) + def test_update_agentpool_profile_with_none_agent_pool_profiles_hosted_system_disabled(self): + """Test update_agentpool_profile raises UnknownError for None agent_pool_profiles with hosted 
system components disabled""" + # Test case 3: None agent_pool_profiles with hosted system components disabled (should raise UnknownError) dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "opentelemetry_metrics_port": 9090, - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - # Mock existing cluster with OpenTelemetry metrics already enabled + # Create a managed cluster with None agent_pool_profiles and hosted system components disabled + hosted_system_profile_disabled = self.models.ManagedClusterHostedSystemProfile(enabled=False) mc_3 = self.models.ManagedCluster( location="test_location", - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( - enabled=True - ), - app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( - open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( - enabled=True, - port=8080 # Original port - ) - ) - ) + agent_pool_profiles=None, # This is the key scenario + hosted_system_profile=hosted_system_profile_disabled ) dec_3.context.attach_mc(mc_3) - # Mock authentication-related functions for third test - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec_3.context, 'get_location', return_value='test-location'): - dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) - # Verify OpenTelemetry metrics port is updated while remaining enabled - self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics) - self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) - 
self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 9090) + # Should raise UnknownError + with self.assertRaises(UnknownError): + dec_3.update_agentpool_profile(mc_3) - def test_update_azure_monitor_profile_with_opentelemetry_logs(self): - # Test enabling OpenTelemetry logs on update - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_agentpool_profile_with_empty_agent_pool_profiles(self): + """Test update_agentpool_profile raises UnknownError for empty agent_pool_profiles list""" + # Test case 4: Empty agent_pool_profiles list (should raise UnknownError) + dec_4 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_opentelemetry_logs": True, - "opentelemetry_logs_port": 8081, - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - # Mock existing cluster with Azure Monitor logs enabled (monitoring addon enabled) - addon_profiles = { - "omsagent": self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - "logAnalyticsWorkspaceResourceID": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" - } - ) - } - mc_1 = self.models.ManagedCluster( + # Create a managed cluster with empty agent_pool_profiles + mc_4 = self.models.ManagedCluster( location="test_location", - addon_profiles=addon_profiles, - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( - enabled=True - ) - ) + agent_pool_profiles=[], # Empty list scenario + hosted_system_profile=None ) - dec_1.context.attach_mc(mc_1) + dec_4.context.attach_mc(mc_4) - # Mock authentication-related functions - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ - 
patch.object(dec_1.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec_1.context, 'get_location', return_value='test-location'): - dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + # Should raise UnknownError + with self.assertRaises(UnknownError): + dec_4.update_agentpool_profile(mc_4) - # Verify OpenTelemetry logs configuration is updated - self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) - self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs) - self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) - self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8081) + def test_update_health_monitor_profile(self): + # no flags - no change + dec_0 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_0 = self.models.ManagedCluster(location="test_location") + dec_0.context.attach_mc(mc_0) + dec_mc_0 = dec_0.update_health_monitor_profile(mc_0) + ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_0, ground_truth_mc_0) + + # both flags - mutual exclusivity error + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_continuous_control_plane_and_addon_monitor": True, + "disable_continuous_control_plane_and_addon_monitor": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + with self.assertRaises(MutuallyExclusiveArgumentError): + dec_1.update_health_monitor_profile(mc_1) - # Test disabling OpenTelemetry logs on update + # enable flag dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_opentelemetry_logs": True, + "enable_continuous_control_plane_and_addon_monitor": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Mock existing cluster with OpenTelemetry logs enabled - mc_2 = 
self.models.ManagedCluster( + mc_2 = self.models.ManagedCluster(location="test_location") + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_health_monitor_profile(mc_2) + ground_truth_mc_2 = self.models.ManagedCluster( location="test_location", - addon_profiles=addon_profiles, - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( - enabled=True - ), - app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( - open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( - enabled=True, - port=8081 - ) - ) - ) + health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( + enable_continuous_control_plane_and_addon_monitor=True, + ), ) - dec_2.context.attach_mc(mc_2) - - # Mock authentication-related functions for second test - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec_2.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec_2.context, 'get_location', return_value='test-location'): - dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) - - # Verify OpenTelemetry logs is disabled - self.assertIsNotNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs) - self.assertFalse(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) - self.assertIsNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs.port) + self.assertEqual(dec_mc_2, ground_truth_mc_2) - # Test standalone port update for OpenTelemetry logs (without enable/disable flags) + # disable flag on existing enabled profile dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "opentelemetry_logs_port": 
9091, + "disable_continuous_control_plane_and_addon_monitor": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Mock existing cluster with OpenTelemetry logs already enabled mc_3 = self.models.ManagedCluster( location="test_location", - addon_profiles=addon_profiles, - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( - enabled=True - ), - app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( - open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( - enabled=True, - port=8081 # Original port - ) - ) - ) + health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( + enable_continuous_control_plane_and_addon_monitor=True, + ), ) dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_health_monitor_profile(mc_3) + ground_truth_mc_3 = self.models.ManagedCluster( + location="test_location", + health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( + enable_continuous_control_plane_and_addon_monitor=False, + ), + ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) - # Mock authentication-related functions for third test - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec_3.context, 'get_location', return_value='test-location'): - dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) - - # Verify OpenTelemetry logs port is updated while remaining enabled - self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs) - self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) - 
self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 9091) - - def test_disable_azure_monitor_app_monitoring_preserves_opentelemetry(self): - # Test that disabling Azure Monitor app monitoring preserves existing OpenTelemetry configuration + # ------------------------------------------------------------------ + # Tests for _setup_azure_monitor_logs setting enableRetinaNetworkFlags + # ------------------------------------------------------------------ + def test_setup_azure_monitor_logs_sets_retina_flags_when_cnl_enabled(self): + """_setup_azure_monitor_logs sets enableRetinaNetworkFlags in config when CNL is being enabled.""" dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_azure_monitor_app_monitoring": True, + "enable_azure_monitor_logs": True, + "enable_container_network_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test", }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Mock existing cluster with both Azure Monitor app monitoring and OpenTelemetry metrics enabled mc = self.models.ManagedCluster( location="test_location", - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( - auto_instrumentation=self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation( - enabled=True - ), - open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( - enabled=True, - port=8080 - ) - ) - ) + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking(enabled=True), + ), + addon_profiles={}, ) dec.context.attach_mc(mc) - # Mock authentication-related functions - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec.context, 'get_subscription_id', 
return_value='test-subscription'), \ - patch.object(dec.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec.context, 'get_location', return_value='test-location'): - dec_mc = dec.update_azure_monitor_profile(mc) - - # Verify Azure Monitor app monitoring auto instrumentation is disabled - self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation) - self.assertFalse(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation.enabled) + with patch.object( + dec.context.external_functions, + "sanitize_loganalytics_ws_resource_id", + side_effect=lambda x: x, + ): + dec._setup_azure_monitor_logs(mc) - # Verify OpenTelemetry metrics configuration is preserved - self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics) - self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) - self.assertEqual(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 8080) + addon_profile = mc.addon_profiles.get(CONST_MONITORING_ADDON_NAME) + self.assertIsNotNone(addon_profile) + self.assertEqual(addon_profile.config.get("enableRetinaNetworkFlags"), "True") - def test_enable_azure_monitor_app_monitoring_preserves_opentelemetry(self): - # Test that enabling Azure Monitor app monitoring preserves existing OpenTelemetry configuration + def test_setup_azure_monitor_logs_no_retina_flags_without_cnl(self): + """_setup_azure_monitor_logs does NOT set enableRetinaNetworkFlags when CNL is not specified.""" dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_azure_monitor_app_monitoring": True, + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test", }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Mock existing cluster with OpenTelemetry logs enabled but Azure 
Monitor app monitoring disabled mc = self.models.ManagedCluster( location="test_location", - azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( - app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( - auto_instrumentation=self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation( - enabled=False - ), - open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( - enabled=True, - port=8081 - ) - ) - ) + addon_profiles={}, ) dec.context.attach_mc(mc) - # Mock authentication-related functions - with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ - patch.object(dec.context, 'get_subscription_id', return_value='test-subscription'), \ - patch.object(dec.context, 'get_resource_group_name', return_value='test-rg'), \ - patch.object(dec.context, 'get_name', return_value='test-cluster'), \ - patch.object(dec.context, 'get_location', return_value='test-location'): - dec_mc = dec.update_azure_monitor_profile(mc) - - # Verify Azure Monitor app monitoring auto instrumentation is enabled - self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation) - self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation.enabled) - - # Verify OpenTelemetry logs configuration is preserved - self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs) - self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) - self.assertEqual(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8081) - - def test_azure_keyvault_kms_network_access_parameter_fix(self): - """Test that azure_keyvault_kms_key_vault_network_access parameter is correctly passed through. - - This test verifies the fix for the issue where --azure-keyvault-kms-key-vault-network-access - was always being set to "Public" regardless of user input. 
- """ - key_id = "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" - - # Test CREATE scenario with Private network access - dec_create_private = AKSPreviewManagedClusterCreateDecorator( + with patch.object( + dec.context.external_functions, + "sanitize_loganalytics_ws_resource_id", + side_effect=lambda x: x, + ): + dec._setup_azure_monitor_logs(mc) + + addon_profile = mc.addon_profiles.get(CONST_MONITORING_ADDON_NAME) + self.assertIsNotNone(addon_profile) + self.assertNotIn("enableRetinaNetworkFlags", addon_profile.config) + + # ------------------------------------------------------------------ + # Tests for _setup_azure_monitor_logs workspace change detection + # ------------------------------------------------------------------ + def test_setup_azure_monitor_logs_workspace_change_triggers_postprocessing(self): + """_setup_azure_monitor_logs sets monitoring_addon_postprocessing_required when workspace changes.""" + old_ws = "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/old-ws" + new_ws = "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/new-ws" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.KeyVault/vaults/test", + "enable_azure_monitor_logs": True, + "workspace_resource_id": new_ws, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_create_private = self.models.ManagedCluster(location="test_location") - dec_create_private.context.attach_mc(mc_create_private) - result_create_private = dec_create_private.set_up_azure_keyvault_kms(mc_create_private) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + 
enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": old_ws, + "useAADAuth": "true", + }, + ) + }, + ) + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test-subscription-id") - # Verify Private network access is correctly set during CREATE - self.assertEqual( - result_create_private.security_profile.azure_key_vault_kms.key_vault_network_access, - "Private" + with patch.object( + dec.context.external_functions, + "sanitize_loganalytics_ws_resource_id", + side_effect=lambda x: x, + ): + dec._setup_azure_monitor_logs(mc) + + self.assertTrue( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) + # Verify workspace was updated + actual_key = next(k for k in mc.addon_profiles if k.lower() == "omsagent") + self.assertEqual(mc.addon_profiles[actual_key].config["logAnalyticsWorkspaceResourceID"], new_ws) - # Test CREATE scenario with Public network access - dec_create_public = AKSPreviewManagedClusterCreateDecorator( + def test_setup_azure_monitor_logs_same_workspace_no_postprocessing(self): + """_setup_azure_monitor_logs does NOT set monitoring_addon_postprocessing_required when workspace is unchanged.""" + ws = "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/same-ws" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id, - "azure_keyvault_kms_key_vault_network_access": "Public", + "enable_azure_monitor_logs": True, + "workspace_resource_id": ws, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_create_public = self.models.ManagedCluster(location="test_location") - dec_create_public.context.attach_mc(mc_create_public) - result_create_public = dec_create_public.set_up_azure_keyvault_kms(mc_create_public) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + 
enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": ws, + "useAADAuth": "true", + }, + ) + }, + ) + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test-subscription-id") - # Verify Public network access is correctly set during CREATE - self.assertEqual( - result_create_public.security_profile.azure_key_vault_kms.key_vault_network_access, - "Public" + with patch.object( + dec.context.external_functions, + "sanitize_loganalytics_ws_resource_id", + side_effect=lambda x: x, + ): + dec._setup_azure_monitor_logs(mc) + + self.assertFalse( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - # Test UPDATE scenario - changing from Public to Private - dec_update_to_private = AKSPreviewManagedClusterUpdateDecorator( + def test_setup_azure_monitor_logs_workspace_change_case_insensitive(self): + """_setup_azure_monitor_logs compares workspaces case-insensitively (no false positives on casing).""" + ws_lower = "/subscriptions/test/resourcegroups/test/providers/microsoft.operationalinsights/workspaces/my-ws" + ws_mixed = "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/my-ws" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.KeyVault/vaults/test", + "enable_azure_monitor_logs": True, + "workspace_resource_id": ws_mixed, }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Start with existing cluster that has Public access - existing_kms_profile = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id, - key_vault_network_access="Public", - ) - existing_security_profile = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=existing_kms_profile - ) - mc_update_to_private = 
self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - security_profile=existing_security_profile, + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": ws_lower, + "useAADAuth": "true", + }, + ) + }, ) - dec_update_to_private.context.attach_mc(mc_update_to_private) - result_update_to_private = dec_update_to_private.update_azure_keyvault_kms(mc_update_to_private) + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test-subscription-id") + + with patch.object( + dec.context.external_functions, + "sanitize_loganalytics_ws_resource_id", + side_effect=lambda x: x, + ): + dec._setup_azure_monitor_logs(mc) - # Verify network access was updated from Public to Private - self.assertEqual( - result_update_to_private.security_profile.azure_key_vault_kms.key_vault_network_access, - "Private" + # Same workspace (different casing) should NOT trigger postprocessing + self.assertFalse( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - # Test UPDATE scenario - changing from Private to Public - dec_update_to_public = AKSPreviewManagedClusterUpdateDecorator( + def test_setup_azure_monitor_logs_new_addon_no_postprocessing(self): + """_setup_azure_monitor_logs does NOT trigger postprocessing when there is no existing addon (fresh enable).""" + new_ws = "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/new-ws" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id, - "azure_keyvault_kms_key_vault_network_access": "Public", + "enable_azure_monitor_logs": True, + "workspace_resource_id": new_ws, }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Start with existing cluster that has Private access - existing_kms_profile_private = self.models.AzureKeyVaultKms( - 
enabled=True, - key_id=key_id, - key_vault_network_access="Private", - key_vault_resource_id="/subscriptions/test/resourceGroups/test/providers/Microsoft.KeyVault/vaults/test", - ) - existing_security_profile_private = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=existing_kms_profile_private - ) - mc_update_to_public = self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - security_profile=existing_security_profile_private, + addon_profiles={}, ) - dec_update_to_public.context.attach_mc(mc_update_to_public) - result_update_to_public = dec_update_to_public.update_azure_keyvault_kms(mc_update_to_public) + dec.context.attach_mc(mc) + dec.context.set_intermediate("subscription_id", "test-subscription-id") - # Verify network access was updated from Private to Public - self.assertEqual( - result_update_to_public.security_profile.azure_key_vault_kms.key_vault_network_access, - "Public" + with patch.object( + dec.context.external_functions, + "sanitize_loganalytics_ws_resource_id", + side_effect=lambda x: x, + ): + dec._setup_azure_monitor_logs(mc) + + # Fresh enable — no old workspace to compare, should NOT trigger postprocessing + self.assertFalse( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - def test_update_agentpool_profile_with_none_agent_pool_profiles(self): - """Test update_agentpool_profile handles None agent_pool_profiles with hosted system components""" - # Test case 1: None agent_pool_profiles with hosted system components enabled (should succeed) - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # ------------------------------------------------------------------ + # Tests for _disable_azure_monitor_logs disabling containerInsights + # ------------------------------------------------------------------ + def test_disable_azure_monitor_logs_disables_container_insights_with_msi_auth(self): + """_disable_azure_monitor_logs sets 
containerInsights.enabled=False when MSI auth is enabled, + triggering DCR/DCRA cleanup path.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "disable_azure_monitor_logs": True, + "yes": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Create a managed cluster with None agent_pool_profiles but hosted system components enabled - hosted_system_profile = self.models.ManagedClusterHostedSystemProfile(enabled=True) - mc_1 = self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - agent_pool_profiles=None, # This is the key scenario - hosted_system_profile=hosted_system_profile + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "true", + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/subscriptions/test/rg/ws", + }, + ), + }, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True, + ), + ), ) - dec_1.context.attach_mc(mc_1) - - # Should return the MC unchanged without raising an error - result_1 = dec_1.update_agentpool_profile(mc_1) - self.assertEqual(result_1, mc_1) - self.assertIsNone(result_1.agent_pool_profiles) - self.assertTrue(result_1.hosted_system_profile.enabled) - - def test_update_agentpool_profile_with_none_agent_pool_profiles_no_hosted_system(self): - """Test update_agentpool_profile raises UnknownError for None agent_pool_profiles without hosted system components""" - # Test case 2: None agent_pool_profiles without hosted system components (should raise UnknownError) - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + dec.context.attach_mc(mc) + dec.client = Mock() + dec.client.get = Mock(return_value=mc) + with patch.object(dec.context, "get_subscription_id", return_value="test-sub"), \ + patch.object(dec.context, "get_resource_group_name", 
return_value="test-rg"), \ + patch.object(dec.context, "get_name", return_value="test-cluster"), \ + patch.object( + dec.context.external_functions, "ensure_container_insights_for_monitoring", return_value=None, + ): + dec._disable_azure_monitor_logs(mc) + self.assertFalse(mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + self.assertFalse(mc.azure_monitor_profile.container_insights.enabled) + + def test_disable_azure_monitor_logs_no_container_insights_skips(self): + """_disable_azure_monitor_logs works fine when azureMonitorProfile has no containerInsights.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "disable_azure_monitor_logs": True, + "yes": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Create a managed cluster with None agent_pool_profiles and no hosted system components - mc_2 = self.models.ManagedCluster( + mc = self.models.ManagedCluster( location="test_location", - agent_pool_profiles=None, # This is the key scenario - hosted_system_profile=None + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_USING_AAD_MSI_AUTH: "false", + }, + ), + }, ) - dec_2.context.attach_mc(mc_2) - - # Should raise UnknownError - with self.assertRaises(UnknownError): - dec_2.update_agentpool_profile(mc_2) - - def test_update_agentpool_profile_with_none_agent_pool_profiles_hosted_system_disabled(self): - """Test update_agentpool_profile raises UnknownError for None agent_pool_profiles with hosted system components disabled""" - # Test case 3: None agent_pool_profiles with hosted system components disabled (should raise UnknownError) - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + dec.context.attach_mc(mc) + dec.client = Mock() + dec.client.get = Mock(return_value=mc) + dec._disable_azure_monitor_logs(mc) + self.assertFalse(mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + + # 
------------------------------------------------------------------ + # Tests for update_monitoring_profile_flow_logs: monitoring_being_enabled bypass + # ------------------------------------------------------------------ + def test_update_hlsm_standalone_skips_validation_when_monitoring_being_enabled(self): + """When enable_high_log_scale_mode=True and monitoring is being enabled simultaneously, + the validation of existing addon state is skipped.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_high_log_scale_mode": True, + "enable_azure_monitor_logs": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - - # Create a managed cluster with None agent_pool_profiles and hosted system components disabled - hosted_system_profile_disabled = self.models.ManagedClusterHostedSystemProfile(enabled=False) - mc_3 = self.models.ManagedCluster( - location="test_location", - agent_pool_profiles=None, # This is the key scenario - hosted_system_profile=hosted_system_profile_disabled + # No monitoring addon present yet — would raise RequiredArgumentMissingError + # if validation wasn't bypassed. 
+ mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + # Should NOT raise because monitoring is being enabled in the same command + dec.update_monitoring_profile_flow_logs(mc) + self.assertTrue( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - dec_3.context.attach_mc(mc_3) - - # Should raise UnknownError - with self.assertRaises(UnknownError): - dec_3.update_agentpool_profile(mc_3) - def test_update_agentpool_profile_with_empty_agent_pool_profiles(self): - """Test update_agentpool_profile raises UnknownError for empty agent_pool_profiles list""" - # Test case 4: Empty agent_pool_profiles list (should raise UnknownError) - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_hlsm_standalone_skips_validation_when_enable_addons_monitoring(self): + """When enable_high_log_scale_mode=True and --enable-addons monitoring in same command, + the validation of existing addon state is skipped.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_high_log_scale_mode": True, + "enable_addons": "monitoring", + }, CUSTOM_MGMT_AKS_PREVIEW, ) + mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + dec.update_monitoring_profile_flow_logs(mc) + self.assertTrue( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) + ) - # Create a managed cluster with empty agent_pool_profiles - mc_4 = self.models.ManagedCluster( + def test_update_enable_cnl_sets_postprocessing_flag(self): + """Enabling CNL via enable_container_network_logs sets the postprocessing flag.""" + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"enable_container_network_logs": True}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster( location="test_location", - agent_pool_profiles=[], # Empty list scenario - hosted_system_profile=None + 
network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking(enabled=True), + ), + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, + ) + }, + ) + dec.context.attach_mc(mc) + dec.update_monitoring_profile_flow_logs(mc) + self.assertTrue( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) ) - dec_4.context.attach_mc(mc_4) - - # Should raise UnknownError - with self.assertRaises(UnknownError): - dec_4.update_agentpool_profile(mc_4) - def test_update_health_monitor_profile(self): - # no flags - no change - dec_0 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_enable_retina_flow_logs_sets_postprocessing_flag(self): + """Enabling CNL via legacy enable_retina_flow_logs sets the postprocessing flag.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + {"enable_retina_flow_logs": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_0 = self.models.ManagedCluster(location="test_location") - dec_0.context.attach_mc(mc_0) - dec_mc_0 = dec_0.update_health_monitor_profile(mc_0) - ground_truth_mc_0 = self.models.ManagedCluster(location="test_location") - self.assertEqual(dec_mc_0, ground_truth_mc_0) + mc = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + advanced_networking=self.models.AdvancedNetworking(enabled=True), + ), + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={CONST_MONITORING_USING_AAD_MSI_AUTH: "true"}, + ) + }, + ) + dec.context.attach_mc(mc) + dec.update_monitoring_profile_flow_logs(mc) + self.assertTrue( + dec.context.get_intermediate("monitoring_addon_postprocessing_required", default_value=False) + ) - # both flags - mutual exclusivity error - dec_1 = AKSPreviewManagedClusterUpdateDecorator( 
+ def test_disable_monitoring_clears_cnl_and_hlsm_config(self): + """Disabling monitoring addon should wipe config including enableRetinaNetworkFlags (CNL) + and any HLSM-related settings, so that re-enabling does not carry them forward.""" + dec = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_continuous_control_plane_and_addon_monitor": True, - "disable_continuous_control_plane_and_addon_monitor": True, + "disable_azure_monitor_logs": True, + "yes": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - with self.assertRaises(MutuallyExclusiveArgumentError): - dec_1.update_health_monitor_profile(mc_1) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + CONST_MONITORING_USING_AAD_MSI_AUTH: "false", + "enableRetinaNetworkFlags": "true", + }, + ), + }, + ) + dec.context.attach_mc(mc) + dec.client = Mock() + dec.client.get = Mock(return_value=mc) - # enable flag - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + dec._disable_azure_monitor_logs(mc) + + # Config should be completely wiped — no CNL or HLSM values survive + self.assertFalse(mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + self.assertIsNone(mc.addon_profiles[CONST_MONITORING_ADDON_NAME].config) + + def test_reenable_monitoring_after_disable_does_not_carry_cnl(self): + """Re-enabling monitoring after disable should produce a fresh config + without enableRetinaNetworkFlags (CNL) or HLSM-related keys.""" + # Step 1: start with monitoring enabled + CNL (useAADAuth=false to skip DCR cleanup) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + 
"logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + CONST_MONITORING_USING_AAD_MSI_AUTH: "false", + "enableRetinaNetworkFlags": "true", + }, + ), + }, + ) + + # Step 2: disable monitoring + dec_disable = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_continuous_control_plane_and_addon_monitor": True, + "disable_azure_monitor_logs": True, + "yes": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_health_monitor_profile(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( - enable_continuous_control_plane_and_addon_monitor=True, - ), - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + dec_disable.context.attach_mc(mc) + dec_disable.client = Mock() + dec_disable.client.get = Mock(return_value=mc) + dec_disable._disable_azure_monitor_logs(mc) + self.assertIsNone(mc.addon_profiles[CONST_MONITORING_ADDON_NAME].config) - # disable flag on existing enabled profile - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + # Step 3: re-enable monitoring (no CNL flag passed) + # Simulate server round-trip: after PUT with config=None, the server + # returns the addon with config as empty dict, not None. 
+ mc.addon_profiles[CONST_MONITORING_ADDON_NAME].config = {} + dec_enable = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_continuous_control_plane_and_addon_monitor": True, + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/new-workspace", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster( - location="test_location", - health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( - enable_continuous_control_plane_and_addon_monitor=True, - ), - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_health_monitor_profile(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - health_monitor_profile=self.models.ManagedClusterHealthMonitorProfile( - enable_continuous_control_plane_and_addon_monitor=False, - ), - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) + dec_enable.context.attach_mc(mc) + dec_enable.context.set_intermediate("subscription_id", "test-subscription-id") + dec_enable._setup_azure_monitor_logs(mc) + + # Config should only have workspace + MSI auth — no CNL or HLSM keys + addon_config = mc.addon_profiles[CONST_MONITORING_ADDON_NAME].config + self.assertTrue(mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + self.assertIn("logAnalyticsWorkspaceResourceID", addon_config) + self.assertIn(CONST_MONITORING_USING_AAD_MSI_AUTH, addon_config) + self.assertNotIn("enableRetinaNetworkFlags", addon_config) if __name__ == "__main__": diff --git a/src/aks-preview/linter_exclusions.yml b/src/aks-preview/linter_exclusions.yml index 17c229ab774..aebdb64dd8d 100644 --- a/src/aks-preview/linter_exclusions.yml +++ b/src/aks-preview/linter_exclusions.yml @@ -1,5 +1,41 @@ +aks addon: + rule_exclusions: + - require_wait_command_if_no_wait +aks extension: + rule_exclusions: + - require_wait_command_if_no_wait +aks jwtauthenticator: + rule_exclusions: + - 
require_wait_command_if_no_wait +aks machine: + rule_exclusions: + - require_wait_command_if_no_wait +aks mesh upgrade: + rule_exclusions: + - require_wait_command_if_no_wait +aks mesh: + rule_exclusions: + - require_wait_command_if_no_wait +aks namespace: + rule_exclusions: + - require_wait_command_if_no_wait +aks nodepool manual-scale: + rule_exclusions: + - require_wait_command_if_no_wait +aks nodepool snapshot: + rule_exclusions: + - require_wait_command_if_no_wait +aks nodepool: + rule_exclusions: + - require_wait_command_if_no_wait +aks snapshot: + rule_exclusions: + - require_wait_command_if_no_wait aks create: parameters: + node_resource_group: + rule_exclusions: + - parameter_should_not_end_in_resource_group enable_sgxquotehelper: rule_exclusions: - option_length_too_long diff --git a/src/aks-preview/setup.py b/src/aks-preview/setup.py index cbf33355898..d28112e3442 100644 --- a/src/aks-preview/setup.py +++ b/src/aks-preview/setup.py @@ -9,7 +9,7 @@ from setuptools import find_packages, setup -VERSION = "20.0.0b2" +VERSION = "20.0.0b3" CLASSIFIERS = [ "Development Status :: 4 - Beta",