diff --git a/src/aks-preview/HISTORY.rst b/src/aks-preview/HISTORY.rst index 31089d30dbe..6edf4713d67 100644 --- a/src/aks-preview/HISTORY.rst +++ b/src/aks-preview/HISTORY.rst @@ -12,6 +12,22 @@ To release a new version, please select a new version number (usually plus 1 to Pending +++++++ +19.0.0b2 ++++++++ +* `az aks create`: Add new parameter `--enable-opentelemetry-metrics` to enable OTLP feature for metrics addon. +* `az aks update`: Add new parameter `--enable-opentelemetry-metrics` to enable OTLP feature for metrics addon. +* `az aks update`: Add new parameter `--disable-opentelemetry-metrics` to disable OTLP feature for metrics addon. +* `az aks create`: Add new parameter `--opentelemetry-metrics-port` to change the OTLP port from the default for metrics addon. +* `az aks update`: Add new parameter `--opentelemetry-metrics-port` to change the OTLP port from the default for metrics addon. +* `az aks create`: Add new parameter `--enable-opentelemetry-logs` to enable OTLP feature for logs addon. +* `az aks update`: Add new parameter `--enable-opentelemetry-logs` to enable OTLP feature for logs addon. +* `az aks update`: Add new parameter `--disable-opentelemetry-logs` to disable OTLP feature for logs addon. +* `az aks create`: Add new parameter `--opentelemetry-logs-port` to change the OTLP port from the default for logs addon. +* `az aks update`: Add new parameter `--opentelemetry-logs-port` to change the OTLP port from the default for logs addon. +* `az aks create`: Add new parameter `--enable-azure-monitor-logs`, a wrapper for `--enable-addons monitoring`. +* `az aks update`: Add new parameter `--enable-azure-monitor-logs`, a wrapper for `az aks enable-addons -a monitoring`. +* `az aks update`: Add new parameter `--disable-azure-monitor-logs`, a wrapper for `az aks disable-addons -a monitoring`. 
+ 19.0.0b1 +++++++ * [BREAKING CHANGE]: `az aks create`: remove `--enable-custom-ca-trust` and `--disable-custom-ca-trust` options diff --git a/src/aks-preview/azext_aks_preview/_consts.py b/src/aks-preview/azext_aks_preview/_consts.py index 5725d83d04c..4ae25e4e945 100644 --- a/src/aks-preview/azext_aks_preview/_consts.py +++ b/src/aks-preview/azext_aks_preview/_consts.py @@ -172,6 +172,7 @@ # monitoring CONST_MONITORING_ADDON_NAME = "omsagent" +CONST_MONITORING_ADDON_NAME_CAMELCASE = "omsAgent" CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = "logAnalyticsWorkspaceResourceID" CONST_MONITORING_USING_AAD_MSI_AUTH = "useAADAuth" diff --git a/src/aks-preview/azext_aks_preview/_help.py b/src/aks-preview/azext_aks_preview/_help.py index 486d7b1c354..59546e9a779 100644 --- a/src/aks-preview/azext_aks_preview/_help.py +++ b/src/aks-preview/azext_aks_preview/_help.py @@ -181,6 +181,10 @@ - gitops : enable GitOps (PREVIEW). - azure-keyvault-secrets-provider : enable Azure Keyvault Secrets Provider addon. - web_application_routing : enable the App Routing addon (PREVIEW). Specify "--dns-zone-resource-id" to configure DNS. + - name: --enable-azure-monitor-logs + type: bool + short-summary: Enable Azure Monitor logs for the cluster. + long-summary: This is equivalent to using "--enable-addons monitoring". Turn on Log Analytics monitoring. Uses the Log Analytics Default Workspace if it exists, else creates one. Specify "--workspace-resource-id" to use an existing workspace. If monitoring addon is enabled --no-wait argument will have no effect - name: --disable-rbac type: bool short-summary: Disable Kubernetes Role-Based Access Control. @@ -587,6 +591,24 @@ - name: --enable-azure-monitor-app-monitoring type: bool short-summary: Enable Azure Monitor Application Monitoring + - name: --enable-opentelemetry-metrics + type: bool + short-summary: Enable OpenTelemetry metrics collection. Requires Azure Monitor metrics to be enabled. 
+ - name: --opentelemetry-metrics-port + type: int + short-summary: Port for OpenTelemetry metrics collection (default port will be used if not specified) + - name: --disable-opentelemetry-metrics + type: bool + short-summary: Disable OpenTelemetry metrics collection + - name: --enable-opentelemetry-logs + type: bool + short-summary: Enable OpenTelemetry logs collection. Requires Azure Monitor logs to be enabled. + - name: --opentelemetry-logs-port + type: int + short-summary: Port for OpenTelemetry logs collection (default port will be used if not specified) + - name: --disable-opentelemetry-logs + type: bool + short-summary: Disable OpenTelemetry logs collection - name: --nodepool-labels type: string short-summary: The node labels for all node pools in this cluster. See https://aka.ms/node-labels for syntax of labels. @@ -737,6 +759,16 @@ text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-azuremonitormetrics - name: Create a kubernetes cluster with Azure Monitor App Monitoring enabled text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-azure-monitor-app-monitoring + - name: Create a kubernetes cluster with OpenTelemetry metrics collection enabled + text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-metrics --enable-azuremonitormetrics + - name: Create a kubernetes cluster with OpenTelemetry logs collection enabled + text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-logs --enable-addons monitoring + - name: Create a kubernetes cluster with Azure Monitor logs enabled (shorthand) + text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-azure-monitor-logs + - name: Create a kubernetes cluster with OpenTelemetry metrics on custom port + text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-metrics --opentelemetry-metrics-port 8888 --enable-azuremonitormetrics + - name: Create a kubernetes cluster with OpenTelemetry logs on custom 
port + text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-logs --opentelemetry-logs-port 4317 --enable-azure-monitor-logs - name: Create a kubernetes cluster with a nodepool having ip allocation mode set to "StaticBlock" text: az aks create -g MyResourceGroup -n MyManagedCluster --os-sku Ubuntu --max-pods MaxPodsPerNode --network-plugin azure --vnet-subnet-id /subscriptions/00000/resourceGroups/AnotherResourceGroup/providers/Microsoft.Network/virtualNetworks/MyVnet/subnets/NodeSubnet --pod-subnet-id /subscriptions/00000/resourceGroups/AnotherResourceGroup/providers/Microsoft.Network/virtualNetworks/MyVnet/subnets/PodSubnet --pod-ip-allocation-mode StaticBlock - name: Create a kubernetes cluster with a VirtualMachines nodepool @@ -961,6 +993,32 @@ - name: --disable-workload-identity type: bool short-summary: (PREVIEW) Disable Workload Identity addon for cluster. + - name: --enable-azure-monitor-logs + type: bool + short-summary: Enable Azure Monitor logs for the cluster. + long-summary: This is equivalent to using "az aks enable-addons -a monitoring". Enables Log Analytics monitoring for the cluster. Uses the Log Analytics Default Workspace if it exists, else creates one. Specify "--workspace-resource-id" to use an existing workspace. If monitoring addon is enabled --no-wait argument will have no effect + - name: --disable-azure-monitor-logs + type: bool + short-summary: Disable Azure Monitor logs for the cluster. + long-summary: This is equivalent to using "az aks disable-addons -a monitoring". Disables Log Analytics monitoring for the cluster. + - name: --workspace-resource-id + type: string + short-summary: The resource ID of an existing Log Analytics Workspace to use for storing monitoring data. If not specified, uses the default Log Analytics Workspace if it exists, otherwise creates one. 
+ - name: --enable-msi-auth-for-monitoring + type: bool + short-summary: Send monitoring data to Log Analytics using the cluster's assigned identity (instead of the Log Analytics Workspace's shared key). + - name: --enable-syslog + type: bool + short-summary: Enable syslog data collection for Monitoring addon + - name: --data-collection-settings + type: string + short-summary: Path to JSON file containing data collection settings for Monitoring addon. + - name: --enable-high-log-scale-mode + type: bool + short-summary: Enable High Log Scale Mode for Container Logs. + - name: --ampls-resource-id + type: string + short-summary: Resource ID of Azure Monitor Private Link scope for Monitoring Addon. - name: --enable-secret-rotation type: bool short-summary: Enable secret rotation. Use with azure-keyvault-secrets-provider addon. @@ -1184,6 +1242,24 @@ - name: --disable-azure-monitor-app-monitoring type: bool short-summary: Disable Azure Monitor Application Monitoring + - name: --enable-opentelemetry-metrics + type: bool + short-summary: Enable OpenTelemetry metrics collection. Requires Azure Monitor metrics to be enabled. + - name: --opentelemetry-metrics-port + type: int + short-summary: Port for OpenTelemetry metrics collection (default port will be used if not specified) + - name: --disable-opentelemetry-metrics + type: bool + short-summary: Disable OpenTelemetry metrics collection + - name: --enable-opentelemetry-logs + type: bool + short-summary: Enable OpenTelemetry logs collection. Requires Azure Monitor logs to be enabled. + - name: --opentelemetry-logs-port + type: int + short-summary: Port for OpenTelemetry logs collection (default port will be used if not specified) + - name: --disable-opentelemetry-logs + type: bool + short-summary: Disable OpenTelemetry logs collection - name: --enable-private-cluster type: bool short-summary: Enable private cluster for apiserver vnet integration cluster. 
@@ -1379,8 +1455,24 @@ text: az aks update -g MyResourceGroup -n MyManagedCluster --safeguards-level Warning - name: Update a kubernetes cluster with safeguards set to "Warning" and some namespaces excluded. Assumes azure policy addon is already enabled text: az aks update -g MyResourceGroup -n MyManagedCluster --safeguards-level Warning --safeguards-excluded-ns ns1,ns2 + - name: Enable Azure Monitor logs for a kubernetes cluster + text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-azure-monitor-logs + - name: Disable Azure Monitor logs for a kubernetes cluster + text: az aks update -g MyResourceGroup -n MyManagedCluster --disable-azure-monitor-logs - name: Update a kubernetes cluster to clear any namespaces excluded from safeguards. Assumes azure policy addon is already enabled text: az aks update -g MyResourceGroup -n MyManagedCluster --safeguards-excluded-ns "" + - name: Enable OpenTelemetry metrics collection on an existing cluster + text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-metrics + - name: Enable OpenTelemetry logs collection on an existing cluster + text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-logs + - name: Configure OpenTelemetry metrics with custom port + text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-metrics --opentelemetry-metrics-port 8888 + - name: Configure OpenTelemetry logs with custom port + text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-opentelemetry-logs --opentelemetry-logs-port 4317 + - name: Disable OpenTelemetry metrics collection on an existing cluster + text: az aks update -g MyResourceGroup -n MyManagedCluster --disable-opentelemetry-metrics + - name: Disable OpenTelemetry logs collection on an existing cluster + text: az aks update -g MyResourceGroup -n MyManagedCluster --disable-opentelemetry-logs """ helps['aks kollect'] = """ diff --git a/src/aks-preview/azext_aks_preview/_params.py 
b/src/aks-preview/azext_aks_preview/_params.py index 4b66be3678e..a827551d868 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -176,6 +176,10 @@ validate_assign_kubelet_identity, validate_azure_keyvault_kms_key_id, validate_azure_keyvault_kms_key_vault_resource_id, + validate_azure_monitor_and_opentelemetry_for_create, + validate_azure_monitor_and_opentelemetry_for_update, + validate_azure_monitor_logs_and_enable_addons, + validate_azure_monitor_logs_enable_disable, validate_azuremonitorworkspaceresourceid, validate_cluster_id, validate_cluster_snapshot_id, @@ -749,6 +753,12 @@ def load_arguments(self, _): options_list=["--enable-addons", "-a"], validator=validate_addons, ) + c.argument( + "enable_azure_monitor_logs", + action="store_true", + validator=validate_azure_monitor_logs_and_enable_addons, + help="Enable Azure Monitor logs for the cluster. Equivalent to '--enable-addons monitoring'." + ) c.argument("workspace_resource_id") c.argument( "enable_msi_auth_for_monitoring", @@ -1028,9 +1038,50 @@ def load_arguments(self, _): c.argument("ksm_metric_annotations_allow_list") c.argument("grafana_resource_id", validator=validate_grafanaresourceid) c.argument("enable_windows_recording_rules", action="store_true") - c.argument("enable_azure_monitor_app_monitoring", is_preview=True, action="store_true") - c.argument("enable_cost_analysis", action="store_true") - c.argument('enable_ai_toolchain_operator', is_preview=True, action='store_true') + c.argument("enable_azure_monitor_app_monitoring", + is_preview=True, + action="store_true" + ) + # OpenTelemetry parameters + c.argument("enable_opentelemetry_metrics", + is_preview=True, + action="store_true", + help="Enable OpenTelemetry metrics collection", + validator=validate_azure_monitor_and_opentelemetry_for_create + ) + c.argument("opentelemetry_metrics_port", + is_preview=True, + type=int, + help="Port for OpenTelemetry metrics collection" + ) + 
c.argument("disable_opentelemetry_metrics", + is_preview=True, + action="store_true", + help="Disable OpenTelemetry metrics collection" + ) + c.argument("enable_opentelemetry_logs", + options_list=["--enable-opentelemetry-logs"], + is_preview=True, + action="store_true", + help="Enable OpenTelemetry logs collection" + ) + c.argument("opentelemetry_logs_port", + is_preview=True, + type=int, + help="Port for OpenTelemetry logs collection" + ) + c.argument("disable_opentelemetry_logs", + is_preview=True, + action="store_true", + help="Disable OpenTelemetry logs collection" + ) + c.argument("enable_cost_analysis", + action="store_true" + ) + c.argument("enable_ai_toolchain_operator", + is_preview=True, + action="store_true" + ) # azure container storage c.argument( "enable_azure_container_storage", @@ -1281,6 +1332,18 @@ def load_arguments(self, _): is_preview=True, ) # addons + c.argument( + "enable_azure_monitor_logs", + action="store_true", + validator=validate_azure_monitor_logs_enable_disable, + help="Enable Azure Monitor logs for the cluster. Equivalent to 'az aks enable-addons -a monitoring'." + ) + # Monitoring parameters are inherited from base CLI + c.argument( + "disable_azure_monitor_logs", + action="store_true", + help="Disable Azure Monitor logs for the cluster. Equivalent to 'az aks disable-addons -a monitoring'." 
+ ) c.argument("enable_secret_rotation", action="store_true") c.argument("disable_secret_rotation", action="store_true") c.argument("rotation_poll_interval") @@ -1407,9 +1470,69 @@ def load_arguments(self, _): hide=True, ), ) - c.argument("disable_azure_monitor_metrics", action="store_true") - c.argument("enable_azure_monitor_app_monitoring", action="store_true", is_preview=True) - c.argument("disable_azure_monitor_app_monitoring", action="store_true", is_preview=True) + c.argument("enable_azure_monitor_app_monitoring", + action="store_true", + is_preview=True + ) + c.argument("disable_azure_monitor_app_monitoring", + action="store_true", + is_preview=True + ) + # Azure Monitor logs additional parameters + c.argument("workspace_resource_id", + help="Resource ID of the Azure Log Analytics workspace to use for monitoring") + c.argument( + "enable_msi_auth_for_monitoring", + arg_type=get_three_state_flag(), + is_preview=True, + help="Enable managed identity authentication for Azure Monitor logs" + ) + c.argument("enable_syslog", + arg_type=get_three_state_flag(), + is_preview=True, + help="Enable syslog collection for Azure Monitor logs") + c.argument("data_collection_settings", + is_preview=True, + help="Data collection settings for Azure Monitor logs") + c.argument("enable_high_log_scale_mode", + arg_type=get_three_state_flag(), + is_preview=True, + help="Enable high log scale mode for Azure Monitor logs") + c.argument("ampls_resource_id", + is_preview=True, + help="Resource ID of the Azure Monitor Private Link Scope to associate with the cluster") + # OpenTelemetry parameters + c.argument("enable_opentelemetry_metrics", + is_preview=True, + action="store_true", + help="Enable OpenTelemetry metrics collection", + validator=validate_azure_monitor_and_opentelemetry_for_update + ) + c.argument("opentelemetry_metrics_port", + is_preview=True, + type=int, + help="Port for OpenTelemetry metrics collection" + ) + c.argument("disable_opentelemetry_metrics", + 
is_preview=True, + action="store_true", + help="Disable OpenTelemetry metrics collection" + ) + c.argument("enable_opentelemetry_logs", + is_preview=True, + action="store_true", + help="Enable OpenTelemetry logs collection" + ) + c.argument("opentelemetry_logs_port", + is_preview=True, + type=int, + help="Port for OpenTelemetry logs collection" + ) + c.argument("disable_opentelemetry_logs", + is_preview=True, + action="store_true", + help="Disable OpenTelemetry logs collection" + ) c.argument( "enable_vpa", action="store_true", diff --git a/src/aks-preview/azext_aks_preview/_validators.py b/src/aks-preview/azext_aks_preview/_validators.py index 491e7326479..771a37e8cb3 100644 --- a/src/aks-preview/azext_aks_preview/_validators.py +++ b/src/aks-preview/azext_aks_preview/_validators.py @@ -981,3 +981,141 @@ def validate_location_resource_group_cluster_parameters(namespace): raise MutuallyExclusiveArgumentError( "Cannot specify --location and --resource-group and --cluster at the same time." ) + + +def validate_opentelemetry_ports(namespace): + """Validate that OpenTelemetry metrics and logs ports don't conflict.""" + metrics_port = getattr(namespace, 'opentelemetry_metrics_port', None) + logs_port = getattr(namespace, 'opentelemetry_logs_port', None) + + # Check if both ports are specified and are the same + if metrics_port is not None and logs_port is not None and metrics_port == logs_port: + raise ArgumentUsageError( + "OpenTelemetry metrics port and logs port cannot be the same. " + "Please specify different ports for --opentelemetry-metrics-port and --opentelemetry-logs-port." + ) + + # Validate port ranges + for port, port_name in [(metrics_port, 'metrics'), (logs_port, 'logs')]: + if port is not None and not (1 <= port <= 65535): + raise ArgumentUsageError( + f"OpenTelemetry {port_name} port must be between 1 and 65535, got {port}." 
+ ) + + +def validate_opentelemetry_metrics_dependencies(namespace): + """Validate OpenTelemetry metrics dependencies for create operations.""" + enable_otlp_metrics = getattr(namespace, 'enable_opentelemetry_metrics', False) + disable_otlp_metrics = getattr(namespace, 'disable_opentelemetry_metrics', False) + # Try both new and deprecated parameter names for Azure Monitor metrics + enable_azure_monitor_metrics = getattr(namespace, 'enable_azure_monitor_metrics', False) + enable_azuremonitormetrics = getattr(namespace, 'enable_azuremonitormetrics', False) # deprecated flag + + # Check mutual exclusion + if enable_otlp_metrics and disable_otlp_metrics: + raise MutuallyExclusiveArgumentError( + "Cannot specify both --enable-opentelemetry-metrics and --disable-opentelemetry-metrics at the same time." + ) + + # Check if trying to enable OTLP metrics without Azure Monitor metrics + # For create operations, require explicit Azure Monitor enablement + azure_monitor_enabled_via_params = enable_azure_monitor_metrics or enable_azuremonitormetrics + + if enable_otlp_metrics and not azure_monitor_enabled_via_params: + raise ArgumentUsageError( + "OpenTelemetry metrics requires Azure Monitor metrics to be enabled. " + "Please add --enable-azure-monitor-metrics or --enable-azuremonitormetrics to your command." + ) + + +def validate_opentelemetry_metrics_dependencies_for_update(namespace): + """Validate OpenTelemetry metrics dependencies for update operations.""" + enable_otlp_metrics = getattr(namespace, 'enable_opentelemetry_metrics', False) + disable_otlp_metrics = getattr(namespace, 'disable_opentelemetry_metrics', False) + + # Check mutual exclusion + if enable_otlp_metrics and disable_otlp_metrics: + raise MutuallyExclusiveArgumentError( + "Cannot specify both --enable-opentelemetry-metrics and --disable-opentelemetry-metrics at the same time." 
+ ) + + # For update operations, validation is deferred to the decorator where we have access + # to the cluster's Azure Monitor profile + + +def validate_opentelemetry_logs_dependencies(namespace): + """Validate OpenTelemetry logs dependencies for create operations.""" + enable_otlp_logs = getattr(namespace, 'enable_opentelemetry_logs', False) + disable_otlp_logs = getattr(namespace, 'disable_opentelemetry_logs', False) + enable_azure_monitor_logs = getattr(namespace, 'enable_azure_monitor_logs', False) + enable_addons = getattr(namespace, 'enable_addons', None) + + # Check mutual exclusion + if enable_otlp_logs and disable_otlp_logs: + raise MutuallyExclusiveArgumentError( + "Cannot specify both --enable-opentelemetry-logs and --disable-opentelemetry-logs at the same time." + ) + + # Check if trying to enable OTLP logs without Azure Monitor + # For create operations, require explicit Azure Monitor enablement via either: + # 1. --enable-azure-monitor-logs + # 2. --enable-addons monitoring + azure_monitor_logs_enabled = (enable_azure_monitor_logs or + (enable_addons and 'monitoring' in enable_addons)) + + if enable_otlp_logs and not azure_monitor_logs_enabled: + raise ArgumentUsageError( + "OpenTelemetry logs requires Azure Monitor logs to be enabled. " + "Please add --enable-azure-monitor-logs to your command." + ) + + +def validate_opentelemetry_logs_dependencies_for_update(namespace): + """Validate OpenTelemetry logs dependencies for update operations.""" + enable_otlp_logs = getattr(namespace, 'enable_opentelemetry_logs', False) + disable_otlp_logs = getattr(namespace, 'disable_opentelemetry_logs', False) + + # Check mutual exclusion + if enable_otlp_logs and disable_otlp_logs: + raise MutuallyExclusiveArgumentError( + "Cannot specify both --enable-opentelemetry-logs and --disable-opentelemetry-logs at the same time." 
+ ) + # For update operations, validation is deferred to the decorator where we have access + # to the cluster's Azure Monitor profile + + +def validate_azure_monitor_and_opentelemetry_for_create(namespace): + """Main validator for Azure Monitor and OpenTelemetry configurations for create operations.""" + # Run all OpenTelemetry-related validations + validate_opentelemetry_ports(namespace) + validate_opentelemetry_metrics_dependencies(namespace) + validate_opentelemetry_logs_dependencies(namespace) + + +def validate_azure_monitor_and_opentelemetry_for_update(namespace): + """Main validator for Azure Monitor and OpenTelemetry configurations for update operations.""" + # Run all OpenTelemetry-related validations + validate_opentelemetry_ports(namespace) + validate_opentelemetry_metrics_dependencies_for_update(namespace) + validate_opentelemetry_logs_dependencies_for_update(namespace) + + +def validate_azure_monitor_logs_and_enable_addons(namespace): + """Validate that enable_azure_monitor_logs and enable_addons don't conflict.""" + if hasattr(namespace, 'enable_azure_monitor_logs') and namespace.enable_azure_monitor_logs: + if hasattr(namespace, 'enable_addons') and namespace.enable_addons: + if 'monitoring' in namespace.enable_addons: + raise ArgumentUsageError( + "Cannot specify both '--enable-azure-monitor-logs' and '--enable-addons monitoring'. " + "Use either '--enable-azure-monitor-logs' or '--enable-addons monitoring'." + ) + + +def validate_azure_monitor_logs_enable_disable(namespace): + """Validate that enable and disable azure monitor logs parameters don't conflict.""" + if (hasattr(namespace, 'enable_azure_monitor_logs') and namespace.enable_azure_monitor_logs and + hasattr(namespace, 'disable_azure_monitor_logs') and namespace.disable_azure_monitor_logs): + raise ArgumentUsageError( + "Cannot specify both '--enable-azure-monitor-logs' and '--disable-azure-monitor-logs'. " + "Use either '--enable-azure-monitor-logs' or '--disable-azure-monitor-logs'." 
+ ) diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py index 2df19b2c7fa..5d20e17f5e9 100644 --- a/src/aks-preview/azext_aks_preview/custom.py +++ b/src/aks-preview/azext_aks_preview/custom.py @@ -132,6 +132,16 @@ ensure_container_insights_for_monitoring, ensure_default_log_analytics_workspace_for_monitoring, sanitize_loganalytics_ws_resource_id, + get_existing_container_insights_extension_dcr_tags, + validate_data_collection_settings, + create_data_collection_endpoint, + create_or_delete_dcr_association, + create_dce_association, + create_ampls_scope, + get_resources_client, + _get_data_collection_settings, + _trim_suffix_if_needed, + ContainerInsightsStreams, ) from azure.cli.core.api import get_config_dir from azure.cli.core.azclierror import ( @@ -141,6 +151,7 @@ MutuallyExclusiveArgumentError, RequiredArgumentMissingError, ValidationError, + AzCLIError, ) from azure.cli.core.commands import LongRunningOperation from azure.cli.core.commands.client_factory import ( @@ -151,6 +162,7 @@ from azure.cli.core.util import ( in_cloud_console, sdk_no_wait, + send_raw_request, shell_safe_json_parse, ) from azure.core.exceptions import ( @@ -200,6 +212,377 @@ def _ssl_context(): return ssl.create_default_context() +# pylint: disable=too-many-locals,too-many-branches,too-many-statements,line-too-long +def ensure_container_insights_for_monitoring_preview( + cmd, + addon, + cluster_subscription, + cluster_resource_group_name, + cluster_name, + cluster_region, + remove_monitoring=False, + aad_route=False, + create_dcr=False, + create_dcra=False, + enable_syslog=False, + data_collection_settings=None, + is_private_cluster=False, + ampls_resource_id=None, + enable_high_log_scale_mode=False, +): + """ + Preview extension version of ensure_container_insights_for_monitoring that uses REST API + to avoid large workspace resource objects causing "Request Header Fields Too Large" errors. 
+ + Either adds the ContainerInsights solution to a LA Workspace OR sets up a DCR (Data Collection Rule) and DCRA + (Data Collection Rule Association). Both let the monitoring addon send data to a Log Analytics Workspace. + """ + if not addon.enabled: + return None + + if (not is_private_cluster or not aad_route) and ampls_resource_id is not None: + raise ArgumentUsageError("--ampls-resource-id can only be used with private cluster in MSI mode.") + + is_use_ampls = False + if ampls_resource_id is not None: + is_use_ampls = True + + # workaround for this addon key which has been seen lowercased in the wild + for key in list(addon.config): + if ( + key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and + key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID + ): + addon.config[ + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID + ] = addon.config.pop(key) + + workspace_resource_id = addon.config[ + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID + ] + workspace_resource_id = sanitize_loganalytics_ws_resource_id( + workspace_resource_id + ) + + # extract subscription ID and workspace name from workspace_resource_id + try: + subscription_id = workspace_resource_id.split("/")[2] + except IndexError: + raise AzCLIError( + "Could not locate resource group in workspace-resource-id." + ) + + # extract workspace name from workspace_resource_id + try: + workspace_name = workspace_resource_id.split("/")[8] + except IndexError: + raise AzCLIError( + "Could not locate workspace name in --workspace-resource-id." 
+ ) + + location = "" + # region of workspace can be different from region of RG so find the location of the workspace_resource_id + if not remove_monitoring: + try: + # The problem is that the Azure CLI adds many headers that cause "Request Header Fields Too Large" (431) + # Let's make a minimal direct HTTP request with only essential headers + import requests + from azure.cli.core._profile import Profile + + # Get access token manually to avoid CLI headers + profile = Profile(cli_ctx=cmd.cli_ctx) + creds, _, _ = profile.get_login_credentials() + + # Get the access token for Azure Resource Manager + access_token = "" + if hasattr(creds, 'get_token'): + # For newer Azure Identity credentials + token_info = creds.get_token('https://management.azure.com/.default') + access_token = token_info.token + + # Build minimal request + url = f"https://management.azure.com{workspace_resource_id}?api-version=2015-11-01-preview&$select=location,id" + + headers = { + 'Authorization': f'Bearer {access_token}', + 'Content-Type': 'application/json', + 'Accept': 'application/json' + } + + response = requests.get(url, headers=headers, timeout=30) + + if response.status_code == 200: + resource_data = response.json() + + # Create a minimal resource object with just what we need + # pylint: disable=too-few-public-methods + class MinimalResource: + def __init__(self, location, resource_id): + self.location = location + self.id = resource_id # pylint: disable=invalid-name + + resource = MinimalResource( + location=resource_data.get('location'), + resource_id=resource_data.get('id') + ) + else: + # Fallback to original approach + resources = get_resources_client(cmd.cli_ctx, subscription_id) + resource = resources.get_by_id(workspace_resource_id, "2015-11-01-preview") + + location = resource.location + # location can have spaces for example 'East US' hence remove the spaces + location = location.replace(" ", "").lower() + + except Exception as ex: + # If all methods fail, fall back to a 
reasonable default or let the original function handle it + raise ex + + # Limit data_collection_settings size to prevent header overflow + if data_collection_settings is not None: + data_collection_size = len(str(data_collection_settings)) + if data_collection_size > 10240: # 10KB threshold + data_collection_settings = None + + if aad_route: # pylint: disable=too-many-nested-blocks + cluster_resource_id = ( + f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/" + f"providers/Microsoft.ContainerService/managedClusters/{cluster_name}" + ) + dataCollectionRuleName = f"MSCI-{location}-{cluster_name}" + # Max length of the DCR name is 64 chars + dataCollectionRuleName = _trim_suffix_if_needed(dataCollectionRuleName[0:64]) + dcr_resource_id = ( + f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/" + f"providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}" + ) + + # ingestion DCE MUST be in workspace region + ingestionDataCollectionEndpointName = f"MSCI-ingest-{location}-{cluster_name}" + # Max length of the DCE name is 44 chars + ingestionDataCollectionEndpointName = _trim_suffix_if_needed(ingestionDataCollectionEndpointName[0:43]) + ingestion_dce_resource_id = None + + # config DCE MUST be in cluster region + configDataCollectionEndpointName = f"MSCI-config-{cluster_region}-{cluster_name}" + # Max length of the DCE name is 44 chars + configDataCollectionEndpointName = _trim_suffix_if_needed(configDataCollectionEndpointName[0:43]) + config_dce_resource_id = None + + # create ingestion DCE if high log scale mode enabled + if enable_high_log_scale_mode: + ingestion_dce_resource_id = create_data_collection_endpoint(cmd, cluster_subscription, cluster_resource_group_name, location, ingestionDataCollectionEndpointName, is_use_ampls) + + # create config DCE if AMPLS resource specified + if is_use_ampls: + config_dce_resource_id = create_data_collection_endpoint(cmd, cluster_subscription, 
cluster_resource_group_name, cluster_region, configDataCollectionEndpointName, is_use_ampls) + + if create_dcr: + # first get the association between region display names and region IDs (because for some reason + # the "which RPs are available in which regions" check returns region display names) + region_names_to_id = {} + # retry the request up to two times + for _ in range(3): + try: + location_list_url = cmd.cli_ctx.cloud.endpoints.resource_manager + \ + f"/subscriptions/{cluster_subscription}/locations?api-version=2019-11-01" + r = send_raw_request(cmd.cli_ctx, "GET", location_list_url) + # this is required to fool the static analyzer. The else statement will only run if an exception + # is thrown, but flake8 will complain that e is undefined if we don't also define it here. + error = None + break + except AzCLIError as e: + error = e + else: + # This will run if the above for loop was not broken out of. This means all three requests failed + raise error + json_response = json.loads(r.text) + for region_data in json_response["value"]: + region_names_to_id[region_data["displayName"]] = region_data[ + "name" + ] + + dcr_url = cmd.cli_ctx.cloud.endpoints.resource_manager + \ + f"{dcr_resource_id}?api-version=2022-06-01" + # get existing tags on the container insights extension DCR if the customer added any + existing_tags = get_existing_container_insights_extension_dcr_tags( + cmd, dcr_url) + # get data collection settings + extensionSettings = {} + cistreams = ["Microsoft-ContainerInsights-Group-Default"] + if enable_high_log_scale_mode: + cistreams = ContainerInsightsStreams + if data_collection_settings is not None: + dataCollectionSettings = _get_data_collection_settings(data_collection_settings) + validate_data_collection_settings(dataCollectionSettings) + dataCollectionSettings.setdefault("enableContainerLogV2", True) + extensionSettings["dataCollectionSettings"] = dataCollectionSettings + cistreams = dataCollectionSettings["streams"] + else: + # If 
data_collection_settings is None, set default dataCollectionSettings + dataCollectionSettings = { + "enableContainerLogV2": True + } + extensionSettings["dataCollectionSettings"] = dataCollectionSettings + + if enable_high_log_scale_mode: + for i, v in enumerate(cistreams): + if v == "Microsoft-ContainerLogV2": + cistreams[i] = "Microsoft-ContainerLogV2-HighScale" + # create the DCR + dcr_creation_body_without_syslog = json.dumps( + { + "location": location, + "tags": existing_tags, + "kind": "Linux", + "properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": cistreams, + "extensionName": "ContainerInsights", + "extensionSettings": extensionSettings, + } + ] + }, + "dataFlows": [ + { + "streams": cistreams, + "destinations": ["la-workspace"], + } + ], + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": workspace_resource_id, + "name": "la-workspace", + } + ] + }, + "dataCollectionEndpointId": ingestion_dce_resource_id + }, + } + ) + + dcr_creation_body_with_syslog = json.dumps( + { + "location": location, + "tags": existing_tags, + "kind": "Linux", + "properties": { + "dataSources": { + "syslog": [ + { + "streams": [ + "Microsoft-Syslog" + ], + "facilityNames": [ + "auth", + "authpriv", + "cron", + "daemon", + "mark", + "kern", + "local0", + "local1", + "local2", + "local3", + "local4", + "local5", + "local6", + "local7", + "lpr", + "mail", + "news", + "syslog", + "user", + "uucp" + ], + "logLevels": [ + "Debug", + "Info", + "Notice", + "Warning", + "Error", + "Critical", + "Alert", + "Emergency" + ], + "name": "sysLogsDataSource" + } + ], + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": cistreams, + "extensionName": "ContainerInsights", + "extensionSettings": extensionSettings, + } + ] + }, + "dataFlows": [ + { + "streams": cistreams, + "destinations": ["la-workspace"], + }, + { + "streams": [ + "Microsoft-Syslog" + ], + "destinations": ["la-workspace"], + } + ], + 
"destinations": { + "logAnalytics": [ + { + "workspaceResourceId": workspace_resource_id, + "name": "la-workspace", + } + ] + }, + "dataCollectionEndpointId": ingestion_dce_resource_id + }, + } + ) + + resources = get_resources_client(cmd.cli_ctx, cluster_subscription) + for _ in range(3): + try: + if enable_syslog: + resources.begin_create_or_update_by_id( + dcr_resource_id, + "2022-06-01", + json.loads(dcr_creation_body_with_syslog) + ) + else: + resources.begin_create_or_update_by_id( + dcr_resource_id, + "2022-06-01", + json.loads(dcr_creation_body_without_syslog) + ) + error = None + break + except CLIError as e: + error = e + else: + raise error + + if create_dcra: + # only create or delete the association between the DCR and cluster + create_or_delete_dcr_association(cmd, cluster_region, remove_monitoring, cluster_resource_id, dcr_resource_id) + if is_use_ampls: + # associate config DCE to the cluster + create_dce_association(cmd, cluster_region, cluster_resource_id, config_dce_resource_id) + # link config DCE to AMPLS + create_ampls_scope(cmd, ampls_resource_id, configDataCollectionEndpointName, config_dce_resource_id) + # link workspace to AMPLS + create_ampls_scope(cmd, ampls_resource_id, workspace_name, workspace_resource_id) + # link ingest DCE to AMPLS + if enable_high_log_scale_mode: + create_ampls_scope(cmd, ampls_resource_id, ingestionDataCollectionEndpointName, ingestion_dce_resource_id) + + # pylint: disable=too-many-locals def store_acs_service_principal(subscription_id, client_secret, service_principal, file_name='acsServicePrincipal.json'): @@ -622,6 +1005,7 @@ def aks_create( bootstrap_container_registry_resource_id=None, # addons enable_addons=None, # pylint: disable=redefined-outer-name + enable_azure_monitor_logs=False, workspace_resource_id=None, enable_msi_auth_for_monitoring=True, enable_syslog=False, @@ -729,6 +1113,13 @@ def aks_create( enable_windows_recording_rules=False, # azure monitor profile - app monitoring 
enable_azure_monitor_app_monitoring=False, + # opentelemetry parameters + enable_opentelemetry_metrics=False, + opentelemetry_metrics_port=None, + disable_opentelemetry_metrics=False, + enable_opentelemetry_logs=False, + opentelemetry_logs_port=None, + disable_opentelemetry_logs=False, # metrics profile enable_cost_analysis=False, # AI toolchain operator @@ -871,6 +1262,14 @@ def aks_update( bootstrap_artifact_source=None, bootstrap_container_registry_resource_id=None, # addons + enable_azure_monitor_logs=False, + disable_azure_monitor_logs=False, + workspace_resource_id=None, + enable_msi_auth_for_monitoring=None, + enable_syslog=False, + data_collection_settings=None, + enable_high_log_scale_mode=False, + ampls_resource_id=None, enable_secret_rotation=False, disable_secret_rotation=False, rotation_poll_interval=None, @@ -927,6 +1326,13 @@ def aks_update( # azure monitor profile - app monitoring enable_azure_monitor_app_monitoring=False, disable_azure_monitor_app_monitoring=False, + # opentelemetry parameters + enable_opentelemetry_metrics=False, + opentelemetry_metrics_port=None, + disable_opentelemetry_metrics=False, + enable_opentelemetry_logs=False, + opentelemetry_logs_port=None, + disable_opentelemetry_logs=False, enable_vpa=False, disable_vpa=False, enable_optimized_addon_scaling=False, diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index 500376d5116..c2701d747f7 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -25,6 +25,7 @@ CONST_MANAGED_CLUSTER_SKU_TIER_FREE, CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM, CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD, + CONST_MONITORING_ADDON_NAME_CAMELCASE, CONST_NETWORK_DATAPLANE_CILIUM, CONST_NETWORK_PLUGIN_AZURE, CONST_NETWORK_PLUGIN_MODE_OVERLAY, @@ -88,6 +89,9 @@ from azext_aks_preview.azuremonitormetrics.azuremonitorprofile import ( 
ensure_azure_monitor_profile_prerequisites, ) +from azext_aks_preview.custom import ( + ensure_container_insights_for_monitoring_preview, +) from azure.cli.command_modules.acs._client_factory import get_graph_client from azure.cli.command_modules.acs._consts import ( CONST_OUTBOUND_TYPE_LOAD_BALANCER, @@ -104,6 +108,9 @@ safe_lower, ) from azure.cli.command_modules.acs._validators import extract_comma_separated_string +from azure.cli.command_modules.acs.addonconfiguration import ( + sanitize_loganalytics_ws_resource_id, +) from azure.cli.command_modules.acs.managed_cluster_decorator import ( AKSManagedClusterContext, AKSManagedClusterCreateDecorator, @@ -222,6 +229,12 @@ def external_functions(self) -> SimpleNamespace: ] = perform_disable_azure_container_storage_v1 external_functions["perform_enable_azure_container_storage"] = perform_enable_azure_container_storage external_functions["perform_disable_azure_container_storage"] = perform_disable_azure_container_storage + external_functions["sanitize_loganalytics_ws_resource_id"] = sanitize_loganalytics_ws_resource_id + # Override base module function with preview version that uses REST API to avoid + # "Request Header Fields Too Large" errors + external_functions["ensure_container_insights_for_monitoring"] = ( + ensure_container_insights_for_monitoring_preview + ) self.__external_functions = SimpleNamespace(**external_functions) return self.__external_functions @@ -360,7 +373,26 @@ def get_enable_msi_auth_for_monitoring(self) -> Union[bool, None]: sku_name = self.get_sku_name() if sku_name == CONST_MANAGED_CLUSTER_SKU_NAME_AUTOMATIC: return True - return enable_msi_auth_for_monitoring + + # Check explicit settings + disable_msi_auth = self.raw_param.get("disable_msi_auth_for_monitoring") + enable_msi_auth = self.raw_param.get("enable_msi_auth_for_monitoring") + enable_azure_monitor_logs = self.raw_param.get("enable_azure_monitor_logs") + + # Process explicit settings and special cases + if disable_msi_auth: + result 
= False + elif enable_msi_auth: + result = True + elif enable_azure_monitor_logs: + result = True + elif enable_msi_auth_for_monitoring is False: + result = False + elif enable_msi_auth_for_monitoring is None and not disable_msi_auth and not enable_msi_auth: + result = True + else: + result = enable_msi_auth_for_monitoring + return result def _get_load_balancer_sku(self, enable_validation: bool = False) -> Union[str, None]: """Internal function to obtain the value of load_balancer_sku, default value is @@ -2228,6 +2260,320 @@ def get_disable_azure_monitor_app_monitoring(self) -> bool: """ return self._get_disable_azure_monitor_app_monitoring(enable_validation=True) + def _get_enable_azure_monitor_logs(self, enable_validation: bool = False) -> bool: + """Internal function to obtain the value of enable_azure_monitor_logs. + This function supports the option of enable_validation. When enabled, if both + enable_azure_monitor_logs and disable_azure_monitor_logs are specified, + raise a MutuallyExclusiveArgumentError. + :return: bool + """ + # Read the original value passed by the command. + enable_azure_monitor_logs = self.raw_param.get("enable_azure_monitor_logs") + if enable_validation and enable_azure_monitor_logs and self._get_disable_azure_monitor_logs(False): + raise MutuallyExclusiveArgumentError( + "Cannot specify --enable-azure-monitor-logs and --disable-azure-monitor-logs at the same time." + ) + + return enable_azure_monitor_logs + + def get_enable_azure_monitor_logs(self) -> bool: + """Obtain the value of enable_azure_monitor_logs. + If both enable_azure_monitor_logs and disable_azure_monitor_logs are specified, + raise a MutuallyExclusiveArgumentError. + :return: bool + """ + return self._get_enable_azure_monitor_logs(enable_validation=True) + + def _get_disable_azure_monitor_logs(self, enable_validation: bool = False) -> bool: + """Internal function to obtain the value of disable_azure_monitor_logs. + This function supports the option of enable_validation. 
When enabled, if both + enable_azure_monitor_logs and disable_azure_monitor_logs are specified, + raise a MutuallyExclusiveArgumentError. + :return: bool + """ + # Read the original value passed by the command. + disable_azure_monitor_logs = self.raw_param.get("disable_azure_monitor_logs") + if enable_validation and disable_azure_monitor_logs and self._get_enable_azure_monitor_logs(False): + raise MutuallyExclusiveArgumentError( + "Cannot specify --enable-azure-monitor-logs and --disable-azure-monitor-logs at the same time." + ) + return disable_azure_monitor_logs + + def get_disable_azure_monitor_logs(self) -> bool: + """Obtain the value of disable_azure_monitor_logs. + If both enable_azure_monitor_logs and disable_azure_monitor_logs are specified, + raise a MutuallyExclusiveArgumentError. + :return: bool + """ + return self._get_disable_azure_monitor_logs(enable_validation=True) + + # OpenTelemetry methods + def _get_enable_opentelemetry_metrics(self, enable_validation: bool = False) -> bool: + """Internal function to obtain the value of enable_opentelemetry_metrics. + This function supports the option of enable_validation. When enabled, if both + enable_opentelemetry_metrics and disable_opentelemetry_metrics are specified, + raise a MutuallyExclusiveArgumentError. + For update operations, also validates that Azure Monitor metrics is enabled + in the cluster's Azure Monitor profile. + :return: bool + """ + # Read the original value passed by the command. + enable_opentelemetry_metrics = self.raw_param.get("enable_opentelemetry_metrics") + + # This parameter does not need dynamic completion. + if enable_validation: + if enable_opentelemetry_metrics and self._get_disable_opentelemetry_metrics( + enable_validation=False): + raise MutuallyExclusiveArgumentError( + "Cannot specify --enable-opentelemetry-metrics and " + "--disable-opentelemetry-metrics at the same time." 
+ ) + + # For update operations, validate that Azure Monitor metrics is enabled + # in the cluster's Azure Monitor profile + if (enable_opentelemetry_metrics and + self.decorator_mode == DecoratorMode.UPDATE and + self.mc): + # Check if Azure Monitor metrics is enabled via command parameters + azure_monitor_enabled_via_params = ( + self._get_enable_azure_monitor_metrics(enable_validation=False) or + self.raw_param.get("enable_azuremonitormetrics") + ) + + # Check if Azure Monitor metrics is already enabled in the cluster's Azure Monitor profile + azure_monitor_enabled_in_profile = ( + self.mc.azure_monitor_profile and + self.mc.azure_monitor_profile.metrics and + self.mc.azure_monitor_profile.metrics.enabled + ) + + if not azure_monitor_enabled_via_params and not azure_monitor_enabled_in_profile: + raise ArgumentUsageError( + "OpenTelemetry metrics requires Azure Monitor metrics to be enabled. " + "Azure Monitor metrics is not currently enabled in the cluster. " + "Please add --enable-azure-monitor-metrics to your command." + ) + return enable_opentelemetry_metrics if enable_opentelemetry_metrics is not None else False + + def get_enable_opentelemetry_metrics(self) -> bool: + """Obtain the value of enable_opentelemetry_metrics. + This function will verify the parameter by default. If both enable_opentelemetry_metrics and + disable_opentelemetry_metrics are specified, raise a MutuallyExclusiveArgumentError. + :return: bool + """ + return self._get_enable_opentelemetry_metrics(enable_validation=True) + + def _get_disable_opentelemetry_metrics(self, enable_validation: bool = False) -> bool: + """Internal function to obtain the value of disable_opentelemetry_metrics. + This function supports the option of enable_validation. When enabled, if both enable_opentelemetry_metrics and + disable_opentelemetry_metrics are specified, raise a MutuallyExclusiveArgumentError. + :return: bool + """ + # Read the original value passed by the command. 
+ disable_opentelemetry_metrics = self.raw_param.get("disable_opentelemetry_metrics") + + if enable_validation: + if disable_opentelemetry_metrics and self._get_enable_opentelemetry_metrics( + enable_validation=False): + raise MutuallyExclusiveArgumentError( + "Cannot specify --enable-opentelemetry-metrics and " + "--disable-opentelemetry-metrics at the same time." + ) + return disable_opentelemetry_metrics if disable_opentelemetry_metrics is not None else False + + def get_disable_opentelemetry_metrics(self) -> bool: + """Obtain the value of disable_opentelemetry_metrics. + This function will verify the parameter by default. If both enable_opentelemetry_metrics and + disable_opentelemetry_metrics are specified, raise a MutuallyExclusiveArgumentError. + :return: bool + """ + return self._get_disable_opentelemetry_metrics(enable_validation=True) + + def get_opentelemetry_metrics_port(self) -> Union[int, None]: + """Obtain the value of opentelemetry_metrics_port. + :return: int or None + """ + opentelemetry_metrics_port = self.raw_param.get("opentelemetry_metrics_port") + + # Validate that port is only specified when OpenTelemetry metrics are enabled + if opentelemetry_metrics_port is not None: + # Validate that port is not negative + if opentelemetry_metrics_port < 0: + raise InvalidArgumentValueError( + "--opentelemetry-metrics-port must be a non-negative integer." + ) + # Check if disabling Azure Monitor metrics - port specification is invalid + if self.get_disable_azure_monitor_metrics(): + raise InvalidArgumentValueError( + "--opentelemetry-metrics-port cannot be specified when --disable-azure-monitor-metrics is used." + ) + + # For CREATE: --enable-opentelemetry-metrics must be explicitly specified + if self.decorator_mode == DecoratorMode.CREATE: + if not self.get_enable_opentelemetry_metrics(): + raise InvalidArgumentValueError( + "--opentelemetry-metrics-port can only be specified when " + "--enable-opentelemetry-metrics is also specified." 
+ ) + # For UPDATE: allow if either explicitly enabling OR already enabled in cluster + elif self.decorator_mode == DecoratorMode.UPDATE: + explicitly_enabling = self.get_enable_opentelemetry_metrics() + already_enabled = ( + self.mc and + self.mc.azure_monitor_profile and + self.mc.azure_monitor_profile.app_monitoring and + self.mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics and + self.mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled + ) + if not explicitly_enabling and not already_enabled: + raise InvalidArgumentValueError( + "--opentelemetry-metrics-port can only be specified when " + "--enable-opentelemetry-metrics is also specified or " + "OpenTelemetry metrics are already enabled." + ) + + return opentelemetry_metrics_port + + def _get_enable_opentelemetry_logs(self, enable_validation: bool = False) -> bool: + """Internal function to obtain the value of enable_opentelemetry_logs. + This function supports the option of enable_validation. When enabled, if both enable_opentelemetry_logs and + disable_opentelemetry_logs are specified, raise a MutuallyExclusiveArgumentError. + For update operations, also validates that Azure Monitor logs is enabled in the cluster's Azure Monitor profile. + :return: bool + """ + # Read the original value passed by the command. + enable_opentelemetry_logs = self.raw_param.get("enable_opentelemetry_logs") + + # This parameter does not need dynamic completion. + if enable_validation: + if enable_opentelemetry_logs and self._get_disable_opentelemetry_logs( + enable_validation=False): + raise MutuallyExclusiveArgumentError( + "Cannot specify --enable-opentelemetry-logs and " + "--disable-opentelemetry-logs at the same time." 
+ ) + + # For update operations, validate that Azure Monitor logs is enabled + # in the cluster's Azure Monitor profile OR being enabled in this command + if (enable_opentelemetry_logs and + self.decorator_mode == DecoratorMode.UPDATE and + self.mc): + # Check if Azure Monitor logs is being enabled in this command + enable_azure_monitor_logs_in_command = self.raw_param.get("enable_azure_monitor_logs") + + # Check if Azure Monitor logs is currently enabled in the cluster + # This can be in two places: + # 1. New API: azureMonitorProfile.containerInsights.enabled + # 2. Legacy: addonProfiles.omsagent.enabled (or omsAgent with camelCase) + addon_consts = self.get_addon_consts() + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + + # Check new API location + container_insights_enabled = ( + self.mc.azure_monitor_profile and + self.mc.azure_monitor_profile.container_insights and + self.mc.azure_monitor_profile.container_insights.enabled + ) + + # Check legacy addon location (try both lowercase and camelCase) + monitoring_addon_enabled = False + if self.mc.addon_profiles: + # Try lowercase first (constant value) + if CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles: + monitoring_addon_enabled = self.mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled + # Try camelCase variant (what Azure actually returns) + elif CONST_MONITORING_ADDON_NAME_CAMELCASE in self.mc.addon_profiles: + monitoring_addon_enabled = self.mc.addon_profiles[CONST_MONITORING_ADDON_NAME_CAMELCASE].enabled + + monitoring_addon_currently_enabled = container_insights_enabled or monitoring_addon_enabled + + # Allow OpenTelemetry logs if monitoring is either: + # 1. Currently enabled in the cluster (via new API or legacy addon), OR + # 2. Being enabled in this command + if not (monitoring_addon_currently_enabled or enable_azure_monitor_logs_in_command): + raise ArgumentUsageError( + "OpenTelemetry logs requires Azure Monitor logs (monitoring addon) to be enabled. 
" + "Add --enable-azure-monitor-logs to your command." + ) + return enable_opentelemetry_logs if enable_opentelemetry_logs is not None else False + + def get_enable_opentelemetry_logs(self) -> bool: + """Obtain the value of enable_opentelemetry_logs. + This function will verify the parameter by default. If both enable_opentelemetry_logs and + disable_opentelemetry_logs are specified, raise a MutuallyExclusiveArgumentError. + :return: bool + """ + return self._get_enable_opentelemetry_logs(enable_validation=True) + + def _get_disable_opentelemetry_logs(self, enable_validation: bool = False) -> bool: + """Internal function to obtain the value of disable_opentelemetry_logs. + This function supports the option of enable_validation. When enabled, if both enable_opentelemetry_logs and + disable_opentelemetry_logs are specified, raise a MutuallyExclusiveArgumentError. + :return: bool + """ + # Read the original value passed by the command. + disable_opentelemetry_logs = self.raw_param.get("disable_opentelemetry_logs") + + if enable_validation: + if disable_opentelemetry_logs and self._get_enable_opentelemetry_logs(enable_validation=False): + raise MutuallyExclusiveArgumentError( + "Cannot specify --enable-opentelemetry-logs and --disable-opentelemetry-logs at the same time." + ) + return disable_opentelemetry_logs if disable_opentelemetry_logs is not None else False + + def get_disable_opentelemetry_logs(self) -> bool: + """Obtain the value of disable_opentelemetry_logs. + This function will verify the parameter by default. If both enable_opentelemetry_logs and + disable_opentelemetry_logs are specified, raise a MutuallyExclusiveArgumentError. + :return: bool + """ + return self._get_disable_opentelemetry_logs(enable_validation=True) + + def get_opentelemetry_logs_port(self) -> Union[int, None]: + """Obtain the value of opentelemetry_logs_port. 
+ :return: int or None + """ + opentelemetry_logs_port = self.raw_param.get("opentelemetry_logs_port") + + # Validate that port is only specified when OpenTelemetry logs are enabled + if opentelemetry_logs_port is not None: + # Validate that port is not negative + if opentelemetry_logs_port < 0: + raise InvalidArgumentValueError( + "--opentelemetry-logs-port must be a non-negative integer." + ) + # Check if disabling Azure Monitor logs - port specification is invalid + if self.get_disable_azure_monitor_logs(): + raise InvalidArgumentValueError( + "--opentelemetry-logs-port cannot be specified when --disable-azure-monitor-logs is used." + ) + + # For CREATE: --enable-opentelemetry-logs must be explicitly specified + if self.decorator_mode == DecoratorMode.CREATE: + if not self.get_enable_opentelemetry_logs(): + raise InvalidArgumentValueError( + "--opentelemetry-logs-port can only be specified when " + "--enable-opentelemetry-logs is also specified." + ) + # For UPDATE: allow if either explicitly enabling OR already enabled in cluster + elif self.decorator_mode == DecoratorMode.UPDATE: + explicitly_enabling = self.get_enable_opentelemetry_logs() + already_enabled = ( + self.mc and + self.mc.azure_monitor_profile and + self.mc.azure_monitor_profile.app_monitoring and + self.mc.azure_monitor_profile.app_monitoring.open_telemetry_logs and + self.mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled + ) + if not explicitly_enabling and not already_enabled: + raise InvalidArgumentValueError( + "--opentelemetry-logs-port can only be specified when " + "--enable-opentelemetry-logs is also specified or " + "OpenTelemetry logs are already enabled." + ) + + return opentelemetry_logs_port + def _get_enable_vpa(self, enable_validation: bool = False) -> bool: """Internal function to obtain the value of enable_vpa. This function supports the option of enable_vpa. 
When enabled, if both enable_vpa and enable_vpa are @@ -3256,6 +3602,11 @@ def set_up_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster: CONST_GITOPS_ADDON_NAME = addon_consts.get("CONST_GITOPS_ADDON_NAME") mc = super().set_up_addon_profiles(mc) + + # Handle enable Azure Monitor logs directly + if self.context.get_enable_azure_monitor_logs(): + self._setup_azure_monitor_logs(mc) + addon_profiles = mc.addon_profiles addons = self.context.get_enable_addons() if "gitops" in addons: @@ -3504,45 +3855,176 @@ def set_up_node_resource_group_profile(self, mc: ManagedCluster) -> ManagedClust mc.node_resource_group_profile = node_resource_group_profile return mc + def _ensure_azure_monitor_profile(self, mc: ManagedCluster) -> None: + """Ensure azure monitor profile exists on the managed cluster.""" + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() + + def _ensure_app_monitoring_profile(self, mc: ManagedCluster) -> None: + """Ensure app monitoring profile exists on the managed cluster.""" + self._ensure_azure_monitor_profile(mc) + if mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + ) + + def _setup_azure_monitor_metrics(self, mc: ManagedCluster) -> None: + """Set up Azure Monitor metrics configuration.""" + ksm_metric_labels_allow_list = self.context.raw_param.get("ksm_metric_labels_allow_list", "") + ksm_metric_annotations_allow_list = self.context.raw_param.get("ksm_metric_annotations_allow_list", "") + + self._ensure_azure_monitor_profile(mc) + mc.azure_monitor_profile.metrics = ( + self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True) + ) + + kube_state_metrics = ( + self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( + metric_labels_allowlist=str(ksm_metric_labels_allow_list), + metric_annotations_allow_list=str(ksm_metric_annotations_allow_list) + )) + 
mc.azure_monitor_profile.metrics.kube_state_metrics = kube_state_metrics + self.context.set_intermediate("azuremonitormetrics_addon_enabled", True, overwrite_exists=True) + + def _setup_azure_monitor_app_monitoring(self, mc: ManagedCluster) -> None: + """Set up Azure Monitor app monitoring configuration.""" + self._ensure_azure_monitor_profile(mc) + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + ) + mc.azure_monitor_profile.app_monitoring.auto_instrumentation = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation(enabled=True) + ) + + def _setup_azure_monitor_logs(self, mc: ManagedCluster) -> None: + """Set up Azure Monitor logs configuration.""" + + addon_consts = self.context.get_addon_consts() + + if mc.addon_profiles is None: + mc.addon_profiles = {} + + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + addon_profile = mc.addon_profiles.get( + CONST_MONITORING_ADDON_NAME, + self.models.ManagedClusterAddonProfile(enabled=False)) + addon_profile.enabled = True + + # Get or create workspace resource ID + workspace_resource_id = self.context.raw_param.get("workspace_resource_id") + if not workspace_resource_id: + ensure_workspace_func = ( + self.context.external_functions.ensure_default_log_analytics_workspace_for_monitoring) + workspace_resource_id = ensure_workspace_func( + self.cmd, + self.context.get_subscription_id(), + self.context.get_resource_group_name() + ) + + # Sanitize and configure + sanitize_func = self.context.external_functions.sanitize_loganalytics_ws_resource_id + workspace_resource_id = sanitize_func(workspace_resource_id) + + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get( + "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID") + CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + + # Get MSI auth setting using the same logic as update decorator + 
enable_msi_auth_bool = self.context.get_enable_msi_auth_for_monitoring() + + if enable_msi_auth_bool: + enable_msi_auth = "true" + else: + enable_msi_auth = "false" + + # Create completely new config + addon_profile.config = { + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id, + CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth + } + mc.addon_profiles[CONST_MONITORING_ADDON_NAME] = addon_profile + + self.context.set_intermediate("monitoring_addon_enabled", True, overwrite_exists=True) + + # Call ensure_container_insights_for_monitoring with all parameters (similar to postprocessing) + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + if (mc.addon_profiles and + CONST_MONITORING_ADDON_NAME in mc.addon_profiles and + mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled): + + # Set intermediate value to trigger postprocessing + self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) + + def _setup_opentelemetry_metrics(self, mc: ManagedCluster) -> None: + """Set up OpenTelemetry metrics configuration.""" + self._ensure_app_monitoring_profile(mc) + + otlp_metrics_config = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics(enabled=True)) + metrics_port = self.context.get_opentelemetry_metrics_port() + if metrics_port: + otlp_metrics_config.port = metrics_port + + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = otlp_metrics_config + + def _disable_opentelemetry_metrics(self, mc: ManagedCluster) -> None: + """Disable OpenTelemetry metrics configuration.""" + self._ensure_app_monitoring_profile(mc) + if mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics is None: + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics(enabled=False)) + else: + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled 
= False + # Clear the port when disabling OpenTelemetry metrics + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port = None + + def _setup_opentelemetry_logs(self, mc: ManagedCluster) -> None: + """Set up OpenTelemetry logs configuration.""" + self._ensure_app_monitoring_profile(mc) + + otlp_logs_config = self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=True) + logs_port = self.context.get_opentelemetry_logs_port() + if logs_port: + otlp_logs_config.port = logs_port + + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = otlp_logs_config + + def _disable_opentelemetry_logs(self, mc: ManagedCluster) -> None: + """Disable OpenTelemetry logs configuration.""" + if (mc.azure_monitor_profile is not None and + mc.azure_monitor_profile.app_monitoring is not None): + if mc.azure_monitor_profile.app_monitoring.open_telemetry_logs is None: + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=False) + ) + else: + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled = False + # Clear the port when disabling OpenTelemetry logs + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port = None + def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster: """Set up azure monitor profile for the ManagedCluster object. 
:return: the ManagedCluster object """ self._ensure_mc(mc) - # read the original value passed by the command - ksm_metric_labels_allow_list = self.context.raw_param.get("ksm_metric_labels_allow_list") - ksm_metric_annotations_allow_list = self.context.raw_param.get("ksm_metric_annotations_allow_list") - if ksm_metric_labels_allow_list is None: - ksm_metric_labels_allow_list = "" - if ksm_metric_annotations_allow_list is None: - ksm_metric_annotations_allow_list = "" + if self.context.get_enable_azure_monitor_metrics(): - if mc.azure_monitor_profile is None: - mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() # pylint: disable=no-member - mc.azure_monitor_profile.metrics = ( - self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False) # pylint: disable=no-member - ) - # pylint: disable=line-too-long, no-member - mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( - metric_labels_allowlist=str(ksm_metric_labels_allow_list), - metric_annotations_allow_list=str(ksm_metric_annotations_allow_list)) - self.context.set_intermediate("azuremonitormetrics_addon_enabled", True, overwrite_exists=True) + self._setup_azure_monitor_metrics(mc) if self.context.get_enable_azure_monitor_app_monitoring(): - if mc.azure_monitor_profile is None: - mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() - mc.azure_monitor_profile.app_monitoring = ( - self.models.ManagedClusterAzureMonitorProfileAppMonitoring() - ) - mc.azure_monitor_profile.app_monitoring.auto_instrumentation = ( - self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation(enabled=True) - ) - mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = ( - self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics(enabled=True) - ) - mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = ( - 
self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=True) - ) + self._setup_azure_monitor_app_monitoring(mc) + + if self.context.get_enable_opentelemetry_metrics(): + self._setup_opentelemetry_metrics(mc) + + if self.context.get_disable_opentelemetry_metrics(): + self._disable_opentelemetry_metrics(mc) + + if self.context.get_enable_opentelemetry_logs(): + self._setup_opentelemetry_logs(mc) + + if self.context.get_disable_opentelemetry_logs(): + self._disable_opentelemetry_logs(mc) return mc @@ -4049,7 +4531,7 @@ def immediate_processing_after_request(self, mc: ManagedCluster) -> None: "Could not create a role assignment for subnet. Are you an Owner on this subscription?" ) - # pylint: disable=too-many-locals + # pylint: disable=too-many-locals,too-many-branches def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: """Postprocessing performed after the cluster is created. @@ -4074,7 +4556,8 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: self.context.external_functions.add_monitoring_role_assignment( cluster, cluster_resource_id, self.cmd ) - elif self.context.raw_param.get("enable_addons") is not None: + elif (self.context.raw_param.get("enable_addons") is not None or + self.context.raw_param.get("enable_azure_monitor_logs") is not None): # Create the DCR Association here addon_consts = self.context.get_addon_consts() CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") @@ -4096,6 +4579,46 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: enable_high_log_scale_mode=self.context.get_enable_high_log_scale_mode(), ) + # Handle monitoring addon postprocessing (disable case) - same logic as aks_disable_addons + monitoring_addon_disable_postprocessing_required = self.context.get_intermediate( + "monitoring_addon_disable_postprocessing_required", default_value=False + ) + + if monitoring_addon_disable_postprocessing_required: + 
addon_consts = self.context.get_addon_consts() + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + + # Get the current cluster state to check config before it was disabled + current_cluster = self.client.get(self.context.get_resource_group_name(), self.context.get_name()) + + if (current_cluster.addon_profiles and + CONST_MONITORING_ADDON_NAME in current_cluster.addon_profiles): + + # Use the current cluster addon profile for cleanup + addon_profile = current_cluster.addon_profiles[CONST_MONITORING_ADDON_NAME] + + # Call ensure_container_insights_for_monitoring with remove_monitoring=True (same as aks_disable_addons) + try: + self.context.external_functions.ensure_container_insights_for_monitoring( + self.cmd, + addon_profile, + self.context.get_subscription_id(), + self.context.get_resource_group_name(), + self.context.get_name(), + self.context.get_location(), + remove_monitoring=True, + aad_route=True, + create_dcr=False, + create_dcra=True, + enable_syslog=False, + data_collection_settings=None, + ampls_resource_id=None, + enable_high_log_scale_mode=False + ) + except TypeError: + # Ignore TypeError just like aks_disable_addons does + pass + # ingress appgw addon ingress_appgw_addon_enabled = self.context.get_intermediate("ingress_appgw_addon_enabled", default_value=False) if ingress_appgw_addon_enabled: @@ -5267,49 +5790,145 @@ def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster: ) ) + # Handle disable Azure Monitor metrics if self.context.get_disable_azure_monitor_metrics(): - if mc.azure_monitor_profile is None: - mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() # pylint: disable=no-member - mc.azure_monitor_profile.metrics = ( - self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False) # pylint: disable=no-member - ) + self._disable_azure_monitor_metrics(mc) if self.context.get_enable_azure_monitor_app_monitoring(): if mc.azure_monitor_profile is None: 
mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() - mc.azure_monitor_profile.app_monitoring = self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + + # Preserve existing app_monitoring configuration if it exists + if mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring()) + + # Only enable auto instrumentation, preserve OpenTelemetry settings mc.azure_monitor_profile.app_monitoring.auto_instrumentation = ( self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation(enabled=True) ) - mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = ( + + # Handle OpenTelemetry metrics updates - these work within the Azure Monitor App Monitoring context + if self.context.get_enable_opentelemetry_metrics(): + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() + if mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + ) + + # Configure OpenTelemetry metrics with custom port if provided + otlp_metrics_config = ( self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics(enabled=True) ) - mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = ( - self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=True) - ) + if self.context.get_opentelemetry_metrics_port(): + otlp_metrics_config.port = self.context.get_opentelemetry_metrics_port() + + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = otlp_metrics_config + + # Handle OpenTelemetry logs updates - these work within the Azure Monitor App Monitoring context + if self.context.get_enable_opentelemetry_logs(): + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() + if 
mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + ) + + # Configure OpenTelemetry logs with custom port if provided + otlp_logs_config = self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=True) + if self.context.get_opentelemetry_logs_port(): + otlp_logs_config.port = self.context.get_opentelemetry_logs_port() + + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = otlp_logs_config if self.context.get_disable_azure_monitor_app_monitoring(): if mc.azure_monitor_profile is None: mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() - mc.azure_monitor_profile.app_monitoring = self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + + # Preserve existing app_monitoring configuration if it exists + if mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring()) + + # Only disable auto instrumentation, preserve OpenTelemetry settings mc.azure_monitor_profile.app_monitoring.auto_instrumentation = ( self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation(enabled=False) ) - mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = ( - self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics(enabled=False) - ) - mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = ( - self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=False) - ) + + # Handle disable OpenTelemetry metrics updates + if self.context.get_disable_opentelemetry_metrics(): + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() + if mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + ) 
+ + # Create or update the metrics config, setting enabled=False and clearing the port + if mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics is None: + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics(enabled=False) + ) + else: + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled = False + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port = None + + # Handle disable OpenTelemetry logs updates + if self.context.get_disable_opentelemetry_logs(): + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() + if mc.azure_monitor_profile.app_monitoring is None: + mc.azure_monitor_profile.app_monitoring = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoring() + ) + + # Create or update the logs config, setting enabled=False and clearing the port + if mc.azure_monitor_profile.app_monitoring.open_telemetry_logs is None: + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs = ( + self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs(enabled=False) + ) + else: + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled = False + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port = None + + # Handle standalone port updates for OpenTelemetry metrics + if (self.context.get_opentelemetry_metrics_port() and + not self.context.get_enable_opentelemetry_metrics() and + not self.context.get_disable_opentelemetry_metrics()): + # Only update port if OpenTelemetry metrics is already enabled and we're not changing the enabled state + if (mc.azure_monitor_profile and + mc.azure_monitor_profile.app_monitoring and + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics and + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled): + metrics_port = self.context.get_opentelemetry_metrics_port() + 
mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port = metrics_port + + # Handle standalone port updates for OpenTelemetry logs + if (self.context.get_opentelemetry_logs_port() and + not self.context.get_enable_opentelemetry_logs() and + not self.context.get_disable_opentelemetry_logs()): + # Only update port if OpenTelemetry logs is already enabled and we're not changing the enabled state + if (mc.azure_monitor_profile and + mc.azure_monitor_profile.app_monitoring and + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs and + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled): + logs_port = self.context.get_opentelemetry_logs_port() + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port = logs_port # TODO: should remove get value from enable_azuremonitormetrics once the option is removed # TODO: should remove get value from disable_azuremonitormetrics once the option is removed - if ( - self.context.raw_param.get("enable_azure_monitor_metrics") or - self.context.raw_param.get("enable_azuremonitormetrics") or - self.context.raw_param.get("disable_azure_monitor_metrics") or - self.context.raw_param.get("disable_azuremonitormetrics") - ): + azure_monitor_metrics = (self.context.raw_param.get("enable_azuremonitormetrics") or + self.context.get_enable_azure_monitor_metrics() or + self.context.raw_param.get("disable_azuremonitormetrics") or + self.context.get_disable_azure_monitor_metrics()) + opentelemetry = (self.context.raw_param.get("enable_opentelemetry_metrics") or + self.context.raw_param.get("enable_opentelemetry_logs") or + self.context.raw_param.get("disable_opentelemetry_metrics") or + self.context.raw_param.get("disable_opentelemetry_logs") or + self.context.get_opentelemetry_metrics_port() or + self.context.get_opentelemetry_logs_port()) + if azure_monitor_metrics or opentelemetry: ensure_azure_monitor_profile_prerequisites( self.cmd, self.context.get_subscription_id(), @@ -5965,6 +6584,228 @@ def 
update_upstream_kubescheduler_user_configuration(self, mc: ManagedCluster) - return mc + def _ensure_azure_monitor_profile(self, mc: ManagedCluster) -> None: + """Ensure azure monitor profile exists on the managed cluster.""" + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() + + def _setup_azure_monitor_logs(self, mc: ManagedCluster) -> None: + """Set up Azure Monitor logs configuration.""" + + addon_consts = self.context.get_addon_consts() + if mc.addon_profiles is None: + mc.addon_profiles = {} + + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + + # Detect existing key (could be "omsagent" or "omsAgent" from Azure API) + existing_key = None + if CONST_MONITORING_ADDON_NAME in mc.addon_profiles: + existing_key = CONST_MONITORING_ADDON_NAME + elif CONST_MONITORING_ADDON_NAME_CAMELCASE in mc.addon_profiles: + existing_key = CONST_MONITORING_ADDON_NAME_CAMELCASE + + if existing_key: + addon_profile = mc.addon_profiles[existing_key] + else: + addon_profile = self.models.ManagedClusterAddonProfile(enabled=False) + existing_key = CONST_MONITORING_ADDON_NAME + + addon_profile.enabled = True + + # Get or create workspace resource ID + workspace_resource_id = self.context.raw_param.get("workspace_resource_id") + if not workspace_resource_id: + ensure_workspace_func = ( + self.context.external_functions.ensure_default_log_analytics_workspace_for_monitoring) + workspace_resource_id = ensure_workspace_func( + self.cmd, + self.context.get_subscription_id(), + self.context.get_resource_group_name() + ) + + # Sanitize and configure + sanitize_func = self.context.external_functions.sanitize_loganalytics_ws_resource_id + workspace_resource_id = sanitize_func(workspace_resource_id) + + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get( + "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID") + CONST_MONITORING_USING_AAD_MSI_AUTH = 
addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + + enable_msi_auth_bool = self.context.get_enable_msi_auth_for_monitoring() + if enable_msi_auth_bool: + enable_msi_auth = "true" + else: + enable_msi_auth = "false" + + new_config = { + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id, + CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth + } + + # Replace the entire config, not just individual keys + addon_profile.config = new_config + + mc.addon_profiles[existing_key] = addon_profile + self.context.set_intermediate("monitoring_addon_enabled", True, overwrite_exists=True) + # Call ensure_container_insights_for_monitoring with all parameters (similar to postprocessing) + if (mc.addon_profiles and + existing_key in mc.addon_profiles and + mc.addon_profiles[existing_key].enabled): + + # Set intermediate value to trigger postprocessing + self.context.set_intermediate("monitoring_addon_postprocessing_required", True, overwrite_exists=True) + + def _disable_azure_monitor_logs(self, mc: ManagedCluster) -> None: + """Disable Azure Monitor logs configuration.""" + addon_consts = self.context.get_addon_consts() + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + + # Check if the addon profile exists (check both lowercase and camelCase) + addon_key = None + if mc.addon_profiles: + if CONST_MONITORING_ADDON_NAME in mc.addon_profiles: + addon_key = CONST_MONITORING_ADDON_NAME + elif CONST_MONITORING_ADDON_NAME_CAMELCASE in mc.addon_profiles: + addon_key = CONST_MONITORING_ADDON_NAME_CAMELCASE + + # If the addon profile doesn't exist at all, there's nothing to disable + if not addon_key: + return + + # Check if Azure Monitor logs (monitoring addon) is currently enabled + azure_monitor_logs_enabled = mc.addon_profiles[addon_key].enabled + + # Check if OpenTelemetry logs are enabled and prompt for confirmation + 
opentelemetry_logs_enabled = ( + mc.azure_monitor_profile and + mc.azure_monitor_profile.app_monitoring and + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs and + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled + ) + + if opentelemetry_logs_enabled and not self.context.get_yes(): + msg = ( + "Disabling Azure Monitor logs will also disable OpenTelemetry logs. " + "Do you want to continue?" + ) + if not prompt_y_n(msg, default="n"): + raise CLIError("Operation cancelled.") + + # Check if MSI auth is enabled - if so, cleanup DCR/DCRA BEFORE disabling (same as aks_disable_addons) + addon_config = mc.addon_profiles[addon_key].config + has_msi_auth_key = addon_config and CONST_MONITORING_USING_AAD_MSI_AUTH in addon_config + msi_auth_enabled = (addon_config and has_msi_auth_key and + str(addon_config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == "true") + + # Perform DCR/DCRA cleanup BEFORE disabling (same as aks_disable_addons lines 2796-2822) + if azure_monitor_logs_enabled and msi_auth_enabled: + # Fetch the current cluster state from Azure (same as aks_disable_addons line 2791) + current_cluster = self.client.get(self.context.get_resource_group_name(), self.context.get_name()) + + # Find the addon key in current_cluster (it may have different casing) + current_addon_key = None + if current_cluster.addon_profiles: + if CONST_MONITORING_ADDON_NAME in current_cluster.addon_profiles: + current_addon_key = CONST_MONITORING_ADDON_NAME + elif CONST_MONITORING_ADDON_NAME_CAMELCASE in current_cluster.addon_profiles: + current_addon_key = CONST_MONITORING_ADDON_NAME_CAMELCASE + + if current_addon_key: + try: + # Use the current cluster's addon profile for cleanup (not the modified mc object) + self.context.external_functions.ensure_container_insights_for_monitoring( + self.cmd, + current_cluster.addon_profiles[current_addon_key], + self.context.get_subscription_id(), + self.context.get_resource_group_name(), + self.context.get_name(), + 
current_cluster.location, + remove_monitoring=True, + aad_route=True, + create_dcr=False, + create_dcra=True, + enable_syslog=False, + data_collection_settings=None, + ampls_resource_id=None, + enable_high_log_scale_mode=False + ) + except TypeError: + # Ignore TypeError just like aks_disable_addons does (line 2823) + pass + + # Now disable the addon and clear configuration + mc.addon_profiles[addon_key].enabled = False + + # Clear the config to remove old workspace resource ID and other settings + mc.addon_profiles[addon_key].config = None + + # Also disable OpenTelemetry logs when disabling Azure Monitor logs + if opentelemetry_logs_enabled: + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled = False + # Clear the port when disabling OpenTelemetry logs + mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port = None + + def _disable_azure_monitor_metrics(self, mc: ManagedCluster) -> None: + """Disable Azure Monitor metrics configuration.""" + # Check if Azure Monitor metrics are currently enabled + azure_monitor_metrics_enabled = ( + mc.azure_monitor_profile and + mc.azure_monitor_profile.metrics and + mc.azure_monitor_profile.metrics.enabled + ) + + # If Azure Monitor metrics are not enabled, there's nothing to disable + if not azure_monitor_metrics_enabled: + return + + # Check if OpenTelemetry metrics are enabled and prompt for confirmation + opentelemetry_metrics_enabled = ( + mc.azure_monitor_profile and + mc.azure_monitor_profile.app_monitoring and + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics and + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled + ) + + if opentelemetry_metrics_enabled and not self.context.get_yes(): + msg = ( + "Disabling Azure Monitor metrics will also disable OpenTelemetry metrics. " + "Do you want to continue?" 
+ ) + if not prompt_y_n(msg, default="n"): + raise CLIError("Operation cancelled.") + + # Disable Azure Monitor metrics + if mc.azure_monitor_profile is None: + mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile() # pylint: disable=no-member + mc.azure_monitor_profile.metrics = ( + self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False) # pylint: disable=no-member + ) + + # Also disable OpenTelemetry metrics when disabling Azure Monitor metrics + if opentelemetry_metrics_enabled: + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled = False + # Clear the port when disabling OpenTelemetry metrics + mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port = None + + def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster: + """Update addon profiles for the ManagedCluster object. + + :return: the ManagedCluster object + """ + self._ensure_mc(mc) + + # Handle enable Azure Monitor logs + if self.context.get_enable_azure_monitor_logs(): + self._setup_azure_monitor_logs(mc) + + # Handle disable Azure Monitor logs + if self.context.get_disable_azure_monitor_logs(): + self._disable_azure_monitor_logs(mc) + + return mc + def update_mc_profile_preview(self) -> ManagedCluster: """The overall controller used to update the preview ManagedCluster profile. 
@@ -5988,6 +6829,8 @@ def update_mc_profile_preview(self) -> ManagedCluster: mc = self.update_kms_infrastructure_encryption(mc) # update workload auto scaler profile mc = self.update_workload_auto_scaler_profile(mc) + # Note: update_addon_profiles is already called by base class (update_mc_profile_default) + # so we don't call it again here to avoid duplicate processing # update azure monitor metrics profile mc = self.update_azure_monitor_profile(mc) # update vpa @@ -6068,8 +6911,13 @@ def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: enable_azure_keyvault_secrets_provider_addon = self.context.get_enable_kv() or ( mc.addon_profiles and mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME) and mc.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled) + monitoring_addon_postprocessing_required = self.context.get_intermediate( + "monitoring_addon_postprocessing_required", default_value=False + ) + # Note: monitoring_addon_disable_postprocessing_required is no longer used - cleanup is done upfront if (enable_azure_container_storage or disable_azure_container_storage) or \ - (keyvault_id and enable_azure_keyvault_secrets_provider_addon): + (keyvault_id and enable_azure_keyvault_secrets_provider_addon) or \ + (monitoring_addon_postprocessing_required): return True return postprocessing_required @@ -6081,6 +6929,57 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: :return: None """ super().postprocessing_after_mc_created(cluster) + + # Handle monitoring addon postprocessing (enable case) + monitoring_addon_postprocessing_required = self.context.get_intermediate( + "monitoring_addon_postprocessing_required", default_value=False + ) + if monitoring_addon_postprocessing_required: + addon_consts = self.context.get_addon_consts() + CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME") + CONST_MONITORING_USING_AAD_MSI_AUTH = 
addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH") + + if (cluster.addon_profiles and + CONST_MONITORING_ADDON_NAME in cluster.addon_profiles and + cluster.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled): + + # Check if MSI auth is enabled + if (CONST_MONITORING_USING_AAD_MSI_AUTH in + cluster.addon_profiles[CONST_MONITORING_ADDON_NAME].config and + str(cluster.addon_profiles[CONST_MONITORING_ADDON_NAME].config[ + CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == "true"): + + # Check parameter sizes to identify what might be causing large headers + data_collection_settings = self.context.get_data_collection_settings() + + # Try to limit data_collection_settings size to avoid "Request Header Fields Too Large" error + safe_data_collection_settings = None + if data_collection_settings and len(str(data_collection_settings)) > 10000: + safe_data_collection_settings = None + else: + safe_data_collection_settings = data_collection_settings + + self.context.external_functions.ensure_container_insights_for_monitoring( + self.cmd, + cluster.addon_profiles[CONST_MONITORING_ADDON_NAME], + self.context.get_subscription_id(), + self.context.get_resource_group_name(), + self.context.get_name(), + self.context.get_location(), + remove_monitoring=False, + aad_route=self.context.get_enable_msi_auth_for_monitoring(), + create_dcr=True, + create_dcra=True, + enable_syslog=self.context.get_enable_syslog(), + data_collection_settings=safe_data_collection_settings, + is_private_cluster=self.context.get_enable_private_cluster(), + ampls_resource_id=self.context.get_ampls_resource_id(), + enable_high_log_scale_mode=self.context.get_enable_high_log_scale_mode(), + ) + + # Monitoring addon disable cleanup is now done upfront in _disable_azure_monitor_logs (not in postprocessing) + # This matches the pattern from aks_disable_addons lines 2796-2822 where cleanup happens BEFORE the PUT + enable_azure_container_storage = self.context.get_intermediate("enable_azure_container_storage") 
disable_azure_container_storage = self.context.get_intermediate("disable_azure_container_storage") container_storage_version = self.context.get_intermediate("container_storage_version") @@ -6268,4 +7167,5 @@ def put_mc(self, mc: ManagedCluster) -> ManagedCluster: if_none_match=self.context.get_if_none_match(), headers=self.context.get_aks_custom_headers(), ) + return cluster diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py index 2257181d1a5..02d5b81d713 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py @@ -7602,6 +7602,10 @@ def enable_monitoring_existing_cluster_aad_atuh( # make sure monitoring can be smoothly disabled self.cmd(f"aks disable-addons -a monitoring -g={resource_group} -n={aks_name}") + # Wait for the disable operation to complete + wait_cmd = f'aks wait --resource-group={resource_group} --name={aks_name} --updated --timeout=1800' + self.cmd(wait_cmd) + # delete self.cmd( f"aks delete -g {resource_group} -n {aks_name} --yes --no-wait", @@ -13296,19 +13300,15 @@ def test_aks_create_with_azuremonitormetrics( ], ) - # azuremonitor metrics will be set to false after initial creation command as its in the - # postprocessing step that we do an update to enable it. Adding a wait for the second put request - # in addonput.py which enables the Azure Monitor Metrics addon as all the DC* resources - # have now been created. 
wait_cmd = " ".join( [ "aks", "wait", "--resource-group={resource_group}", "--name={name}", - "--updated", + "--created", "--interval 60", - "--timeout 300", + "--timeout 1800", ] ) self.cmd( @@ -13318,6 +13318,8 @@ def test_aks_create_with_azuremonitormetrics( ], ) + time.sleep(5 * 60) + self.cmd( "aks show -g {resource_group} -n {name} --output=json", checks=[ @@ -13373,8 +13375,6 @@ def test_aks_create_with_azuremonitorappmonitoring( checks=[ self.check("provisioningState", "Succeeded"), self.check("azureMonitorProfile.appMonitoring.autoInstrumentation.enabled", True), - self.check("azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled", True), - self.check("azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled", True) ], ) @@ -13681,7 +13681,7 @@ def test_aks_update_with_azurecontainerstorage(self, resource_group, resource_gr '--disable-azure-container-storage' self.cmd(update_cmd, checks=[ self.check('provisioningState', 'Succeeded'), - ]) + ]) # Verify that the azure-container-storage extension doesn't exist anymore extension_list_cmd = "k8s-extension list --resource-group={resource_group} --cluster-name={name} --cluster-type managedClusters" @@ -13741,10 +13741,23 @@ def test_aks_update_with_azuremonitormetrics( update_cmd, checks=[ self.check("provisioningState", "Succeeded"), - self.check("azureMonitorProfile.metrics.enabled", True), ], ) + time.sleep(5 * 60) + + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + # Verify the creation was successful + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + self.cmd(show_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("azureMonitorProfile.metrics.enabled", True), + ]) + # update: disable-azure-monitor-metrics update_cmd = ( "aks update --resource-group={resource_group} --name={name} --yes --output=json " @@ -13754,10 +13767,21 @@ def 
test_aks_update_with_azuremonitormetrics( update_cmd, checks=[ self.check("provisioningState", "Succeeded"), - self.check("azureMonitorProfile.metrics.enabled", False), ], ) + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + # Verify the creation was successful + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + self.cmd(show_cmd, checks=[ + self.check("provisioningState", "Succeeded"), + self.check("azureMonitorProfile.metrics.enabled", False), + ]) + # delete cmd = ( "aks delete --resource-group={resource_group} --name={name} --yes --no-wait" @@ -13799,9 +13823,7 @@ def test_aks_update_with_azuremonitorappmonitoring(self, resource_group, resourc ) self.cmd(update_cmd, checks=[ self.check('provisioningState', 'Succeeded'), - self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), - self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', True), - self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True) + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True) ]) # update: disable-azure-monitor-app-monitoring @@ -13812,11 +13834,534 @@ def test_aks_update_with_azuremonitorappmonitoring(self, resource_group, resourc self.cmd(update_cmd, checks=[ self.check('provisioningState', 'Succeeded'), - self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', False), - self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', False), - self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', False) + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', False) + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + 
@AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_create_with_azuremonitorlogs(self, resource_group, resource_group_location): + # reset the count so in replay mode the random names will start with 0 + self.test_resources_count = 0 + # kwargs for string formatting + aks_name = self.create_random_name('cliakstest', 16) + + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'resource_type': 'Microsoft.ContainerService/ManagedClusters', + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + create_cmd = ( + 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} ' + '--enable-managed-identity --enable-azure-monitor-logs --output=json' + ) + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('addonProfiles.omsagent.enabled', True), + self.exists('addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID'), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_update_with_azuremonitorlogs(self, resource_group, resource_group_location): + aks_name = self.create_random_name('cliakstest', 16) + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + # create: without enable-azure-monitor-logs + create_cmd = ( + 'aks create --resource-group={resource_group} 
--name={name} --location={location} --ssh-key-value={ssh_key_value} ' + '--node-vm-size={node_vm_size} --enable-managed-identity --output=json') + self.cmd(create_cmd) + + # Wait for the create operation to complete + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --created --timeout=1800' + self.cmd(wait_cmd) + + # Verify the creation was successful + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.not_exists('addonProfiles.omsagent'), + ]) + + # update: enable-azure-monitor-logs + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes ' + '--enable-azure-monitor-logs' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # Wait for the update operation to complete + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + max_retries = 30 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 1}/{max_retries} waiting for provisioning, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + + # Verify the update was successful + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('addonProfiles.omsagent.enabled', True), + self.exists('addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID'), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} 
--name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_create_with_azuremonitorlogs_and_opentelemetry(self, resource_group, resource_group_location): + # reset the count so in replay mode the random names will start with 0 + self.test_resources_count = 0 + # kwargs for string formatting + aks_name = self.create_random_name('cliakstest', 16) + + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'resource_type': 'Microsoft.ContainerService/ManagedClusters', + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + create_cmd = ( + 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} ' + '--enable-managed-identity --enable-azure-monitor-logs --enable-opentelemetry-logs --opentelemetry-logs-port=8080 ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview --output=json' + ) + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('addonProfiles.omsagent.enabled', True), + self.exists('addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID'), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.port', 8080), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def 
test_aks_update_with_azuremonitorlogs_and_opentelemetry(self, resource_group, resource_group_location): + aks_name = self.create_random_name('cliakstest', 16) + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + # create: without enable-azure-monitor-logs + create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} --enable-managed-identity --output=json' + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.not_exists('addonProfiles.omsagent'), + ]) + + # update: enable-azure-monitor-logs with OpenTelemetry logs + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--enable-azure-monitor-logs --enable-opentelemetry-logs --opentelemetry-logs-port=9090 ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('addonProfiles.omsagent.enabled', True), + self.exists('addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID'), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.port', 9090), + ]) + + # update: disable OpenTelemetry logs but keep Azure Monitor logs + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-opentelemetry-logs' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('addonProfiles.omsagent.enabled', True), # Still enabled + 
self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', False), + ]) + + # update: disable-azure-monitor-logs (should also disable OpenTelemetry logs) + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-azure-monitor-logs' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('addonProfiles.omsagent.enabled', False), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_create_with_azuremonitormetrics_v2(self, resource_group, resource_group_location): + # reset the count so in replay mode the random names will start with 0 + self.test_resources_count = 0 + # kwargs for string formatting + aks_name = self.create_random_name('cliakstest', 16) + + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'resource_type': 'Microsoft.ContainerService/ManagedClusters', + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + create_cmd = ( + 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} ' + '--enable-managed-identity --enable-azure-monitor-metrics --output=json' + ) + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', True), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--created', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + # 
Retry up to 10 times with 60-second intervals to allow provisioning to complete + show_cmd = 'aks show -g {resource_group} -n {name} --output=json' + max_retries = 10 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', True), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 1}/{max_retries} failed, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_update_with_azuremonitormetrics_v2(self, resource_group, resource_group_location): + aks_name = self.create_random_name('cliakstest', 16) + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + # create: without enable-azure-monitor-metrics + create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} --enable-managed-identity --output=json' + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.not_exists('azureMonitorProfile.metrics'), + ]) + + # update: enable-azure-monitor-metrics + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--enable-azure-monitor-metrics' + ) + self.cmd(update_cmd) + + # Wait for enable operation to complete before attempting disable + show_cmd = 'aks show --resource-group={resource_group} --name={name} 
--output=json' + max_retries = 30 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', True), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 1}/{max_retries} waiting for enable to complete, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + + # update: disable-azure-monitor-metrics + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-azure-monitor-metrics' + ) + self.cmd(update_cmd) + + # Wait for disable operation to complete + max_retries = 30 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', False), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 1}/{max_retries} waiting for disable to complete, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_create_with_azuremonitormetrics_and_opentelemetry(self, resource_group, resource_group_location): + # reset the count so in replay mode the random names will start with 0 + self.test_resources_count = 0 + # kwargs for string formatting + aks_name = self.create_random_name('cliakstest', 16) + + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'resource_type': 'Microsoft.ContainerService/ManagedClusters', 
+ 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + create_cmd = ( + 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} ' + '--enable-managed-identity --enable-azure-monitor-metrics --enable-opentelemetry-metrics --opentelemetry-metrics-port=8080 ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview --output=json' + ) + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # azuremonitor metrics will be set to false after initial creation command as its in the + # postprocessing step that we do an update to enable it. Adding a wait for the second put request + # in addonput.py which enables the Azure Monitor Metrics addon as all the DC* resources + # have now been created. + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.port', 8080), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_update_with_azuremonitormetrics_and_opentelemetry(self, resource_group, resource_group_location): + aks_name = self.create_random_name('cliakstest', 16) + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': 
resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + # create: without enable-azure-monitor-metrics + create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} --enable-managed-identity --output=json' + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.not_exists('azureMonitorProfile.metrics'), + ]) + + # update: enable-azure-monitor-metrics with OpenTelemetry metrics + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes ' + '--enable-azure-monitor-metrics --enable-opentelemetry-metrics --opentelemetry-metrics-port=9090 ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview ' + '--output=json' + ) + + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # Wait for the update operation to complete + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd) + + # Verify the update was successful + # Retry up to 10 times with 60-second intervals to allow provisioning to complete + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + max_retries = 10 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 1}/{max_retries} failed, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + + # update: disable OpenTelemetry metrics but keep Azure Monitor metrics + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes ' + '--disable-opentelemetry-metrics --output=json ' + ) + 
self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), ]) + # Wait for the update operation to complete + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd) + + # Verify the update was successful + # Retry up to 10 times with 60-second intervals to allow provisioning to complete + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + max_retries = 10 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', False), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 1}/{max_retries} failed, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + + # update: disable-azure-monitor-metrics (should also disable OpenTelemetry metrics) + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes ' + '--disable-azure-monitor-metrics --output=json' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # Wait for the update operation to complete + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd) + + # Verify the update was successful + # Retry up to 10 times with 60-second intervals to allow provisioning to complete + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + max_retries = 10 + for attempt in range(max_retries): + try: + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + self.check('azureMonitorProfile.metrics.enabled', False), + ]) + break # Success, exit loop + except Exception as e: + if attempt < max_retries - 1: + print(f"Attempt {attempt + 
1}/{max_retries} failed, retrying in 60 seconds...") + time.sleep(60) + else: + raise # Re-raise on final attempt + # delete cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' self.cmd(cmd, checks=[ @@ -13861,7 +14406,7 @@ def test_aks_update_with_azurecontainerstorage_v1(self, resource_group, resource # some time to post process. time.sleep(10 * 60) - # update: disable-azure-container-storage + # update: disable-azure-container-storage update_cmd = 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' \ '--disable-azure-container-storage all' self.cmd(update_cmd, checks=[ @@ -13909,18 +14454,306 @@ def test_aks_update_with_azurecontainerstorage_v1_with_ephemeral_disk_parameters self.cmd(update_cmd, checks=[ self.check('provisioningState', 'Succeeded'), ]) - # Sleep for 10 mins before next operation, - # since azure container storage operations take - # some time to post process. - time.sleep(10 * 60) - + # update: disable-azure-container-storage - update_cmd = 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' \ + update_cmd = 'aks update --resource-group={resource_group} --name={name} --yes --no-wait ' \ '--disable-azure-container-storage all' + self.cmd(update_cmd) + + # Wait for the update operation to complete + wait_cmd = 'aks wait --resource-group={resource_group} --name={name} --updated --timeout=1800' + self.cmd(wait_cmd) + + # Verify the update was successful + show_cmd = 'aks show --resource-group={resource_group} --name={name} --output=json' + self.cmd(show_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + + @live_only() + @AllowLargeResponse() + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_comprehensive_monitoring_integration(self, 
resource_group, resource_group_location): + """ + Comprehensive test for all monitoring features: Azure Monitor logs, metrics, app monitoring, and OpenTelemetry integration. + Tests create and update scenarios with all monitoring features enabled and disabled. + """ + # reset the count so in replay mode the random names will start with 0 + self.test_resources_count = 0 + # kwargs for string formatting + aks_name = self.create_random_name('cliakstest', 16) + + node_vm_size = 'standard_d2s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'resource_type': 'Microsoft.ContainerService/ManagedClusters', + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size, + }) + + # Phase 1: Create cluster with all monitoring features enabled + create_cmd = ( + 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} ' + '--enable-managed-identity --enable-azure-monitor-logs --enable-azure-monitor-metrics --enable-azure-monitor-app-monitoring ' + '--enable-opentelemetry-logs --opentelemetry-logs-port=8080 ' + '--enable-opentelemetry-metrics --opentelemetry-metrics-port=8081 ' + '--enable-windows-recording-rules ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview --output=json' + ) + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # azuremonitor metrics will be set to false after initial creation command as its in the + # postprocessing step that we do an update to enable it. Adding a wait for the second put request + # in addonput.py which enables the Azure Monitor Metrics addon as all the DC* resources + # have now been created. 
+ wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # Azure Monitor logs checks + self.check('addonProfiles.omsagent.enabled', True), + self.exists('addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID'), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + # Azure Monitor metrics checks + self.check('azureMonitorProfile.metrics.enabled', True), + # Azure Monitor app monitoring checks + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), + # OpenTelemetry logs checks + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.port', 8080), + # OpenTelemetry metrics checks + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.port', 8081), + ]) + + # Phase 2: Update - disable only OpenTelemetry logs (keep everything else) + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-opentelemetry-logs' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # Azure Monitor logs should still be enabled + self.check('addonProfiles.omsagent.enabled', True), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + # Azure Monitor 
metrics should still be enabled + self.check('azureMonitorProfile.metrics.enabled', True), + # Azure Monitor app monitoring should still be enabled + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), + # OpenTelemetry logs should be disabled + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', False), + # OpenTelemetry metrics should still be enabled + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', True), + ]) + + # Phase 3: Update - disable only OpenTelemetry metrics (keep Azure Monitor features) + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-opentelemetry-metrics' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # Azure Monitor logs should still be enabled + self.check('addonProfiles.omsagent.enabled', True), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + # Azure Monitor metrics should still be enabled + self.check('azureMonitorProfile.metrics.enabled', True), + # Azure Monitor app monitoring should still be enabled + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), + # OpenTelemetry metrics should be disabled + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', False), + ]) + + # Phase 4: Update - re-enable all OpenTelemetry features with different ports + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--enable-opentelemetry-logs --opentelemetry-logs-port=9090 ' + '--enable-opentelemetry-metrics 
--opentelemetry-metrics-port=9091 ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # Azure Monitor features should still be enabled + self.check('addonProfiles.omsagent.enabled', True), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + self.check('azureMonitorProfile.metrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), + # OpenTelemetry features should be re-enabled with new ports + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.port', 9090), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.port', 9091), + ]) + + # Phase 5: Update - disable Azure Monitor metrics (should also disable OpenTelemetry metrics) + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-azure-monitor-metrics' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # Azure Monitor logs should still be enabled + 
self.check('addonProfiles.omsagent.enabled', True), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + # Azure Monitor metrics should be disabled + self.check('azureMonitorProfile.metrics.enabled', False), + # Azure Monitor app monitoring should still be enabled + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), + # OpenTelemetry logs should still be enabled (independent of metrics) + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True), + ]) + + # Phase 6: Update - disable Azure Monitor logs (should also disable OpenTelemetry logs) + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-azure-monitor-logs' + ) + self.cmd(update_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # Azure Monitor logs should be disabled + self.check('addonProfiles.omsagent.enabled', False), + # Azure Monitor metrics should still be disabled + self.check('azureMonitorProfile.metrics.enabled', False), + ]) + + # Phase 7: Update - re-enable all monitoring features at once + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--enable-azure-monitor-logs --enable-azure-monitor-metrics --enable-azure-monitor-app-monitoring ' + '--enable-opentelemetry-logs --opentelemetry-logs-port=7070 ' + '--enable-opentelemetry-metrics --opentelemetry-metrics-port=7071 ' + '--enable-windows-recording-rules ' + '--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureMonitorAppMonitoringPreview' + ) + self.cmd(update_cmd, checks=[ + 
self.check('provisioningState', 'Succeeded'), + ]) + + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # All Azure Monitor features should be enabled + self.check('addonProfiles.omsagent.enabled', True), + self.exists('addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID'), + self.check('addonProfiles.omsagent.config.useAADAuth', 'true'), + self.check('azureMonitorProfile.metrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', True), + # All OpenTelemetry features should be enabled with new ports + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryLogs.port', 7070), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.enabled', True), + self.check('azureMonitorProfile.appMonitoring.openTelemetryMetrics.port', 7071), + ]) + + # Phase 8: Final cleanup - disable all monitoring features + update_cmd = ( + 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' + '--disable-azure-monitor-logs --disable-azure-monitor-metrics --disable-azure-monitor-app-monitoring ' + '--disable-opentelemetry-logs --disable-opentelemetry-metrics' + ) self.cmd(update_cmd, checks=[ self.check('provisioningState', 'Succeeded'), ]) + wait_cmd = ' '.join([ + 'aks', 'wait', '--resource-group={resource_group}', '--name={name}', '--updated', + '--interval 60', '--timeout 1800', + ]) + self.cmd(wait_cmd, checks=[ + self.is_empty(), + ]) + + self.cmd('aks show -g {resource_group} -n {name} --output=json', checks=[ + self.check('provisioningState', 'Succeeded'), + # All monitoring features should be disabled + 
self.check('addonProfiles.omsagent.enabled', False), + self.check('azureMonitorProfile.metrics.enabled', False), + self.check('azureMonitorProfile.appMonitoring.autoInstrumentation.enabled', False), + ]) + # delete cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' self.cmd(cmd, checks=[ diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py index dafde8fdec3..c6f073e5aa9 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_managed_cluster_decorator.py @@ -77,7 +77,7 @@ from azure.cli.command_modules.acs.managed_cluster_decorator import ( AKSManagedClusterParamDict, ) -from azure.cli.command_modules.acs.tests.latest.mocks import ( +from azext_aks_preview.tests.latest.mocks import ( MockCLI, MockClient, MockCmd, @@ -4624,10 +4624,8 @@ def test_set_up_addon_profiles(self): dec_2.context.set_intermediate("subscription_id", "test_subscription_id") mc_2 = self.models.ManagedCluster(location="test_location") dec_2.context.attach_mc(mc_2) - with patch( - "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): + external_functions = dec_2.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): dec_mc_2 = dec_2.set_up_addon_profiles(mc_2) addon_profiles_2 = { @@ -4679,18 +4677,16 @@ def test_set_up_addon_profiles(self): mc_3 = self.models.ManagedCluster(location="test_location") dec_3.context.attach_mc(mc_3) dec_mc_sku_3 = dec_3.set_up_sku(mc_3) - with patch( - "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring", - return_value=None), patch( - 
"azure.cli.command_modules.acs.managed_cluster_decorator.ensure_default_log_analytics_workspace_for_monitoring", - return_value = "test_workspace_resource_id", + external_functions = dec_3.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None), patch.object(external_functions, 'ensure_default_log_analytics_workspace_for_monitoring', + return_value = "/subscriptions/test_subscription_id/resourceGroups/test_rg_name/providers/Microsoft.OperationalInsights/workspaces/test_workspace_resource_id", ): dec_mc_3 = dec_3.set_up_addon_profiles(dec_mc_sku_3) addon_profiles_3 = { CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( enabled=True, config={ - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id", + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/subscriptions/test_subscription_id/resourceGroups/test_rg_name/providers/Microsoft.OperationalInsights/workspaces/test_workspace_resource_id", CONST_MONITORING_USING_AAD_MSI_AUTH: "true", }, ), @@ -4881,7 +4877,7 @@ def test_set_up_azure_monitor_profile(self): dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_1_mc_sku) azure_monitor_profiles_1 = self.models.ManagedClusterAzureMonitorProfile( metrics = self.models.ManagedClusterAzureMonitorProfileMetrics( - enabled = False, + enabled = True, kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( metric_labels_allowlist = '', metric_annotations_allow_list = '', @@ -5849,2124 +5845,3343 @@ def test_set_up_node_provisioning_profile(self): ) self.assertEqual(dec_mc_2, ground_truth_mc_2) + def test_get_enable_opentelemetry_metrics(self): + # default + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_opentelemetry_metrics": None}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_1.get_enable_opentelemetry_metrics(), False) -class 
AKSPreviewManagedClusterUpdateDecoratorTestCase(unittest.TestCase): - def setUp(self): - # manually register CUSTOM_MGMT_AKS_PREVIEW - register_aks_preview_resource_type() - self.cli_ctx = MockCLI() - self.cmd = MockCmd(self.cli_ctx) - self.models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) - self.client = MockClient() + # custom value + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_opentelemetry_metrics": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_2.get_enable_opentelemetry_metrics(), True) - def test_check_raw_parameters(self): - # default value in `aks_create` - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + def test_get_disable_opentelemetry_metrics(self): + # default + ctx_1 = AKSPreviewManagedClusterContext( self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, + AKSManagedClusterParamDict({"disable_opentelemetry_metrics": None}), + self.models, + decorator_mode=DecoratorMode.CREATE, ) - # fail on no updated parameter provided - with patch( - "azext_aks_preview.managed_cluster_decorator.prompt_y_n", - return_value=False, - ), self.assertRaises(RequiredArgumentMissingError): - dec_1.check_raw_parameters() + self.assertEqual(ctx_1.get_disable_opentelemetry_metrics(), False) - # unless user says they want to reconcile - with patch( - "azext_aks_preview.managed_cluster_decorator.prompt_y_n", - return_value=True, - ): - dec_1.check_raw_parameters() + # custom value + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"disable_opentelemetry_metrics": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_2.get_disable_opentelemetry_metrics(), True) + + def test_get_opentelemetry_metrics_port(self): + # default + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"opentelemetry_metrics_port": None}), + self.models, + 
decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_1.get_opentelemetry_metrics_port(), None) # custom value - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + ctx_2 = AKSPreviewManagedClusterContext( self.cmd, - self.client, - { - "cluster_autoscaler_profile": {}, - "api_server_authorized_ip_ranges": "", - }, - CUSTOM_MGMT_AKS_PREVIEW, + AKSManagedClusterParamDict({"opentelemetry_metrics_port": 8080, "enable_opentelemetry_metrics": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, ) - self.assertIsNone(dec_2.check_raw_parameters()) + self.assertEqual(ctx_2.get_opentelemetry_metrics_port(), 8080) + + def test_get_enable_opentelemetry_logs(self): + # default + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"enable_opentelemetry_logs": None}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_1.get_enable_opentelemetry_logs(), False) # custom value - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + ctx_2 = AKSPreviewManagedClusterContext( self.cmd, - self.client, - { - "enable_workload_identity": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, + AKSManagedClusterParamDict({"enable_opentelemetry_logs": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, ) - with patch( - "azext_aks_preview.managed_cluster_decorator.prompt_y_n", - return_value=True, - ): - self.assertIsNone(dec_3.check_raw_parameters()) + self.assertEqual(ctx_2.get_enable_opentelemetry_logs(), True) - def test_update_load_balancer_profile(self): - # default value in `aks_update` - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + def test_get_disable_opentelemetry_logs(self): + # default + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"disable_opentelemetry_logs": None}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_1.get_disable_opentelemetry_logs(), False) + + # custom value + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + 
AKSManagedClusterParamDict({"disable_opentelemetry_logs": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_2.get_disable_opentelemetry_logs(), True) + + def test_get_opentelemetry_logs_port(self): + # default + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"opentelemetry_logs_port": None}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_1.get_opentelemetry_logs_port(), None) + + # custom value + ctx_2 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict({"opentelemetry_logs_port": 8081, "enable_opentelemetry_logs": True}), + self.models, + decorator_mode=DecoratorMode.CREATE, + ) + self.assertEqual(ctx_2.get_opentelemetry_logs_port(), 8081) + + def test_set_up_azure_monitor_profile_with_opentelemetry(self): + # Test enabling Azure Monitor metrics with OpenTelemetry metrics + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_sku": None, - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - "load_balancer_outbound_ports": None, - "load_balancer_idle_timeout": None, + "enable_azure_monitor_metrics": True, + "enable_opentelemetry_metrics": True, + "opentelemetry_metrics_port": 8080, }, CUSTOM_MGMT_AKS_PREVIEW, ) + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile(), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) dec_1.context.attach_mc(mc_1) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_load_balancer_profile(None) - dec_mc_1 = dec_1.update_load_balancer_profile(mc_1) - + dec_mc_1 = dec_1.set_up_azure_monitor_profile(mc_1) + + # Expected ground truth object + ground_truth_opentelemetry_metrics = 
self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080, + ) + ground_truth_app_monitoring = self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_metrics=ground_truth_opentelemetry_metrics, + ) + ground_truth_kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( + metric_labels_allowlist="", + metric_annotations_allow_list="", + ) + ground_truth_metrics_profile = self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True, + kube_state_metrics=ground_truth_kube_state_metrics, + ) + ground_truth_azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile( + metrics=ground_truth_metrics_profile, + app_monitoring=ground_truth_app_monitoring, + ) ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile(), + azure_monitor_profile=ground_truth_azure_monitor_profile, + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) self.assertEqual(dec_mc_1, ground_truth_mc_1) - # custom value - outbound ip prefixes - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + def test_set_up_azure_monitor_profile_disable_opentelemetry(self): + # Test disabling OpenTelemetry metrics + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": "id3,id4", + "enable_azure_monitor_metrics": True, + "disable_opentelemetry_metrics": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster( + + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - 
outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( - public_ip_prefixes=[ - self.models.load_balancer_models.ResourceReference( - id="id1" - ), - self.models.load_balancer_models.ResourceReference( - id="id2" - ), - ] - ) - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_load_balancer_profile(mc_2) - - ground_truth_mc_2 = self.models.ManagedCluster( + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.set_up_azure_monitor_profile(mc_1) + + # Expected ground truth object + ground_truth_opentelemetry_metrics = self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=False, + ) + ground_truth_app_monitoring = self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_metrics=ground_truth_opentelemetry_metrics, + ) + ground_truth_kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( + metric_labels_allowlist="", + metric_annotations_allow_list="", + ) + ground_truth_metrics_profile = self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True, + kube_state_metrics=ground_truth_kube_state_metrics, + ) + ground_truth_azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile( + metrics=ground_truth_metrics_profile, + app_monitoring=ground_truth_app_monitoring, + ) + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( - public_ip_prefixes=[ - self.models.load_balancer_models.ResourceReference( - id="id3" - ), - self.models.load_balancer_models.ResourceReference( - id="id4" - ), - ] - ) - ) - ), + azure_monitor_profile=ground_truth_azure_monitor_profile, 
+ identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - # custom value - outbound ip - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + def test_azure_monitor_logs_containerinsights_enabled_simple(self): + # Test that container_insights.enabled=True when Azure Monitor logs are enabled + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": "id3,id4", - "load_balancer_outbound_ip_prefixes": None, + "enable_addons": "monitoring", # Pass as string + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs( - public_i_ps=[ - self.models.load_balancer_models.ResourceReference( - id="id1" - ), - self.models.load_balancer_models.ResourceReference( - id="id2" - ), - ] - ) - ) - ), - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_load_balancer_profile(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs( - public_i_ps=[ - self.models.load_balancer_models.ResourceReference( - id="id3" - ), - self.models.load_balancer_models.ResourceReference( - id="id4" - ), - ] - ) - ) - ), 
+ identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - # custom value - managed outbound ip, count only - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # First set up addon profiles - this enables the monitoring addon + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + # Then set up Azure Monitor profile + dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_mc_1) + + # Since container insights profiles were removed, just check that the monitoring addon was set up + self.assertIsNotNone(dec_mc_1.addon_profiles) + self.assertIn("omsagent", dec_mc_1.addon_profiles) + self.assertTrue(dec_mc_1.addon_profiles["omsagent"].enabled) + + def test_azure_monitor_logs_with_opentelemetry_logs_port_validation(self): + # Test that OpenTelemetry logs port is set correctly with Azure Monitor logs + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": 5, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, + "enable_addons": "monitoring", # Pass as string + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + "enable_opentelemetry_logs": True, + "opentelemetry_logs_port": 9090, }, CUSTOM_MGMT_AKS_PREVIEW, ) - - mc_4 = self.models.ManagedCluster( + + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - 
load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=10, count_ipv6=20 - ), - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_load_balancer_profile(mc_4) - - ground_truth_mc_4 = self.models.ManagedCluster( + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # First set up addon profiles - this enables the monitoring addon + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_mc_1) + + # Verify OpenTelemetry logs are configured correctly (since this includes OpenTelemetry logs) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 9090) + + def test_azure_monitor_logs_without_opentelemetry(self): + # Test that container_insights works without OpenTelemetry logs + dec_1 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_addons": "monitoring", # Pass as string + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + # No OpenTelemetry parameters + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_1 = self.models.ManagedCluster( location="test_location", - 
network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=5, count_ipv6=20 - ), - ) - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - - # custom value - managed outbound ip, count_ipv6 only - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # First set up addon profiles - this enables the monitoring addon + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_mc_1) + + # Since no OpenTelemetry features are enabled and container insights profiles were removed, + # just verify the monitoring addon was set up properly + self.assertIsNotNone(dec_mc_1.addon_profiles) + self.assertIn("omsagent", dec_mc_1.addon_profiles) + self.assertTrue(dec_mc_1.addon_profiles["omsagent"].enabled) + + def test_azure_monitor_logs_addon_profile_creation(self): + # Test that when monitoring addon is enabled via enable_addons (as would happen + # when enable_azure_monitor_logs=True), the correct addon profile is created + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": 5, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, + "enable_addons": "monitoring", # Pass as string, not list + "workspace_resource_id": 
"/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_5 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=10, count_ipv6=20 - ), - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_load_balancer_profile(mc_5) - - ground_truth_mc_5 = self.models.ManagedCluster( - location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=10, count_ipv6=5 - ), - ) - ) - ), + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Set up addon profiles (this is what would happen with enable_azure_monitor_logs=True) + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + # Verify that monitoring addon is enabled correctly + self.assertIsNotNone(dec_mc_1.addon_profiles) + self.assertIn("omsagent", dec_mc_1.addon_profiles) + self.assertTrue(dec_mc_1.addon_profiles["omsagent"].enabled) + self.assertIn("logAnalyticsWorkspaceResourceID", dec_mc_1.addon_profiles["omsagent"].config) + self.assertEqual( + dec_mc_1.addon_profiles["omsagent"].config["logAnalyticsWorkspaceResourceID"], + 
"/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) - # custom value - managed outbound ip - dec_6 = AKSPreviewManagedClusterUpdateDecorator( + def test_azure_monitor_logs_with_mixed_addons_decorator(self): + # Test Azure Monitor logs (via monitoring addon) with other addons enabled + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": 25, - "load_balancer_managed_outbound_ipv6_count": 5, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, + "enable_addons": "monitoring,azure-policy", # Pass as comma-separated string + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_6 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=10, count_ipv6=20 - ), - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), + ) + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Set up addon profiles + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + # Verify both addons are enabled + self.assertIsNotNone(dec_mc_1.addon_profiles) + self.assertIn("omsagent", dec_mc_1.addon_profiles) + self.assertIn("azurepolicy", dec_mc_1.addon_profiles) + 
self.assertTrue(dec_mc_1.addon_profiles["omsagent"].enabled) + self.assertTrue(dec_mc_1.addon_profiles["azurepolicy"].enabled) + + def test_azure_monitor_logs_with_opentelemetry_logs_decorator(self): + # Test Azure Monitor logs with OpenTelemetry logs integration at decorator level + dec_1 = AKSPreviewManagedClusterCreateDecorator( + self.cmd, + self.client, + { + "enable_addons": "monitoring", # Pass as string + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + "enable_opentelemetry_logs": True, + "opentelemetry_logs_port": 8080, + }, + CUSTOM_MGMT_AKS_PREVIEW, ) - dec_6.context.attach_mc(mc_6) - dec_mc_6 = dec_6.update_load_balancer_profile(mc_6) - ground_truth_mc_6 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=25, count_ipv6=5 - ), - ) - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_6, ground_truth_mc_6) - - # custom value - from managed outbound ip to outbound ip - dec_7 = AKSPreviewManagedClusterUpdateDecorator( + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # First set up addon profiles - this enables the monitoring addon + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + # Set up Azure Monitor profile with OpenTelemetry logs + dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_mc_1) + + # Verify Azure Monitor logs 
profile with OpenTelemetry is set up correctly + # Since this test includes OpenTelemetry logs, the azure_monitor_profile should be set + self.assertIsNotNone(dec_mc_1.azure_monitor_profile) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8080) + + def test_azure_monitor_logs_containerinsights_enabled(self): + # Test that --enable-azure-monitor-logs results in ManagedClusterAzureMonitorProfile + # with containerinsights.enabled=True + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": "id1,id2", - "load_balancer_outbound_ip_prefixes": None, + "enable_addons": "monitoring", # This is what enable_azure_monitor_logs does + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_7 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=3, count_ipv6=2 - ) - ) - ), - ) - dec_7.context.attach_mc(mc_7) - dec_mc_7 = dec_7.update_load_balancer_profile(mc_7) - ground_truth_mc_7 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs( 
- public_i_ps=[ - self.models.load_balancer_models.ResourceReference( - id="id1" - ), - self.models.load_balancer_models.ResourceReference( - id="id2" - ), - ] - ) - ) - ) - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_7, ground_truth_mc_7) + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # First set up addon profiles - this enables the monitoring addon + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + # Then set up Azure Monitor profile - this should create container insights profile + # when monitoring addon is enabled + dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_mc_1) + + # Verify the monitoring addon is enabled correctly + self.assertIsNotNone(dec_mc_1.addon_profiles) + self.assertIn("omsagent", dec_mc_1.addon_profiles) + self.assertTrue(dec_mc_1.addon_profiles["omsagent"].enabled) + + # Container insights profile setup was removed, so azure_monitor_profile may be None + # The monitoring addon itself should still be configured correctly in addon_profiles + # (This is the current expected behavior after container insights profile changes were removed) - # custom value - from outbound ip prefix to managed outbound ip - dec_8 = AKSPreviewManagedClusterUpdateDecorator( + def test_azure_monitor_logs_containerinsights_with_workspace_id(self): + # Test that containerinsights is enabled with correct workspace resource ID + workspace_id = "/subscriptions/12345/resourceGroups/rg-test/providers/Microsoft.OperationalInsights/workspaces/test-ws" + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, { - "load_balancer_managed_outbound_ip_count": 10, - "load_balancer_managed_outbound_ipv6_count": 5, - 
"load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, + "enable_addons": "monitoring", # This is what enable_azure_monitor_logs does + "workspace_resource_id": workspace_id, }, CUSTOM_MGMT_AKS_PREVIEW, ) - load_balancer_profile_8 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( - public_ip_prefixes=[ - self.models.load_balancer_models.ResourceReference( - id="test_public_ip_prefix" - ) - ] - ), - ) - network_profile_8 = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=load_balancer_profile_8 - ) - mc_8 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_8 - ) - dec_8.context.attach_mc(mc_8) - dec_mc_8 = dec_8.update_load_balancer_profile(mc_8) - - ground_truth_load_balancer_profile_8 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( - count=10, count_ipv6=5 - ), - ) - ground_truth_network_profile_8 = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=ground_truth_load_balancer_profile_8 - ) - ground_truth_mc_8 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=ground_truth_network_profile_8, + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), + ) + dec_1.context.attach_mc(mc_1) + + # Mock the subscription_id to avoid authentication requirement + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # First set up addon profiles - this enables the monitoring addon + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) + + # Then set up Azure Monitor profile + 
dec_mc_1 = dec_1.set_up_azure_monitor_profile(dec_mc_1) + + # Since container insights profiles were removed, verify the addon profile has the correct workspace + self.assertIn("omsagent", dec_mc_1.addon_profiles) + self.assertTrue(dec_mc_1.addon_profiles["omsagent"].enabled) + self.assertEqual( + dec_mc_1.addon_profiles["omsagent"].config["logAnalyticsWorkspaceResourceID"], + workspace_id ) - self.assertEqual(dec_mc_8, ground_truth_mc_8) - # custom value - dec_9 = AKSPreviewManagedClusterUpdateDecorator( + def test_azure_monitor_logs_disabled_containerinsights(self): + # Test that when --disable-azure-monitor-logs is used, containerinsights.enabled=False + dec_1 = AKSPreviewManagedClusterCreateDecorator( self.cmd, self.client, - {}, + { + "disable_azure_monitor_logs": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_9 = self.models.ManagedCluster(location="test_location") - dec_9.context.attach_mc(mc_9) - # fail on incomplete mc object (no network profile) - with self.assertRaises(UnknownError): - dec_9.update_load_balancer_profile(mc_9) - # custom value - dec_10 = AKSPreviewManagedClusterUpdateDecorator( + mc_1 = self.models.ManagedCluster( + location="test_location", + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), + ) + dec_1.context.attach_mc(mc_1) + + # Set up Azure Monitor profile + dec_mc_1 = dec_1.set_up_azure_monitor_profile(mc_1) + + # Verify containerinsights is disabled + if dec_mc_1.azure_monitor_profile and dec_mc_1.azure_monitor_profile.containerinsights: + self.assertFalse(dec_mc_1.azure_monitor_profile.containerinsights.enabled) + + def test_get_enable_azure_monitor_logs_already_enabled_idempotent(self): + # Test that enabling Azure Monitor logs when already enabled is idempotent (succeeds) + ctx_1 = AKSPreviewManagedClusterContext( self.cmd, - self.client, - {"outbound_type": "managedNATGateway"}, - CUSTOM_MGMT_AKS_PREVIEW, + AKSManagedClusterParamDict( + { + "enable_azure_monitor_logs": True, + } + ), + self.models, + 
decorator_mode=DecoratorMode.UPDATE, ) - mc_10 = self.models.ManagedCluster( + + # Create a managed cluster with monitoring addon already enabled + mc = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - outbound_type="loadBalancer", - load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( - outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( - public_ip_prefixes=[ - self.models.load_balancer_models.ResourceReference( - id="id1" - ), - self.models.load_balancer_models.ResourceReference( - id="id2" - ), - ] - ) - ), - ), + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + ) + } ) - dec_10.context.attach_mc(mc_10) - dec_mc_10 = dec_10.update_load_balancer_profile(mc_10) + ctx_1.attach_mc(mc) + + # Should succeed when trying to enable Azure Monitor logs that's already enabled (idempotent) + result = ctx_1.get_enable_azure_monitor_logs() + self.assertTrue(result) - ground_truth_mc_10 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - outbound_type="loadBalancer", - ), - ) - self.assertEqual(dec_mc_10, ground_truth_mc_10) - # basiclb migration - dec_11 = AKSPreviewManagedClusterUpdateDecorator( + def test_get_enable_azure_monitor_logs_not_enabled_succeeds(self): + # Test that enabling Azure Monitor logs when not enabled succeeds + ctx_1 = AKSPreviewManagedClusterContext( self.cmd, - self.client, - {"load_balancer_sku": "standard"}, - CUSTOM_MGMT_AKS_PREVIEW, + AKSManagedClusterParamDict( + { + "enable_azure_monitor_logs": True, + } + ), + self.models, + decorator_mode=DecoratorMode.UPDATE, ) - mc_11 = self.models.ManagedCluster( + + # Create a managed cluster without monitoring addon enabled + mc = self.models.ManagedCluster( 
location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="basic", - ), + addon_profiles={} ) - dec_11.context.attach_mc(mc_11) - dec_mc_11 = dec_11.update_network_profile(mc_11) + ctx_1.attach_mc(mc) + + # Should succeed when monitoring addon is not enabled + result = ctx_1.get_enable_azure_monitor_logs() + self.assertTrue(result) - ground_truth_mc_11 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", + def test_get_enable_azure_monitor_logs_create_mode_succeeds(self): + # Test that enabling Azure Monitor logs in create mode always succeeds (no validation) + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict( + { + "enable_azure_monitor_logs": True, + } ), + self.models, + decorator_mode=DecoratorMode.CREATE, ) - self.assertEqual(dec_mc_11, ground_truth_mc_11) + + # Create a managed cluster with monitoring addon already enabled (shouldn't matter in CREATE mode) + mc = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + ) + } + ) + ctx_1.attach_mc(mc) + + # Should succeed in CREATE mode even if addon appears enabled + result = ctx_1.get_enable_azure_monitor_logs() + self.assertTrue(result) - def test_update_nat_gateway_profile(self): - # default value in `aks_update` + +class AKSPreviewManagedClusterUpdateDecoratorTestCase(unittest.TestCase): + def setUp(self): + # manually register CUSTOM_MGMT_AKS_PREVIEW + register_aks_preview_resource_type() + self.cli_ctx = MockCLI() + self.cmd = MockCmd(self.cli_ctx) + self.models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) + self.client = MockClient() + + def test_check_raw_parameters(self): + # default value in `aks_update` dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, -
{"outbound_type": "loadBalancer"}, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_nat_gateway_profile(None) - - mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(), - ), - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_nat_gateway_profile(mc_1) + # fail on no updated parameter provided + with patch( + "azext_aks_preview.managed_cluster_decorator.prompt_y_n", + return_value=False, + ), self.assertRaises(RequiredArgumentMissingError): + dec_1.check_raw_parameters() - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - outbound_type="loadBalancer", - ), - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + # unless user says they want to reconcile + with patch( + "azext_aks_preview.managed_cluster_decorator.prompt_y_n", + return_value=True, + ): + dec_1.check_raw_parameters() - def test_update_outbound_type(self): - # default value in `aks_update` - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # custom value + dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "outbound_type": "managedNATGateway", + "cluster_autoscaler_profile": {}, + "api_server_authorized_ip_ranges": "", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - outbound_type="loadBalancer", - ), - ) - dec_1.context.attach_mc(mc_1) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_outbound_type_in_network_profile(None) - dec_mc_1 = dec_1.update_outbound_type_in_network_profile(mc_1) + self.assertIsNone(dec_2.check_raw_parameters()) - ground_truth_mc_1 
= self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - outbound_type="managedNATGateway", - ), + # custom value + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_workload_identity": False, + }, + CUSTOM_MGMT_AKS_PREVIEW, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + with patch( + "azext_aks_preview.managed_cluster_decorator.prompt_y_n", + return_value=True, + ): + self.assertIsNone(dec_3.check_raw_parameters()) - def test_update_network_plugin_settings(self): + def test_update_load_balancer_profile(self): # default value in `aks_update` dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "pod_cidr": "100.64.0.0/10", - "network_plugin_mode": "overlay", + "load_balancer_sku": None, + "load_balancer_managed_outbound_ip_count": None, + "load_balancer_managed_outbound_ipv6_count": None, + "load_balancer_outbound_ips": None, + "load_balancer_outbound_ip_prefixes": None, + "load_balancer_outbound_ports": None, + "load_balancer_idle_timeout": None, }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", pod_cidr=None, service_cidr="192.168.0.0/16" - ), + network_profile=self.models.ContainerServiceNetworkProfile(), ) - dec_1.context.attach_mc(mc_1) # fail on passing the wrong mc object with self.assertRaises(CLIInternalError): - dec_1.update_network_plugin_settings(None) - dec_mc_1 = dec_1.update_network_plugin_settings(mc_1) + dec_1.update_load_balancer_profile(None) + dec_mc_1 = dec_1.update_load_balancer_profile(mc_1) ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - pod_cidr="100.64.0.0/10", - service_cidr="192.168.0.0/16", - ), + 
network_profile=self.models.ContainerServiceNetworkProfile(), ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - # test expanding pod cidr + # custom value - outbound ip prefixes dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "pod_cidr": "100.64.0.0/10", + "load_balancer_managed_outbound_ip_count": None, + "load_balancer_managed_outbound_ipv6_count": None, + "load_balancer_outbound_ips": None, + "load_balancer_outbound_ip_prefixes": "id3,id4", }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_2 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( + public_ip_prefixes=[ + self.models.load_balancer_models.ResourceReference( + id="id1" + ), + self.models.load_balancer_models.ResourceReference( + id="id2" + ), + ] + ) + ) ), ) - dec_2.context.attach_mc(mc_2) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_2.update_network_plugin_settings(None) - dec_mc_2 = dec_2.update_network_plugin_settings(mc_2) + dec_mc_2 = dec_2.update_load_balancer_profile(mc_2) ground_truth_mc_2 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - pod_cidr="100.64.0.0/10", - service_cidr="192.168.0.0/16", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( + public_ip_prefixes=[ + self.models.load_balancer_models.ResourceReference( + id="id3" + ), + self.models.load_balancer_models.ResourceReference( + id="id4" + ), + ] + ) + ) ), ) - self.assertEqual(dec_mc_2, 
ground_truth_mc_2) - # test no updates made with same network plugin mode + # custom value - outbound ip dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "network_plugin_mode": "overlay", + "load_balancer_managed_outbound_ip_count": None, + "load_balancer_managed_outbound_ipv6_count": None, + "load_balancer_outbound_ips": "id3,id4", + "load_balancer_outbound_ip_prefixes": None, }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_3 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", - ), - ) - - dec_3.context.attach_mc(mc_3) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_3.update_network_plugin_settings(None) - dec_mc_3 = dec_3.update_network_plugin_settings(mc_3) - - ground_truth_mc_3 = self.models.ManagedCluster( + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs( + public_i_ps=[ + self.models.load_balancer_models.ResourceReference( + id="id1" + ), + self.models.load_balancer_models.ResourceReference( + id="id2" + ), + ] + ) + ) + ), + ) + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_load_balancer_profile(mc_3) + + ground_truth_mc_3 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs( + public_i_ps=[ + self.models.load_balancer_models.ResourceReference( + id="id3" + ), + self.models.load_balancer_models.ResourceReference( + 
id="id4" + ), + ] + ) + ) ), ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - # test update network dataplane + # custom value - managed outbound ip, count only dec_4 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "network_dataplane": "cilium", + "load_balancer_managed_outbound_ip_count": 5, + "load_balancer_managed_outbound_ipv6_count": None, + "load_balancer_outbound_ips": None, + "load_balancer_outbound_ip_prefixes": None, }, CUSTOM_MGMT_AKS_PREVIEW, ) + mc_4 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - network_dataplane="cilium", - network_policy="", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=10, count_ipv6=20 + ), + ) ), ) - dec_4.context.attach_mc(mc_4) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_4.update_network_plugin_settings(None) - dec_mc_4 = dec_4.update_network_plugin_settings(mc_4) + dec_mc_4 = dec_4.update_load_balancer_profile(mc_4) ground_truth_mc_4 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - network_dataplane="cilium", - network_policy="cilium", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", + network_profile=( + self.models.ContainerServiceNetworkProfile( + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=5, count_ipv6=20 + ), + ) + ) ), ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - # test no updates made with 
empty network plugin settings + # custom value - managed outbound ip, count_ipv6 only dec_5 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "load_balancer_managed_outbound_ip_count": None, + "load_balancer_managed_outbound_ipv6_count": 5, + "load_balancer_outbound_ips": None, + "load_balancer_outbound_ip_prefixes": None, + }, CUSTOM_MGMT_AKS_PREVIEW, ) + mc_5 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=10, count_ipv6=20 + ), + ) ), ) - dec_5.context.attach_mc(mc_5) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_5.update_network_plugin_settings(None) - dec_mc_5 = dec_5.update_network_plugin_settings(mc_5) + dec_mc_5 = dec_5.update_load_balancer_profile(mc_5) ground_truth_mc_5 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - pod_cidr="100.64.0.0/16", - service_cidr="192.168.0.0/16", + network_profile=( + self.models.ContainerServiceNetworkProfile( + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=10, count_ipv6=5 + ), + ) + ) ), ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) - # test update network policy + # custom value - managed outbound ip dec_6 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "network_policy": "azure", + "load_balancer_managed_outbound_ip_count": 25, + 
"load_balancer_managed_outbound_ipv6_count": 5, + "load_balancer_outbound_ips": None, + "load_balancer_outbound_ip_prefixes": None, }, CUSTOM_MGMT_AKS_PREVIEW, ) + mc_6 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_policy="", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=10, count_ipv6=20 + ), + ) ), ) - dec_6.context.attach_mc(mc_6) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_6.update_network_plugin_settings(None) - dec_mc_6 = dec_6.update_network_plugin_settings(mc_6) + dec_mc_6 = dec_6.update_load_balancer_profile(mc_6) ground_truth_mc_6 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_policy="azure", + network_profile=( + self.models.ContainerServiceNetworkProfile( + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=25, count_ipv6=5 + ), + ) + ) ), ) - self.assertEqual(dec_mc_6, ground_truth_mc_6) - # test update network plugin for kubenet -> cni overlay migrations + # custom value - from managed outbound ip to outbound ip dec_7 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "network_plugin": "azure", - "network_plugin_mode": "overlay", + "load_balancer_managed_outbound_ip_count": None, + "load_balancer_managed_outbound_ipv6_count": None, + "load_balancer_outbound_ips": "id1,id2", + "load_balancer_outbound_ip_prefixes": None, }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_7 = self.models.ManagedCluster( location="test_location", 
network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=3, count_ipv6=2 + ) + ) ), ) - dec_7.context.attach_mc(mc_7) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_7.update_network_plugin_settings(None) - dec_mc_7 = dec_7.update_network_plugin_settings(mc_7) + dec_mc_7 = dec_7.update_load_balancer_profile(mc_7) ground_truth_mc_7 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", + network_profile=( + self.models.ContainerServiceNetworkProfile( + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs( + public_i_ps=[ + self.models.load_balancer_models.ResourceReference( + id="id1" + ), + self.models.load_balancer_models.ResourceReference( + id="id2" + ), + ] + ) + ) + ) ), ) - self.assertEqual(dec_mc_7, ground_truth_mc_7) - # test update ip families + # custom value - from outbound ip prefix to managed outbound ip dec_8 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "ip_families": "ipv4,ipv6" + "load_balancer_managed_outbound_ip_count": 10, + "load_balancer_managed_outbound_ipv6_count": 5, + "load_balancer_outbound_ips": None, + "load_balancer_outbound_ip_prefixes": None, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_8 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - ip_families=["ipv4"] + + load_balancer_profile_8 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + 
outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( + public_ip_prefixes=[ + self.models.load_balancer_models.ResourceReference( + id="test_public_ip_prefix" + ) + ] ), ) - + network_profile_8 = self.models.ContainerServiceNetworkProfile( + load_balancer_profile=load_balancer_profile_8 + ) + mc_8 = self.models.ManagedCluster( + location="test_location", network_profile=network_profile_8 + ) dec_8.context.attach_mc(mc_8) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_8.update_network_profile(None) - dec_mc_8 = dec_8.update_network_profile(mc_8) + dec_mc_8 = dec_8.update_load_balancer_profile(mc_8) + ground_truth_load_balancer_profile_8 = self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + managed_outbound_i_ps=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs( + count=10, count_ipv6=5 + ), + ) + ground_truth_network_profile_8 = self.models.ContainerServiceNetworkProfile( + load_balancer_profile=ground_truth_load_balancer_profile_8 + ) ground_truth_mc_8 = self.models.ManagedCluster( location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - ip_families=["ipv4", "ipv6"] - ), + network_profile=ground_truth_network_profile_8, ) - self.assertEqual(dec_mc_8, ground_truth_mc_8) - # test ip_families aren't updated when updating other fields + # custom value dec_9 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "network_plugin_mode": "overlay" - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_9 = self.models.ManagedCluster( + mc_9 = self.models.ManagedCluster(location="test_location") + dec_9.context.attach_mc(mc_9) + # fail on incomplete mc object (no network profile) + with self.assertRaises(UnknownError): + dec_9.update_load_balancer_profile(mc_9) + + # custom value + dec_10 = AKSPreviewManagedClusterUpdateDecorator( + 
self.cmd, + self.client, + {"outbound_type": "managedNATGateway"}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_10 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - ip_families=["ipv6", "ipv4"] + load_balancer_sku="standard", + outbound_type="loadBalancer", + load_balancer_profile=self.models.load_balancer_models.ManagedClusterLoadBalancerProfile( + outbound_ip_prefixes=self.models.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes( + public_ip_prefixes=[ + self.models.load_balancer_models.ResourceReference( + id="id1" + ), + self.models.load_balancer_models.ResourceReference( + id="id2" + ), + ] + ) + ), ), ) + dec_10.context.attach_mc(mc_10) + dec_mc_10 = dec_10.update_load_balancer_profile(mc_10) - dec_9.context.attach_mc(mc_9) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_9.update_network_profile(None) - dec_mc_9 = dec_9.update_network_profile(mc_9) - - ground_truth_mc_9 = self.models.ManagedCluster( + ground_truth_mc_10 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="azure", - network_plugin_mode="overlay", - ip_families=["ipv6", "ipv4"] + load_balancer_sku="standard", + outbound_type="loadBalancer", ), ) - - self.assertEqual(dec_mc_9, ground_truth_mc_9) - - def test_update_api_server_access_profile(self): - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.assertEqual(dec_mc_10, ground_truth_mc_10) + # basiclb migration + dec_11 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + {"load_balancer_sku": "standard"}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster( + mc_11 = self.models.ManagedCluster( location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + load_balancer_sku="basic", + ), ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = 
dec_1.update_api_server_access_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( + dec_11.context.attach_mc(mc_11) + dec_mc_11 = dec_11.update_network_profile(mc_11) + + ground_truth_mc_11 = self.models.ManagedCluster( location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + load_balancer_sku="standard", + ), ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + self.assertEqual(dec_mc_11, ground_truth_mc_11) - apiserver_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/apiserver" - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_nat_gateway_profile(self): + # default value in `aks_update` + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_apiserver_vnet_integration": True, - "apiserver_subnet_id": apiserver_subnet_id, - }, + {"outbound_type": "loadBalancer"}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_api_server_access_profile(mc_2) - ground_truth_api_server_access_profile_2 = ( - self.models.ManagedClusterAPIServerAccessProfile( - enable_vnet_integration=True, - subnet_id=apiserver_subnet_id, - ) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_1.update_nat_gateway_profile(None) + + mc_1 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(), + ), ) - ground_truth_mc_2 = self.models.ManagedCluster( + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_nat_gateway_profile(mc_1) + + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=ground_truth_api_server_access_profile_2, + network_profile=self.models.ContainerServiceNetworkProfile( + 
outbound_type="loadBalancer", + ), ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_outbound_type(self): + # default value in `aks_update` + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_private_cluster": True, + "outbound_type": "managedNATGateway", }, CUSTOM_MGMT_AKS_PREVIEW, ) - - api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() - api_server_access_profile.enable_vnet_integration = True - api_server_access_profile.enable_private_cluster = True - mc_3 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=api_server_access_profile, - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_api_server_access_profile(mc_3) - ground_truth_api_server_access_profile_3 = ( - self.models.ManagedClusterAPIServerAccessProfile( - enable_vnet_integration=True, enable_private_cluster=False - ) + network_profile=self.models.ContainerServiceNetworkProfile( + load_balancer_sku="standard", + outbound_type="loadBalancer", + ), ) - ground_truth_mc_3 = self.models.ManagedCluster( + dec_1.context.attach_mc(mc_1) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_1.update_outbound_type_in_network_profile(None) + dec_mc_1 = dec_1.update_outbound_type_in_network_profile(mc_1) + + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=ground_truth_api_server_access_profile_3, + network_profile=self.models.ContainerServiceNetworkProfile( + load_balancer_sku="standard", + outbound_type="managedNATGateway", + ), ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_network_plugin_settings(self): + # default value in `aks_update` + dec_1 = 
AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_private_cluster": True, + "pod_cidr": "100.64.0.0/10", + "network_plugin_mode": "overlay", }, CUSTOM_MGMT_AKS_PREVIEW, ) - api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() - api_server_access_profile.enable_vnet_integration = True - mc_4 = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=api_server_access_profile, - ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_api_server_access_profile(mc_4) - ground_truth_api_server_access_profile_4 = ( - self.models.ManagedClusterAPIServerAccessProfile( - enable_vnet_integration=True, enable_private_cluster=True - ) - ) - ground_truth_mc_4 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=ground_truth_api_server_access_profile_4, + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", pod_cidr=None, service_cidr="192.168.0.0/16" + ), ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - def test_update_http_proxy_config(self): - dec_1 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, - self.client, - {"http_proxy_config": get_test_data_file_path("httpproxyconfig.json")}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") dec_1.context.attach_mc(mc_1) # fail on passing the wrong mc object with self.assertRaises(CLIInternalError): - dec_1.update_http_proxy_config(None) - dec_mc_1 = dec_1.update_http_proxy_config(mc_1) + dec_1.update_network_plugin_settings(None) + dec_mc_1 = dec_1.update_network_plugin_settings(mc_1) ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - http_proxy_config={ - "httpProxy": "http://cli-proxy-vm:3128/", - "httpsProxy": "https://cli-proxy-vm:3129/", - "noProxy": ["localhost", "127.0.0.1"], - "trustedCa": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZHekNDQXdPZ0F3SUJBZ0lVT1FvajhDTFpkc2Vscjk3cnZJd3g1T0xEc3V3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01ZMnhwTFhCeWIzaDVMWFp0TUI0WERUSXlNRE13T0RFMk5EUTBOMW9YRFRNeQpNRE13TlRFMk5EUTBOMW93RnpFVk1CTUdBMVVFQXd3TVkyeHBMWEJ5YjNoNUxYWnRNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEvTVB0VjVCVFB0NmNxaTRSZE1sbXIzeUlzYTJ1anpjaHh2NGgKanNDMUR0blJnb3M1UzQxUEgwcmkrM3RUU1ZYMzJ5cndzWStyRDFZUnVwbTZsbUU3R2hVNUkwR2k5b3prU0YwWgpLS2FKaTJveXBVL0ZCK1FQcXpvQ1JzTUV3R0NibUtGVmw4VnVoeW5kWEs0YjRrYmxyOWJsL2V1d2Q3TThTYnZ6CldVam5lRHJRc2lJc3J6UFQ0S0FaTHFjdHpEZTRsbFBUN1lLYTMzaGlFUE9mdldpWitkcWthUUE5UDY0eFhTeW4KZkhYOHVWQUozdUJWSmVHeEQwcGtOSjdqT3J5YVV1SEh1Y1U4UzltSWpuS2pBQjVhUGpMSDV4QXM2bG1iMzEyMgp5KzF0bkVBbVhNNTBEK1VvRWpmUzZIT2I1cmRpcVhHdmMxS2JvS2p6a1BDUnh4MmE3MmN2ZWdVajZtZ0FKTHpnClRoRTFsbGNtVTRpemd4b0lNa1ZwR1RWT0xMbjFWRkt1TmhNWkN2RnZLZ25Lb0F2M0cwRlVuZldFYVJSalNObUQKTFlhTURUNUg5WnQycERJVWpVR1N0Q2w3Z1J6TUVuWXdKTzN5aURwZzQzbzVkUnlzVXlMOUpmRS9OaDdUZzYxOApuOGNKL1c3K1FZYllsanVyYXA4cjdRRlNyb2wzVkNoRkIrT29yNW5pK3ZvaFNBd0pmMFVsTXBHM3hXbXkxVUk0ClRGS2ZGR1JSVHpyUCs3Yk53WDVoSXZJeTVWdGd5YU9xSndUeGhpL0pkeHRPcjJ0QTVyQ1c3K0N0Z1N2emtxTkUKWHlyN3ZrWWdwNlk1TFpneTR0VWpLMEswT1VnVmRqQk9oRHBFenkvRkY4dzFGRVZnSjBxWS9yV2NMa0JIRFQ4Ugp2SmtoaW84Q0F3RUFBYU5mTUYwd0Z3WURWUjBSQkJBd0RvSU1ZMnhwTFhCeWIzaDVMWFp0TUJJR0ExVWRFd0VCCi93UUlNQVlCQWY4Q0FRQXdEd1lEVlIwUEFRSC9CQVVEQXdmbmdEQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQWdZSUt3WUJCUVVIQXdFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dJQkFBb21qQ3lYdmFRT3hnWUs1MHNYTEIyKwp3QWZkc3g1bm5HZGd5Zmc0dXJXMlZtMTVEaEd2STdDL250cTBkWXkyNE4vVWJHN1VEWHZseUxJSkZxMVhQN25mCnBaRzBWQ2paNjlibXhLbTNaOG0wL0F3TXZpOGU5ZWR5OHY5a05CQ3dMR2tIYkE4WW85Q0lpUWdlbGZwcDF2VWgKYm5OQmhhRCtpdTZDZmlDTHdnSmIvaXc3ZW8vQ3lvWnF4K3RqWGFPMnpYdm00cC8rUUlmQU9ndEdRTEZVOGNmWgovZ1VyVHE1Z0ZxMCtQOUd5V3NBVEpGNnE3TDZXWlpqME91VHNlN2Y0Q1NpajZNbk9NTXhBK0pvYWhKejdsc1NpClRKSEl3RXA1ci9SeWhweWVwUXhGWWNVSDVKSmY5cmFoWExXWmkrOVRqeFNNMll5aHhmUlBzaVVFdUdEb2s3OFEKbS9RUGlDaTlKSmIxb2NtVGpBVjh4RFNob2NpdlhPRnlobjZMbjc3dkx
qWStBYXZ0V0RoUXRocHVQeHNMdFZ6bQplMFNIMTFkRUxSdGI3NG1xWE9yTzdmdS8rSUJzM0pxTEUvVSt4dXhRdHZHOHZHMXlES0hIU1pxUzJoL1dzNGw0Ck5pQXNoSGdlaFFEUEJjWTl3WVl6ZkJnWnBPVU16ZERmNTB4K0ZTbFk0M1dPSkp6U3VRaDR5WjArM2t5Z3VDRjgKcm5NTFNjZXlTNGNpNExtSi9LQ1N1R2RmNlhWWXo4QkU5Z2pqanBDUDZxeTBVbFJlZldzL2lnL3djSysyYkYxVApuL1l2KzZnWGVDVEhKNzVxRElQbHA3RFJVVWswZmJNajRiSWthb2dXV2s0emYydThteFpMYTBsZVBLTktaTi9tCkdDdkZ3cjNlaSt1LzhjenA1RjdUCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", - }, + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + pod_cidr="100.64.0.0/10", + service_cidr="192.168.0.0/16", + ), ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - # custom value + # test expanding pod cidr dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_http_proxy": True, + "pod_cidr": "100.64.0.0/10", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster( location="test_location", - http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( - enabled=True, - httpProxy="http://cli-proxy-vm:3128/", - httpsProxy="https://cli-proxy-vm:3129/", - ) + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + ), ) + dec_2.context.attach_mc(mc_2) # fail on passing the wrong mc object with self.assertRaises(CLIInternalError): - dec_2.update_http_proxy_enabled(None) - dec_mc_2 = dec_2.update_http_proxy_enabled(mc_2) + dec_2.update_network_plugin_settings(None) + dec_mc_2 = dec_2.update_network_plugin_settings(mc_2) ground_truth_mc_2 = self.models.ManagedCluster( location="test_location", - http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( - enabled=False, - httpProxy="http://cli-proxy-vm:3128/", - httpsProxy="https://cli-proxy-vm:3129/", - ) + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + pod_cidr="100.64.0.0/10", + service_cidr="192.168.0.0/16", + ), ) + 
self.assertEqual(dec_mc_2, ground_truth_mc_2) - # custom value + # test no updates made with same network plugin mode dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_http_proxy": True, + "network_plugin_mode": "overlay", }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster( location="test_location", - http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( - enabled=False, - httpProxy="http://cli-proxy-vm:3128/", - httpsProxy="https://cli-proxy-vm:3129/", - ) + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + ), ) + dec_3.context.attach_mc(mc_3) # fail on passing the wrong mc object with self.assertRaises(CLIInternalError): - dec_3.update_http_proxy_enabled(None) - dec_mc_3 = dec_3.update_http_proxy_enabled(mc_3) + dec_3.update_network_plugin_settings(None) + dec_mc_3 = dec_3.update_network_plugin_settings(mc_3) ground_truth_mc_3 = self.models.ManagedCluster( location="test_location", - http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( - enabled=True, - httpProxy="http://cli-proxy-vm:3128/", - httpsProxy="https://cli-proxy-vm:3129/", - ) + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + ), ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) - def test_update_pod_identity_profile(self): - # default value in `aks_update` - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + # test update network dataplane + dec_4 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_pod_identity": False, - "disable_pod_identity": False, - "enable_pod_identity_with_kubenet": False, + "network_dataplane": "cilium", }, CUSTOM_MGMT_AKS_PREVIEW, ) + mc_4 = self.models.ManagedCluster( + location="test_location", + 
network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + network_policy="", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + ), + ) + + dec_4.context.attach_mc(mc_4) # fail on passing the wrong mc object with self.assertRaises(CLIInternalError): - dec_1.update_pod_identity_profile(None) + dec_4.update_network_plugin_settings(None) + dec_mc_4 = dec_4.update_network_plugin_settings(mc_4) - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_pod_identity_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( + ground_truth_mc_4 = self.models.ManagedCluster( location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + network_dataplane="cilium", + network_policy="cilium", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + ), ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - # custom value - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + # test no updates made with empty network plugin settings + dec_5 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_pod_identity": True, - "disable_pod_identity": False, - "enable_pod_identity_with_kubenet": False, - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) + mc_5 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", + ), + ) - mc_2 = self.models.ManagedCluster( + dec_5.context.attach_mc(mc_5) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_5.update_network_plugin_settings(None) + dec_mc_5 = dec_5.update_network_plugin_settings(mc_5) + + 
ground_truth_mc_5 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", + network_plugin="azure", + network_plugin_mode="overlay", + pod_cidr="100.64.0.0/16", + service_cidr="192.168.0.0/16", ), ) - dec_2.context.attach_mc(mc_2) - # fail on not a msi cluster - with self.assertRaises(RequiredArgumentMissingError): - dec_2.update_pod_identity_profile(mc_2) - # custom value - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.assertEqual(dec_mc_5, ground_truth_mc_5) + + # test update network policy + dec_6 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_pod_identity": True, - "disable_pod_identity": False, - "enable_pod_identity_with_kubenet": True, + "network_policy": "azure", }, CUSTOM_MGMT_AKS_PREVIEW, ) - - mc_3 = self.models.ManagedCluster( + mc_6 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - ), - identity=self.models.ManagedClusterIdentity( - type="SystemAssigned", + network_plugin="azure", + network_policy="", ), ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_pod_identity_profile(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( + + dec_6.context.attach_mc(mc_6) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_6.update_network_plugin_settings(None) + dec_mc_6 = dec_6.update_network_plugin_settings(mc_6) + + ground_truth_mc_6 = self.models.ManagedCluster( location="test_location", network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - ), - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True, - allow_network_plugin_kubenet=True, - user_assigned_identities=[], - user_assigned_identity_exceptions=[], - ), - identity=self.models.ManagedClusterIdentity( - type="SystemAssigned", + network_plugin="azure", 
+ network_policy="azure", ), ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - # custom value - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.assertEqual(dec_mc_6, ground_truth_mc_6) + + # test update network plugin for kubenet -> cni overlay migrations + dec_7 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_pod_identity": False, - "disable_pod_identity": True, - "enable_pod_identity_with_kubenet": False, + "network_plugin": "azure", + "network_plugin_mode": "overlay", }, CUSTOM_MGMT_AKS_PREVIEW, ) - - mc_4 = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True, - user_assigned_identities=[], - user_assigned_identity_exceptions=[], - ), - ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_pod_identity_profile(mc_4) - ground_truth_mc_4 = self.models.ManagedCluster( + mc_7 = self.models.ManagedCluster( location="test_location", - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=False, + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="kubenet", ), ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - def test_update_oidc_issuer_profile__default_value(self): - dec = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNone(updated_mc.oidc_issuer_profile) + dec_7.context.attach_mc(mc_7) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_7.update_network_plugin_settings(None) + dec_mc_7 = dec_7.update_network_plugin_settings(mc_7) - def test_update_oidc_issuer_profile__default_value_mc_enabled(self): - dec = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc 
= self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True + ground_truth_mc_7 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + ), ) - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertTrue(updated_mc.oidc_issuer_profile.enabled) - def test_update_oidc_issuer_profile__enabled(self): - dec = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, - self.client, - { - "enable_oidc_issuer": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNotNone(updated_mc.oidc_issuer_profile) - self.assertTrue(updated_mc.oidc_issuer_profile.enabled) + self.assertEqual(dec_mc_7, ground_truth_mc_7) - def test_update_oidc_issuer_profile__enabled_mc_enabled(self): - dec = AKSPreviewManagedClusterUpdateDecorator( + # test update ip families + dec_8 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_oidc_issuer": True, + "ip_families": "ipv4,ipv6" }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True + mc_8 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + ip_families=["ipv4"] + ), ) - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNotNone(updated_mc.oidc_issuer_profile) - self.assertTrue(updated_mc.oidc_issuer_profile.enabled) - def test_update_workload_identity_profile__default_value(self): - dec = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, 
CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertIsNone(updated_mc.security_profile) + dec_8.context.attach_mc(mc_8) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_8.update_network_profile(None) + dec_mc_8 = dec_8.update_network_profile(mc_8) - def test_update_workload_identity_profile__default_value_mc_enabled(self): - dec = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - mc.security_profile = self.models.ManagedClusterSecurityProfile( - workload_identity=self.models.ManagedClusterSecurityProfileWorkloadIdentity( - enabled=True, - ) + ground_truth_mc_8 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + ip_families=["ipv4", "ipv6"] + ), ) - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertIsNotNone(updated_mc.security_profile.workload_identity) - def test_update_workload_identity_profile__enabled(self): - dec = AKSPreviewManagedClusterUpdateDecorator( + self.assertEqual(dec_mc_8, ground_truth_mc_8) + + # test ip_families aren't updated when updating other fields + dec_9 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_workload_identity": True, + "network_plugin_mode": "overlay" }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True - ) - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertTrue(updated_mc.security_profile.workload_identity.enabled) + mc_9 = self.models.ManagedCluster( + location="test_location", + 
network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + ip_families=["ipv6", "ipv4"] + ), + ) - def test_update_workload_identity_profile__disabled(self): - dec = AKSPreviewManagedClusterUpdateDecorator( + dec_9.context.attach_mc(mc_9) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_9.update_network_profile(None) + dec_mc_9 = dec_9.update_network_profile(mc_9) + + ground_truth_mc_9 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="azure", + network_plugin_mode="overlay", + ip_families=["ipv6", "ipv4"] + ), + ) + + self.assertEqual(dec_mc_9, ground_truth_mc_9) + + def test_update_api_server_access_profile(self): + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster( + location="test_location", + ) + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_api_server_access_profile(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + apiserver_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/apiserver" + dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_workload_identity": True, + "enable_apiserver_vnet_integration": True, + "apiserver_subnet_id": apiserver_subnet_id, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True + mc_2 = self.models.ManagedCluster(location="test_location") + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_api_server_access_profile(mc_2) + ground_truth_api_server_access_profile_2 = ( + self.models.ManagedClusterAPIServerAccessProfile( + enable_vnet_integration=True, + 
subnet_id=apiserver_subnet_id, + ) ) - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertFalse(updated_mc.security_profile.workload_identity.enabled) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + api_server_access_profile=ground_truth_api_server_access_profile_2, + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) - def test_update_image_cleaner(self): - dec_0 = AKSPreviewManagedClusterUpdateDecorator( + dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "disable_private_cluster": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_0 = self.models.ManagedCluster( + + api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() + api_server_access_profile.enable_vnet_integration = True + api_server_access_profile.enable_private_cluster = True + mc_3 = self.models.ManagedCluster( location="test_location", + api_server_access_profile=api_server_access_profile, ) - dec_0.context.attach_mc(mc_0) - dec_mc_0 = dec_0.update_image_cleaner(mc_0) - ground_truth_mc_0 = self.models.ManagedCluster( + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_api_server_access_profile(mc_3) + ground_truth_api_server_access_profile_3 = ( + self.models.ManagedClusterAPIServerAccessProfile( + enable_vnet_integration=True, enable_private_cluster=False + ) + ) + ground_truth_mc_3 = self.models.ManagedCluster( location="test_location", + api_server_access_profile=ground_truth_api_server_access_profile_3, ) - self.assertEqual(dec_mc_0, ground_truth_mc_0) + self.assertEqual(dec_mc_3, ground_truth_mc_3) - dec_1 = AKSPreviewManagedClusterUpdateDecorator( + dec_4 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_image_cleaner": True, + "enable_private_cluster": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster( + api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() + 
api_server_access_profile.enable_vnet_integration = True + mc_4 = self.models.ManagedCluster( location="test_location", + api_server_access_profile=api_server_access_profile, ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_image_cleaner(mc_1) - - ground_truth_image_cleaner_profile_1 = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=True, - interval_hours=7 * 24, + dec_4.context.attach_mc(mc_4) + dec_mc_4 = dec_4.update_api_server_access_profile(mc_4) + ground_truth_api_server_access_profile_4 = ( + self.models.ManagedClusterAPIServerAccessProfile( + enable_vnet_integration=True, enable_private_cluster=True ) ) - ground_truth_security_profile_1 = self.models.ManagedClusterSecurityProfile( - image_cleaner=ground_truth_image_cleaner_profile_1, + ground_truth_mc_4 = self.models.ManagedCluster( + location="test_location", + api_server_access_profile=ground_truth_api_server_access_profile_4, + ) + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + def test_update_http_proxy_config(self): + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"http_proxy_config": get_test_data_file_path("httpproxyconfig.json")}, + CUSTOM_MGMT_AKS_PREVIEW, ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_1.update_http_proxy_config(None) + dec_mc_1 = dec_1.update_http_proxy_config(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_1, + http_proxy_config={ + "httpProxy": "http://cli-proxy-vm:3128/", + "httpsProxy": "https://cli-proxy-vm:3129/", + "noProxy": ["localhost", "127.0.0.1"], + "trustedCa": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZHekNDQXdPZ0F3SUJBZ0lVT1FvajhDTFpkc2Vscjk3cnZJd3g1T0xEc3V3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01ZMnhwTFhCeWIzaDVMWFp0TUI0WERUSXlNRE13T0RFMk5EUTBOMW9YRFRNeQpNRE13TlRFMk5EUTBOMW93RnpFVk1CTUdBMVVFQXd3TVkyeHBMWEJ5YjNoNUxYWnRNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEvTVB0VjVCVFB0NmNxaTRSZE1sbXIzeUlzYTJ1anpjaHh2NGgKanNDMUR0blJnb3M1UzQxUEgwcmkrM3RUU1ZYMzJ5cndzWStyRDFZUnVwbTZsbUU3R2hVNUkwR2k5b3prU0YwWgpLS2FKaTJveXBVL0ZCK1FQcXpvQ1JzTUV3R0NibUtGVmw4VnVoeW5kWEs0YjRrYmxyOWJsL2V1d2Q3TThTYnZ6CldVam5lRHJRc2lJc3J6UFQ0S0FaTHFjdHpEZTRsbFBUN1lLYTMzaGlFUE9mdldpWitkcWthUUE5UDY0eFhTeW4KZkhYOHVWQUozdUJWSmVHeEQwcGtOSjdqT3J5YVV1SEh1Y1U4UzltSWpuS2pBQjVhUGpMSDV4QXM2bG1iMzEyMgp5KzF0bkVBbVhNNTBEK1VvRWpmUzZIT2I1cmRpcVhHdmMxS2JvS2p6a1BDUnh4MmE3MmN2ZWdVajZtZ0FKTHpnClRoRTFsbGNtVTRpemd4b0lNa1ZwR1RWT0xMbjFWRkt1TmhNWkN2RnZLZ25Lb0F2M0cwRlVuZldFYVJSalNObUQKTFlhTURUNUg5WnQycERJVWpVR1N0Q2w3Z1J6TUVuWXdKTzN5aURwZzQzbzVkUnlzVXlMOUpmRS9OaDdUZzYxOApuOGNKL1c3K1FZYllsanVyYXA4cjdRRlNyb2wzVkNoRkIrT29yNW5pK3ZvaFNBd0pmMFVsTXBHM3hXbXkxVUk0ClRGS2ZGR1JSVHpyUCs3Yk53WDVoSXZJeTVWdGd5YU9xSndUeGhpL0pkeHRPcjJ0QTVyQ1c3K0N0Z1N2emtxTkUKWHlyN3ZrWWdwNlk1TFpneTR0VWpLMEswT1VnVmRqQk9oRHBFenkvRkY4dzFGRVZnSjBxWS9yV2NMa0JIRFQ4Ugp2SmtoaW84Q0F3RUFBYU5mTUYwd0Z3WURWUjBSQkJBd0RvSU1ZMnhwTFhCeWIzaDVMWFp0TUJJR0ExVWRFd0VCCi93UUlNQVlCQWY4Q0FRQXdEd1lEVlIwUEFRSC9CQVVEQXdmbmdEQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQWdZSUt3WUJCUVVIQXdFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dJQkFBb21qQ3lYdmFRT3hnWUs1MHNYTEIyKwp3QWZkc3g1bm5HZGd5Zmc0dXJXMlZtMTVEaEd2STdDL250cTBkWXkyNE4vVWJHN1VEWHZseUxJSkZxMVhQN25mCnBaRzBWQ2paNjlibXhLbTNaOG0wL0F3TXZpOGU5ZWR5OHY5a05CQ3dMR2tIYkE4WW85Q0lpUWdlbGZwcDF2VWgKYm5OQmhhRCtpdTZDZmlDTHdnSmIvaXc3ZW8vQ3lvWnF4K3RqWGFPMnpYdm00cC8rUUlmQU9ndEdRTEZVOGNmWgovZ1VyVHE1Z0ZxMCtQOUd5V3NBVEpGNnE3TDZXWlpqME91VHNlN2Y0Q1NpajZNbk9NTXhBK0pvYWhKejdsc1NpClRKSEl3RXA1ci9SeWhweWVwUXhGWWNVSDVKSmY5cmFoWExXWmkrOVRqeFNNMll5aHhmUlBzaVVFdUdEb2s3OFEKbS9RUGlDaTlKSmIxb2NtVGpBVjh4RFNob2NpdlhPRnlobjZMbjc3dkx
qWStBYXZ0V0RoUXRocHVQeHNMdFZ6bQplMFNIMTFkRUxSdGI3NG1xWE9yTzdmdS8rSUJzM0pxTEUvVSt4dXhRdHZHOHZHMXlES0hIU1pxUzJoL1dzNGw0Ck5pQXNoSGdlaFFEUEJjWTl3WVl6ZkJnWnBPVU16ZERmNTB4K0ZTbFk0M1dPSkp6U3VRaDR5WjArM2t5Z3VDRjgKcm5NTFNjZXlTNGNpNExtSi9LQ1N1R2RmNlhWWXo4QkU5Z2pqanBDUDZxeTBVbFJlZldzL2lnL3djSysyYkYxVApuL1l2KzZnWGVDVEhKNzVxRElQbHA3RFJVVWswZmJNajRiSWthb2dXV2s0emYydThteFpMYTBsZVBLTktaTi9tCkdDdkZ3cjNlaSt1LzhjenA1RjdUCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + }, ) self.assertEqual(dec_mc_1, ground_truth_mc_1) + # custom value dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"image_cleaner_interval_hours": 24}, + { + "disable_http_proxy": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.image_cleaner = ( - self.models.ManagedClusterSecurityProfileImageCleaner( + + mc_2 = self.models.ManagedCluster( + location="test_location", + http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( enabled=True, - interval_hours=25, + httpProxy="http://cli-proxy-vm:3128/", + httpsProxy="https://cli-proxy-vm:3129/", + ) + ) + dec_2.context.attach_mc(mc_2) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_2.update_http_proxy_enabled(None) + dec_mc_2 = dec_2.update_http_proxy_enabled(mc_2) + + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( + enabled=False, + httpProxy="http://cli-proxy-vm:3128/", + httpsProxy="https://cli-proxy-vm:3129/", + ) + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # custom value + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_http_proxy": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_3 = self.models.ManagedCluster( + location="test_location", + http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( + enabled=False, + httpProxy="http://cli-proxy-vm:3128/", + 
httpsProxy="https://cli-proxy-vm:3129/", + ) + ) + dec_3.context.attach_mc(mc_3) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_3.update_http_proxy_enabled(None) + dec_mc_3 = dec_3.update_http_proxy_enabled(mc_3) + + ground_truth_mc_3 = self.models.ManagedCluster( + location="test_location", + http_proxy_config = self.models.ManagedClusterHTTPProxyConfig( + enabled=True, + httpProxy="http://cli-proxy-vm:3128/", + httpsProxy="https://cli-proxy-vm:3129/", ) ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) + + def test_update_pod_identity_profile(self): + # default value in `aks_update` + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_pod_identity": False, + "disable_pod_identity": False, + "enable_pod_identity_with_kubenet": False, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + # fail on passing the wrong mc object + with self.assertRaises(CLIInternalError): + dec_1.update_pod_identity_profile(None) + + mc_1 = self.models.ManagedCluster( + location="test_location", + ) + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_pod_identity_profile(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + # custom value + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_pod_identity": True, + "disable_pod_identity": False, + "enable_pod_identity_with_kubenet": False, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=security_profile, + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="kubenet", + ), ) dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_image_cleaner(mc_2) + # fail on not a msi cluster + with self.assertRaises(RequiredArgumentMissingError): + dec_2.update_pod_identity_profile(mc_2) + + # custom value + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + 
self.cmd, + self.client, + { + "enable_pod_identity": True, + "disable_pod_identity": False, + "enable_pod_identity_with_kubenet": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_3 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="kubenet", + ), + identity=self.models.ManagedClusterIdentity( + type="SystemAssigned", + ), + ) + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_pod_identity_profile(mc_3) + ground_truth_mc_3 = self.models.ManagedCluster( + location="test_location", + network_profile=self.models.ContainerServiceNetworkProfile( + network_plugin="kubenet", + ), + pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( + enabled=True, + allow_network_plugin_kubenet=True, + user_assigned_identities=[], + user_assigned_identity_exceptions=[], + ), + identity=self.models.ManagedClusterIdentity( + type="SystemAssigned", + ), + ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) + + # custom value + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_pod_identity": False, + "disable_pod_identity": True, + "enable_pod_identity_with_kubenet": False, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + mc_4 = self.models.ManagedCluster( + location="test_location", + pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( + enabled=True, + user_assigned_identities=[], + user_assigned_identity_exceptions=[], + ), + ) + dec_4.context.attach_mc(mc_4) + dec_mc_4 = dec_4.update_pod_identity_profile(mc_4) + ground_truth_mc_4 = self.models.ManagedCluster( + location="test_location", + pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( + enabled=False, + ), + ) + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + def test_update_oidc_issuer_profile__default_value(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, 
CUSTOM_MGMT_AKS_PREVIEW + ) + mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + updated_mc = dec.update_oidc_issuer_profile(mc) + self.assertIsNone(updated_mc.oidc_issuer_profile) + + def test_update_oidc_issuer_profile__default_value_mc_enabled(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + mc = self.models.ManagedCluster(location="test_location") + mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( + enabled=True + ) + dec.context.attach_mc(mc) + updated_mc = dec.update_oidc_issuer_profile(mc) + self.assertTrue(updated_mc.oidc_issuer_profile.enabled) + + def test_update_oidc_issuer_profile__enabled(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_oidc_issuer": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + updated_mc = dec.update_oidc_issuer_profile(mc) + self.assertIsNotNone(updated_mc.oidc_issuer_profile) + self.assertTrue(updated_mc.oidc_issuer_profile.enabled) + + def test_update_oidc_issuer_profile__enabled_mc_enabled(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_oidc_issuer": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster(location="test_location") + mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( + enabled=True + ) + dec.context.attach_mc(mc) + updated_mc = dec.update_oidc_issuer_profile(mc) + self.assertIsNotNone(updated_mc.oidc_issuer_profile) + self.assertTrue(updated_mc.oidc_issuer_profile.enabled) + + def test_update_workload_identity_profile__default_value(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + mc = self.models.ManagedCluster(location="test_location") + dec.context.attach_mc(mc) + updated_mc = 
dec.update_workload_identity_profile(mc) + self.assertIsNone(updated_mc.security_profile) + + def test_update_workload_identity_profile__default_value_mc_enabled(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + mc = self.models.ManagedCluster(location="test_location") + mc.security_profile = self.models.ManagedClusterSecurityProfile( + workload_identity=self.models.ManagedClusterSecurityProfileWorkloadIdentity( + enabled=True, + ) + ) + dec.context.attach_mc(mc) + updated_mc = dec.update_workload_identity_profile(mc) + self.assertIsNotNone(updated_mc.security_profile.workload_identity) + + def test_update_workload_identity_profile__enabled(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_workload_identity": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster(location="test_location") + mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( + enabled=True + ) + dec.context.attach_mc(mc) + updated_mc = dec.update_workload_identity_profile(mc) + self.assertTrue(updated_mc.security_profile.workload_identity.enabled) + + def test_update_workload_identity_profile__disabled(self): + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_workload_identity": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc = self.models.ManagedCluster(location="test_location") + mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( + enabled=True + ) + dec.context.attach_mc(mc) + updated_mc = dec.update_workload_identity_profile(mc) + self.assertFalse(updated_mc.security_profile.workload_identity.enabled) + + def test_update_image_cleaner(self): + dec_0 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_0 = self.models.ManagedCluster( + location="test_location", + ) + dec_0.context.attach_mc(mc_0) + dec_mc_0 = 
dec_0.update_image_cleaner(mc_0) + ground_truth_mc_0 = self.models.ManagedCluster( + location="test_location", + ) + self.assertEqual(dec_mc_0, ground_truth_mc_0) + + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_image_cleaner": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster( + location="test_location", + ) + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_image_cleaner(mc_1) + + ground_truth_image_cleaner_profile_1 = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=True, + interval_hours=7 * 24, + ) + ) + ground_truth_security_profile_1 = self.models.ManagedClusterSecurityProfile( + image_cleaner=ground_truth_image_cleaner_profile_1, + ) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_1, + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {"image_cleaner_interval_hours": 24}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + security_profile = self.models.ManagedClusterSecurityProfile() + security_profile.image_cleaner = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=True, + interval_hours=25, + ) + ) + mc_2 = self.models.ManagedCluster( + location="test_location", + security_profile=security_profile, + ) + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_image_cleaner(mc_2) + + ground_truth_image_cleaner_profile_2 = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=True, + interval_hours=24, + ) + ) + ground_truth_security_profile_2 = self.models.ManagedClusterSecurityProfile( + image_cleaner=ground_truth_image_cleaner_profile_2, + ) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_2, + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + 
self.cmd, + self.client, + { + "enable_image_cleaner": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + security_profile = self.models.ManagedClusterSecurityProfile() + security_profile.image_cleaner = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=False, + interval_hours=25, + ) + ) + mc_3 = self.models.ManagedCluster( + location="test_location", + security_profile=security_profile, + ) + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_image_cleaner(mc_3) + + ground_truth_image_cleaner_profile_3 = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=True, + interval_hours=25, + ) + ) + ground_truth_security_profile_3 = self.models.ManagedClusterSecurityProfile( + image_cleaner=ground_truth_image_cleaner_profile_3, + ) + ground_truth_mc_3 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_3, + ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) + + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_image_cleaner": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + security_profile = self.models.ManagedClusterSecurityProfile() + security_profile.image_cleaner = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=True, + interval_hours=25, + ) + ) + mc_4 = self.models.ManagedCluster( + location="test_location", + security_profile=security_profile, + ) + dec_4.context.attach_mc(mc_4) + dec_mc_4 = dec_4.update_image_cleaner(mc_4) + + ground_truth_image_cleaner_profile_4 = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=False, + interval_hours=25, + ) + ) + ground_truth_security_profile_4 = self.models.ManagedClusterSecurityProfile( + image_cleaner=ground_truth_image_cleaner_profile_4, + ) + ground_truth_mc_4 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_4, + ) + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + dec_5 = 
AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_image_cleaner": True, + "image_cleaner_interval_hours": 24, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + security_profile = self.models.ManagedClusterSecurityProfile() + security_profile.image_cleaner = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=False, + interval_hours=25, + ) + ) + mc_5 = self.models.ManagedCluster( + location="test_location", + security_profile=security_profile, + ) + dec_5.context.attach_mc(mc_5) + dec_mc_5 = dec_5.update_image_cleaner(mc_5) + + ground_truth_image_cleaner_profile_5 = ( + self.models.ManagedClusterSecurityProfileImageCleaner( + enabled=True, + interval_hours=24, + ) + ) + ground_truth_security_profile_5 = self.models.ManagedClusterSecurityProfile( + image_cleaner=ground_truth_image_cleaner_profile_5, + ) + ground_truth_mc_5 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_5, + ) + self.assertEqual(dec_mc_5, ground_truth_mc_5) + + def test_update_azure_keyvault_kms(self): + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster( + location="test_location", + ) + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_azure_keyvault_kms(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + ) + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + key_id_1 = ( + "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" + ) + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_azure_keyvault_kms": True, + "azure_keyvault_kms_key_id": key_id_1, + "azure_keyvault_kms_key_vault_network_access": "Private", + "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = 
self.models.ManagedCluster( + location="test_location", + ) + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_azure_keyvault_kms(mc_2) + + ground_truth_azure_keyvault_kms_profile_2 = self.models.AzureKeyVaultKms( + enabled=True, + key_id=key_id_1, + key_vault_network_access="Private", + key_vault_resource_id="/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", + ) + ground_truth_security_profile_2 = self.models.ManagedClusterSecurityProfile( + azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_2, + ) + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_2, + ) + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + dec_5 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_keyvault_kms": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + azure_keyvault_kms_profile_5 = self.models.AzureKeyVaultKms( + enabled=True, + key_id=key_id_1, + key_vault_network_access="Public", + ) + security_profile_5 = self.models.ManagedClusterSecurityProfile( + azure_key_vault_kms=azure_keyvault_kms_profile_5, + ) + mc_5 = self.models.ManagedCluster( + location="test_location", + security_profile=security_profile_5, + ) + dec_5.context.attach_mc(mc_5) + dec_mc_5 = dec_5.update_azure_keyvault_kms(mc_5) + + ground_truth_azure_keyvault_kms_profile_5 = self.models.AzureKeyVaultKms( + enabled=False, + key_id=key_id_1, + key_vault_network_access="Public", + ) + ground_truth_security_profile_5 = self.models.ManagedClusterSecurityProfile( + azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_5, + ) + ground_truth_mc_5 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_5, + ) + self.assertEqual(dec_mc_5, ground_truth_mc_5) + + dec_6 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_keyvault_kms": True, + }, + 
CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_6 = self.models.ManagedCluster( + location="test_location", + ) + dec_6.context.attach_mc(mc_6) + dec_mc_6 = dec_6.update_azure_keyvault_kms(mc_6) + + ground_truth_azure_keyvault_kms_profile_6 = self.models.AzureKeyVaultKms() + ground_truth_azure_keyvault_kms_profile_6.enabled = False + ground_truth_security_profile_6 = self.models.ManagedClusterSecurityProfile() + ground_truth_security_profile_6.azure_key_vault_kms = ( + ground_truth_azure_keyvault_kms_profile_6 + ) + ground_truth_mc_6 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_6, + ) + self.assertEqual(dec_mc_6, ground_truth_mc_6) + + dec_7 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_azure_keyvault_kms": True, + "azure_keyvault_kms_key_id": key_id_1, + "azure_keyvault_kms_key_vault_network_access": "Public", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_7 = self.models.ManagedCluster( + location="test_location", + ) + dec_7.context.attach_mc(mc_7) + dec_mc_7 = dec_7.update_azure_keyvault_kms(mc_7) + + ground_truth_azure_keyvault_kms_profile_7 = self.models.AzureKeyVaultKms( + enabled=True, + key_id=key_id_1, + key_vault_network_access="Public", + key_vault_resource_id="", + ) + ground_truth_security_profile_7 = self.models.ManagedClusterSecurityProfile( + azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_7, + ) + ground_truth_mc_7 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_7, + ) + self.assertEqual(dec_mc_7, ground_truth_mc_7) + + def test_update_kms_infrastructure_encryption(self): + # test no change when no parameter provided + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + {}, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_kms_infrastructure_encryption(mc_1) + # no change 
expected + ground_truth_mc_1 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_1, ground_truth_mc_1) + + # test no change when Disabled + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "kms_infrastructure_encryption": "Disabled", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_2 = self.models.ManagedCluster(location="test_location") + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_kms_infrastructure_encryption(mc_2) + # no change expected + ground_truth_mc_2 = self.models.ManagedCluster(location="test_location") + self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # test with Enabled on new cluster + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "kms_infrastructure_encryption": "Enabled", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_3 = self.models.ManagedCluster(location="test_location") + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_kms_infrastructure_encryption(mc_3) + + # expected security profile with infrastructure encryption + ground_truth_kube_resource_encryption_profile_3 = self.models.KubernetesResourceObjectEncryptionProfile( + infrastructure_encryption="Enabled" + ) + ground_truth_security_profile_3 = self.models.ManagedClusterSecurityProfile( + kubernetes_resource_object_encryption_profile=ground_truth_kube_resource_encryption_profile_3, + ) + ground_truth_mc_3 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_3, + ) + self.assertEqual(dec_mc_3, ground_truth_mc_3) + + # test with Enabled on cluster with existing security profile + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "kms_infrastructure_encryption": "Enabled", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + existing_security_profile = self.models.ManagedClusterSecurityProfile() + mc_4 = self.models.ManagedCluster( + location="test_location", + security_profile=existing_security_profile, + ) + 
dec_4.context.attach_mc(mc_4) + dec_mc_4 = dec_4.update_kms_infrastructure_encryption(mc_4) + + # should add to existing security profile + ground_truth_kube_resource_encryption_profile_4 = self.models.KubernetesResourceObjectEncryptionProfile( + infrastructure_encryption="Enabled" + ) + ground_truth_security_profile_4 = self.models.ManagedClusterSecurityProfile( + kubernetes_resource_object_encryption_profile=ground_truth_kube_resource_encryption_profile_4, + ) + ground_truth_mc_4 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_4, + ) + self.assertEqual(dec_mc_4, ground_truth_mc_4) + + # test with Enabled on cluster with existing kubernetes_resource_object_encryption_profile + dec_5 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "kms_infrastructure_encryption": "Enabled", + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + existing_kube_encryption_profile = self.models.KubernetesResourceObjectEncryptionProfile() + existing_security_profile = self.models.ManagedClusterSecurityProfile( + kubernetes_resource_object_encryption_profile=existing_kube_encryption_profile, + ) + mc_5 = self.models.ManagedCluster( + location="test_location", + security_profile=existing_security_profile, + ) + dec_5.context.attach_mc(mc_5) + dec_mc_5 = dec_5.update_kms_infrastructure_encryption(mc_5) + + # should update existing profile + ground_truth_kube_resource_encryption_profile_5 = self.models.KubernetesResourceObjectEncryptionProfile( + infrastructure_encryption="Enabled" + ) + ground_truth_security_profile_5 = self.models.ManagedClusterSecurityProfile( + kubernetes_resource_object_encryption_profile=ground_truth_kube_resource_encryption_profile_5, + ) + ground_truth_mc_5 = self.models.ManagedCluster( + location="test_location", + security_profile=ground_truth_security_profile_5, + ) + self.assertEqual(dec_mc_5, ground_truth_mc_5) + + def test_update_workload_auto_scaler_profile(self): + # Throws exception when 
incorrect mc object is passed. + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + with self.assertRaisesRegex( + CLIInternalError, r"^Unexpected mc object with type ''\.$" + ): + dec_1.update_workload_auto_scaler_profile(None) + + # Throws exception when the mc object passed does not match the one in context. + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + with self.assertRaisesRegex( + CLIInternalError, + r"^Inconsistent state detected\. The incoming `mc` is not the same as the `mc` in the context\.$", + ): + mc_in = self.models.ManagedCluster(location="test_location") + dec_2.update_workload_auto_scaler_profile(mc_in) + + # Leaves profile as None without raw parameters. + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + mc_in = self.models.ManagedCluster(location="test_location") + dec_3.context.attach_mc(mc_in) + mc_out = dec_3.update_workload_auto_scaler_profile(mc_in) + self.assertEqual(mc_out, mc_in) + self.assertIsNone(mc_out.workload_auto_scaler_profile) + + # Leaves existing profile untouched without raw parameters. + dec_4 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + ) + profile = self.models.ManagedClusterWorkloadAutoScalerProfile( + keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True) + ) + mc_in = self.models.ManagedCluster( + location="test_location", workload_auto_scaler_profile=profile + ) + dec_4.context.attach_mc(mc_in) + mc_out = dec_4.update_workload_auto_scaler_profile(mc_in) + self.assertEqual(mc_out, mc_in) + self.assertEqual(mc_out.workload_auto_scaler_profile, profile) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) + self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) + + # Enables keda when enable_keda is True. 
+ dec_5 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW + ) + mc_in = self.models.ManagedCluster(location="test_location") + dec_5.context.attach_mc(mc_in) + mc_out = dec_5.update_workload_auto_scaler_profile(mc_in) + self.assertEqual(mc_out, mc_in) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) + self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) + + # Enables keda in existing profile when enable_keda is True. + dec_6 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW + ) + profile = self.models.ManagedClusterWorkloadAutoScalerProfile( + keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=False) + ) + mc_in = self.models.ManagedCluster( + location="test_location", workload_auto_scaler_profile=profile + ) + dec_6.context.attach_mc(mc_in) + mc_out = dec_6.update_workload_auto_scaler_profile(mc_in) + self.assertEqual(mc_out, mc_in) + self.assertEqual(mc_out.workload_auto_scaler_profile, profile) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) + self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) + + # Disables keda when disable_keda is True. 
+ dec_7 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {"disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW + ) + mc_in = self.models.ManagedCluster(location="test_location") + dec_7.context.attach_mc(mc_in) + mc_out = dec_7.update_workload_auto_scaler_profile(mc_in) + self.assertEqual(mc_out, mc_in) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) + self.assertFalse(mc_out.workload_auto_scaler_profile.keda.enabled) - ground_truth_image_cleaner_profile_2 = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=True, - interval_hours=24, - ) + # Disables keda in existing profile when disable_keda is True. + dec_8 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, self.client, {"disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW ) - ground_truth_security_profile_2 = self.models.ManagedClusterSecurityProfile( - image_cleaner=ground_truth_image_cleaner_profile_2, + profile = self.models.ManagedClusterWorkloadAutoScalerProfile( + keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True) ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - security_profile=ground_truth_security_profile_2, + mc_in = self.models.ManagedCluster( + location="test_location", workload_auto_scaler_profile=profile ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + dec_8.context.attach_mc(mc_in) + mc_out = dec_8.update_workload_auto_scaler_profile(mc_in) + self.assertEqual(mc_out, mc_in) + self.assertEqual(mc_out.workload_auto_scaler_profile, profile) + self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) + self.assertFalse(mc_out.workload_auto_scaler_profile.keda.enabled) - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + # Throws exception when both enable_keda and disable_keda are True. 
+ dec_9 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_image_cleaner": True, - }, + {"enable_keda": True, "disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.image_cleaner = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=False, - interval_hours=25, - ) - ) - mc_3 = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_image_cleaner(mc_3) - - ground_truth_image_cleaner_profile_3 = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=True, - interval_hours=25, - ) - ) - ground_truth_security_profile_3 = self.models.ManagedClusterSecurityProfile( - image_cleaner=ground_truth_image_cleaner_profile_3, - ) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - security_profile=ground_truth_security_profile_3, - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) + mc_in = self.models.ManagedCluster(location="test_location") + dec_9.context.attach_mc(mc_in) + with self.assertRaises(MutuallyExclusiveArgumentError): + mc_out = dec_9.update_workload_auto_scaler_profile(mc_in) - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_defender(self): + # enable + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_image_cleaner": True, + "enable_defender": True, + "defender_config": get_test_data_file_path("defenderconfig.json"), }, CUSTOM_MGMT_AKS_PREVIEW, ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.image_cleaner = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=True, - interval_hours=25, - ) - ) - mc_4 = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_image_cleaner(mc_4) + mc_1 = 
self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_1.context.set_intermediate("subscription_id", "test_subscription_id") - ground_truth_image_cleaner_profile_4 = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=False, - interval_hours=25, - ) - ) - ground_truth_security_profile_4 = self.models.ManagedClusterSecurityProfile( - image_cleaner=ground_truth_image_cleaner_profile_4, - ) - ground_truth_mc_4 = self.models.ManagedCluster( + dec_mc_1 = dec_1.update_defender(mc_1) + + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_4, + security_profile=self.models.ManagedClusterSecurityProfile( + defender=self.models.ManagedClusterSecurityProfileDefender( + log_analytics_workspace_resource_id="test_workspace_resource_id", + security_monitoring=self.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring( + enabled=True + ), + ) + ), ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + # disable + dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_image_cleaner": True, - "image_cleaner_interval_hours": 24, - }, + {"disable_defender": True}, CUSTOM_MGMT_AKS_PREVIEW, ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.image_cleaner = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=False, - interval_hours=25, - ) - ) - mc_5 = self.models.ManagedCluster( + mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=security_profile, + security_profile=self.models.ManagedClusterSecurityProfile( + defender=self.models.ManagedClusterSecurityProfileDefender( + log_analytics_workspace_resource_id="test_workspace_resource_id", + security_monitoring=self.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring( + enabled=True + ), + ) + 
), ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_image_cleaner(mc_5) + dec_2.context.attach_mc(mc_2) + dec_2.context.set_intermediate("subscription_id", "test_subscription_id") - ground_truth_image_cleaner_profile_5 = ( - self.models.ManagedClusterSecurityProfileImageCleaner( - enabled=True, - interval_hours=24, - ) - ) - ground_truth_security_profile_5 = self.models.ManagedClusterSecurityProfile( - image_cleaner=ground_truth_image_cleaner_profile_5, - ) - ground_truth_mc_5 = self.models.ManagedCluster( + dec_mc_2 = dec_2.update_defender(mc_2) + + ground_truth_mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_5, + security_profile=self.models.ManagedClusterSecurityProfile( + defender=self.models.ManagedClusterSecurityProfileDefender( + security_monitoring=self.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring( + enabled=False + ), + ) + ), ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) + self.assertEqual(dec_mc_2, ground_truth_mc_2) - def test_update_azure_keyvault_kms(self): + def test_update_custom_ca_certificates(self): + # set to non-empty dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "custom_ca_trust_certificates": get_test_data_file_path("certs.txt"), + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster( - location="test_location", - ) + mc_1 = self.models.ManagedCluster(location="test_location") dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_azure_keyvault_kms(mc_1) + dec_1.context.set_intermediate("subscription_id", "test_subscription_id") + + dec_mc_1 = dec_1.update_custom_ca_trust_certificates(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", + security_profile=self.models.ManagedClusterSecurityProfile( + custom_ca_trust_certificates=[ + str.encode(CONST_CUSTOM_CA_TEST_CERT) for _ in range(2) + ] + ), ) self.assertEqual(dec_mc_1, ground_truth_mc_1) - key_id_1 = ( - 
"https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" - ) + # set to empty dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", - }, + {"custom_ca_trust_certificates": None}, CUSTOM_MGMT_AKS_PREVIEW, ) mc_2 = self.models.ManagedCluster( location="test_location", + security_profile=self.models.ManagedClusterSecurityProfile( + custom_ca_trust_certificates=None + ), ) dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_azure_keyvault_kms(mc_2) + dec_2.context.set_intermediate("subscription_id", "test_subscription_id") + + dec_mc_2 = dec_2.update_custom_ca_trust_certificates(mc_2) - ground_truth_azure_keyvault_kms_profile_2 = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - key_vault_network_access="Private", - key_vault_resource_id="/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", - ) - ground_truth_security_profile_2 = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_2, - ) ground_truth_mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_2, + security_profile=self.models.ManagedClusterSecurityProfile( + custom_ca_trust_certificates=None + ), ) self.assertEqual(dec_mc_2, ground_truth_mc_2) - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_run_command(self): + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_azure_keyvault_kms": True, + "disable_run_command": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - azure_keyvault_kms_profile_5 = self.models.AzureKeyVaultKms( - enabled=True, - 
key_id=key_id_1, - key_vault_network_access="Public", - ) - security_profile_5 = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=azure_keyvault_kms_profile_5, - ) - mc_5 = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile_5, - ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_azure_keyvault_kms(mc_5) + mc_1 = self.models.ManagedCluster(location="test_location") + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_run_command(mc_1) - ground_truth_azure_keyvault_kms_profile_5 = self.models.AzureKeyVaultKms( - enabled=False, - key_id=key_id_1, - key_vault_network_access="Public", - ) - ground_truth_security_profile_5 = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_5, - ) - ground_truth_mc_5 = self.models.ManagedCluster( + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_5, + api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( + disable_run_command=True + ) ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - dec_6 = AKSPreviewManagedClusterUpdateDecorator( + dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_azure_keyvault_kms": True, + "enable_run_command": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_6 = self.models.ManagedCluster( + mc_2 = self.models.ManagedCluster( location="test_location", + api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( + disable_run_command=True + ) ) - dec_6.context.attach_mc(mc_6) - dec_mc_6 = dec_6.update_azure_keyvault_kms(mc_6) - - ground_truth_azure_keyvault_kms_profile_6 = self.models.AzureKeyVaultKms() - ground_truth_azure_keyvault_kms_profile_6.enabled = False - ground_truth_security_profile_6 = self.models.ManagedClusterSecurityProfile() - ground_truth_security_profile_6.azure_key_vault_kms = ( - 
ground_truth_azure_keyvault_kms_profile_6 - ) - ground_truth_mc_6 = self.models.ManagedCluster( + dec_2.context.attach_mc(mc_2) + dec_mc_2 = dec_2.update_run_command(mc_2) + ground_truth_mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_6, + api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( + disable_run_command=False + ) ) - self.assertEqual(dec_mc_6, ground_truth_mc_6) + self.assertEqual(dec_mc_2, ground_truth_mc_2) - dec_7 = AKSPreviewManagedClusterUpdateDecorator( + dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - "azure_keyvault_kms_key_vault_network_access": "Public", + "enable_run_command": False, + "disable_run_command": False, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_7 = self.models.ManagedCluster( + mc_3 = self.models.ManagedCluster( location="test_location", + api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( + disable_run_command=True + ) ) - dec_7.context.attach_mc(mc_7) - dec_mc_7 = dec_7.update_azure_keyvault_kms(mc_7) - - ground_truth_azure_keyvault_kms_profile_7 = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - key_vault_network_access="Public", - key_vault_resource_id="", - ) - ground_truth_security_profile_7 = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_7, - ) - ground_truth_mc_7 = self.models.ManagedCluster( + dec_3.context.attach_mc(mc_3) + dec_mc_3 = dec_3.update_run_command(mc_3) + ground_truth_mc_3 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_7, + api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( + disable_run_command=True + ) ) - self.assertEqual(dec_mc_7, ground_truth_mc_7) + self.assertEqual(dec_mc_3, ground_truth_mc_3) - def 
test_update_kms_infrastructure_encryption(self): - # test no change when no parameter provided + def test_update_vpa(self): dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") + mc_1 = self.models.ManagedCluster( + location="test_location", + ) dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_kms_infrastructure_encryption(mc_1) - # no change expected - ground_truth_mc_1 = self.models.ManagedCluster(location="test_location") + dec_mc_1 = dec_1.update_vpa(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( + location="test_location", + ) self.assertEqual(dec_mc_1, ground_truth_mc_1) - # test no change when Disabled dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "kms_infrastructure_encryption": "Disabled", - }, + {"enable_vpa": True}, CUSTOM_MGMT_AKS_PREVIEW, ) mc_2 = self.models.ManagedCluster(location="test_location") dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_kms_infrastructure_encryption(mc_2) - # no change expected - ground_truth_mc_2 = self.models.ManagedCluster(location="test_location") + dec_mc_2 = dec_2.update_vpa(mc_2) + + ground_truth_mc_2 = self.models.ManagedCluster( + location="test_location", + workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( + vertical_pod_autoscaler=self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler( + enabled=True, + ) + ), + ) self.assertEqual(dec_mc_2, ground_truth_mc_2) - # test with Enabled on new cluster dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "kms_infrastructure_encryption": "Enabled", + "disable_vpa": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_3 = self.models.ManagedCluster(location="test_location") dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_kms_infrastructure_encryption(mc_3) + dec_mc_3 = dec_3.update_vpa(mc_3) - # expected security profile with infrastructure 
encryption - ground_truth_kube_resource_encryption_profile_3 = self.models.KubernetesResourceObjectEncryptionProfile( - infrastructure_encryption="Enabled" - ) - ground_truth_security_profile_3 = self.models.ManagedClusterSecurityProfile( - kubernetes_resource_object_encryption_profile=ground_truth_kube_resource_encryption_profile_3, - ) ground_truth_mc_3 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_3, + workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( + vertical_pod_autoscaler=self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler( + enabled=False, + ) + ), ) self.assertEqual(dec_mc_3, ground_truth_mc_3) - # test with Enabled on cluster with existing security profile - dec_4 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_azure_monitor_profile(self): + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - { - "kms_infrastructure_encryption": "Enabled", - }, + {}, CUSTOM_MGMT_AKS_PREVIEW, ) - existing_security_profile = self.models.ManagedClusterSecurityProfile() - mc_4 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=existing_security_profile, - ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_kms_infrastructure_encryption(mc_4) - - # should add to existing security profile - ground_truth_kube_resource_encryption_profile_4 = self.models.KubernetesResourceObjectEncryptionProfile( - infrastructure_encryption="Enabled" ) - ground_truth_security_profile_4 = self.models.ManagedClusterSecurityProfile( - kubernetes_resource_object_encryption_profile=ground_truth_kube_resource_encryption_profile_4, - ) - ground_truth_mc_4 = self.models.ManagedCluster( + dec_1.context.attach_mc(mc_1) + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + ground_truth_mc_1 = self.models.ManagedCluster( location="test_location", - 
security_profile=ground_truth_security_profile_4, ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) + self.assertEqual(dec_mc_1, ground_truth_mc_1) - # test with Enabled on cluster with existing kubernetes_resource_object_encryption_profile - dec_5 = AKSPreviewManagedClusterUpdateDecorator( + def test_update_enable_azure_monitor_logs(self): + # Test enabling Azure Monitor logs when not currently enabled + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "kms_infrastructure_encryption": "Enabled", + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", }, CUSTOM_MGMT_AKS_PREVIEW, ) - existing_kube_encryption_profile = self.models.KubernetesResourceObjectEncryptionProfile() - existing_security_profile = self.models.ManagedClusterSecurityProfile( - kubernetes_resource_object_encryption_profile=existing_kube_encryption_profile, - ) - mc_5 = self.models.ManagedCluster( - location="test_location", - security_profile=existing_security_profile, - ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_kms_infrastructure_encryption(mc_5) - - # should update existing profile - ground_truth_kube_resource_encryption_profile_5 = self.models.KubernetesResourceObjectEncryptionProfile( - infrastructure_encryption="Enabled" - ) - ground_truth_security_profile_5 = self.models.ManagedClusterSecurityProfile( - kubernetes_resource_object_encryption_profile=ground_truth_kube_resource_encryption_profile_5, - ) - ground_truth_mc_5 = self.models.ManagedCluster( + # Create MC without monitoring addon enabled + mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=ground_truth_security_profile_5, + addon_profiles={}, ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) - - def test_update_workload_auto_scaler_profile(self): - # Throws exception when incorrect mc object is passed. 
- dec_1 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW + dec_1.context.attach_mc(mc_1) + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Mock external functions + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_1 = dec_1.update_addon_profiles(mc_1) + + # Verify monitoring addon is enabled + self.assertIn(CONST_MONITORING_ADDON_NAME, dec_mc_1.addon_profiles) + self.assertTrue(dec_mc_1.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + self.assertEqual( + dec_mc_1.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID], + "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" ) - with self.assertRaisesRegex( - CLIInternalError, r"^Unexpected mc object with type ''\.$" - ): - dec_1.update_workload_auto_scaler_profile(None) - # Throws exception when the mc object passed does not match the one in context. + # Test enabling Azure Monitor logs when already enabled (should be idempotent) dec_2 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - with self.assertRaisesRegex( - CLIInternalError, - r"^Inconsistent state detected\. The incoming `mc` is not the same as the `mc` in the context\.$", - ): - mc_in = self.models.ManagedCluster(location="test_location") - dec_2.update_workload_auto_scaler_profile(mc_in) - - # Leaves profile as None without raw parameters. 
- dec_3 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc_in = self.models.ManagedCluster(location="test_location") - dec_3.context.attach_mc(mc_in) - mc_out = dec_3.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNone(mc_out.workload_auto_scaler_profile) - - # Leaves existing profile untouched without raw parameters. - dec_4 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - profile = self.models.ManagedClusterWorkloadAutoScalerProfile( - keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True) - ) - mc_in = self.models.ManagedCluster( - location="test_location", workload_auto_scaler_profile=profile - ) - dec_4.context.attach_mc(mc_in) - mc_out = dec_4.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertEqual(mc_out.workload_auto_scaler_profile, profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Enables keda when enable_keda is True. - dec_5 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc_in = self.models.ManagedCluster(location="test_location") - dec_5.context.attach_mc(mc_in) - mc_out = dec_5.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Enables keda in existing profile when enable_keda is True. 
- dec_6 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW - ) - profile = self.models.ManagedClusterWorkloadAutoScalerProfile( - keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=False) - ) - mc_in = self.models.ManagedCluster( - location="test_location", workload_auto_scaler_profile=profile + self.cmd, + self.client, + { + "enable_azure_monitor_logs": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, ) - dec_6.context.attach_mc(mc_in) - mc_out = dec_6.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertEqual(mc_out.workload_auto_scaler_profile, profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Disables keda when disable_keda is True. - dec_7 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {"disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW + # Create MC with monitoring addon already enabled + mc_2 = self.models.ManagedCluster( + location="test_location", + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, ) - mc_in = self.models.ManagedCluster(location="test_location") - dec_7.context.attach_mc(mc_in) - mc_out = dec_7.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertFalse(mc_out.workload_auto_scaler_profile.keda.enabled) + dec_2.context.attach_mc(mc_2) + + # Should succeed when trying to enable already enabled monitoring (idempotent) + result = dec_2.context.get_enable_azure_monitor_logs() + self.assertTrue(result) - # Disables keda in existing profile when disable_keda is True. 
- dec_8 = AKSPreviewManagedClusterUpdateDecorator( - self.cmd, self.client, {"disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW - ) - profile = self.models.ManagedClusterWorkloadAutoScalerProfile( - keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True) + # Test enabling with OpenTelemetry logs integration + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + "enable_opentelemetry_logs": True, + "opentelemetry_logs_port": 8080, + }, + CUSTOM_MGMT_AKS_PREVIEW, ) - mc_in = self.models.ManagedCluster( - location="test_location", workload_auto_scaler_profile=profile + mc_3 = self.models.ManagedCluster( + location="test_location", + addon_profiles={}, ) - dec_8.context.attach_mc(mc_in) - mc_out = dec_8.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertEqual(mc_out.workload_auto_scaler_profile, profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertFalse(mc_out.workload_auto_scaler_profile.keda.enabled) + dec_3.context.attach_mc(mc_3) + dec_3.context.set_intermediate("subscription_id", "test-subscription-id") + + # First update addon profiles + external_functions = dec_3.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_3 = dec_3.update_addon_profiles(mc_3) + + # Then update Azure Monitor profile with OpenTelemetry + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'): + dec_mc_3 = 
dec_3.update_azure_monitor_profile(dec_mc_3) + + # Verify monitoring addon is enabled + self.assertIn(CONST_MONITORING_ADDON_NAME, dec_mc_3.addon_profiles) + self.assertTrue(dec_mc_3.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + + # Verify OpenTelemetry logs are configured + if dec_mc_3.azure_monitor_profile and dec_mc_3.azure_monitor_profile.app_monitoring: + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8080) - # Throws exception when both enable_keda and disable_keda are True. - dec_9 = AKSPreviewManagedClusterUpdateDecorator( + # Test with MSI auth enabled + dec_4 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"enable_keda": True, "disable_keda": True}, + { + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + "enable_msi_auth_for_monitoring": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_in = self.models.ManagedCluster(location="test_location") - dec_9.context.attach_mc(mc_in) - with self.assertRaises(MutuallyExclusiveArgumentError): - mc_out = dec_9.update_workload_auto_scaler_profile(mc_in) + mc_4 = self.models.ManagedCluster( + location="test_location", + addon_profiles={}, + ) + dec_4.context.attach_mc(mc_4) + dec_4.context.set_intermediate("subscription_id", "test-subscription-id") + + external_functions = dec_4.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): + dec_mc_4 = dec_4.update_addon_profiles(mc_4) + + # Verify MSI auth is enabled + self.assertIn(CONST_MONITORING_ADDON_NAME, dec_mc_4.addon_profiles) + self.assertTrue(dec_mc_4.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + self.assertEqual( + 
dec_mc_4.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH], + "true" + ) - def test_update_defender(self): - # enable + def test_update_disable_azure_monitor_logs(self): + # Test disabling Azure Monitor logs when currently enabled dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_defender": True, - "defender_config": get_test_data_file_path("defenderconfig.json"), + "disable_azure_monitor_logs": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_1.context.set_intermediate("subscription_id", "test_subscription_id") - - dec_mc_1 = dec_1.update_defender(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( + # Create MC with monitoring addon enabled + mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=self.models.ManagedClusterSecurityProfile( - defender=self.models.ManagedClusterSecurityProfileDefender( - log_analytics_workspace_resource_id="test_workspace_resource_id", - security_monitoring=self.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring( - enabled=True - ), + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" + } ) - ), + }, ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + dec_1.context.attach_mc(mc_1) + + external_functions = dec_1.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None), \ + patch("azext_aks_preview.managed_cluster_decorator.prompt_y_n", return_value=True): + dec_mc_1 = dec_1.update_addon_profiles(mc_1) + + # Verify monitoring addon is disabled + self.assertIn(CONST_MONITORING_ADDON_NAME, dec_mc_1.addon_profiles) + 
self.assertFalse(dec_mc_1.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) - # disable + # Test disabling Azure Monitor logs when not enabled (should be idempotent) dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"disable_defender": True}, + { + "disable_azure_monitor_logs": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) + # Create MC without monitoring addon enabled mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=self.models.ManagedClusterSecurityProfile( - defender=self.models.ManagedClusterSecurityProfileDefender( - log_analytics_workspace_resource_id="test_workspace_resource_id", - security_monitoring=self.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring( - enabled=True - ), - ) - ), + addon_profiles={}, ) dec_2.context.attach_mc(mc_2) - dec_2.context.set_intermediate("subscription_id", "test_subscription_id") - - dec_mc_2 = dec_2.update_defender(mc_2) + + # Should succeed even when monitoring is not enabled (idempotent operation) + result = dec_2.context.get_disable_azure_monitor_logs() + self.assertTrue(result) - ground_truth_mc_2 = self.models.ManagedCluster( + # Test disabling with existing Azure Monitor profile cleanup + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_monitor_logs": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + # Create MC with monitoring addon and Azure Monitor profile enabled + mc_3 = self.models.ManagedCluster( location="test_location", - security_profile=self.models.ManagedClusterSecurityProfile( - defender=self.models.ManagedClusterSecurityProfileDefender( - security_monitoring=self.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring( - enabled=False - ), + addon_profiles={ + CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( + enabled=True, + ) + }, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + 
app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8080, + ) ) ), ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + dec_3.context.attach_mc(mc_3) + + # First update addon profiles to disable monitoring + external_functions = dec_3.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None), \ + patch("azext_aks_preview.managed_cluster_decorator.prompt_y_n", return_value=True): + dec_mc_3 = dec_3.update_addon_profiles(mc_3) + + # Then update Azure Monitor profile + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'): + dec_mc_3 = dec_3.update_azure_monitor_profile(dec_mc_3) + + # Verify monitoring addon is disabled + self.assertFalse(dec_mc_3.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled) + + # Verify OpenTelemetry logs are also disabled in Azure Monitor profile + if dec_mc_3.azure_monitor_profile and dec_mc_3.azure_monitor_profile.app_monitoring: + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertFalse(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) - def test_update_custom_ca_certificates(self): - # set to non-empty + def test_update_enable_azure_monitor_metrics(self): + # Test enabling Azure Monitor metrics when not currently enabled dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "custom_ca_trust_certificates": get_test_data_file_path("certs.txt"), + "enable_azure_monitor_metrics": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = 
self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_1.context.set_intermediate("subscription_id", "test_subscription_id") - - dec_mc_1 = dec_1.update_custom_ca_trust_certificates(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( + mc_1 = self.models.ManagedCluster( location="test_location", - security_profile=self.models.ManagedClusterSecurityProfile( - custom_ca_trust_certificates=[ - str.encode(CONST_CUSTOM_CA_TEST_CERT) for _ in range(2) - ] - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + dec_1.context.attach_mc(mc_1) + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_1.context, 'get_name', return_value='test-cluster'): + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + + # Verify Azure Monitor metrics are enabled + self.assertIsNotNone(dec_mc_1.azure_monitor_profile) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.metrics) + self.assertTrue(dec_mc_1.azure_monitor_profile.metrics.enabled) - # set to empty + # Test enabling when already enabled (should be idempotent) dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"custom_ca_trust_certificates": None}, + { + "enable_azure_monitor_metrics": True, + }, CUSTOM_MGMT_AKS_PREVIEW, ) mc_2 = self.models.ManagedCluster( location="test_location", - security_profile=self.models.ManagedClusterSecurityProfile( - custom_ca_trust_certificates=None + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + 
metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True, + ) ), ) dec_2.context.attach_mc(mc_2) - dec_2.context.set_intermediate("subscription_id", "test_subscription_id") - - dec_mc_2 = dec_2.update_custom_ca_trust_certificates(mc_2) + dec_2.context.set_intermediate("subscription_id", "test-subscription-id") + + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_2.context, 'get_name', return_value='test-cluster'): + dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) + + # Should remain enabled + self.assertTrue(dec_mc_2.azure_monitor_profile.metrics.enabled) - ground_truth_mc_2 = self.models.ManagedCluster( + # Test enabling with Windows recording rules + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_azure_monitor_metrics": True, + "enable_windows_recording_rules": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + mc_3 = self.models.ManagedCluster( location="test_location", - security_profile=self.models.ManagedClusterSecurityProfile( - custom_ca_trust_certificates=None - ), + identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_update_run_command(self): + dec_3.context.attach_mc(mc_3) + dec_3.context.set_intermediate("subscription_id", "test-subscription-id") + + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'): + dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) + + # Verify Azure 
Monitor metrics and Windows recording rules are enabled + self.assertIsNotNone(dec_mc_3.azure_monitor_profile) + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.metrics) + self.assertTrue(dec_mc_3.azure_monitor_profile.metrics.enabled) + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.metrics.kube_state_metrics) + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.metrics.kube_state_metrics.metric_annotations_allow_list) + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.metrics.kube_state_metrics.metric_labels_allowlist) + + def test_update_disable_azure_monitor_metrics(self): + # Test disabling Azure Monitor metrics when currently enabled dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_run_command": True, + "disable_azure_monitor_metrics": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_run_command(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( + # Create MC with Azure Monitor metrics enabled + mc_1 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( - disable_run_command=True - ) + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True, + ) + ), ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + dec_1.context.attach_mc(mc_1) + + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_1.context, 'get_name', return_value='test-cluster'), \ + patch("azext_aks_preview.managed_cluster_decorator.prompt_y_n", return_value=True): + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + + # 
Verify Azure Monitor metrics are disabled + self.assertIsNotNone(dec_mc_1.azure_monitor_profile) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.metrics) + self.assertFalse(dec_mc_1.azure_monitor_profile.metrics.enabled) + # Test disabling when not enabled (should be idempotent) dec_2 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_run_command": True, + "disable_azure_monitor_metrics": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) + # Create MC without Azure Monitor metrics mc_2 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( - disable_run_command=True - ) ) dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_run_command(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( - disable_run_command=False - ) - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + + # Should succeed even when metrics are not enabled (idempotent operation) + result = dec_2.context.get_disable_azure_monitor_metrics() + self.assertTrue(result) + # Test disabling with OpenTelemetry metrics integration dec_3 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "enable_run_command": False, - "disable_run_command": False, + "disable_azure_monitor_metrics": True, }, CUSTOM_MGMT_AKS_PREVIEW, ) + # Create MC with both Azure Monitor metrics and OpenTelemetry metrics enabled mc_3 = self.models.ManagedCluster( location="test_location", - api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( - disable_run_command=True - ) + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True, + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + 
open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080, + ) + ) + ), ) dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_run_command(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=self.models.ManagedClusterAPIServerAccessProfile( - disable_run_command=True - ) - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - def test_update_vpa(self): + + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription-id'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ + patch("azext_aks_preview.managed_cluster_decorator.prompt_y_n", return_value=True): + dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) + + # Verify Azure Monitor metrics are disabled but OpenTelemetry metrics configuration is preserved + self.assertIsNotNone(dec_mc_3.azure_monitor_profile) + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.metrics) + self.assertFalse(dec_mc_3.azure_monitor_profile.metrics.enabled) + # OpenTelemetry metrics should still be configured but may be disabled depending on implementation + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + + def test_setup_azure_monitor_logs_with_omsagent_camelcase(self): + # Test that _setup_azure_monitor_logs handles existing omsAgent (camelCase) correctly + # This simulates what Azure API returns dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + }, CUSTOM_MGMT_AKS_PREVIEW, ) + + # Create MC with omsAgent (camelCase) - 
as Azure API returns it mc_1 = self.models.ManagedCluster( location="test_location", + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/old/workspace", + "useAADAuth": "true" + } + ) + } ) dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_vpa(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Call _setup_azure_monitor_logs + dec_1._setup_azure_monitor_logs(mc_1) + + # Verify: Should update existing omsAgent key (not create omsagent lowercase) + self.assertIn("omsAgent", mc_1.addon_profiles) + self.assertNotIn("omsagent", mc_1.addon_profiles) # Should NOT create duplicate + self.assertTrue(mc_1.addon_profiles["omsAgent"].enabled) + self.assertEqual( + mc_1.addon_profiles["omsAgent"].config["logAnalyticsWorkspaceResourceID"], + "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - dec_2 = AKSPreviewManagedClusterUpdateDecorator( + def test_setup_azure_monitor_logs_with_omsagent_lowercase(self): + # Test that _setup_azure_monitor_logs handles existing omsagent (lowercase) correctly + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {"enable_vpa": True}, + { + "enable_azure_monitor_logs": True, + "workspace_resource_id": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace", + }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_vpa(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( + # Create MC with omsagent (lowercase) - less common but should still work + mc_1 = self.models.ManagedCluster( location="test_location", - 
workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( - vertical_pod_autoscaler=self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler( + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/old/workspace", + "useAADAuth": "true" + } ) - ), + } ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) + dec_1.context.attach_mc(mc_1) + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Call _setup_azure_monitor_logs + dec_1._setup_azure_monitor_logs(mc_1) + + # Verify: Should update existing omsagent key + self.assertIn("omsagent", mc_1.addon_profiles) + self.assertNotIn("omsAgent", mc_1.addon_profiles) # Should NOT create CamelCase variant + self.assertTrue(mc_1.addon_profiles["omsagent"].enabled) - dec_3 = AKSPreviewManagedClusterUpdateDecorator( + def test_disable_azure_monitor_logs_with_omsagent_camelcase(self): + # Test that _disable_azure_monitor_logs handles omsAgent (camelCase) correctly + dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, { - "disable_vpa": True, + "disable_azure_monitor_logs": True, + "yes": True, # Skip confirmation prompt }, CUSTOM_MGMT_AKS_PREVIEW, ) - mc_3 = self.models.ManagedCluster(location="test_location") - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_vpa(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( + # Create MC with omsAgent (camelCase) enabled + mc_1 = self.models.ManagedCluster( location="test_location", - workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( - vertical_pod_autoscaler=self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler( - enabled=False, + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + "useAADAuth": "false" # Non-MSI auth to skip DCR cleanup + } ) - ), + 
} ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) + dec_1.context.attach_mc(mc_1) + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Call _disable_azure_monitor_logs + dec_1._disable_azure_monitor_logs(mc_1) + + # Verify: omsAgent should be disabled + self.assertIn("omsAgent", mc_1.addon_profiles) + self.assertFalse(mc_1.addon_profiles["omsAgent"].enabled) + self.assertIsNone(mc_1.addon_profiles["omsAgent"].config) - def test_update_azure_monitor_profile(self): + def test_disable_azure_monitor_logs_with_omsagent_lowercase(self): + # Test that _disable_azure_monitor_logs handles omsagent (lowercase) correctly dec_1 = AKSPreviewManagedClusterUpdateDecorator( self.cmd, self.client, - {}, + { + "disable_azure_monitor_logs": True, + "yes": True, # Skip confirmation prompt + }, CUSTOM_MGMT_AKS_PREVIEW, ) + + # Create MC with omsagent (lowercase) mc_1 = self.models.ManagedCluster( location="test_location", + addon_profiles={ + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace", + "useAADAuth": "false" + } + ) + } ) dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( + dec_1.context.set_intermediate("subscription_id", "test-subscription-id") + + # Call _disable_azure_monitor_logs + dec_1._disable_azure_monitor_logs(mc_1) + + # Verify: omsagent should be disabled + self.assertIn("omsagent", mc_1.addon_profiles) + self.assertFalse(mc_1.addon_profiles["omsagent"].enabled) + self.assertIsNone(mc_1.addon_profiles["omsagent"].config) + + def test_get_enable_opentelemetry_logs_validation_with_omsagent_camelcase(self): + # Test that OpenTelemetry logs validation recognizes omsAgent (camelCase) as enabled + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict( + { + "enable_opentelemetry_logs": True, + } + ), + self.models, + 
decorator_mode=DecoratorMode.UPDATE, + ) + + # Create MC with omsAgent (camelCase) already enabled + mc = self.models.ManagedCluster( location="test_location", + addon_profiles={ + "omsAgent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={"logAnalyticsWorkspaceResourceID": "/subscriptions/test/workspace"} + ) + } ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) + ctx_1.attach_mc(mc) + + # Should succeed - validation should find omsAgent enabled + result = ctx_1.get_enable_opentelemetry_logs() + self.assertTrue(result) + + def test_get_enable_opentelemetry_logs_validation_with_container_insights(self): + # Test that OpenTelemetry logs validation recognizes containerInsights in azureMonitorProfile + ctx_1 = AKSPreviewManagedClusterContext( + self.cmd, + AKSManagedClusterParamDict( + { + "enable_opentelemetry_logs": True, + } + ), + self.models, + decorator_mode=DecoratorMode.UPDATE, + ) + + # Create MC with containerInsights enabled (new API) + mc = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True, + log_analytics_workspace_resource_id="/subscriptions/test/workspace" + ) + ) + ) + ctx_1.attach_mc(mc) + + # Should succeed - validation should find containerInsights enabled + result = ctx_1.get_enable_opentelemetry_logs() + self.assertTrue(result) def test_update_linux_profile(self): dec_1 = AKSPreviewManagedClusterUpdateDecorator( @@ -9776,10 +10991,8 @@ def test_enable_retina_network_flow_logs(self): ) dec_4.context.set_intermediate("subscription_id", "test_subscription_id") dec_4.context.attach_mc(mc_4) - with patch( - "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): + external_functions = dec_4.context.external_functions + with patch.object(external_functions, 
'ensure_container_insights_for_monitoring', return_value=None): dec_mc_4 = dec_4.set_up_addon_profiles(mc_4) ground_truth_mc_4 = { CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( @@ -9822,10 +11035,8 @@ def test_enable_retina_network_flow_logs(self): dec_5.context.set_intermediate("subscription_id", "test_subscription_id") dec_5.context.attach_mc(mc_5) with self.assertRaises(InvalidArgumentValueError): - with patch( - "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): + external_functions = dec_5.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): dec_5.set_up_addon_profiles(mc_5) # Case 6: enable_monitoring addon with retina_network_flow_logs, but acns is not enabled @@ -9857,10 +11068,8 @@ def test_enable_retina_network_flow_logs(self): dec_6.context.set_intermediate("subscription_id", "test_subscription_id") dec_6.context.attach_mc(mc_6) with self.assertRaises(InvalidArgumentValueError): - with patch( - "azure.cli.command_modules.acs.managed_cluster_decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): + external_functions = dec_6.context.external_functions + with patch.object(external_functions, 'ensure_container_insights_for_monitoring', return_value=None): dec_6.set_up_addon_profiles(mc_6) def test_update_node_provisioning_profile(self): @@ -10349,6 +11558,340 @@ def test_update_upstream_kubescheduler_user_configuration(self): ) self.assertEqual(dec_mc_5, ground_truth_mc_5) + def test_update_azure_monitor_profile_with_opentelemetry_metrics(self): + # Test enabling OpenTelemetry metrics on update + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_opentelemetry_metrics": True, + "opentelemetry_metrics_port": 8080, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with Azure Monitor metrics enabled + mc_1 = 
self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True + ) + ) + ) + dec_1.context.attach_mc(mc_1) + + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_1.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_1.context, 'get_location', return_value='test-location'): + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + + # Verify OpenTelemetry metrics configuration is updated + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 8080) + + # Test disabling OpenTelemetry metrics on update + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_opentelemetry_metrics": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry metrics enabled + mc_2 = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080 + ) + ) + ) + ) + dec_2.context.attach_mc(mc_2) + + # Mock authentication-related functions for 
second test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_2.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_2.context, 'get_location', return_value='test-location'): + dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) + + # Verify OpenTelemetry metrics is disabled + self.assertIsNotNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertFalse(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertIsNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port) + + # Test standalone port update for OpenTelemetry metrics (without enable/disable flags) + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "opentelemetry_metrics_port": 9090, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry metrics already enabled + mc_3 = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + metrics=self.models.ManagedClusterAzureMonitorProfileMetrics( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080 # Original port + ) + ) + ) + ) + dec_3.context.attach_mc(mc_3) + + # Mock authentication-related functions for third test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + 
patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_3.context, 'get_location', return_value='test-location'): + dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) + + # Verify OpenTelemetry metrics port is updated while remaining enabled + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 9090) + + def test_update_azure_monitor_profile_with_opentelemetry_logs(self): + # Test enabling OpenTelemetry logs on update + dec_1 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_opentelemetry_logs": True, + "opentelemetry_logs_port": 8081, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with Azure Monitor logs enabled (monitoring addon enabled) + addon_profiles = { + "omsagent": self.models.ManagedClusterAddonProfile( + enabled=True, + config={ + "logAnalyticsWorkspaceResourceID": "/subscriptions/test/resourceGroups/test/providers/Microsoft.OperationalInsights/workspaces/test-workspace" + } + ) + } + mc_1 = self.models.ManagedCluster( + location="test_location", + addon_profiles=addon_profiles, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True + ) + ) + ) + dec_1.context.attach_mc(mc_1) + + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_1.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_1.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_1.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_1.context, 'get_location', 
return_value='test-location'): + dec_mc_1 = dec_1.update_azure_monitor_profile(mc_1) + + # Verify OpenTelemetry logs configuration is updated + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring) + self.assertIsNotNone(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_1.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8081) + + # Test disabling OpenTelemetry logs on update + dec_2 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_opentelemetry_logs": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry logs enabled + mc_2 = self.models.ManagedCluster( + location="test_location", + addon_profiles=addon_profiles, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8081 + ) + ) + ) + ) + dec_2.context.attach_mc(mc_2) + + # Mock authentication-related functions for second test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_2.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_2.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_2.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_2.context, 'get_location', return_value='test-location'): + dec_mc_2 = dec_2.update_azure_monitor_profile(mc_2) + + # Verify OpenTelemetry logs is disabled + self.assertIsNotNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs) + 
self.assertFalse(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertIsNone(dec_mc_2.azure_monitor_profile.app_monitoring.open_telemetry_logs.port) + + # Test standalone port update for OpenTelemetry logs (without enable/disable flags) + dec_3 = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "opentelemetry_logs_port": 9091, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry logs already enabled + mc_3 = self.models.ManagedCluster( + location="test_location", + addon_profiles=addon_profiles, + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + container_insights=self.models.ManagedClusterAzureMonitorProfileContainerInsights( + enabled=True + ), + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8081 # Original port + ) + ) + ) + ) + dec_3.context.attach_mc(mc_3) + + # Mock authentication-related functions for third test + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec_3.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec_3.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec_3.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec_3.context, 'get_location', return_value='test-location'): + dec_mc_3 = dec_3.update_azure_monitor_profile(mc_3) + + # Verify OpenTelemetry logs port is updated while remaining enabled + self.assertIsNotNone(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs) + self.assertTrue(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc_3.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 9091) + + def 
test_disable_azure_monitor_app_monitoring_preserves_opentelemetry(self): + # Test that disabling Azure Monitor app monitoring preserves existing OpenTelemetry configuration + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "disable_azure_monitor_app_monitoring": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with both Azure Monitor app monitoring and OpenTelemetry metrics enabled + mc = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + auto_instrumentation=self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation( + enabled=True + ), + open_telemetry_metrics=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics( + enabled=True, + port=8080 + ) + ) + ) + ) + dec.context.attach_mc(mc) + + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec.context, 'get_location', return_value='test-location'): + dec_mc = dec.update_azure_monitor_profile(mc) + + # Verify Azure Monitor app monitoring auto instrumentation is disabled + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation) + self.assertFalse(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation.enabled) + + # Verify OpenTelemetry metrics configuration is preserved + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics) + self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.enabled) + 
self.assertEqual(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_metrics.port, 8080) + + def test_enable_azure_monitor_app_monitoring_preserves_opentelemetry(self): + # Test that enabling Azure Monitor app monitoring preserves existing OpenTelemetry configuration + dec = AKSPreviewManagedClusterUpdateDecorator( + self.cmd, + self.client, + { + "enable_azure_monitor_app_monitoring": True, + }, + CUSTOM_MGMT_AKS_PREVIEW, + ) + + # Mock existing cluster with OpenTelemetry logs enabled but Azure Monitor app monitoring disabled + mc = self.models.ManagedCluster( + location="test_location", + azure_monitor_profile=self.models.ManagedClusterAzureMonitorProfile( + app_monitoring=self.models.ManagedClusterAzureMonitorProfileAppMonitoring( + auto_instrumentation=self.models.ManagedClusterAzureMonitorProfileAppMonitoringAutoInstrumentation( + enabled=False + ), + open_telemetry_logs=self.models.ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryLogs( + enabled=True, + port=8081 + ) + ) + ) + ) + dec.context.attach_mc(mc) + + # Mock authentication-related functions + with patch('azext_aks_preview.managed_cluster_decorator.ensure_azure_monitor_profile_prerequisites'), \ + patch.object(dec.context, 'get_subscription_id', return_value='test-subscription'), \ + patch.object(dec.context, 'get_resource_group_name', return_value='test-rg'), \ + patch.object(dec.context, 'get_name', return_value='test-cluster'), \ + patch.object(dec.context, 'get_location', return_value='test-location'): + dec_mc = dec.update_azure_monitor_profile(mc) + + # Verify Azure Monitor app monitoring auto instrumentation is enabled + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation) + self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.auto_instrumentation.enabled) + + # Verify OpenTelemetry logs configuration is preserved + self.assertIsNotNone(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs) + 
self.assertTrue(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.enabled) + self.assertEqual(dec_mc.azure_monitor_profile.app_monitoring.open_telemetry_logs.port, 8081) if __name__ == "__main__": unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py b/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py index 27d5e99d4d0..df4c9095f75 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py @@ -1799,5 +1799,613 @@ def test_valid_custom_endpoints(self): validators.validate_custom_endpoints(namespace) +class OpenTelemetryPortsNamespace: + def __init__(self, opentelemetry_metrics_port=None, opentelemetry_logs_port=None): + self.opentelemetry_metrics_port = opentelemetry_metrics_port + self.opentelemetry_logs_port = opentelemetry_logs_port + + +class TestValidateOpenTelemetryPorts(unittest.TestCase): + def test_no_ports_specified(self): + namespace = OpenTelemetryPortsNamespace() + # Should pass without issue + validators.validate_opentelemetry_ports(namespace) + + def test_only_metrics_port_specified(self): + namespace = OpenTelemetryPortsNamespace(opentelemetry_metrics_port=8080) + validators.validate_opentelemetry_ports(namespace) + + def test_only_logs_port_specified(self): + namespace = OpenTelemetryPortsNamespace(opentelemetry_logs_port=8081) + validators.validate_opentelemetry_ports(namespace) + + def test_different_ports_specified(self): + namespace = OpenTelemetryPortsNamespace( + opentelemetry_metrics_port=8080, + opentelemetry_logs_port=8081 + ) + validators.validate_opentelemetry_ports(namespace) + + def test_same_ports_throws_error(self): + namespace = OpenTelemetryPortsNamespace( + opentelemetry_metrics_port=8080, + opentelemetry_logs_port=8080 + ) + err = ( + "OpenTelemetry metrics port and logs port cannot be the same. 
" + "Please specify different ports for --opentelemetry-metrics-port and --opentelemetry-logs-port." + ) + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_opentelemetry_ports(namespace) + self.assertEqual(str(cm.exception), err) + + def test_metrics_port_below_range(self): + namespace = OpenTelemetryPortsNamespace(opentelemetry_metrics_port=0) + err = "OpenTelemetry metrics port must be between 1 and 65535, got 0." + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_opentelemetry_ports(namespace) + self.assertEqual(str(cm.exception), err) + + def test_metrics_port_above_range(self): + namespace = OpenTelemetryPortsNamespace(opentelemetry_metrics_port=65536) + err = "OpenTelemetry metrics port must be between 1 and 65535, got 65536." + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_opentelemetry_ports(namespace) + self.assertEqual(str(cm.exception), err) + + def test_logs_port_below_range(self): + namespace = OpenTelemetryPortsNamespace(opentelemetry_logs_port=-1) + err = "OpenTelemetry logs port must be between 1 and 65535, got -1." + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_opentelemetry_ports(namespace) + self.assertEqual(str(cm.exception), err) + + def test_logs_port_above_range(self): + namespace = OpenTelemetryPortsNamespace(opentelemetry_logs_port=100000) + err = "OpenTelemetry logs port must be between 1 and 65535, got 100000." 
+ with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_opentelemetry_ports(namespace) + self.assertEqual(str(cm.exception), err) + + def test_valid_edge_case_ports(self): + # Test boundary values + namespace = OpenTelemetryPortsNamespace( + opentelemetry_metrics_port=1, + opentelemetry_logs_port=65535 + ) + validators.validate_opentelemetry_ports(namespace) + + +class OpenTelemetryMetricsDependenciesNamespace: + def __init__(self, enable_opentelemetry_metrics=False, disable_opentelemetry_metrics=False, + enable_azure_monitor_metrics=False, enable_azuremonitormetrics=False): + self.enable_opentelemetry_metrics = enable_opentelemetry_metrics + self.disable_opentelemetry_metrics = disable_opentelemetry_metrics + self.enable_azure_monitor_metrics = enable_azure_monitor_metrics + self.enable_azuremonitormetrics = enable_azuremonitormetrics + + +class TestValidateOpenTelemetryMetricsDependencies(unittest.TestCase): + def test_no_opentelemetry_flags(self): + namespace = OpenTelemetryMetricsDependenciesNamespace() + # Should pass without issue + validators.validate_opentelemetry_metrics_dependencies(namespace) + + def test_enable_with_azure_monitor_metrics(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + enable_opentelemetry_metrics=True, + enable_azure_monitor_metrics=True + ) + validators.validate_opentelemetry_metrics_dependencies(namespace) + + def test_enable_with_deprecated_azuremonitormetrics(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + enable_opentelemetry_metrics=True, + enable_azuremonitormetrics=True + ) + validators.validate_opentelemetry_metrics_dependencies(namespace) + + def test_enable_without_azure_monitor_throws_error(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + enable_opentelemetry_metrics=True + ) + err = ( + "OpenTelemetry metrics requires Azure Monitor metrics to be enabled. " + "Please add --enable-azure-monitor-metrics or --enable-azuremonitormetrics to your command." 
+ ) + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_opentelemetry_metrics_dependencies(namespace) + self.assertEqual(str(cm.exception), err) + + def test_mutually_exclusive_flags_throws_error(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + enable_opentelemetry_metrics=True, + disable_opentelemetry_metrics=True + ) + err = "Cannot specify both --enable-opentelemetry-metrics and --disable-opentelemetry-metrics at the same time." + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + validators.validate_opentelemetry_metrics_dependencies(namespace) + self.assertEqual(str(cm.exception), err) + + def test_disable_only_flag(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + disable_opentelemetry_metrics=True + ) + # Should pass - disabling doesn't require Azure Monitor + validators.validate_opentelemetry_metrics_dependencies(namespace) + + +class TestValidateOpenTelemetryMetricsDependenciesForUpdate(unittest.TestCase): + def test_no_opentelemetry_flags_for_update(self): + namespace = OpenTelemetryMetricsDependenciesNamespace() + # Should pass without issue + validators.validate_opentelemetry_metrics_dependencies_for_update(namespace) + + def test_enable_only_flag_for_update(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + enable_opentelemetry_metrics=True + ) + # Should pass - for updates, dependency validation is deferred to decorator + validators.validate_opentelemetry_metrics_dependencies_for_update(namespace) + + def test_disable_only_flag_for_update(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + disable_opentelemetry_metrics=True + ) + # Should pass + validators.validate_opentelemetry_metrics_dependencies_for_update(namespace) + + def test_mutually_exclusive_flags_throws_error_for_update(self): + namespace = OpenTelemetryMetricsDependenciesNamespace( + enable_opentelemetry_metrics=True, + disable_opentelemetry_metrics=True + ) + err = "Cannot specify both 
--enable-opentelemetry-metrics and --disable-opentelemetry-metrics at the same time." + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + validators.validate_opentelemetry_metrics_dependencies_for_update(namespace) + self.assertEqual(str(cm.exception), err) + + +class OpenTelemetryLogsDependenciesNamespace: + def __init__(self, enable_opentelemetry_logs=False, disable_opentelemetry_logs=False): + self.enable_opentelemetry_logs = enable_opentelemetry_logs + self.disable_opentelemetry_logs = disable_opentelemetry_logs + + +class TestValidateOpenTelemetryLogsDependencies(unittest.TestCase): + def test_no_opentelemetry_flags(self): + namespace = OpenTelemetryLogsDependenciesNamespace() + # Should pass without issue + validators.validate_opentelemetry_logs_dependencies(namespace) + + def test_mutually_exclusive_flags_throws_error(self): + namespace = OpenTelemetryLogsDependenciesNamespace( + enable_opentelemetry_logs=True, + disable_opentelemetry_logs=True + ) + err = "Cannot specify both --enable-opentelemetry-logs and --disable-opentelemetry-logs at the same time." 
+ with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + validators.validate_opentelemetry_logs_dependencies(namespace) + self.assertEqual(str(cm.exception), err) + + def test_disable_only_flag(self): + namespace = OpenTelemetryLogsDependenciesNamespace( + disable_opentelemetry_logs=True + ) + # Should pass - disabling doesn't require any dependency + validators.validate_opentelemetry_logs_dependencies(namespace) + + +class TestValidateOpenTelemetryLogsDependenciesForUpdate(unittest.TestCase): + def test_no_opentelemetry_flags_for_update(self): + namespace = OpenTelemetryLogsDependenciesNamespace() + # Should pass without issue + validators.validate_opentelemetry_logs_dependencies_for_update(namespace) + + def test_enable_only_flag_for_update(self): + namespace = OpenTelemetryLogsDependenciesNamespace( + enable_opentelemetry_logs=True + ) + # Should pass - for updates, dependency validation is deferred to decorator + validators.validate_opentelemetry_logs_dependencies_for_update(namespace) + + def test_disable_only_flag_for_update(self): + namespace = OpenTelemetryLogsDependenciesNamespace( + disable_opentelemetry_logs=True + ) + # Should pass + validators.validate_opentelemetry_logs_dependencies_for_update(namespace) + + def test_mutually_exclusive_flags_throws_error_for_update(self): + namespace = OpenTelemetryLogsDependenciesNamespace( + enable_opentelemetry_logs=True, + disable_opentelemetry_logs=True + ) + err = "Cannot specify both --enable-opentelemetry-logs and --disable-opentelemetry-logs at the same time." 
+ with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + validators.validate_opentelemetry_logs_dependencies_for_update(namespace) + self.assertEqual(str(cm.exception), err) + + +class AzureMonitorAndOpenTelemetryNamespace: + def __init__(self, enable_opentelemetry_metrics=False, disable_opentelemetry_metrics=False, + enable_opentelemetry_logs=False, disable_opentelemetry_logs=False, + enable_azure_monitor_metrics=False, enable_azuremonitormetrics=False, + enable_azure_monitor_logs=False, + opentelemetry_metrics_port=None, opentelemetry_logs_port=None): + self.enable_opentelemetry_metrics = enable_opentelemetry_metrics + self.disable_opentelemetry_metrics = disable_opentelemetry_metrics + self.enable_opentelemetry_logs = enable_opentelemetry_logs + self.disable_opentelemetry_logs = disable_opentelemetry_logs + self.enable_azure_monitor_metrics = enable_azure_monitor_metrics + self.enable_azuremonitormetrics = enable_azuremonitormetrics + self.enable_azure_monitor_logs = enable_azure_monitor_logs + self.opentelemetry_metrics_port = opentelemetry_metrics_port + self.opentelemetry_logs_port = opentelemetry_logs_port + + +class TestValidateAzureMonitorAndOpenTelemetryForCreate(unittest.TestCase): + def test_valid_configuration(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_metrics=True, + enable_opentelemetry_logs=True, + enable_azure_monitor_metrics=True, + enable_azure_monitor_logs=True, + opentelemetry_metrics_port=8080, + opentelemetry_logs_port=8081 + ) + # Should pass all validations + validators.validate_azure_monitor_and_opentelemetry_for_create(namespace) + + def test_port_conflict_throws_error(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_metrics=True, + enable_azure_monitor_metrics=True, + opentelemetry_metrics_port=8080, + opentelemetry_logs_port=8080 + ) + err = ( + "OpenTelemetry metrics port and logs port cannot be the same. 
" + "Please specify different ports for --opentelemetry-metrics-port and --opentelemetry-logs-port." + ) + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_azure_monitor_and_opentelemetry_for_create(namespace) + self.assertEqual(str(cm.exception), err) + + def test_metrics_missing_azure_monitor_throws_error(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_metrics=True + ) + err = ( + "OpenTelemetry metrics requires Azure Monitor metrics to be enabled. " + "Please add --enable-azure-monitor-metrics or --enable-azuremonitormetrics to your command." + ) + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_azure_monitor_and_opentelemetry_for_create(namespace) + self.assertEqual(str(cm.exception), err) + + def test_logs_missing_azure_monitor_throws_error(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_logs=True + ) + err = ( + "OpenTelemetry logs requires Azure Monitor logs to be enabled. " + "Please add --enable-azure-monitor-logs to your command." 
+ ) + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_azure_monitor_and_opentelemetry_for_create(namespace) + self.assertEqual(str(cm.exception), err) + + +class TestValidateAzureMonitorAndOpenTelemetryForUpdate(unittest.TestCase): + def test_valid_configuration_for_update(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_metrics=True, + enable_opentelemetry_logs=True, + opentelemetry_metrics_port=8080, + opentelemetry_logs_port=8081 + ) + # Should pass all validations (dependency validation deferred for updates) + validators.validate_azure_monitor_and_opentelemetry_for_update(namespace) + + def test_port_conflict_throws_error_for_update(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_metrics=True, + opentelemetry_metrics_port=8080, + opentelemetry_logs_port=8080 + ) + err = ( + "OpenTelemetry metrics port and logs port cannot be the same. " + "Please specify different ports for --opentelemetry-metrics-port and --opentelemetry-logs-port." + ) + with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_azure_monitor_and_opentelemetry_for_update(namespace) + self.assertEqual(str(cm.exception), err) + + def test_mutually_exclusive_metrics_flags_throws_error(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_metrics=True, + disable_opentelemetry_metrics=True + ) + err = "Cannot specify both --enable-opentelemetry-metrics and --disable-opentelemetry-metrics at the same time." 
+ with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + validators.validate_azure_monitor_and_opentelemetry_for_update(namespace) + self.assertEqual(str(cm.exception), err) + + def test_mutually_exclusive_logs_flags_throws_error(self): + namespace = AzureMonitorAndOpenTelemetryNamespace( + enable_opentelemetry_logs=True, + disable_opentelemetry_logs=True + ) + err = "Cannot specify both --enable-opentelemetry-logs and --disable-opentelemetry-logs at the same time." + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + validators.validate_azure_monitor_and_opentelemetry_for_update(namespace) + self.assertEqual(str(cm.exception), err) + + +class TestValidateAzureMonitorLogsAndEnableAddons(unittest.TestCase): + def test_enable_azure_monitor_logs_with_monitoring_addon_throws_error(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + namespace.enable_addons = ["monitoring", "azure-policy"] + + err = "Cannot specify both '--enable-azure-monitor-logs' and '--enable-addons monitoring'. Use either '--enable-azure-monitor-logs' or '--enable-addons monitoring'." 
+ with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_azure_monitor_logs_and_enable_addons(namespace) + self.assertEqual(str(cm.exception), err) + + def test_enable_azure_monitor_logs_with_other_addons_succeeds(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + namespace.enable_addons = ["azure-policy", "virtual-node"] + + # Should not raise an exception + validators.validate_azure_monitor_logs_and_enable_addons(namespace) + + def test_enable_azure_monitor_logs_without_enable_addons_succeeds(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + + # Should not raise an exception + validators.validate_azure_monitor_logs_and_enable_addons(namespace) + + def test_enable_addons_monitoring_without_enable_azure_monitor_logs_succeeds(self): + namespace = SimpleNamespace() + namespace.enable_addons = ["monitoring"] + + # Should not raise an exception + validators.validate_azure_monitor_logs_and_enable_addons(namespace) + + +class TestValidateAzureMonitorLogsEnableDisable(unittest.TestCase): + def test_enable_and_disable_azure_monitor_logs_throws_error(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + namespace.disable_azure_monitor_logs = True + + err = "Cannot specify both '--enable-azure-monitor-logs' and '--disable-azure-monitor-logs'. Use either '--enable-azure-monitor-logs' or '--disable-azure-monitor-logs'." 
+ with self.assertRaises(ArgumentUsageError) as cm: + validators.validate_azure_monitor_logs_enable_disable(namespace) + self.assertEqual(str(cm.exception), err) + + def test_only_enable_azure_monitor_logs_succeeds(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + namespace.disable_azure_monitor_logs = False + + # Should not raise an exception + validators.validate_azure_monitor_logs_enable_disable(namespace) + + def test_only_disable_azure_monitor_logs_succeeds(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = False + namespace.disable_azure_monitor_logs = True + + # Should not raise an exception + validators.validate_azure_monitor_logs_enable_disable(namespace) + + def test_neither_enable_nor_disable_azure_monitor_logs_succeeds(self): + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = False + namespace.disable_azure_monitor_logs = False + + # Should not raise an exception + validators.validate_azure_monitor_logs_enable_disable(namespace) + + +class TestAzureMonitorLogsParameters(unittest.TestCase): + """Test that Azure Monitor logs parameters are processed correctly.""" + + def test_enable_azure_monitor_logs_modifies_enable_addons(self): + """Test that enable_azure_monitor_logs=True adds 'monitoring' to enable_addons.""" + import azext_aks_preview.custom as custom + + # Test case 1: enable_addons is None initially + enable_addons = None + enable_azure_monitor_logs = True + + # This mimics the logic from aks_create function + if enable_azure_monitor_logs: + if enable_addons is None: + enable_addons = ["monitoring"] + else: + enable_addons = list(enable_addons) + if "monitoring" not in enable_addons: + enable_addons.append("monitoring") + + self.assertEqual(enable_addons, ["monitoring"]) + + # Test case 2: enable_addons already has other addons + enable_addons = ["azure-policy", "ingress-appgw"] + enable_azure_monitor_logs = True + + if enable_azure_monitor_logs: + if 
enable_addons is None: + enable_addons = ["monitoring"] + else: + enable_addons = list(enable_addons) + if "monitoring" not in enable_addons: + enable_addons.append("monitoring") + + self.assertIn("monitoring", enable_addons) + self.assertIn("azure-policy", enable_addons) + self.assertIn("ingress-appgw", enable_addons) + self.assertEqual(len(enable_addons), 3) + + # Test case 3: monitoring already in enable_addons + enable_addons = ["monitoring", "azure-policy"] + enable_azure_monitor_logs = True + + if enable_azure_monitor_logs: + if enable_addons is None: + enable_addons = ["monitoring"] + else: + enable_addons = list(enable_addons) + if "monitoring" not in enable_addons: + enable_addons.append("monitoring") + + # Should not duplicate monitoring + self.assertEqual(enable_addons.count("monitoring"), 1) + self.assertEqual(len(enable_addons), 2) + + def test_disable_azure_monitor_logs_calls_disable_function(self): + """Test that disable_azure_monitor_logs=True calls the disable addons function.""" + import azext_aks_preview.custom as custom + + # Track if disable_addons was called with correct parameters + disable_call_params = {} + + def mock_disable_addons(**kwargs): + nonlocal disable_call_params + disable_call_params.update(kwargs) + return {"addonProfiles": {"omsagent": {"enabled": False}}} + + # Mock the aks_disable_addons function + original_disable_addons = getattr(custom, 'aks_disable_addons', None) + custom.aks_disable_addons = mock_disable_addons + + try: + # Test the logic from aks_update function when disable_azure_monitor_logs=True + disable_azure_monitor_logs = True + + if disable_azure_monitor_logs: + result = custom.aks_disable_addons( + cmd="mock_cmd", + client="mock_client", + resource_group_name="test-rg", + name="test-cluster", + addons="monitoring" + ) + + # Verify the mock was called with correct parameters + self.assertEqual(disable_call_params['addons'], 'monitoring') + self.assertEqual(disable_call_params['resource_group_name'], 'test-rg') 
+ self.assertEqual(disable_call_params['name'], 'test-cluster') + + # Verify the result indicates monitoring is disabled + self.assertIn('addonProfiles', result) + self.assertIn('omsagent', result['addonProfiles']) + self.assertFalse(result['addonProfiles']['omsagent']['enabled']) + + finally: + # Restore original function + if original_disable_addons: + custom.aks_disable_addons = original_disable_addons + + def test_azure_monitor_logs_parameter_equivalency(self): + """Test that Azure Monitor logs parameters behave equivalently to addon parameters.""" + # Test enable equivalency + enable_addons_approach = ["monitoring"] # Using --enable-addons monitoring + + # Using --enable-azure-monitor-logs (our new parameter) + enable_addons = None + enable_azure_monitor_logs = True + + if enable_azure_monitor_logs: + if enable_addons is None: + enable_addons = ["monitoring"] + else: + enable_addons = list(enable_addons) + if "monitoring" not in enable_addons: + enable_addons.append("monitoring") + + # Both approaches should result in the same addon configuration + self.assertEqual(enable_addons, enable_addons_approach) + + # Test disable equivalency - both should call aks_disable_addons with "monitoring" + # This is tested by verifying both approaches use the same function call + disable_addons_approach = "monitoring" # Using --disable-addons monitoring + azure_monitor_logs_approach = "monitoring" # Using --disable-azure-monitor-logs + + self.assertEqual(disable_addons_approach, azure_monitor_logs_approach) + + def test_azure_monitor_logs_with_conflicting_parameters(self): + """Test validation catches conflicting Azure Monitor logs parameters.""" + import azext_aks_preview._validators as validators + + # Test enable and disable together should fail + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + namespace.disable_azure_monitor_logs = True + + with self.assertRaises(ArgumentUsageError): + 
validators.validate_azure_monitor_logs_enable_disable(namespace) + + # Test that non-conflicting cases don't raise errors + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = True + namespace.disable_azure_monitor_logs = False + + try: + validators.validate_azure_monitor_logs_enable_disable(namespace) + except ArgumentUsageError: + self.fail("validate_azure_monitor_logs_enable_disable raised exception unexpectedly") + + # Test that only disable doesn't raise errors + namespace = SimpleNamespace() + namespace.enable_azure_monitor_logs = False + namespace.disable_azure_monitor_logs = True + + try: + validators.validate_azure_monitor_logs_enable_disable(namespace) + except ArgumentUsageError: + self.fail("validate_azure_monitor_logs_enable_disable raised exception unexpectedly") + + def test_azure_monitor_logs_opentelemetry_dependency(self): + """Test that OpenTelemetry logs requires Azure Monitor logs to be enabled.""" + import azext_aks_preview._validators as validators + + # Test OpenTelemetry logs with Azure Monitor logs enabled should pass + namespace = SimpleNamespace() + namespace.enable_opentelemetry_logs = True + namespace.enable_azure_monitor_logs = True + namespace.enable_addons = None + + try: + validators.validate_opentelemetry_logs_dependencies(namespace) + except ArgumentUsageError: + self.fail("validate_opentelemetry_logs_dependencies raised exception unexpectedly") + + # Test OpenTelemetry logs with monitoring addon enabled should pass + namespace = SimpleNamespace() + namespace.enable_opentelemetry_logs = True + namespace.enable_azure_monitor_logs = False + namespace.enable_addons = ["monitoring"] + + try: + validators.validate_opentelemetry_logs_dependencies(namespace) + except ArgumentUsageError: + self.fail("validate_opentelemetry_logs_dependencies raised exception unexpectedly") + + # Test OpenTelemetry logs without Azure Monitor logs should fail + namespace = SimpleNamespace() + namespace.enable_opentelemetry_logs = True + 
namespace.enable_azure_monitor_logs = False + namespace.enable_addons = None + + with self.assertRaises(ArgumentUsageError): + validators.validate_opentelemetry_logs_dependencies(namespace) + + if __name__ == "__main__": unittest.main() diff --git a/src/aks-preview/linter_exclusions.yml b/src/aks-preview/linter_exclusions.yml index fee4033fe0f..8cb91181754 100644 --- a/src/aks-preview/linter_exclusions.yml +++ b/src/aks-preview/linter_exclusions.yml @@ -90,6 +90,27 @@ aks create: enable_azure_monitor_app_monitoring: rule_exclusions: - option_length_too_long + enable_azure_monitor_logs: + rule_exclusions: + - option_length_too_long + enable_opentelemetry_metrics: + rule_exclusions: + - option_length_too_long + disable_opentelemetry_metrics: + rule_exclusions: + - option_length_too_long + opentelemetry_metrics_port: + rule_exclusions: + - option_length_too_long + enable_opentelemetry_logs: + rule_exclusions: + - option_length_too_long + disable_opentelemetry_logs: + rule_exclusions: + - option_length_too_long + opentelemetry_logs_port: + rule_exclusions: + - option_length_too_long enable_static_egress_gateway: rule_exclusions: - option_length_too_long @@ -233,6 +254,33 @@ aks update: disable_azure_monitor_app_monitoring: rule_exclusions: - option_length_too_long + disable_azure_monitor_logs: + rule_exclusions: + - option_length_too_long + enable_azure_monitor_logs: + rule_exclusions: + - option_length_too_long + enable_msi_auth_for_monitoring: + rule_exclusions: + - option_length_too_long + enable_opentelemetry_metrics: + rule_exclusions: + - option_length_too_long + disable_opentelemetry_metrics: + rule_exclusions: + - option_length_too_long + opentelemetry_metrics_port: + rule_exclusions: + - option_length_too_long + enable_opentelemetry_logs: + rule_exclusions: + - option_length_too_long + disable_opentelemetry_logs: + rule_exclusions: + - option_length_too_long + opentelemetry_logs_port: + rule_exclusions: + - option_length_too_long bootstrap_artifact_source: 
rule_exclusions: - option_length_too_long diff --git a/src/aks-preview/setup.py b/src/aks-preview/setup.py index e8aad73d593..0b14687fe75 100644 --- a/src/aks-preview/setup.py +++ b/src/aks-preview/setup.py @@ -9,7 +9,7 @@ from setuptools import find_packages, setup -VERSION = "19.0.0b1" +VERSION = "19.0.0b2" CLASSIFIERS = [ "Development Status :: 4 - Beta",