Skip to content

Commit 41299b9

Browse files
Authored by anandgmenon, anandgmenon02, and Copilot
[Containerapp] az containerapp arc: Enable setup custom core dns for Openshift (#8858)
* [Containerapp]: az containerapp arc: Enable setup custom core dns for Openshift on Arc * fix styling * Update src/containerapp/azext_containerapp/_params.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/containerapp/azext_containerapp/_arc_utils.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix dns * backup config and restart pods * fix comments * fix history and version --------- Co-authored-by: Anand G Menon <anandgmenon@microsoft.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
1 parent 9492266 commit 41299b9

File tree

6 files changed

+457
-87
lines changed

6 files changed

+457
-87
lines changed

src/containerapp/HISTORY.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ upcoming
66
++++++
77
* 'az containerapp session code-interpreter execute': Extend maximum supported value of `--timeout-in-seconds` from 60 to 220.
88
* 'az containerapp job create': Fix message with `--help`
9+
* 'az containerapp arc': Enable setting up a custom CoreDNS for OpenShift on Arc
910

1011
1.2.0b1
1112
++++++

src/containerapp/azext_containerapp/_arc_utils.py

Lines changed: 347 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515

1616
from azure.cli.core.azclierror import (ValidationError, ResourceNotFoundError, CLIError, InvalidArgumentValueError)
1717
from ._constants import (CUSTOM_CORE_DNS_VOLUME_NAME, CUSTOM_CORE_DNS_VOLUME_MOUNT_PATH,
18-
CUSTOM_CORE_DNS, CORE_DNS, KUBE_SYSTEM, EMPTY_CUSTOM_CORE_DNS)
18+
CUSTOM_CORE_DNS, CORE_DNS, KUBE_SYSTEM, EMPTY_CUSTOM_CORE_DNS, OPENSHIFT_DNS)
1919

2020
logger = get_logger(__name__)
2121

@@ -280,6 +280,21 @@ def update_deployment(resource_name, resource_namespace, kube_client, deployment
280280
raise ValidationError(f"other errors while patching deployment coredns in kube-system {str(e)}")
281281

282282

283+
def create_or_update_deployment(name, namespace, kube_client, deployment):
    """Create the Deployment `name` in `namespace`; if it already exists (409), replace it.

    :param name: Deployment name (must be non-empty).
    :param namespace: Target namespace (must be non-empty).
    :param kube_client: Kubernetes API client configuration.
    :param deployment: V1Deployment body to apply.
    :raises CLIError: if the API call fails for any reason other than a 409 conflict.
    """
    validate_resource_name_and_resource_namespace_not_empty(name, namespace)

    try:
        logger.info(f"Start to create deployment {name} in namespace {namespace}")
        apps_v1_api = client.AppsV1Api(kube_client)
        apps_v1_api.create_namespaced_deployment(namespace=namespace, body=deployment)
    except client.exceptions.ApiException as e:
        if e.status == 409:
            # 409 Conflict: the deployment exists already, so fall back to a full replace.
            logger.warning(f"Deployment '{name}' already exists, replacing it")
            apps_v1_api.replace_namespaced_deployment(name=name, namespace=namespace, body=deployment)
        else:
            # Message previously read "Deployment'{name}'" (missing space).
            raise CLIError(f"Failed to create or replace Deployment '{name}': {str(e)}")
296+
297+
283298
def replace_deployment(resource_name, resource_namespace, kube_client, deployment):
284299
validate_resource_name_and_resource_namespace_not_empty(resource_name, resource_namespace)
285300

@@ -321,6 +336,21 @@ def update_configmap(resource_name, resource_namespace, kube_client, config_map)
321336
raise CLIError(f"other errors while patching config map coredns in kube-system {str(e)}")
322337

323338

339+
def create_or_update_configmap(name, namespace, kube_client, configmap):
    """Create the ConfigMap `name` in `namespace`; on a 409 conflict, replace the existing one.

    :param name: ConfigMap name (must be non-empty).
    :param namespace: Target namespace (must be non-empty).
    :param kube_client: Kubernetes API client configuration.
    :param configmap: V1ConfigMap body to apply.
    :raises CLIError: if the API call fails with anything other than a 409 conflict.
    """
    validate_resource_name_and_resource_namespace_not_empty(name, namespace)

    core_v1_api = client.CoreV1Api(kube_client)
    logger.info(f"Start to create configmap {name} in namespace {namespace}")
    try:
        core_v1_api.create_namespaced_config_map(namespace=namespace, body=configmap)
    except client.exceptions.ApiException as e:
        if e.status != 409:
            raise CLIError(f"Failed to create or replace ConfigMap '{name}': {str(e)}")
        # Already present: replace keeps the operation idempotent.
        logger.warning(f"Configmap '{name}' already exists, replacing it")
        core_v1_api.replace_namespaced_config_map(name=name, namespace=namespace, body=configmap)
352+
353+
324354
def replace_configmap(resource_name, resource_namespace, kube_client, config_map):
325355
validate_resource_name_and_resource_namespace_not_empty(resource_name, resource_namespace)
326356

@@ -356,3 +386,319 @@ def validate_resource_name_and_resource_namespace_not_empty(resource_name, resou
356386
raise InvalidArgumentValueError("Arg resource_name should not be None or Empty")
357387
if resource_namespace is None or len(resource_namespace) == 0:
358388
raise InvalidArgumentValueError("Arg resource_namespace should not be None or Empty")
389+
390+
391+
def create_or_replace_cluster_role(rbac_api, role_name, role):
    """Create ClusterRole `role_name`; if it already exists (409), replace it in place.

    :param rbac_api: RbacAuthorizationV1Api instance.
    :param role_name: Name of the ClusterRole.
    :param role: V1ClusterRole body to apply.
    :raises CLIError: on any API failure other than a 409 conflict.
    """
    try:
        logger.info(f"Creating new ClusterRole '{role_name}'")
        rbac_api.create_cluster_role(body=role)
    except client.exceptions.ApiException as e:
        if e.status != 409:
            raise CLIError(f"Failed to create or replace ClusterRole '{role_name}': {str(e)}")
        # Conflict means the role exists; replacing keeps re-runs idempotent.
        logger.info(f"ClusterRole '{role_name}' already exists, replacing it")
        rbac_api.replace_cluster_role(name=role_name, body=role)
401+
402+
403+
def create_or_replace_cluster_rolebinding(rbac_api, rolebinding_name, rolebinding):
    """Create ClusterRoleBinding `rolebinding_name`; if it already exists (409), replace it.

    :param rbac_api: RbacAuthorizationV1Api instance.
    :param rolebinding_name: Name of the ClusterRoleBinding.
    :param rolebinding: V1ClusterRoleBinding body to apply.
    :raises CLIError: on any API failure other than a 409 conflict.
    """
    try:
        logger.info(f"Creating new ClusterRoleBinding '{rolebinding_name}'")
        rbac_api.create_cluster_role_binding(body=rolebinding)
    except client.exceptions.ApiException as e:
        if e.status == 409:
            # Messages previously said "ClusterRole"; this function handles role *bindings*.
            logger.info(f"ClusterRoleBinding '{rolebinding_name}' already exists, replacing it")
            rbac_api.replace_cluster_role_binding(name=rolebinding_name, body=rolebinding)
        else:
            raise CLIError(f"Failed to create or replace ClusterRoleBinding '{rolebinding_name}': {str(e)}")
413+
414+
415+
def create_openshift_custom_coredns_resources(kube_client, namespace=OPENSHIFT_DNS):
    """Create the custom CoreDNS stack (ClusterRole, ClusterRoleBinding, ConfigMap,
    Deployment, Service) in the given OpenShift namespace.

    The Corefile content is copied from the existing coredns-custom ConfigMap in
    kube-system ('k4apps-default.io.server' key, falling back to 'Corefile').

    :param kube_client: Kubernetes API client configuration.
    :param namespace: Target namespace (defaults to openshift-dns).
    :raises CLIError: on any failure; a 409 from resource creation is logged instead.
    """
    try:
        logger.info("Creating custom CoreDNS resources in OpenShift")
        core_v1_api = client.CoreV1Api(kube_client)
        rbac_api = client.RbacAuthorizationV1Api(kube_client)

        # 1. ClusterRole granting the read access CoreDNS's kubernetes plugin needs.
        cluster_role = client.V1ClusterRole(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS
            ),
            rules=[
                client.V1PolicyRule(
                    api_groups=[""],
                    resources=["services", "endpoints", "pods", "namespaces"],
                    verbs=["list", "watch"]
                ),
                client.V1PolicyRule(
                    api_groups=["discovery.k8s.io"],
                    resources=["endpointslices"],
                    verbs=["list", "watch"]
                )
            ]
        )
        create_or_replace_cluster_role(rbac_api, CUSTOM_CORE_DNS, cluster_role)

        # 2. Bind the role to the namespace's default service account (used by the pod).
        cluster_role_binding = client.V1ClusterRoleBinding(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS
            ),
            role_ref=client.V1RoleRef(
                api_group="rbac.authorization.k8s.io",
                kind="ClusterRole",
                name=CUSTOM_CORE_DNS
            ),
            subjects=[
                client.V1Subject(
                    kind="ServiceAccount",
                    name="default",
                    namespace=namespace
                )
            ]
        )
        create_or_replace_cluster_rolebinding(rbac_api, CUSTOM_CORE_DNS, cluster_role_binding)

        # 3. ConfigMap: copy the Corefile out of the coredns-custom ConfigMap in kube-system.
        existing_config_map = core_v1_api.read_namespaced_config_map(name=CUSTOM_CORE_DNS, namespace=KUBE_SYSTEM)
        # .data is None when the ConfigMap has no entries; guard before .get().
        existing_data = existing_config_map.data or {}
        corefile_data = existing_data.get("k4apps-default.io.server") or existing_data.get("Corefile")
        if not corefile_data:
            # Was an accidental capital F"" prefix (valid, but inconsistent with the file).
            raise ValidationError(f"Neither 'k4apps-default.io.server' nor 'Corefile' key found in the {CUSTOM_CORE_DNS} ConfigMap in {KUBE_SYSTEM} namespace.")

        config_map = client.V1ConfigMap(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS,
                namespace=namespace
            ),
            data={"Corefile": corefile_data}
        )
        create_or_update_configmap(name=CUSTOM_CORE_DNS, namespace=namespace, kube_client=kube_client, configmap=config_map)
        logger.info("Custom CoreDNS ConfigMap created successfully")

        # 4. Deployment running CoreDNS with the ConfigMap mounted as its Corefile.
        deployment = client.V1Deployment(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS,
                namespace=namespace
            ),
            spec=client.V1DeploymentSpec(
                replicas=1,
                selector=client.V1LabelSelector(
                    match_labels={"app": CUSTOM_CORE_DNS}
                ),
                template=client.V1PodTemplateSpec(
                    metadata=client.V1ObjectMeta(
                        labels={"app": CUSTOM_CORE_DNS}
                    ),
                    spec=client.V1PodSpec(
                        containers=[
                            client.V1Container(
                                name="coredns",
                                image="coredns/coredns:latest",
                                args=["-conf", "/etc/coredns/Corefile"],
                                volume_mounts=[
                                    client.V1VolumeMount(
                                        name="config-volume",
                                        mount_path="/etc/coredns"
                                    )
                                ]
                            )
                        ],
                        volumes=[
                            client.V1Volume(
                                name="config-volume",
                                config_map=client.V1ConfigMapVolumeSource(
                                    name=CUSTOM_CORE_DNS
                                )
                            )
                        ]
                    )
                )
            )
        )
        create_or_update_deployment(name=CUSTOM_CORE_DNS, namespace=namespace, kube_client=kube_client, deployment=deployment)
        logger.info("Custom CoreDNS Deployment created successfully")

        # 5. Service exposing CoreDNS on UDP/53 inside the cluster.
        service = client.V1Service(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS,
                namespace=namespace
            ),
            spec=client.V1ServiceSpec(
                selector={"app": CUSTOM_CORE_DNS},
                ports=[
                    client.V1ServicePort(
                        protocol="UDP",
                        port=53,
                        target_port=53
                    )
                ]
            )
        )
        try:
            core_v1_api.create_namespaced_service(namespace=namespace, body=service)
            logger.info("Custom CoreDNS Service created successfully")
        except client.exceptions.ApiException as e:
            if e.status != 409:
                raise
            # Idempotency: re-runs used to abort here through the outer 409 handler.
            # An already-existing Service is fine to keep as-is.
            logger.warning(f"Service '{CUSTOM_CORE_DNS}' already exists, leaving it unchanged")

    except client.exceptions.ApiException as e:
        if e.status == 409:
            logger.warning("Custom CoreDNS resources already exist")
        else:
            raise CLIError(f"Failed to create custom CoreDNS resources: {str(e)}")
    except Exception as e:
        raise CLIError(f"An error occurred while creating custom CoreDNS resources: {str(e)}")
549+
550+
551+
def patch_openshift_dns_operator(kube_client, domain, original_folder=None):
    """Add a forwardPlugin resolver for `domain` (and internal.{domain}) to the OpenShift
    DNS operator, forwarding to the custom CoreDNS Service's cluster IP.

    :param kube_client: Kubernetes API client configuration.
    :param domain: Cluster app domain to resolve through the custom CoreDNS.
    :param original_folder: Optional folder where the pre-patch operator config is backed up.
    :raises CLIError: on any failure.
    """
    try:
        logger.info("Patching OpenShift DNS operator to add custom resolver")

        custom_objects_api = client.CustomObjectsApi(kube_client)

        # Fetch (and optionally back up) the existing DNS operator configuration.
        dns_operator_config = get_and_save_openshift_dns_operator_config(kube_client, original_folder)

        coredns_service = client.CoreV1Api(kube_client).read_namespaced_service(name=CUSTOM_CORE_DNS, namespace=OPENSHIFT_DNS)

        # "servers" may be missing or explicitly null; normalize to a list before scanning.
        servers = dns_operator_config.get("spec", {}).get("servers") or []
        custom_resolver = {
            "name": CUSTOM_CORE_DNS,
            "zones": [domain, f"internal.{domain}"],
            "forwardPlugin": {
                "upstreams": [coredns_service.spec.cluster_ip],
            }
        }

        # Only add the resolver once so re-running the command does not duplicate it.
        if not any(server.get("name") == CUSTOM_CORE_DNS for server in servers):
            servers.append(custom_resolver)
            # setdefault guards against a config without a "spec" key (was a KeyError).
            dns_operator_config.setdefault("spec", {})["servers"] = servers

            custom_objects_api.patch_cluster_custom_object(
                group="operator.openshift.io",
                version="v1",
                plural="dnses",
                name="default",
                body=dns_operator_config
            )
            logger.info("Successfully patched OpenShift DNS operator with custom resolver")
        else:
            logger.info("Custom resolver already exists in the DNS operator configuration")

    except client.exceptions.ApiException as e:
        raise CLIError(f"Failed to patch DNS operator: {str(e)}")
    except Exception as e:
        raise CLIError(f"An error occurred while patching DNS operator: {str(e)}")
593+
594+
595+
def extract_domain_from_configmap(kube_client, resource_name=CUSTOM_CORE_DNS, namespace=KUBE_SYSTEM):
    """Extract the app domain from the custom CoreDNS ConfigMap.

    Scans the 'k4apps-default.io.server' Corefile for a "<domain>:53 {" server block,
    skipping the 'dapr' zone.

    :param kube_client: Kubernetes API client configuration.
    :param resource_name: ConfigMap to read (defaults to coredns-custom).
    :param namespace: Namespace of the ConfigMap (defaults to kube-system).
    :return: The domain string, or None on any failure (errors are logged, not raised).
    """
    import re

    try:
        core_v1_api = client.CoreV1Api(kube_client)
        # Bug fix: the lookup previously ignored resource_name/namespace and always
        # read CUSTOM_CORE_DNS from KUBE_SYSTEM.
        configmap = core_v1_api.read_namespaced_config_map(name=resource_name, namespace=namespace)
        if configmap is None:
            raise ResourceNotFoundError(f"ConfigMap '{resource_name}' not found in namespace '{namespace}'.")

        corefile = configmap.data.get("k4apps-default.io.server")
        if not corefile:
            # Message fix: previously read "key found in" instead of "key not found in".
            raise ValidationError("'k4apps-default.io.server' key not found in the coredns-custom ConfigMap in kube-system namespace.")

        # Extract the domain (excluding 'dapr')
        for line in corefile.splitlines():
            match = re.match(r'^\s*([a-zA-Z0-9\-\.]+):53\s*{', line)
            if match and match.group(1) != "dapr":
                return match.group(1)

        raise ValidationError("No valid domain found in CoreDNS configmap data.")
    except Exception as e:
        # Best-effort helper: callers treat None as "could not determine the domain".
        logger.error(f"Failed to extract domain from configmap: {str(e)}")
        return None
618+
619+
620+
def get_and_save_openshift_dns_operator_config(kube_client, folder=None):
    """Fetch the cluster-scoped OpenShift DNS operator config (operator.openshift.io/v1,
    dnses/default).

    :param kube_client: Kubernetes API client configuration.
    :param folder: Optional folder; when given, the config is also written there as JSON
        so it can serve as a backup before patching.
    :return: The operator configuration as a dict.
    :raises ValidationError: if the fetch or the backup write fails.
    """
    try:
        custom_api = client.CustomObjectsApi(kube_client)
        config = custom_api.get_cluster_custom_object(
            group="operator.openshift.io",
            version="v1",
            plural="dnses",
            name="default"
        )

        if folder is not None:
            backup_path = os.path.join(folder, "openshift-dns-operator-config.json")
            with open(backup_path, "w") as backup_file:
                backup_file.write(json.dumps(config, indent=2))
            logger.info(f"OpenShift DNS operator configuration saved to {backup_path}")

        return config
    except Exception as e:
        raise ValidationError(f"Failed to retrieve OpenShift DNS operator configuration: {str(e)}")
639+
640+
641+
def restart_openshift_dns_daemonset(kube_client):
    """Trigger a rolling restart of the 'dns-default' DaemonSet in the openshift-dns
    namespace (after interactive confirmation) so DNS pods pick up config changes.

    Equivalent to `kubectl rollout restart daemonset dns-default -n openshift-dns`.

    :param kube_client: Kubernetes API client configuration.
    :raises CLIError: on failure; a missing DaemonSet is logged and ignored.
    """
    try:
        apps_v1_api = client.AppsV1Api(kube_client)
        daemonset_name = "dns-default"

        # Bail out early (with a warning) if the DaemonSet does not exist.
        try:
            apps_v1_api.read_namespaced_daemon_set(
                name=daemonset_name,
                namespace=OPENSHIFT_DNS
            )
        except client.exceptions.ApiException as e:
            if e.status == 404:
                logger.warning(f"DaemonSet '{daemonset_name}' not found in namespace '{OPENSHIFT_DNS}'")
                return
            raise

        logger.info(f"Restarting DaemonSet '{daemonset_name}' in namespace '{OPENSHIFT_DNS}'...")

        # Restarting DNS pods is disruptive, so ask the operator to confirm first.
        try:
            response = input(f"The DNS DaemonSet in namespace '{OPENSHIFT_DNS}' needs to be restarted. Are you sure you want to proceed? (y/n): ")
            confirmed = response.lower() in ['y', 'yes']
        except (EOFError, KeyboardInterrupt):
            # Non-interactive session or Ctrl-C: treat as "no".
            confirmed = False

        if not confirmed:
            logger.info(f"The restart of daemonset was cancelled by the user. Please manually restart the daemonset by running 'kubectl rollout restart daemonset {daemonset_name} -n {OPENSHIFT_DNS}'")
            return

        import datetime

        # datetime.utcnow() is deprecated (Python 3.12+); an aware UTC timestamp
        # produces the identical strftime output.
        restart_time = datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')

        # kubectl rollout restart works by bumping this pod-template annotation,
        # which forces the DaemonSet controller to roll every pod.
        patch_body = {
            "spec": {
                "template": {
                    "metadata": {
                        "annotations": {
                            "kubectl.kubernetes.io/restartedAt": restart_time
                        }
                    }
                }
            }
        }

        # Patch the daemon set to trigger a restart
        apps_v1_api.patch_namespaced_daemon_set(
            name=daemonset_name,
            namespace=OPENSHIFT_DNS,
            body=patch_body
        )

        logger.info(f"Successfully initiated restart of DaemonSet '{daemonset_name}'. Pods will be recreated automatically.")

    except client.exceptions.ApiException as e:
        if e.status == 404:
            logger.warning(f"DaemonSet '{daemonset_name}' not found in namespace '{OPENSHIFT_DNS}'")
        else:
            raise CLIError(f"Failed to restart DaemonSet: {str(e)}")
    except Exception as e:
        raise CLIError(f"An error occurred while restarting DaemonSet: {str(e)}")

src/containerapp/azext_containerapp/_constants.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,11 +142,13 @@
142142
SUPPORTED_RUNTIME_LIST = [RUNTIME_GENERIC, RUNTIME_JAVA]
143143

144144
AKS_AZURE_LOCAL_DISTRO = "AksAzureLocal"
145-
SETUP_CORE_DNS_SUPPORTED_DISTRO = [AKS_AZURE_LOCAL_DISTRO]
145+
OPENSHIFT_DISTRO = "openshift"
146+
SETUP_CORE_DNS_SUPPORTED_DISTRO = [AKS_AZURE_LOCAL_DISTRO, OPENSHIFT_DISTRO]
146147
CUSTOM_CORE_DNS_VOLUME_NAME = 'custom-config-volume'
147148
CUSTOM_CORE_DNS_VOLUME_MOUNT_PATH = '/etc/coredns/custom'
148149
CUSTOM_CORE_DNS = 'coredns-custom'
149150
CORE_DNS = 'coredns'
151+
OPENSHIFT_DNS = 'openshift-dns'
150152
KUBE_SYSTEM = 'kube-system'
151153
EMPTY_CUSTOM_CORE_DNS = """
152154
apiVersion: v1

0 commit comments

Comments
 (0)