diff --git a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc b/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc index adc6acfb9660..d3ec72e070dd 100644 --- a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc +++ b/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc @@ -81,10 +81,6 @@ include::modules/installation-gcp-user-infra-config-host-project-vpc.adoc[levelo include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+2] - -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+3] - include::modules/installation-user-infra-generate.adoc[leveloffset=+1] include::modules/installation-initializing-manual.adoc[leveloffset=+2] @@ -125,38 +121,36 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+1] include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-int-lb.adoc[leveloffset=+2] include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-private-dns.adoc[leveloffset=+2] include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-firewall-rules.adoc[leveloffset=+2] include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] 
include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-bootstrap.adoc[leveloffset=+2] include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -// Removing bootstrap resources in GCP -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] +include::modules/installation-infrastructure-manager-control-plane.adoc[leveloffset=+2] include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-worker.adoc[leveloffset=+2] + +// Removing bootstrap resources in GCP +include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] // Installing the OpenShift CLI on Linux include::modules/cli-installing-cli-linux.adoc[leveloffset=+1] diff --git a/installing/installing_gcp/installing-gcp-user-infra.adoc b/installing/installing_gcp/installing-gcp-user-infra.adoc index 4ec840420018..efe7e47a19f9 100644 --- a/installing/installing_gcp/installing-gcp-user-infra.adoc +++ b/installing/installing_gcp/installing-gcp-user-infra.adoc @@ -120,44 +120,42 @@ include::modules/installation-user-infra-exporting-common-variables.adoc[levelof include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-vpc.adoc[leveloffset=+2] include::modules/installation-network-user-infra.adoc[leveloffset=+1] include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-ext-lb.adoc[leveloffset=+2] 
-include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-int-lb.adoc[leveloffset=+2] include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-private-dns.adoc[leveloffset=+2] include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-firewall-rules.adoc[leveloffset=+2] include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-bootstrap.adoc[leveloffset=+2] include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -// Removing bootstrap resources in GCP -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] +include::modules/installation-infrastructure-manager-control-plane.adoc[leveloffset=+2] include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-worker.adoc[leveloffset=+2] + +// Removing bootstrap resources in GCP +include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] // Installing the OpenShift CLI on Linux include::modules/cli-installing-cli-linux.adoc[leveloffset=+1] diff --git 
a/installing/installing_gcp/installing-restricted-networks-gcp.adoc b/installing/installing_gcp/installing-restricted-networks-gcp.adoc index 97b667964d54..19a4bf55740b 100644 --- a/installing/installing_gcp/installing-restricted-networks-gcp.adoc +++ b/installing/installing_gcp/installing-restricted-networks-gcp.adoc @@ -119,43 +119,41 @@ include::modules/installation-user-infra-exporting-common-variables.adoc[levelof include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-vpc.adoc[leveloffset=+2] include::modules/installation-network-user-infra.adoc[leveloffset=+1] include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-int-lb.adoc[leveloffset=+2] include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-private-dns.adoc[leveloffset=+2] include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-firewall-rules.adoc[leveloffset=+2] include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] 
-include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-bootstrap.adoc[leveloffset=+2] include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] +include::modules/installation-infrastructure-manager-control-plane.adoc[leveloffset=+2] include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] +include::modules/installation-infrastructure-manager-worker.adoc[leveloffset=+2] + +include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] //You install the CLI on the mirror host. diff --git a/modules/installation-approve-csrs.adoc b/modules/installation-approve-csrs.adoc index a80f2a2b571b..abe8b48b6597 100644 --- a/modules/installation-approve-csrs.adoc +++ b/modules/installation-approve-csrs.adoc @@ -145,6 +145,7 @@ $ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name} [NOTE] ==== Some Operators might not become available until some CSRs are approved. +Each node submits two CSRs, so you may need to run the command to approve CSRs multiple times. ==== . 
Now that your client requests are approved, you must review the server requests for each machine that you added to the cluster: diff --git a/modules/installation-creating-gcp-bootstrap.adoc b/modules/installation-creating-gcp-bootstrap.adoc index 1eb044d5b07b..b1a27dc4e293 100644 --- a/modules/installation-creating-gcp-bootstrap.adoc +++ b/modules/installation-creating-gcp-bootstrap.adoc @@ -4,24 +4,18 @@ // * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-bootstrap_{context}"] = Creating the bootstrap machine in {gcp-short} -You must create the bootstrap machine in {gcp-first} to use during -{product-title} cluster initialization. One way to create this machine is -to modify the provided Deployment Manager template. +[role="_abstract"] +You must create the bootstrap machine in {gcp-first} to use during {product-title} cluster initialization. One way to create this machine is to modify the provided Infrastructure Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your bootstrap -machine, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. +If you do not use the provided Infrastructure Manager template to create your bootstrap machine, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. + +If you need to redeploy the bootstrap machine for any reason, delete the existing bootstrap machine first.
If you do not delete the existing bootstrap machine, the new bootstrap machine will not reload the Ignition file. ==== .Prerequisites @@ -31,84 +25,71 @@ have to contact Red Hat support with your installation logs. .Procedure -. Copy the template from the *Deployment Manager template for the bootstrap machine* -section of this topic and save it as `04_bootstrap.py` on your computer. This -template describes the bootstrap machine that your cluster requires. +. Copy the template from the *Infrastructure Manager template for the bootstrap machine* section of this topic and save it as `04_bootstrap.tf` in a folder called `04_bootstrap` on your computer. This template describes the bootstrap machine that your cluster requires. -. Export the location of the {op-system-first} image that the installation program requires: +** You can edit the `04_bootstrap.tf` file to add additional tags to the bootstrap machine, by modifying the existing `tags` stanza as follows: + -[source,terminal] +[source,bash] ---- -$ export CLUSTER_IMAGE=(`gcloud compute images describe ${INFRA_ID}-rhcos-image --format json | jq -r .selfLink`) +resource "google_compute_instance" "bootstrap" { +# ... + tags = [ + "${var.infra_id}-master", + "${var.infra_id}-bootstrap", + "custom-tag-example" + ] +# ... +} ---- -. Create a bucket and upload the `bootstrap.ign` file: +. Export the location of the {op-system-first} image that the installation program requires by running the following command: + [source,terminal] ---- -$ gsutil mb gs://${INFRA_ID}-bootstrap-ignition +$ export CLUSTER_IMAGE=(`gcloud compute images describe ${INFRA_ID}-rhcos-image --format json | jq -r .selfLink`) ---- + +. Create a bucket by running the following command: + [source,terminal] ---- -$ gsutil cp /bootstrap.ign gs://${INFRA_ID}-bootstrap-ignition/ +$ gcloud storage buckets create "gs://${INFRA_ID}-bootstrap-ignition" ---- -. Create a signed URL for the bootstrap instance to use to access the Ignition -config. 
Export the URL from the output as a variable: +. Upload the `bootstrap.ign` file by running the following command: + [source,terminal] ---- -$ export BOOTSTRAP_IGN=`gsutil signurl -d 1h service-account-key.json gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign | grep "^gs:" | awk '{print $5}'` +$ gcloud storage cp bootstrap.ign "gs://${INFRA_ID}-bootstrap-ignition/" ---- -. Create a `04_bootstrap.yaml` resource definition file: +. Create a signed URL for the bootstrap instance and export the URL from the output as a variable by running the following command: + [source,terminal] ---- -$ cat <04_bootstrap.yaml -imports: -- path: 04_bootstrap.py - -resources: -- name: cluster-bootstrap - type: 04_bootstrap.py - properties: - infra_id: '${INFRA_ID}' <1> - region: '${REGION}' <2> - zone: '${ZONE_0}' <3> - - cluster_network: '${CLUSTER_NETWORK}' <4> - control_subnet: '${CONTROL_SUBNET}' <5> - image: '${CLUSTER_IMAGE}' <6> - machine_type: 'n1-standard-4' <7> - root_volume_size: '128' <8> - - bootstrap_ign: '${BOOTSTRAP_IGN}' <9> -EOF +$ export BOOTSTRAP_IGN="$(gcloud storage sign-url --duration=2h --private-key-file=service-account-key.json "gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign" | grep "^signed_url:" | awk '{print $2}')" ---- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<2> `region` is the region to deploy the cluster into, for example `us-central1`. -<3> `zone` is the zone to deploy the bootstrap instance into, for example `us-central1-b`. -<4> `cluster_network` is the `selfLink` URL to the cluster network. -<5> `control_subnet` is the `selfLink` URL to the control subnet. -<6> `image` is the `selfLink` URL to the {op-system} image. -<7> `machine_type` is the machine type of the instance, for example `n1-standard-4`. -<8> `root_volume_size` is the boot disk size for the bootstrap machine. -<9> `bootstrap_ign` is the URL output when creating a signed URL. - -. Create the deployment by using the `gcloud` CLI: + +. 
Create the bootstrap deployment by running the following command: + [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-bootstrap --config 04_bootstrap.yaml +$ gcloud infra-manager deployments apply \ + --location=${REGION} \ + --project=${PROJECT_NAME} \ + --local-source=./04_bootstrap \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},zone=${ZONE_0},cluster_network=${CLUSTER_NETWORK},subnet=${CONTROL_SUBNET},image=${CLUSTER_IMAGE},bootstrap_ign="${BOOTSTRAP_IGN}",is_public_cluster= \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- ++ +where: + +``:: Specifies the name of the bootstrap deployment. +``:: Specifies whether the cluster is public or private. If it is a public cluster, specify `true`. If it is a private cluster, specify `false`. -ifndef::shared-vpc[] -. The templates do not manage load balancer membership due to limitations of Deployment -Manager, so you must add the bootstrap machine manually. +. The templates do not manage load balancer membership due to limitations of Infrastructure Manager, so you must add the bootstrap machine manually. -.. Add the bootstrap instance to the internal load balancer instance group: +.. Add the bootstrap instance to the internal load balancer instance group by running the following command: + [source,terminal] ---- @@ -116,31 +97,10 @@ $ gcloud compute instance-groups unmanaged add-instances \ ${INFRA_ID}-bootstrap-ig --zone=${ZONE_0} --instances=${INFRA_ID}-bootstrap ---- -.. Add the bootstrap instance group to the internal load balancer backend service: +.. Add the bootstrap instance group to the internal load balancer backend service by running the following command: + [source,terminal] ---- $ gcloud compute backend-services add-backend \ ${INFRA_ID}-api-internal --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-ig --instance-group-zone=${ZONE_0} ----- -endif::shared-vpc[] - -ifdef::shared-vpc[] -. 
Add the bootstrap instance to the internal load balancer instance group: -+ -[source,terminal] ----- -$ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-bootstrap-ig --zone=${ZONE_0} --instances=${INFRA_ID}-bootstrap ----- - -. Add the bootstrap instance group to the internal load balancer backend service: -+ -[source,terminal] ----- -$ gcloud compute backend-services add-backend ${INFRA_ID}-api-internal --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-ig --instance-group-zone=${ZONE_0} ----- -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] +---- \ No newline at end of file diff --git a/modules/installation-creating-gcp-control-plane.adoc b/modules/installation-creating-gcp-control-plane.adoc index d73655f502a6..65624ce8e066 100644 --- a/modules/installation-creating-gcp-control-plane.adoc +++ b/modules/installation-creating-gcp-control-plane.adoc @@ -4,122 +4,112 @@ // * installing/installing_gcp/installing-restricted-networks-gcp.adoc // * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-control-plane_{context}"] = Creating the control plane machines in {gcp-short} -You must create the control plane machines in {gcp-first} for -your cluster to use. One way to create these machines is to modify the -provided Deployment Manager template. +[role="_abstract"] +You must create the control plane machines in {gcp-first} for your cluster to use. One way to create these machines is to modify the provided Infrastructure Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your -control plane machines, you must review the provided information and manually -create the infrastructure. 
If your cluster does not initialize correctly, you -might have to contact Red Hat support with your installation logs. +If you do not use the provided template to create your control plane machines, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. ==== .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_, _Creating load balancers in {gcp-short}_, _Creating IAM roles in {gcp-short}_, and _Creating the bootstrap machine in {gcp-short}_ sections. -* Create the bootstrap machine. +* You defined the variables in the _Exporting common variables_, _Creating load balancers in {gcp-short}_, _Creating IAM roles in {gcp-short}_, and _Creating the bootstrap machine in {gcp-short}_ sections. +* You created the bootstrap machine. +* You created the Ignition configuration files. .Procedure -. Copy the template from the *Deployment Manager template for control plane machines* -section of this topic and save it as `05_control_plane.py` on your computer. -This template describes the control plane machines that your cluster requires. +. Copy the template from the *Infrastructure Manager template for control plane machines* section of this topic and save it as `05_control_plane.tf` in a folder called `05_control_plane` on your computer. This template describes the control plane machines that your cluster requires. + +** You can edit the `05_control_plane.tf` file to add additional tags to the control plane machines, by modifying the existing `tags` stanza. The following example adds a custom tag to the first control plane machine, which is named `master_0`: ++ +[source,bash] +---- +resource "google_compute_instance" "master_0" { +# ... + tags = [ + "${var.infra_id}-master", + "custom_tag_example" + ] +# ... +} +---- -. Export the following variable required by the resource definition: +. 
Copy the `master.ign` file from your installation directory into the `05_control_plane` folder by running the following command: + [source,terminal] ---- -$ export MASTER_IGNITION=`cat /master.ign` +$ cp /master.ign 05_control_plane/master.ign ---- +`` specifies the directory where you created the Ignition configuration files. -. Create a `05_control_plane.yaml` resource definition file: +. Create the control plane deployment by running the following command: + [source,terminal] ---- -$ cat <05_control_plane.yaml -imports: -- path: 05_control_plane.py - -resources: -- name: cluster-control-plane - type: 05_control_plane.py - properties: - infra_id: '${INFRA_ID}' <1> - zones: <2> - - '${ZONE_0}' - - '${ZONE_1}' - - '${ZONE_2}' - - control_subnet: '${CONTROL_SUBNET}' <3> - image: '${CLUSTER_IMAGE}' <4> - machine_type: 'n1-standard-4' <5> - root_volume_size: '128' - service_account_email: '${MASTER_SERVICE_ACCOUNT}' <6> - - ignition: '${MASTER_IGNITION}' <7> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<2> `zones` are the zones to deploy the control plane instances into, for example `us-central1-a`, `us-central1-b`, and `us-central1-c`. -<3> `control_subnet` is the `selfLink` URL to the control subnet. -<4> `image` is the `selfLink` URL to the {op-system} image. -<5> `machine_type` is the machine type of the instance, for example `n1-standard-4`. -<6> `service_account_email` is the email address for the master service account that you created. -<7> `ignition` is the contents of the `master.ign` file. - -. 
Create the deployment by using the `gcloud` CLI: +$ gcloud infra-manager deployments apply \ + --location=${REGION} \ + --project=${PROJECT_NAME} \ + --local-source=./05_control_plane \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},zone_0=${ZONE_0},zone_1=${ZONE_1},zone_2=${ZONE_2},subnet=${CONTROL_SUBNET},image=${CLUSTER_IMAGE},service_account_email=${MASTER_SERVICE_ACCOUNT} \ + --service-account=${INSTALL_SERVICE_ACCOUNT} +---- +`` specifies the name of the control plane deployment. + +. Delete the temporary ignition file from the `05_control_plane` folder by running the following command: + [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-control-plane --config 05_control_plane.yaml +$ rm 05_control_plane/master.ign ---- -. The templates do not manage load balancer membership due to limitations of Deployment -Manager, so you must add the control plane machines manually. -** Run the following commands to add the control plane machines to the appropriate instance groups: +. The templates do not manage load balancer membership due to limitations of Infrastructure Manager, so you must add the control plane machines manually. + +.. Add the first control plane machine to an internal load balancer instance group by running the following command: + [source,terminal] ---- $ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_0}-ig --zone=${ZONE_0} --instances=${INFRA_ID}-master-0 ---- + +.. Add the second control plane machine to an internal load balancer instance group by running the following command: + [source,terminal] ---- $ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_1}-ig --zone=${ZONE_1} --instances=${INFRA_ID}-master-1 ---- + +.. 
Add the third control plane machine to an internal load balancer instance group by running the following command: + [source,terminal] ---- $ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_2}-ig --zone=${ZONE_2} --instances=${INFRA_ID}-master-2 ---- -** For an external cluster, you must also run the following commands to add the control plane machines to the target pools: +. For an external cluster, you must also add the control plane machines to external load balancer target pools. + +.. Add the first control plane machine to an external load balancer pool by running the following command: + [source,terminal] ---- $ gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-master-0 ---- + +.. Add the second control plane machine to an external load balancer pool by running the following command: + [source,terminal] ---- $ gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_1}" --instances=${INFRA_ID}-master-1 ---- + +..
Add the third control plane machine to an external load balancer pool by running the following command: + [source,terminal] ---- $ gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_2}" --instances=${INFRA_ID}-master-2 ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] +---- \ No newline at end of file diff --git a/modules/installation-creating-gcp-firewall-rules-vpc.adoc b/modules/installation-creating-gcp-firewall-rules-vpc.adoc index 172ae376c466..6f111d56b4aa 100644 --- a/modules/installation-creating-gcp-firewall-rules-vpc.adoc +++ b/modules/installation-creating-gcp-firewall-rules-vpc.adoc @@ -9,18 +9,15 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-firewall-rules-vpc_{context}"] -= Creating firewall rules in {gcp-short} += Creating firewall rules and IAM roles in {gcp-short} -You must create firewall rules in {gcp-first} for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. +[role="_abstract"] +You must create firewall rules and IAM roles in {gcp-first} for your {product-title} cluster to use. One way to create these components is to modify the provided Infrastructure Manager template. +If you are installing a cluster into a shared VPC and the host project already has the necessary firewall rules and IAM roles, you can skip creating these resources. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your {gcp-short} -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. +If you do not use the provided Infrastructure Manager template to create your {gcp-short} infrastructure, you must review the provided information and manually create the infrastructure.
If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. ==== .Prerequisites @@ -29,49 +26,29 @@ have to contact Red Hat support with your installation logs. .Procedure -. Copy the template from the -*Deployment Manager template for firewall rules* -section of this topic and save it as `03_firewall.py` on your computer. This -template describes the security groups that your cluster requires. +. Copy the template from the *Infrastructure Manager template for firewall rules and IAM roles* section of this topic and save it as `03_security.tf` in a folder called `03_security` on your computer. This template describes the security groups that your cluster requires. -. Create a `03_firewall.yaml` resource definition file: +. Create the firewall rules and IAM roles by running the following command: + [source,terminal] ---- -$ cat <03_firewall.yaml -imports: -- path: 03_firewall.py - -resources: -- name: cluster-firewall - type: 03_firewall.py - properties: - allowed_external_cidr: '0.0.0.0/0' <1> - infra_id: '${INFRA_ID}' <2> - cluster_network: '${CLUSTER_NETWORK}' <3> - network_cidr: '${NETWORK_CIDR}' <4> -EOF +$ gcloud infra-manager deployments apply \ + --location=${REGION} \ + --project=${PROJECT_NAME} \ + --local-source=./03_security \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},cluster_network=${CLUSTER_NETWORK},network_cidr=${NETWORK_CIDR} \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- -<1> `allowed_external_cidr` is the CIDR range that can access the cluster API and SSH to the bootstrap host. For an internal cluster, set this value to `${NETWORK_CIDR}`. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `cluster_network` is the `selfLink` URL to the cluster network. -<4> `network_cidr` is the CIDR of the VPC network, for example `10.0.0.0/16`. +`` specifies the name of the deployment of firewall rules and IAM roles. -. 
Create the deployment by using the `gcloud` CLI: +. Configure service account variables based on the roles you created by running the following commands: + -ifdef::shared-vpc[] [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-firewall --config 03_firewall.yaml --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} +$ export MASTER_SERVICE_ACCOUNT=$(gcloud iam service-accounts list --filter "email~^${INFRA_ID}-m@${PROJECT_NAME}." --format json | jq -r '.[0].email') ---- -endif::shared-vpc[] -ifndef::shared-vpc[] ++ [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-firewall --config 03_firewall.yaml ----- -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] +$ export WORKER_SERVICE_ACCOUNT=$(gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email') +---- \ No newline at end of file diff --git a/modules/installation-creating-gcp-iam-shared-vpc.adoc b/modules/installation-creating-gcp-iam-shared-vpc.adoc index 857e68b3af6f..b344fff5d017 100644 --- a/modules/installation-creating-gcp-iam-shared-vpc.adoc +++ b/modules/installation-creating-gcp-iam-shared-vpc.adoc @@ -9,11 +9,10 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-iam-shared-vpc_{context}"] -= Creating IAM roles in {gcp-short} += Creating IAM policy bindings in {gcp-short} -You must create IAM roles in {gcp-first} for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. +[role="_abstract"] +You must create IAM policy bindings in {gcp-first} for your {product-title} cluster to use. [NOTE] ==== @@ -26,47 +25,8 @@ If you do not use the provided Deployment Manager template to create your {gcp-s .Procedure -. 
Copy the template from the *Deployment Manager template for IAM roles* section of this topic and save it as `03_iam.py` on your computer. This template describes the IAM roles that your cluster requires. - -. Create a `03_iam.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <03_iam.yaml -imports: -- path: 03_iam.py -resources: -- name: cluster-iam - type: 03_iam.py - properties: - infra_id: '${INFRA_ID}' <1> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-iam --config 03_iam.yaml ----- - -. Export the variable for the master service account: -+ -[source,terminal] ----- -$ export MASTER_SERVICE_ACCOUNT=(`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-m@${PROJECT_NAME}." --format json | jq -r '.[0].email'`) ----- - -. Export the variable for the worker service account: -+ -[source,terminal] ----- -$ export WORKER_SERVICE_ACCOUNT=(`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email'`) ----- - ifndef::shared-vpc[] -. Export the variable for the subnet that hosts the compute machines: +. Export the variable for the subnet that hosts the compute machines by running the following command: + [source,terminal] ---- @@ -77,35 +37,35 @@ endif::shared-vpc[] ifdef::shared-vpc[] . Assign the permissions that the installation program requires to the service accounts for the subnets that host the control plane and compute subnets: + -.. Grant the `networkViewer` role of the project that hosts your shared VPC to the master service account: +.. 
Grant the `networkViewer` role of the project that hosts your shared VPC to the master service account by running the following command: + [source,terminal] ---- $ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} projects add-iam-policy-binding ${HOST_PROJECT} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkViewer" ---- + -.. Grant the `networkUser` role to the master service account for the control plane subnet: +.. Grant the `networkUser` role to the master service account for the control plane subnet by running the following command: + [source,terminal] ---- $ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_CONTROL_SUBNET}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ---- + -.. Grant the `networkUser` role to the worker service account for the control plane subnet: +.. Grant the `networkUser` role to the worker service account for the control plane subnet by running the following command: + [source,terminal] ---- $ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_CONTROL_SUBNET}" --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ---- + -.. Grant the `networkUser` role to the master service account for the compute subnet: +.. Grant the `networkUser` role to the master service account for the compute subnet by running the following command: + [source,terminal] ---- $ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_COMPUTE_SUBNET}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ---- + -.. Grant the `networkUser` role to the worker service account for the compute subnet: +.. 
Grant the `networkUser` role to the worker service account for the compute subnet by running the following command: + [source,terminal] ---- @@ -113,8 +73,7 @@ $ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute net ---- endif::shared-vpc[] -. The templates do not create the policy bindings due to limitations of Deployment -Manager, so you must create them manually: +. The templates do not create the policy bindings due to limitations of Infrastructure Manager, so you must create them manually by running the following commands: + [source,terminal] ---- @@ -151,7 +110,7 @@ $ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccoun $ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/storage.admin" ---- -. Create a service account key and store it locally for later use: +. Create a service account key and store it locally for later use by running the following command: + [source,terminal] ---- diff --git a/modules/installation-creating-gcp-lb.adoc b/modules/installation-creating-gcp-lb.adoc index 07c5a0fad166..4757e18a461a 100644 --- a/modules/installation-creating-gcp-lb.adoc +++ b/modules/installation-creating-gcp-lb.adoc @@ -11,137 +11,64 @@ endif::[] [id="installation-creating-gcp-lb_{context}"] = Creating load balancers in {gcp-short} -You must configure load balancers in {gcp-first} for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. +[role="_abstract"] +You must configure load balancers in {gcp-first} for your {product-title} cluster to use. One way to create these components is to modify the provided Infrastructure Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your {gcp-short} -infrastructure, you must review the provided information and manually create -the infrastructure. 
If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. +If you do not use the provided template to create your {gcp-short} infrastructure, you must review the provided information and manually create the infrastructure. +If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. ==== .Prerequisites * You have defined the variables in the _Exporting common variables_ section. +* You have defined the variables in the _Creating a VPC in {gcp-short}_ section. .Procedure -. Copy the template from the *Deployment Manager template for the internal load balancer* -section of this topic and save it as `02_lb_int.py` on your computer. This -template describes the internal load balancing objects that your cluster -requires. +. Copy the template from the *Infrastructure Manager template for the internal load balancer* section of this topic and save it as `02_lb_int.tf` in a directory called `02_lb_int` on your computer. This template describes the internal load balancing objects that your cluster requires. -. For an external cluster, also copy the template from the *Deployment Manager template for the external load balancer* -section of this topic and save it as `02_lb_ext.py` on your computer. This -template describes the external load balancing objects that your cluster -requires. - -. Export the variables that the deployment template uses: - -.. Export the cluster network location: +.. 
Create an internal load balancer by running the following command: + -ifdef::shared-vpc[] [source,terminal] ---- -$ export CLUSTER_NETWORK=(`gcloud compute networks describe ${HOST_PROJECT_NETWORK} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink`) ---- -endif::shared-vpc[] -ifndef::shared-vpc[] [source,terminal] ---- -$ export CLUSTER_NETWORK=(`gcloud compute networks describe ${INFRA_ID}-network --format json | jq -r .selfLink`) +$ gcloud infra-manager deployments apply <lb_int_deployment_name> \ + --location=${REGION} \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},cluster_network=${CLUSTER_NETWORK},control_subnet=${CONTROL_SUBNET},zone_0=${ZONE_0},zone_1=${ZONE_1},zone_2=${ZONE_2} \ + --project=${PROJECT_NAME} \ + --local-source=./02_lb_int \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- -endif::shared-vpc[] - -.. Export the control plane subnet location: + -ifdef::shared-vpc[] -[source,terminal] ----- -$ export CONTROL_SUBNET=(`gcloud compute networks subnets describe ${HOST_PROJECT_CONTROL_SUBNET} --region=${REGION} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ export CONTROL_SUBNET=(`gcloud compute networks subnets describe ${INFRA_ID}-master-subnet --region=${REGION} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] +`<lb_int_deployment_name>` specifies the name of the internal load balancer deployment you create. -.. Export the three zones that the cluster uses: -+ -[source,terminal] ----- -$ export ZONE_0=(`gcloud compute regions describe ${REGION} --format=json | jq -r .zones[0] | cut -d "/" -f9`) ----- +..
Export the `CLUSTER_IP` variable by running the following command: + [source,terminal] ---- -$ export ZONE_1=(`gcloud compute regions describe ${REGION} --format=json | jq -r .zones[1] | cut -d "/" -f9`) ----- -+ -[source,terminal] ----- -$ export ZONE_2=(`gcloud compute regions describe ${REGION} --format=json | jq -r .zones[2] | cut -d "/" -f9`) +$ export CLUSTER_IP=$(gcloud compute addresses describe ${INFRA_ID}-cluster-ip --region=${REGION} --format json | jq -r .address) ---- -. Create a `02_infra.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <02_infra.yaml -imports: -- path: 02_lb_ext.py -- path: 02_lb_int.py <1> -resources: -- name: cluster-lb-ext <1> - type: 02_lb_ext.py - properties: - infra_id: '${INFRA_ID}' <2> - region: '${REGION}' <3> -- name: cluster-lb-int - type: 02_lb_int.py - properties: - cluster_network: '${CLUSTER_NETWORK}' - control_subnet: '${CONTROL_SUBNET}' <4> - infra_id: '${INFRA_ID}' - region: '${REGION}' - zones: <5> - - '${ZONE_0}' - - '${ZONE_1}' - - '${ZONE_2}' -EOF ----- -<1> Required only when deploying an external cluster. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `region` is the region to deploy the cluster into, for example `us-central1`. -<4> `control_subnet` is the URI to the control subnet. -<5> `zones` are the zones to deploy the control plane instances into, like `us-east1-b`, `us-east1-c`, and `us-east1-d`. +. Optional: For a public or externally available cluster, copy the template from the *Infrastructure Manager template for the external load balancer* section of this topic and save it as `02_lb_ext.tf` in a directory called `02_lb_ext` on your computer. This template describes the external load balancing objects that your cluster requires. -. Create the deployment by using the `gcloud` CLI: +.. 
Create an external load balancer by running the following command: + [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-infra --config 02_infra.yaml +$ gcloud infra-manager deployments apply <lb_ext_deployment_name> \ + --location=${REGION} \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION} \ + --project=${PROJECT_NAME} \ + --local-source=./02_lb_ext \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- - -. Export the cluster IP address: + -[source,terminal] ----- -$ export CLUSTER_IP=(`gcloud compute addresses describe ${INFRA_ID}-cluster-ip --region=${REGION} --format json | jq -r .address`) ----- +`<lb_ext_deployment_name>` specifies the name of the external load balancer deployment you create. -. For an external cluster, also export the cluster public IP address: +.. Export the `CLUSTER_PUBLIC_IP` variable by running the following command: + [source,terminal] ---- -$ export CLUSTER_PUBLIC_IP=(`gcloud compute addresses describe ${INFRA_ID}-cluster-public-ip --region=${REGION} --format json | jq -r .address`) ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] +$ export CLUSTER_PUBLIC_IP=$(gcloud compute addresses describe ${INFRA_ID}-cluster-public-ip --region=${REGION} --format json | jq -r .address) +---- \ No newline at end of file diff --git a/modules/installation-creating-gcp-private-dns.adoc b/modules/installation-creating-gcp-private-dns.adoc index db247b8489b6..6acbfcfb9f17 100644 --- a/modules/installation-creating-gcp-private-dns.adoc +++ b/modules/installation-creating-gcp-private-dns.adoc @@ -11,16 +11,13 @@ endif::[] [id="installation-creating-gcp-private-dns_{context}"] = Creating a private DNS zone in {gcp-short} -You must configure a private DNS zone in {gcp-first} for your -{product-title} cluster to use. One way to create this component is -to modify the provided Deployment Manager template.
+[role="_abstract"] +You must configure a private DNS zone in {gcp-first} for your {product-title} cluster to use. One way to create this component is to modify the provided Infrastructure Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your {gcp-short} -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. +If you do not use the provided template to create your {gcp-short} infrastructure, you must review the provided information and manually create the infrastructure. +If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. ==== .Prerequisites @@ -29,51 +26,24 @@ have to contact Red Hat support with your installation logs. .Procedure -. Copy the template from the *Deployment Manager template for the private DNS* -section of this topic and save it as `02_dns.py` on your computer. This -template describes the private DNS objects that your cluster -requires. +. Copy the template from the *Infrastructure Manager template for the private DNS* section of this topic and save it as `02_dns.tf` in a folder called `02_dns` on your computer. This template describes the private DNS objects that your cluster requires. -. Create a `02_dns.yaml` resource definition file: +. If you are installing a cluster into a shared VPC, and the host project already has a private DNS zone, skip this step. Create the DNS zone by running the following command: + [source,terminal] ---- -$ cat <02_dns.yaml -imports: -- path: 02_dns.py - -resources: -- name: cluster-dns - type: 02_dns.py - properties: - infra_id: '${INFRA_ID}' <1> - cluster_domain: '${CLUSTER_NAME}.${BASE_DOMAIN}' <2> - cluster_network: '${CLUSTER_NETWORK}' <3> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. 
-<2> `cluster_domain` is the domain for the cluster, for example `openshift.example.com`. -<3> `cluster_network` is the `selfLink` URL to the cluster network. - -. Create the deployment by using the `gcloud` CLI: -+ -ifdef::shared-vpc[] [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-dns --config 02_dns.yaml --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} +$ gcloud infra-manager deployments apply <dns_deployment_name> \ + --location=${REGION} \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},cluster_domain=${CLUSTER_DOMAIN},cluster_network=${CLUSTER_NETWORK} \ + --project=${PROJECT_NAME} \ + --local-source=./02_dns \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-dns --config 02_dns.yaml ----- -endif::shared-vpc[] +`<dns_deployment_name>` specifies the name of the DNS zone deployment you create. -. The templates do not create DNS entries due to limitations of Deployment -Manager, so you must create them manually: +. The templates do not create DNS entries due to limitations of Infrastructure Manager, so you must create them manually: + -.. Add the internal DNS entries: +.. Add the internal DNS entries by running the following commands: + ifdef::shared-vpc[] [source,terminal] @@ -128,7 +98,7 @@ $ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone ---- endif::shared-vpc[] + -.. For an external cluster, also add the external DNS entries: +..
For an external cluster, also add the external DNS entries by running the following commands: + ifdef::shared-vpc[] [source,terminal] diff --git a/modules/installation-creating-gcp-vpc.adoc b/modules/installation-creating-gcp-vpc.adoc index 16ec634026a4..72ab970176b6 100644 --- a/modules/installation-creating-gcp-vpc.adoc +++ b/modules/installation-creating-gcp-vpc.adoc @@ -2,26 +2,17 @@ // // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-vpc_{context}"] = Creating a VPC in {gcp-short} -You must create a VPC in {gcp-first} for your {product-title} -cluster to use. You can customize the VPC to meet your requirements. One way to -create the VPC is to modify the provided Deployment Manager template. +[role="_abstract"] +You must create a VPC in {gcp-first} for your {product-title} cluster to use. You can customize the VPC to meet your requirements. One way to create the VPC is to modify the provided Infrastructure Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your {gcp-short} -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. +If you do not use the provided Infrastructure Manager template to create your {gcp-short} infrastructure, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. ==== .Prerequisites @@ -30,120 +21,41 @@ have to contact Red Hat support with your installation logs. .Procedure -. 
Copy the template from the *Deployment Manager template for the VPC* -section of this topic and save it as `01_vpc.py` on your computer. This template -describes the VPC that your cluster requires. - -ifdef::shared-vpc[] -. Export the following variables required by the resource definition: +. Copy the template from the *Infrastructure Manager template for the VPC* section of this topic and save it as `01_vpc.tf` in a directory called `01_vpc` on your computer. This template describes the VPC that your cluster requires. -.. Export the control plane CIDR: +. Create a VPC by running the following command: + [source,terminal] ---- -$ export MASTER_SUBNET_CIDR='10.0.0.0/17' +$ gcloud infra-manager deployments apply <vpc_deployment_name> \ + --location=${REGION} \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},master_subnet_cidr=${MASTER_SUBNET_CIDR},worker_subnet_cidr=${WORKER_SUBNET_CIDR} \ + --project=${PROJECT_NAME} \ + --local-source=./01_vpc \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- - -.. Export the compute CIDR: + -[source,terminal] ----- -$ export WORKER_SUBNET_CIDR='10.0.128.0/17' ----- +`<vpc_deployment_name>` specifies the name of the VPC deployment you create. -.. Export the region to deploy the VPC network and cluster to: -+ -[source,terminal] ----- -$ export REGION='' ----- - -. Export the variable for the ID of the project that hosts the shared VPC: -+ -[source,terminal] ----- -$ export HOST_PROJECT= ----- - -. Export the variable for the email of the service account that belongs to host project: -+ -[source,terminal] ----- -$ export HOST_PROJECT_ACCOUNT= ----- -endif::shared-vpc[] +. Configure environment variables that will be used to create other cluster infrastructure. -. Create a `01_vpc.yaml` resource definition file: +..
Configure the `CLUSTER_NETWORK` environment variable by running the following command: + [source,terminal] ---- -$ cat <01_vpc.yaml -imports: -- path: 01_vpc.py - -resources: -- name: cluster-vpc - type: 01_vpc.py - properties: -ifndef::shared-vpc[] - infra_id: '${INFRA_ID}' <1> -endif::shared-vpc[] -ifdef::shared-vpc[] - infra_id: '' <1> -endif::shared-vpc[] - region: '${REGION}' <2> - master_subnet_cidr: '${MASTER_SUBNET_CIDR}' <3> - worker_subnet_cidr: '${WORKER_SUBNET_CIDR}' <4> -EOF +$ export CLUSTER_NETWORK=$(gcloud compute networks describe ${INFRA_ID}-network --format json | jq -r .selfLink) ---- -ifndef::shared-vpc[] -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -endif::shared-vpc[] -ifdef::shared-vpc[] -<1> `infra_id` is the prefix of the network name. -endif::shared-vpc[] -<2> `region` is the region to deploy the cluster into, for example `us-central1`. -<3> `master_subnet_cidr` is the CIDR for the master subnet, for example `10.0.0.0/17`. -<4> `worker_subnet_cidr` is the CIDR for the worker subnet, for example `10.0.128.0/17`. -. Create the deployment by using the `gcloud` CLI: +.. Configure the `CONTROL_SUBNET` environment variable by running the following command: + -ifndef::shared-vpc[] [source,terminal] ---- -$ gcloud deployment-manager deployments create ${INFRA_ID}-vpc --config 01_vpc.yaml +$ export CONTROL_SUBNET=$(gcloud compute networks subnets describe ${INFRA_ID}-master-subnet --region=${REGION} --format json | jq -r .selfLink) ---- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create --config 01_vpc.yaml --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} <1> ----- -<1> For ``, specify the name of the VPC to deploy. -. Export the VPC variable that other components require: -.. Export the name of the host project network: +.. 
Configure the `COMPUTE_SUBNET` environment variable by running the following command: + [source,terminal] ---- -$ export HOST_PROJECT_NETWORK= +$ export COMPUTE_SUBNET=$(gcloud compute networks subnets describe ${INFRA_ID}-worker-subnet --region=${REGION} --format json | jq -r .selfLink) ---- -.. Export the name of the host project control plane subnet: -+ -[source,terminal] ----- -$ export HOST_PROJECT_CONTROL_SUBNET= ----- -.. Export the name of the host project compute subnet: -+ -[source,terminal] ----- -$ export HOST_PROJECT_COMPUTE_SUBNET= ----- - -. Set up the shared VPC. See link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#setting_up[Setting up Shared VPC] in the {gcp-short} documentation. -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-worker.adoc b/modules/installation-creating-gcp-worker.adoc index 0821aac171c2..178cbf0a231d 100644 --- a/modules/installation-creating-gcp-worker.adoc +++ b/modules/installation-creating-gcp-worker.adoc @@ -3,39 +3,18 @@ // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc -ifeval::["{context}" == "installing-gcp-user-infra"] -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-worker_{context}"] = Creating additional worker machines in {gcp-short} -You can create worker machines in {gcp-first} for your cluster -to use by launching individual instances discretely or by automated processes -outside the cluster, such as auto scaling groups. You can also take advantage of -the built-in cluster scaling mechanisms and the machine API in {product-title}. +[role="_abstract"] +You can create worker machines in {gcp-first} for your cluster by using the Infrastructure Manager template. 
You can adjust the number of machines by modifying the number of `google_compute_instance` resources in the provided template. -ifdef::three-node-cluster[] [NOTE] ==== -If you are installing a three-node cluster, skip this step. A three-node cluster consists of three control plane machines, which also act as compute machines. -==== -endif::three-node-cluster[] - -In this example, you manually launch one instance by using the Deployment -Manager template. Additional instances can be launched by including additional -resources of type `06_worker.py` in the file. +If you do not use the provided Infrastructure Manager template to create your compute machines, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your worker -machines, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. +If you are installing a three-node cluster, skip this step. A three-node cluster consists of three control plane machines, which also act as compute machines. ==== .Prerequisites @@ -46,105 +25,46 @@ have to contact Red Hat support with your installation logs. .Procedure -. Copy the template from the *Deployment Manager template for worker machines* -section of this topic and save it as `06_worker.py` on your computer. This -template describes the worker machines that your cluster requires. +. Copy the template from the *Infrastructure Manager template for worker machines* section of this topic and save it as `06_worker.tf` in a folder called `06_worker` on your computer. This template describes the worker machines that your cluster requires. -. Export the variables that the resource definition uses. -.. 
Export the subnet that hosts the compute machines: + -ifndef::shared-vpc[] -[source,terminal] ----- -$ export COMPUTE_SUBNET=(`gcloud compute networks subnets describe ${INFRA_ID}-worker-subnet --region=${REGION} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] +** You can edit the `06_worker.tf` file to add additional tags to the compute machines, by modifying the existing `tags` stanza as follows: + [source,bash] ---- -$ export COMPUTE_SUBNET=(`gcloud compute networks subnets describe ${HOST_PROJECT_COMPUTE_SUBNET} --region=${REGION} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink`) +resource "google_compute_instance" "worker_0" { +# ... + tags = [ + "${var.infra_id}-worker-0", + "custom-tag-example" + ] +# ... +} ---- -endif::shared-vpc[] -.. Export the email address for your service account: +. Copy the `worker.ign` file from your installation directory into the `06_worker` folder by running the following command: + [source,terminal] ---- -$ export WORKER_SERVICE_ACCOUNT=(`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email'`) +$ cp <installation_directory>/worker.ign 06_worker/worker.ign ---- +`<installation_directory>` specifies the directory where you created the Ignition configuration files. -.. Export the location of the compute machine Ignition config file: +. Create the deployment by running the following command: + [source,terminal] ---- -$ export WORKER_IGNITION=`cat /worker.ign` +$ gcloud infra-manager deployments apply <worker_deployment_name> \ + --location=${REGION} \ + --project=${PROJECT_NAME} \ + --local-source=./06_worker \ + --input-values=infra_id=${INFRA_ID},project=${PROJECT_NAME},region=${REGION},zone_0=${ZONE_0},zone_1=${ZONE_1},subnet=${COMPUTE_SUBNET},image=${CLUSTER_IMAGE},service_account_email=${WORKER_SERVICE_ACCOUNT} \ + --service-account=${INSTALL_SERVICE_ACCOUNT} ---- +`<worker_deployment_name>` specifies the name of the deployment. -.
Create a `06_worker.yaml` resource definition file: +. Remove the `worker.ign` file by running the following command: + [source,terminal] ---- -$ cat <06_worker.yaml -imports: -- path: 06_worker.py - -resources: -- name: 'worker-0' <1> - type: 06_worker.py - properties: - infra_id: '${INFRA_ID}' <2> - zone: '${ZONE_0}' <3> - compute_subnet: '${COMPUTE_SUBNET}' <4> - image: '${CLUSTER_IMAGE}' <5> - machine_type: 'n1-standard-4' <6> - root_volume_size: '128' - service_account_email: '${WORKER_SERVICE_ACCOUNT}' <7> - ignition: '${WORKER_IGNITION}' <8> -- name: 'worker-1' - type: 06_worker.py - properties: - infra_id: '${INFRA_ID}' <2> - zone: '${ZONE_1}' <3> - compute_subnet: '${COMPUTE_SUBNET}' <4> - image: '${CLUSTER_IMAGE}' <5> - machine_type: 'n1-standard-4' <6> - root_volume_size: '128' - service_account_email: '${WORKER_SERVICE_ACCOUNT}' <7> - ignition: '${WORKER_IGNITION}' <8> -EOF ----- -<1> `name` is the name of the worker machine, for example `worker-0`. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `zone` is the zone to deploy the worker machine into, for example `us-central1-a`. -<4> `compute_subnet` is the `selfLink` URL to the compute subnet. -<5> `image` is the `selfLink` URL to the {op-system} image. ^1^ -<6> `machine_type` is the machine type of the instance, for example `n1-standard-4`. -<7> `service_account_email` is the email address for the worker service account that you created. -<8> `ignition` is the contents of the `worker.ign` file. - -. Optional: If you want to launch additional instances, include additional -resources of type `06_worker.py` in your `06_worker.yaml` resource definition -file. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-worker --config 06_worker.yaml ----- - -. 
To use a {gcp-short} Marketplace image, specify the offer to use: -+ -** {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736` -+ -** {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-413-x86-64-202305021736` -+ -** {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736` - - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] +$ rm 06_worker/worker.ign +---- \ No newline at end of file diff --git a/modules/installation-deployment-manager-bootstrap.adoc b/modules/installation-deployment-manager-bootstrap.adoc deleted file mode 100644 index fc4d61c81317..000000000000 --- a/modules/installation-deployment-manager-bootstrap.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-bootstrap_{context}"] -= Deployment Manager template for the bootstrap machine - -You can use the following Deployment Manager template to deploy the bootstrap -machine that you need for your {product-title} cluster: - -.`04_bootstrap.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/04_bootstrap.py[] ----- -==== diff --git a/modules/installation-deployment-manager-control-plane.adoc b/modules/installation-deployment-manager-control-plane.adoc deleted file mode 100644 index 1ea45801e20a..000000000000 --- a/modules/installation-deployment-manager-control-plane.adoc +++ /dev/null @@ -1,20 
+0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-control-plane_{context}"] -= Deployment Manager template for control plane machines - -You can use the following Deployment Manager template to deploy the control -plane machines that you need for your {product-title} cluster: - -.`05_control_plane.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/05_control_plane.py[] ----- -==== diff --git a/modules/installation-deployment-manager-ext-lb.adoc b/modules/installation-deployment-manager-ext-lb.adoc deleted file mode 100644 index 64d0c8fe1375..000000000000 --- a/modules/installation-deployment-manager-ext-lb.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-ext-lb_{context}"] -= Deployment Manager template for the external load balancer - -You can use the following Deployment Manager template to deploy the external load balancer that you need for your {product-title} cluster: - -.`02_lb_ext.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/02_lb_ext.py[] ----- -==== diff --git a/modules/installation-deployment-manager-firewall-rules.adoc b/modules/installation-deployment-manager-firewall-rules.adoc deleted file mode 100644 index d3cdad8f477d..000000000000 --- a/modules/installation-deployment-manager-firewall-rules.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * 
installing/installing_gcp/installing-gcp-user-infra-shared-vpc.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-firewall-rules_{context}"] -= Deployment Manager template for firewall rules - -You can use the following Deployment Manager template to deploy the firewall rules that you need for your {product-title} cluster: - -.`03_firewall.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/03_firewall.py[] ----- -==== diff --git a/modules/installation-deployment-manager-int-lb.adoc b/modules/installation-deployment-manager-int-lb.adoc deleted file mode 100644 index b001936c1a86..000000000000 --- a/modules/installation-deployment-manager-int-lb.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-int-lb_{context}"] -= Deployment Manager template for the internal load balancer - -You can use the following Deployment Manager template to deploy the internal load balancer that you need for your {product-title} cluster: - -.`02_lb_int.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/02_lb_int.py[] ----- -==== - -You will need this template in addition to the `02_lb_ext.py` template when you create an external cluster. 
diff --git a/modules/installation-deployment-manager-private-dns.adoc b/modules/installation-deployment-manager-private-dns.adoc deleted file mode 100644 index 1b9365d00778..000000000000 --- a/modules/installation-deployment-manager-private-dns.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-private-dns_{context}"] -= Deployment Manager template for the private DNS - -You can use the following Deployment Manager template to deploy the private DNS that you need for your {product-title} cluster: - -.`02_dns.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/02_dns.py[] ----- -==== diff --git a/modules/installation-deployment-manager-vpc.adoc b/modules/installation-deployment-manager-vpc.adoc deleted file mode 100644 index 9b57f1df4fe5..000000000000 --- a/modules/installation-deployment-manager-vpc.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-vpc_{context}"] -= Deployment Manager template for the VPC - -You can use the following Deployment Manager template to deploy the VPC that -you need for your {product-title} cluster: - -.`01_vpc.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/01_vpc.py[] ----- -==== diff --git a/modules/installation-deployment-manager-worker.adoc b/modules/installation-deployment-manager-worker.adoc deleted file mode 100644 index 2b3dc67114ac..000000000000 --- 
a/modules/installation-deployment-manager-worker.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_mod-docs-content-type: REFERENCE -[id="installation-deployment-manager-worker_{context}"] -= Deployment Manager template for worker machines - -You can use the following Deployment Manager template to deploy the worker machines -that you need for your {product-title} cluster: - -.`06_worker.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.21/upi/gcp/06_worker.py[] ----- -==== diff --git a/modules/installation-extracting-infraid.adoc b/modules/installation-extracting-infraid.adoc index db999e912a44..043bf3cabc36 100644 --- a/modules/installation-extracting-infraid.adoc +++ b/modules/installation-extracting-infraid.adoc @@ -85,6 +85,11 @@ The Ignition config files contain a unique cluster identifier that you can use t uniquely identify your cluster in {cp-first}. If you plan to use the cluster identifier as the name of your virtual machine folder, you must extract it. endif::vsphere[] +[NOTE] +==== +Do not run the `openshift-install create manifests` command again after creating any {gcp-short} resources. Running the command again generates a new cluster identifier, which will cause errors in existing resources. If you need to regenerate the manifests because you modified the `install-config.yaml` file, delete any {gcp-short} resources you created and recreate them with the new cluster identifier. 
+==== + .Prerequisites ifndef::gcp[] diff --git a/modules/installation-infrastructure-manager-bootstrap.adoc b/modules/installation-infrastructure-manager-bootstrap.adoc new file mode 100644 index 000000000000..5a7882a1657e --- /dev/null +++ b/modules/installation-infrastructure-manager-bootstrap.adoc @@ -0,0 +1,20 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra.adoc +// * installing/installing_gcp/installing-restricted-networks-gcp.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-bootstrap_{context}"] += Infrastructure Manager template for the bootstrap machine + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the bootstrap machine that you need for your {product-title} cluster: + +.`04_bootstrap.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/04_bootstrap/04_bootstrap.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-control-plane.adoc b/modules/installation-infrastructure-manager-control-plane.adoc new file mode 100644 index 000000000000..c56e0388b8ad --- /dev/null +++ b/modules/installation-infrastructure-manager-control-plane.adoc @@ -0,0 +1,20 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra.adoc +// * installing/installing_gcp/installing-restricted-networks-gcp.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-control-plane_{context}"] += Infrastructure Manager template for control plane machines + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the control plane machines that you need for your {product-title} cluster: + +.`05_control_plane.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +----
+include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/05_control_plane/05_control_plane.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-ext-lb.adoc b/modules/installation-infrastructure-manager-ext-lb.adoc new file mode 100644 index 000000000000..c1c335a92381 --- /dev/null +++ b/modules/installation-infrastructure-manager-ext-lb.adoc @@ -0,0 +1,21 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc +// * installing/installing_gcp/installing-gcp-user-infra.adoc +// * installing/installing_gcp/installing-restricted-networks-gcp.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-ext-lb_{context}"] += Infrastructure Manager template for the external load balancer + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the external load balancer that you need for your {product-title} cluster: + +.`02_lb_ext.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/02_lb_ext/02_lb_ext.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-firewall-rules.adoc b/modules/installation-infrastructure-manager-firewall-rules.adoc new file mode 100644 index 000000000000..0523d385f99d --- /dev/null +++ b/modules/installation-infrastructure-manager-firewall-rules.adoc @@ -0,0 +1,19 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra-shared-vpc.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-firewall-rules_{context}"] += Infrastructure Manager template for firewall rules and IAM roles + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the firewall rules and IAM roles that you need for your {product-title} cluster: + 
+.`03_security.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/03_security/03_security.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-int-lb.adoc b/modules/installation-infrastructure-manager-int-lb.adoc new file mode 100644 index 000000000000..7c6e57013343 --- /dev/null +++ b/modules/installation-infrastructure-manager-int-lb.adoc @@ -0,0 +1,21 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc +// * installing/installing_gcp/installing-gcp-user-infra.adoc +// * installing/installing_gcp/installing-restricted-networks-gcp.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-int-lb_{context}"] += Infrastructure Manager template for the internal load balancer + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the internal load balancer that you need for your {product-title} cluster: + +.`02_lb_int.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/02_lb_int/02_lb_int.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-private-dns.adoc b/modules/installation-infrastructure-manager-private-dns.adoc new file mode 100644 index 000000000000..1a1dff0e057e --- /dev/null +++ b/modules/installation-infrastructure-manager-private-dns.adoc @@ -0,0 +1,19 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-private-dns_{context}"] += Infrastructure Manager template for the private DNS + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the private DNS that you need for 
your {product-title} cluster: + +.`02_dns.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/02_dns/02_dns.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-vpc.adoc b/modules/installation-infrastructure-manager-vpc.adoc new file mode 100644 index 000000000000..2654b18eb76e --- /dev/null +++ b/modules/installation-infrastructure-manager-vpc.adoc @@ -0,0 +1,20 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra.adoc +// * installing/installing_gcp/installing-restricted-networks-gcp.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-vpc_{context}"] += Infrastructure Manager template for the VPC + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the VPC that you need for your {product-title} cluster: + +.`01_vpc.tf` Infrastructure Manager template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/01_vpc/01_vpc.tf[] +---- +==== diff --git a/modules/installation-infrastructure-manager-worker.adoc b/modules/installation-infrastructure-manager-worker.adoc new file mode 100644 index 000000000000..c1329c127cfc --- /dev/null +++ b/modules/installation-infrastructure-manager-worker.adoc @@ -0,0 +1,20 @@ +// Module included in the following assemblies: +// +// * installing/installing_gcp/installing-gcp-user-infra.adoc +// * installing/installing_gcp/installing-restricted-networks-gcp.adoc + +:_mod-docs-content-type: REFERENCE +[id="installation-infrastructure-manager-worker_{context}"] += Infrastructure Manager template for worker machines + +[role="_abstract"] +You can use the following Infrastructure Manager template to deploy the worker machines that you need for your {product-title} cluster: + +.`06_worker.tf` Infrastructure Manager
template +[%collapsible] +==== +[source,terraform] +---- +include::https://raw.githubusercontent.com/openshift/installer/release-4.22/upi/gcp/06_worker/06_worker.tf[] +---- +==== diff --git a/modules/installation-user-infra-exporting-common-variables.adoc b/modules/installation-user-infra-exporting-common-variables.adoc index 628ac932b757..9b8c68f4a3ae 100644 --- a/modules/installation-user-infra-exporting-common-variables.adoc +++ b/modules/installation-user-infra-exporting-common-variables.adoc @@ -3,170 +3,157 @@ // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:gcp: -:user-infra-vpc: -endif::[] - -ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud -:cp: Google Cloud -:cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud -:cp: Google Cloud -:cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp-vpc"] -:cp-first: Google Cloud -:cp: Google Cloud -:cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud -:cp: Google Cloud -:cp-template: Deployment Manager -:shared-vpc: -endif::[] - :_mod-docs-content-type: PROCEDURE [id="installation-user-infra-exporting-common-variables_{context}"] -= Exporting common variables for {cp-template} templates += Exporting common variables for Infrastructure Manager templates -You must export a common set of variables that are used with the provided -{cp-template} templates used to assist in completing a user-provided -infrastructure install on {cp-first}. +You must export a common set of variables that are used with the provided Infrastructure Manager templates used to assist in installing a cluster with user-provisioned infrastructure on {gcp-first}. 
[NOTE] ==== -Specific {cp-template} templates can also require additional exported variables, which are detailed in their related procedures. +Specific Infrastructure Manager templates can also require additional exported variables, which are detailed in their related procedures. ==== .Procedure -. Export the following common variables to be used by the provided {cp-template} templates. For any command with ``, specify the path to the directory that you stored the installation files in. +* Export the following common variables to be used by the provided Infrastructure Manager templates. For any command with ``, specify the path to the directory that you stored the installation files in. + +** Export the `BASE_DOMAIN` variable by running the following command: + -ifndef::shared-vpc[] [source,terminal] ---- $ export BASE_DOMAIN='' ---- + +``:: If you are installing a cluster into a shared VPC, specify the value for the host project. + +** Export the `BASE_DOMAIN_ZONE_NAME` variable by running the following command: ++ [source,terminal] ---- $ export BASE_DOMAIN_ZONE_NAME='' ---- + +``:: Specifies the base domain zone name. + +** Export the `NETWORK_CIDR` variable by running the following command: ++ [source,terminal] ---- -$ export NETWORK_CIDR='10.0.0.0/16' +$ export NETWORK_CIDR='' ---- + +``:: Specifies the network CIDR your cluster uses. For example, `10.0.0.0/16`. + +** Export the `MASTER_SUBNET_CIDR` variable by running the following command: ++ [source,terminal] ---- -$ export MASTER_SUBNET_CIDR='10.0.0.0/17' +$ export MASTER_SUBNET_CIDR='' ---- + +``:: Specifies the network CIDR that your cluster's control plane uses. For example, `10.0.0.0/17`. + +** Export the `WORKER_SUBNET_CIDR` variable by running the following command: ++ [source,terminal] ---- -$ export WORKER_SUBNET_CIDR='10.0.128.0/17' +$ export WORKER_SUBNET_CIDR='' ---- + +``:: Specifies the network CIDR that your cluster's compute machines use. For example, `10.0.128.0/17`. 
+ +** Export the `KUBECONFIG` variable by running the following command: ++ [source,terminal] ---- $ export KUBECONFIG=/auth/kubeconfig ---- + +** Export the `CLUSTER_NAME` variable by running the following command: + [source,terminal] ---- $ export CLUSTER_NAME=`jq -r .clusterName /metadata.json` ---- + +** Export the `INFRA_ID` variable by running the following command: + [source,terminal] ---- $ export INFRA_ID=`jq -r .infraID /metadata.json` ---- + +** Export the `PROJECT_NAME` variable by running the following command: + [source,terminal] ---- $ export PROJECT_NAME=`jq -r .gcp.projectID /metadata.json` ---- + +** If you are installing a cluster into a shared VPC, export the `HOST_PROJECT` variable by running the following command: + [source,terminal] ---- -$ export REGION=`jq -r .gcp.region /metadata.json` +$ export HOST_PROJECT= ---- -endif::shared-vpc[] +``:: Specifies the name of the host project that contains the shared VPC. -ifdef::shared-vpc[] +** If you are installing a cluster into a shared VPC, export the `HOST_PROJECT_ACCOUNT` variable by running the following command: ++ [source,terminal] ---- -$ export BASE_DOMAIN='' +$ export HOST_PROJECT_ACCOUNT= ---- -* ``: Supply the values for the host project. +``:: Specifies the name of an account that can access the host project that contains the shared VPC.
+ +** Export the `REGION` variable by running the following command: + [source,terminal] ---- -$ export BASE_DOMAIN_ZONE_NAME='' +$ export REGION=`jq -r .gcp.region /metadata.json` ---- + +** Export the `ZONE_0` variable by running the following command: + [source,terminal] ---- -$ export NETWORK_CIDR='10.0.0.0/16' +$ export ZONE_0=$(gcloud compute regions describe ${REGION} --format=json | jq -r '.zones[0]' | cut -d "/" -f9) ---- + +** Export the `ZONE_1` variable by running the following command: + [source,terminal] ---- -$ export KUBECONFIG=/auth/kubeconfig +$ export ZONE_1=$(gcloud compute regions describe ${REGION} --format=json | jq -r '.zones[1]' | cut -d "/" -f9) ---- + +** Export the `ZONE_2` variable by running the following command: + [source,terminal] ---- -$ export CLUSTER_NAME=`jq -r .clusterName /metadata.json` +$ export ZONE_2=$(gcloud compute regions describe ${REGION} --format=json | jq -r '.zones[2]' | cut -d "/" -f9) ---- + +** Export the `SERVICE_ACCOUNT_EMAIL` variable by running the following command: + [source,terminal] ---- -$ export INFRA_ID=`jq -r .infraID /metadata.json` +$ export SERVICE_ACCOUNT_EMAIL="" ---- + +``:: Specifies the email address of the service account you used for the installation. 
+ +** Export the `INSTALL_SERVICE_ACCOUNT` variable by running the following command: ++ [source,terminal] ---- -$ export PROJECT_NAME=`jq -r .gcp.projectID /metadata.json` +$ export INSTALL_SERVICE_ACCOUNT="projects/${PROJECT_NAME}/serviceAccounts/${SERVICE_ACCOUNT_EMAIL}" +---- + +** Export the `CLUSTER_DOMAIN` variable by running the following command: ++ +[source,terminal] ---- -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!gcp: -:!user-infra-vpc: -endif::[] - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!cp-first: -:!cp: -:!cp-template: -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!cp-first: -:!cp: -:!cp-template: -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp-vpc"] -:!cp-first: Google Cloud -:!cp: Google Cloud -:!cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud -:!cp: Google Cloud -:!cp-template: Deployment Manager -:!shared-vpc: -endif::[] +$ export CLUSTER_DOMAIN="${CLUSTER_NAME}.${BASE_DOMAIN}" +---- \ No newline at end of file