diff --git a/api/v1/perconaservermysql_types.go b/api/v1/perconaservermysql_types.go index 81b05deb6..ccb4a0d0b 100644 --- a/api/v1/perconaservermysql_types.go +++ b/api/v1/perconaservermysql_types.go @@ -141,6 +141,8 @@ func (t ClusterType) isValid() bool { return false } +// +kubebuilder:validation:XValidation:rule="has(self.image) && self.image != ''",message="mysql.image is required" +// +kubebuilder:validation:XValidation:rule="has(self.size) && self.size > 0",message="mysql.size must be greater than 0" type MySQLSpec struct { // +kubebuilder:validation:Enum=group-replication;async // +kubebuilder:default=group-replication @@ -176,6 +178,8 @@ type SidecarPVC struct { Spec corev1.PersistentVolumeClaimSpec `json:"spec"` } +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.image) && self.image != '')",message="orchestrator.image is required when orchestrator is enabled" +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.size) && self.size > 0)",message="orchestrator.size must be greater than 0 when orchestrator is enabled" type OrchestratorSpec struct { Enabled bool `json:"enabled,omitempty"` Expose ServiceExpose `json:"expose,omitempty"` @@ -184,7 +188,7 @@ type OrchestratorSpec struct { } type ContainerSpec struct { - Image string `json:"image"` + Image string `json:"image,omitempty"` ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` Resources corev1.ResourceRequirements `json:"resources,omitempty"` @@ -200,7 +204,6 @@ type ContainerSpec struct { } type PodSpec struct { - // +kubebuilder:validation:Required Size int32 `json:"size,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` Labels map[string]string `json:"labels,omitempty"` @@ -286,9 +289,10 @@ func (s *PodSpec) GetInitSpec(cr *PerconaServerMySQL) InitContainerSpec { return *s.InitContainer } +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.image) && self.image != '')",message="pmm.image is required when pmm is enabled" type PMMSpec struct { Enabled bool `json:"enabled,omitempty"` - Image string `json:"image"` + Image string `json:"image,omitempty"` MySQLParams string `json:"mysqlParams,omitempty"` ServerHost string `json:"serverHost,omitempty"` Resources corev1.ResourceRequirements `json:"resources,omitempty"` @@ -513,6 +517,10 @@ func (b *BackupStorageAzureSpec) ContainerAndPrefix() (string, string) { return container, prefix } +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || has(self.binlogServer)",message="binlogServer is required when pitr is enabled" +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || !has(self.binlogServer) || (has(self.binlogServer.image) && self.binlogServer.image != '')",message="binlogServer.image is required when pitr is enabled" +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || !has(self.binlogServer) || (has(self.binlogServer.size) && self.binlogServer.size > 0)",message="binlogServer.size is required when pitr is enabled" +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || !has(self.binlogServer) || (has(self.binlogServer.serverId) && self.binlogServer.serverId > 0)",message="binlogServer.serverId is required when pitr is enabled" type PiTRSpec struct { Enabled bool `json:"enabled,omitempty"` @@ -523,19 
+531,39 @@ type BinlogServerStorageSpec struct { S3 *BackupStorageS3Spec `json:"s3,omitempty"` } +// +kubebuilder:validation:XValidation:rule="!has(self.size) || self.size <= 1",message="binlogServer size cannot be more than 1" type BinlogServerSpec struct { - Storage BinlogServerStorageSpec `json:"storage"` + Storage BinlogServerStorageSpec `json:"storage,omitempty"` // The number of seconds the MySQL client library will wait to establish a connection with a remote host - ConnectTimeout int32 `json:"connectTimeout"` + // +kubebuilder:default=30 + ConnectTimeout int32 `json:"connectTimeout,omitempty"` // The number of seconds the MySQL client library will wait to read data from a remote server. - ReadTimeout int32 `json:"readTimeout"` + // +kubebuilder:default=30 + ReadTimeout int32 `json:"readTimeout,omitempty"` // The number of seconds the MySQL client library will wait to write data to a remote server. - WriteTimeout int32 `json:"writeTimeout"` + // +kubebuilder:default=30 + WriteTimeout int32 `json:"writeTimeout,omitempty"` // Specifies the server ID that the utility will be using when connecting to a remote MySQL server - ServerID int32 `json:"serverId"` + ServerID int32 `json:"serverId,omitempty"` // The number of seconds the utility will spend in disconnected mode between reconnection attempts. - IdleTime int32 `json:"idleTime"` + // +kubebuilder:default=30 + IdleTime int32 `json:"idleTime,omitempty"` + // SSLMode specifies the SSL mode for the connection to MySQL. + // +kubebuilder:default="verify_identity" + SSLMode string `json:"sslMode,omitempty"` + // VerifyChecksum enables checksum verification during replication. + // +kubebuilder:default=true + VerifyChecksum *bool `json:"verifyChecksum,omitempty"` + // RewriteFileSize specifies the maximum binlog file size for rewrite. + // +kubebuilder:default="128M" + RewriteFileSize string `json:"rewriteFileSize,omitempty"` + // CheckpointSize specifies the storage checkpoint size. + // +kubebuilder:default="16M" + CheckpointSize string `json:"checkpointSize,omitempty"` + // CheckpointInterval specifies the storage checkpoint interval. 
+ // +kubebuilder:default="30s" + CheckpointInterval string `json:"checkpointInterval,omitempty"` PodSpec `json:",inline"` } @@ -545,6 +573,8 @@ type ProxySpec struct { HAProxy *HAProxySpec `json:"haproxy,omitempty"` } +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.image) && self.image != '')",message="router.image is required when router is enabled" +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.size) && self.size > 0)",message="router.size must be greater than 0 when router is enabled" type MySQLRouterSpec struct { Enabled bool `json:"enabled,omitempty"` @@ -555,10 +585,13 @@ type MySQLRouterSpec struct { PodSpec `json:",inline"` } +// +kubebuilder:validation:XValidation:rule="has(self.image) && self.image != ''",message="toolkit.image is required" type ToolkitSpec struct { ContainerSpec `json:",inline"` } +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.image) && self.image != '')",message="haproxy.image is required when haproxy is enabled" +// +kubebuilder:validation:XValidation:rule="!(has(self.enabled) && self.enabled) || (has(self.size) && self.size > 0)",message="haproxy.size must be greater than 0 when haproxy is enabled" type HAProxySpec struct { Enabled bool `json:"enabled,omitempty"` @@ -645,6 +678,7 @@ type PerconaServerMySQLStatus struct { // INSERT ADDITIONAL STATUS FIELD - defin Orchestrator StatefulAppStatus `json:"orchestrator,omitempty"` HAProxy StatefulAppStatus `json:"haproxy,omitempty"` Router StatefulAppStatus `json:"router,omitempty"` + BinlogServer StatefulAppStatus `json:"binlogServer,omitempty"` State StatefulAppState `json:"state,omitempty"` BackupVersion string `json:"backupVersion,omitempty"` PMMVersion string `json:"pmmVersion,omitempty"` @@ -1014,11 +1048,46 @@ func (cr *PerconaServerMySQL) CheckNSetDefaults(_ context.Context, serverVersion cr.Spec.Backup.PiTR.BinlogServer = new(BinlogServerSpec) } + if cr.Spec.Backup.PiTR.BinlogServer != nil { + bls := cr.Spec.Backup.PiTR.BinlogServer + if bls.SSLMode == "" { + bls.SSLMode = "verify_identity" + } + if bls.VerifyChecksum == nil { + t := true + bls.VerifyChecksum = &t + } + if bls.RewriteFileSize == "" { + bls.RewriteFileSize = "128M" + } + if bls.CheckpointSize == "" { + bls.CheckpointSize = "16M" + } + if bls.CheckpointInterval == "" { + bls.CheckpointInterval = "30s" + } + if bls.ConnectTimeout == 0 { + bls.ConnectTimeout = 30 + } + if bls.ReadTimeout == 0 { + bls.ReadTimeout = 30 + } + if bls.WriteTimeout == 0 { + bls.WriteTimeout = 30 + } + if bls.IdleTime == 0 { + bls.IdleTime = 30 + } + } + if cr.Spec.Pause { cr.Spec.MySQL.Size = 0 cr.Spec.Orchestrator.Size = 0 cr.Spec.Proxy.Router.Size = 0 cr.Spec.Proxy.HAProxy.Size = 0 + if cr.Spec.Backup.PiTR.BinlogServer != nil { + cr.Spec.Backup.PiTR.BinlogServer.Size = 0 + } } if cr.Spec.SecretsName == "" { @@ -1290,7 +1359,6 @@ const ( UpgradeStrategyDisabled = "disabled" UpgradeStrategyNever = "never" UpgradeStrategyRecommended = "recommended" - UpgradeStrategyLatest = "latest" ) func (s *BackupStorageSpec) Equals(other *BackupStorageSpec) bool { diff --git a/api/v1/perconaservermysql_types_test.go b/api/v1/perconaservermysql_types_test.go index f0da3a005..0203cedf5 100644 --- a/api/v1/perconaservermysql_types_test.go +++ b/api/v1/perconaservermysql_types_test.go @@ -166,6 +166,69 @@ func TestCheckNSetDefaults(t *testing.T) { err := cr.CheckNSetDefaults(t.Context(), nil) assert.NoError(t, err) }) + t.Run("binlog server 
defaults are set when binlogServer is configured", func(t *testing.T) { + cr := new(PerconaServerMySQL) + cr.Spec.MySQL.VolumeSpec = &VolumeSpec{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1G"), + }, + }, + }, + } + cr.Spec.Backup = &BackupSpec{ + PiTR: PiTRSpec{ + BinlogServer: &BinlogServerSpec{}, + }, + } + + err := cr.CheckNSetDefaults(t.Context(), nil) + assert.NoError(t, err) + + bls := cr.Spec.Backup.PiTR.BinlogServer + assert.Equal(t, "verify_identity", bls.SSLMode) + assert.NotNil(t, bls.VerifyChecksum) + assert.True(t, *bls.VerifyChecksum) + assert.Equal(t, "128M", bls.RewriteFileSize) + assert.Equal(t, "16M", bls.CheckpointSize) + assert.Equal(t, "30s", bls.CheckpointInterval) + }) + t.Run("binlog server explicit values are not overridden by defaults", func(t *testing.T) { + cr := new(PerconaServerMySQL) + cr.Spec.MySQL.VolumeSpec = &VolumeSpec{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1G"), + }, + }, + }, + } + f := false + cr.Spec.Backup = &BackupSpec{ + PiTR: PiTRSpec{ + BinlogServer: &BinlogServerSpec{ + SSLMode: "required", + VerifyChecksum: &f, + RewriteFileSize: "256M", + CheckpointSize: "4M", + CheckpointInterval: "60s", + }, + }, + } + + err := cr.CheckNSetDefaults(t.Context(), nil) + assert.NoError(t, err) + + bls := cr.Spec.Backup.PiTR.BinlogServer + assert.Equal(t, "required", bls.SSLMode) + assert.NotNil(t, bls.VerifyChecksum) + assert.False(t, *bls.VerifyChecksum) + assert.Equal(t, "256M", bls.RewriteFileSize) + assert.Equal(t, "4M", bls.CheckpointSize) + assert.Equal(t, "60s", bls.CheckpointInterval) + }) } func TestCanBackup(t *testing.T) { diff --git a/api/v1/perconaservermysqlrestore_types.go b/api/v1/perconaservermysqlrestore_types.go index 0c384755c..b36ebe295 100644 --- a/api/v1/perconaservermysqlrestore_types.go +++ b/api/v1/perconaservermysqlrestore_types.go @@ -31,8 +31,24 @@ type PerconaServerMySQLRestoreSpec struct { BackupName string `json:"backupName,omitempty"` BackupSource *PerconaServerMySQLBackupStatus `json:"backupSource,omitempty"` ContainerOptions *BackupContainerOptions `json:"containerOptions,omitempty"` + PITR *RestorePITRSpec `json:"pitr,omitempty"` } +type RestorePITRSpec struct { + // +kubebuilder:validation:Enum=gtid;date + Type PITRType `json:"type"` + Date string `json:"date,omitempty"` + GTID string `json:"gtid,omitempty"` + Force bool `json:"force,omitempty"` +} + +type PITRType string + +const ( + PITRGtid PITRType = "gtid" + PITRDate PITRType = "date" +) + type RestoreState string const ( diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index ad16575b3..5f6cadde0 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -304,6 +304,11 @@ func (in *BackupStorageSpec) DeepCopy() *BackupStorageSpec { func (in *BinlogServerSpec) DeepCopyInto(out *BinlogServerSpec) { *out = *in in.Storage.DeepCopyInto(&out.Storage) + if in.VerifyChecksum != nil { + in, out := &in.VerifyChecksum, &out.VerifyChecksum + *out = new(bool) + **out = **in + } in.PodSpec.DeepCopyInto(&out.PodSpec) } @@ -813,6 +818,11 @@ func (in *PerconaServerMySQLRestoreSpec) DeepCopyInto(out *PerconaServerMySQLRes *out = new(BackupContainerOptions) (*in).DeepCopyInto(*out) } + if in.PITR != nil { + in, out := &in.PITR, &out.PITR + 
*out = new(RestorePITRSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerconaServerMySQLRestoreSpec. @@ -907,6 +917,7 @@ func (in *PerconaServerMySQLStatus) DeepCopyInto(out *PerconaServerMySQLStatus) out.Orchestrator = in.Orchestrator out.HAProxy = in.HAProxy out.Router = in.Router + out.BinlogServer = in.BinlogServer if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]apismetav1.Condition, len(*in)) @@ -1102,6 +1113,21 @@ func (in *ProxySpec) DeepCopy() *ProxySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestorePITRSpec) DeepCopyInto(out *RestorePITRSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestorePITRSpec. +func (in *RestorePITRSpec) DeepCopy() *RestorePITRSpec { + if in == nil { + return nil + } + out := new(RestorePITRSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceExpose) DeepCopyInto(out *ServiceExpose) { *out = *in diff --git a/build/Dockerfile b/build/Dockerfile index 802de4ca0..11b432e33 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -50,6 +50,11 @@ RUN GOOS=$GOOS GOARCH=$TARGETARCH CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAG -o build/_output/bin/mysql-state-monitor \ cmd/mysql-state-monitor/main.go \ && cp -r build/_output/bin/mysql-state-monitor /usr/local/bin/mysql-state-monitor +RUN GOOS=$GOOS GOARCH=$TARGETARCH CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ + go build -ldflags "-w -s -X main.GitCommit=$GIT_COMMIT -X main.GitBranch=$GIT_BRANCH -X main.BuildTime=$BUILD_TIME" \ + -o build/_output/bin/pitr \ + ./cmd/pitr/ \ + && cp -r build/_output/bin/pitr /usr/local/bin/pitr FROM redhat/ubi9-minimal AS ubi9 RUN microdnf -y update && microdnf clean all @@ -91,5 +96,7 @@ COPY build/haproxy.cfg /opt/percona-server-mysql-operator/haproxy.cfg COPY build/haproxy-global.cfg /opt/percona-server-mysql-operator/haproxy-global.cfg COPY build/pmm-prerun.sh /opt/percona-server-mysql-operator/pmm-prerun.sh COPY build/binlog-server-entrypoint.sh /opt/percona-server-mysql-operator/binlog-server-entrypoint.sh +COPY --from=go_builder /usr/local/bin/pitr /opt/percona-server-mysql-operator/pitr +COPY build/run-pitr-restore.sh /opt/percona-server-mysql-operator/run-pitr-restore.sh USER 2 diff --git a/build/ps-init-entrypoint.sh b/build/ps-init-entrypoint.sh index 0d86d21f6..ed921426a 100755 --- a/build/ps-init-entrypoint.sh +++ b/build/ps-init-entrypoint.sh @@ -40,3 +40,5 @@ install -o "$(id -u)" -g "$(id -g)" -m 0755 -D "${OPERATORDIR}/haproxy-global.cf install -o "$(id -u)" -g "$(id -g)" -m 0755 -D "${OPERATORDIR}/pmm-prerun.sh" "${BINDIR}/pmm-prerun.sh" install -o "$(id -u)" -g "$(id -g)" -m 0755 -D "${OPERATORDIR}/binlog-server-entrypoint.sh" "${BINDIR}/binlog-server-entrypoint.sh" +install -o "$(id -u)" -g "$(id -g)" -m 0755 -D "${OPERATORDIR}/pitr" "${BINDIR}/pitr" +install -o "$(id -u)" -g "$(id -g)" -m 0755 -D "${OPERATORDIR}/run-pitr-restore.sh" "${BINDIR}/run-pitr-restore.sh" diff --git a/build/run-pitr-restore.sh b/build/run-pitr-restore.sh new file mode 100644 index 000000000..f4a5effed --- /dev/null +++ b/build/run-pitr-restore.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -e + +function log() { + local ts=$(date +%Y-%m-%dT%H:%M:%S.%N%z --utc | sed 's/+0000/Z/g') + echo "${ts} 0 
[Info] [K8SPS-642] [Job] $*" >&2 +} + +log "Starting mysqld" +# TODO: Add support for data at rest encryption +mysqld \ + --admin-address=127.0.0.1 \ + --user=mysql \ + --gtid-mode=ON \ + --enforce-gtid-consistency=ON & + +log "waiting for mysqld to be ready" +until mysqladmin -u operator -p"$(</etc/mysql/mysql-users-secret/operator)" ping &>/dev/null; do + sleep 1; +done +log "mysqld is ready" + +if [[ -n ${SLEEP_FOREVER} ]]; then + SLEEP_FOREVER_FILE=/var/lib/mysql/sleep-forever + log "sleeping forever... remove ${SLEEP_FOREVER_FILE} to terminate." + touch ${SLEEP_FOREVER_FILE} + while [[ -f ${SLEEP_FOREVER_FILE} ]]; do + sleep 10 + done + exit 0 +fi + +log "starting recovery" +/opt/percona/pitr + +log "stopping mysqld" +mysqladmin -u operator -p"$(</etc/mysql/mysql-users-secret/operator)" shutdown &>/dev/null diff --git a/cmd/example-gen/main.go b/cmd/example-gen/main.go index 16db8880b..f704640e6 100644 --- a/cmd/example-gen/main.go +++ b/cmd/example-gen/main.go @@ -164,6 +164,11 @@ func printRestore() error { Spec: apiv1.PerconaServerMySQLRestoreSpec{ ClusterName: defaults.NameCluster, BackupName: defaults.NameBackup, + PITR: &apiv1.RestorePITRSpec{ + Type: apiv1.PITRDate, + Date: "2024-11-18T11:10:48Z", + GTID: "a3e5ff70-83e2-11ef-8e57-7a62caf7e1e3:1-36", + }, BackupSource: &apiv1.PerconaServerMySQLBackupStatus{ Destination: "s3://S3-BACKUP-BUCKET-NAME-HERE/backup-path", Storage: &apiv1.BackupStorageSpec{ diff --git a/cmd/example-gen/pkg/defaults/manual.go b/cmd/example-gen/pkg/defaults/manual.go index 4a3ce28aa..65b74f794 100644 --- a/cmd/example-gen/pkg/defaults/manual.go +++ b/cmd/example-gen/pkg/defaults/manual.go @@ -121,6 +121,29 @@ func pmmDefaults(spec *apiv1.PMMSpec) { func backupDefaults(spec *apiv1.BackupSpec) { spec.Image = ImageBackup spec.Enabled = true + spec.PiTR = apiv1.PiTRSpec{ + Enabled: false, + BinlogServer: &apiv1.BinlogServerSpec{ + Storage: apiv1.BinlogServerStorageSpec{ + S3: &apiv1.BackupStorageS3Spec{ + Bucket: "S3-BACKUP-BUCKET-NAME-HERE", + Prefix: "PREFIX_NAME", + CredentialsSecret: fmt.Sprintf("%s-s3-credentials", NameCluster), + Region: "us-west-2", + EndpointURL: "https://s3.amazonaws.com", + }, + }, + ConnectTimeout: 30, + ReadTimeout: 30, + WriteTimeout: 30, + ServerID: 100, + IdleTime: 30, + CheckpointSize: "16M", + CheckpointInterval: "30s", + }, + } + podSpecDefaults(&spec.PiTR.BinlogServer.PodSpec, ImageBinlogServer, corev1.ResourceRequirements{}, "", 30, nil, nil) + spec.PiTR.BinlogServer.Size = 1 spec.SourcePod = SourcePod spec.ServiceAccountName = "some-service-account" spec.BackoffLimit = ptr.To(int32(6)) diff --git a/cmd/example-gen/pkg/defaults/values.go b/cmd/example-gen/pkg/defaults/values.go index 1160c836e..d9ea30703 100644 --- a/cmd/example-gen/pkg/defaults/values.go +++ b/cmd/example-gen/pkg/defaults/values.go @@ -16,6 +16,7 @@ const ( ImageOrchestrator = "perconalab/percona-server-mysql-operator:main-orchestrator" ImagePMM = "perconalab/pmm-client:3-dev-latest" ImageBackup = "perconalab/percona-server-mysql-operator:main-backup8.4" + ImageBinlogServer = "perconalab/percona-binlog-server:0.2.1" ImageToolkit = "perconalab/percona-server-mysql-operator:main-toolkit" ) diff --git a/cmd/example-gen/scripts/lib/ps-restore.sh b/cmd/example-gen/scripts/lib/ps-restore.sh index 1ded67568..dbd505c3e 100644 --- a/cmd/example-gen/scripts/lib/ps-restore.sh +++ b/cmd/example-gen/scripts/lib/ps-restore.sh @@ -6,7 +6,7 @@ export RESOURCE_PATH="deploy/backup/restore.yaml" sort_yaml() { - SPEC_ORDER='"clusterName", "backupName", "containerOptions", "backupSource"' + SPEC_ORDER='"clusterName", "backupName", "pitr", "containerOptions", "backupSource"'
CONTAINER_OPTS_ORDER='"env", "args"' yq - \ @@ -30,6 +30,7 @@ remove_fields() { del_fields_to_comment() { yq - \ + | yq "del(.spec.pitr)" \ | yq "del(.spec.containerOptions)" \ | yq "del(.spec.backupSource)" } diff --git a/cmd/example-gen/scripts/lib/ps.sh b/cmd/example-gen/scripts/lib/ps.sh index 0b1b80b90..b38e1d079 100644 --- a/cmd/example-gen/scripts/lib/ps.sh +++ b/cmd/example-gen/scripts/lib/ps.sh @@ -15,7 +15,9 @@ sort_yaml() { ORCHESTRATOR_ORDER='"enabled", "expose", '"$POD_SPEC_ORDER" PMM_ORDER='"enabled","image","imagePullPolicy","serverHost","mysqlParams","containerSecurityContext", "resources", "readinessProbes", "livenessProbes"' - BACKUP_ORDER='"enabled","pitr","sourcePod","image","imagePullPolicy","imagePullSecrets","schedule","backoffLimit", "serviceAccountName", "initContainer", "containerSecurityContext", "resources","storages","pitr"' + BINLOG_SERVER_ORDER='"enabled","binlogServer"' + BINLOG_SERVER_SPEC_ORDER='"size","image","imagePullPolicy","imagePullSecrets","serverId","storage","connectTimeout","readTimeout","writeTimeout","idleTime"' + BACKUP_ORDER='"enabled","pitr","sourcePod","image","imagePullPolicy","imagePullSecrets","schedule","backoffLimit", "serviceAccountName", "initContainer", "containerSecurityContext", "resources","storages"' TOOLKIT_ORDER='"image","imagePullPolicy","imagePullSecrets","env","envFrom","resources","containerSecurityContext", "startupProbe", "readinessProbe", "livenessProbe"' yq - \ @@ -26,12 +28,13 @@ sort_yaml() { | yq '.spec.orchestrator |= pick((['"$ORCHESTRATOR_ORDER"'] + keys) | unique)' \ | yq '.spec.pmm |= pick((['"$PMM_ORDER"'] + keys) | unique)' \ | yq '.spec.backup |= pick((['"$BACKUP_ORDER"'] + keys) | unique)' \ + | yq '.spec.backup.pitr |= pick((['"$BINLOG_SERVER_ORDER"'] + keys) | unique)' \ + | yq '.spec.backup.pitr.binlogServer |= pick((['"$BINLOG_SERVER_SPEC_ORDER"'] + keys) | unique)' \ | yq '.spec.toolkit |= pick((['"$TOOLKIT_ORDER"'] + keys) | unique)' } remove_fields() { # - removing initImage as it is deprecated - # - removing binlogServer is not used # - removing azure-blob fields to reduce size # - removing gcp-cs fields to reduce size # - removing non-s3 fields in s3-us-west @@ -43,7 +46,14 @@ remove_fields() { | yq 'del(.spec.orchestrator.initImage)' \ | yq 'del(.spec.proxy.haproxy.initImage)' \ | yq 'del(.spec.proxy.router.initImage)' \ - | yq 'del(.spec.backup.pitr.binlogServer)' \ + | yq 'del(.spec.backup.pitr.binlogServer.runtimeClassName)' \ + | yq 'del(.spec.backup.pitr.binlogServer.labels)' \ + | yq 'del(.spec.backup.pitr.binlogServer.annotations)' \ + | yq 'del(.spec.backup.pitr.binlogServer.nodeSelector)' \ + | yq 'del(.spec.backup.pitr.binlogServer.priorityClassName)' \ + | yq 'del(.spec.backup.pitr.binlogServer.schedulerName)' \ + | yq 'del(.spec.backup.pitr.binlogServer.serviceAccountName)' \ + | yq 'del(.spec.backup.pitr.binlogServer.gracePeriod)' \ | yq 'del(.spec.backup.storages.azure-blob.affinity)' \ | yq 'del(.spec.backup.storages.azure-blob.annotations)' \ | yq 'del(.spec.backup.storages.azure-blob.gcs)' \ @@ -189,6 +199,7 @@ del_fields_to_comment() { | yq "del(.spec.pmm.livenessProbes)" \ | yq "del(.spec.pmm.containerSecurityContext)" \ | yq "del(.spec.pmm.resources.limits)" \ + | yq "del(.spec.backup.pitr.binlogServer)" \ | yq "del(.spec.backup.sourcePod)" \ | yq "del(.spec.backup.schedule)" \ | yq "del(.spec.backup.backoffLimit)" \ diff --git a/cmd/internal/db/db.go b/cmd/internal/db/db.go index ae220220f..bc5994681 100644 --- a/cmd/internal/db/db.go +++ b/cmd/internal/db/db.go @@ -394,3 
+394,9 @@ func (d *DB) EnableSuperReadonly(ctx context.Context) error { _, err := d.db.ExecContext(ctx, "SET GLOBAL SUPER_READ_ONLY=1") return errors.Wrap(err, "set global super_read_only param to 1") } + +func (d *DB) GetGTIDExecuted(ctx context.Context) (string, error) { + var gtid string + err := d.db.QueryRowContext(ctx, "SELECT @@GTID_EXECUTED").Scan(&gtid) + return gtid, errors.Wrap(err, "get GTID_EXECUTED") +} diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 12dce1a44..ff64dcf1f 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -180,6 +180,7 @@ func main() { Client: nsClient, Scheme: mgr.GetScheme(), ServerVersion: serverVersion, + ClientCmd: cliCmd, NewStorageClient: storage.NewClient, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "PerconaServerMySQLRestore") diff --git a/cmd/pitr/main.go b/cmd/pitr/main.go new file mode 100644 index 000000000..2a849d778 --- /dev/null +++ b/cmd/pitr/main.go @@ -0,0 +1,267 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/url" + "os" + "os/exec" + "strings" + "time" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/cmd/bootstrap/utils" + "github.com/percona/percona-server-mysql-operator/cmd/internal/db" + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" + "github.com/percona/percona-server-mysql-operator/pkg/xtrabackup/storage" +) + +// Database defines the MySQL operations needed for PITR. +type Database interface { + GetGTIDExecuted(ctx context.Context) (string, error) + Close() error +} + +type newStorageFn func(ctx context.Context, endpoint, accessKey, secretKey, bucket, prefix, region string, verifyTLS bool) (storage.Storage, error) +type newDatabaseFn func(ctx context.Context, params db.DBParams) (Database, error) + +// getObjectFn fetches a single object by key and returns a streaming reader. +type getObjectFn func(ctx context.Context, objectKey string) (io.ReadCloser, error) + +// applyBinlogsFn starts a single mysql client process and for each object key +// fetches the binlog via getObject and streams it through mysqlbinlog into mysql. +type applyBinlogsFn func(ctx context.Context, objectKeys []string, getObject getObjectFn, mysqlbinlogArgs []string, mysqlArgs []string, mysqlPass string) error + +type logWriter struct{} + +func (lw *logWriter) Write(bs []byte) (int, error) { + return fmt.Print(time.Now().UTC().Format(time.RFC3339Nano), " 0 [Info] [K8SPS-642] [Recovery] ", string(bs)) +} + +func main() { + ctx := context.Background() + + // we use a custom writer to match mysqld log format.
+ // mysqld and pitr logs are printed together to stdout/stderr + // and it should be possible to parse them together + log.SetFlags(0) + log.SetOutput(new(logWriter)) + + newDB := func(ctx context.Context, params db.DBParams) (Database, error) { + return db.NewDatabase(ctx, params) + } + + if err := run(ctx, storage.NewS3, newDB, utils.GetSecret, applyBinlogs); err != nil { + log.Fatalf("pitr failed: %v", err) + } +} + +func run(ctx context.Context, newS3 newStorageFn, newDB newDatabaseFn, getSecret func(apiv1.SystemUser) (string, error), apply applyBinlogsFn) error { + binlogsPath := os.Getenv("BINLOGS_PATH") + if binlogsPath == "" { + return fmt.Errorf("BINLOGS_PATH is not set") + } + + data, err := os.ReadFile(binlogsPath) + if err != nil { + return fmt.Errorf("read binlogs file: %w", err) + } + + var entries []binlogserver.BinlogEntry + if err := json.Unmarshal(data, &entries); err != nil { + return fmt.Errorf("parse binlogs json: %w", err) + } + + if len(entries) == 0 { + return fmt.Errorf("no binlog entries found") + } + + pitrType := os.Getenv("PITR_TYPE") + pitrDate := os.Getenv("PITR_DATE") + pitrGTID := os.Getenv("PITR_GTID") + + operatorPass, err := getSecret(apiv1.UserOperator) + if err != nil { + return fmt.Errorf("get operator password: %w", err) + } + + database, err := newDB(ctx, db.DBParams{ + User: apiv1.UserOperator, + Pass: operatorPass, + Host: "127.0.0.1", + }) + if err != nil { + return fmt.Errorf("connect to MySQL: %w", err) + } + + gtidExecuted, err := database.GetGTIDExecuted(ctx) + if err != nil { + if closeErr := database.Close(); closeErr != nil { + log.Printf("close database: %v", closeErr) + } + return fmt.Errorf("get GTID_EXECUTED: %w", err) + } + log.Printf("GTID_EXECUTED from backup: %s", gtidExecuted) + if err := database.Close(); err != nil { + log.Printf("close database: %v", err) + } + + endpoint := os.Getenv("AWS_ENDPOINT") + accessKey := os.Getenv("AWS_ACCESS_KEY_ID") + secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") + region := os.Getenv("AWS_DEFAULT_REGION") + bucket := os.Getenv("S3_BUCKET") + verifyTLS := os.Getenv("VERIFY_TLS") != "false" + + s3Client, err := newS3(ctx, endpoint, accessKey, secretKey, bucket, "", region, verifyTLS) + if err != nil { + return fmt.Errorf("create S3 client: %w", err) + } + + var objectKeys []string + for _, entry := range entries { + objectKey, err := objectKeyFromURI(entry.URI, bucket) + if err != nil { + return fmt.Errorf("parse URI %s: %w", entry.URI, err) + } + objectKeys = append(objectKeys, objectKey) + } + + mysqlbinlogArgs := []string{"--disable-log-bin"} + if gtidExecuted != "" { + mysqlbinlogArgs = append(mysqlbinlogArgs, fmt.Sprintf("--exclude-gtids=%s", gtidExecuted)) + } + + switch pitrType { + case "date": + mysqlbinlogArgs = append(mysqlbinlogArgs, fmt.Sprintf("--stop-datetime=%s", pitrDate)) + case "gtid": + mysqlbinlogArgs = append(mysqlbinlogArgs, fmt.Sprintf("--include-gtids=%s", pitrGTID)) + default: + return fmt.Errorf("unknown PITR_TYPE: %s", pitrType) + } + + mysqlArgs := []string{ + "-u", string(apiv1.UserOperator), + "-h", "127.0.0.1", + "-P", "33062", + } + if os.Getenv("PITR_FORCE") == "true" { + mysqlArgs = append([]string{"--force"}, mysqlArgs...)
+ } + + log.Printf("applying %d binlog(s) with mysqlbinlog args: %v", len(objectKeys), mysqlbinlogArgs) + + if err := apply(ctx, objectKeys, s3Client.GetObject, mysqlbinlogArgs, mysqlArgs, operatorPass); err != nil { + return fmt.Errorf("apply binlogs: %w", err) + } + + database, err = newDB(ctx, db.DBParams{ + User: apiv1.UserOperator, + Pass: operatorPass, + Host: "127.0.0.1", + }) + if err != nil { + return fmt.Errorf("reconnect to MySQL: %w", err) + } + defer func() { + if err := database.Close(); err != nil { + log.Printf("close db connection: %v", err) + } + }() + + gtidExecuted, err = database.GetGTIDExecuted(ctx) + if err != nil { + return fmt.Errorf("get GTID_EXECUTED after restore: %w", err) + } + log.Printf("GTID_EXECUTED after PITR: %s", gtidExecuted) + + log.Println("PITR complete") + return nil +} + +// applyBinlogs starts a single mysql client and for each object key +// fetches the binlog from storage and streams it through mysqlbinlog into mysql. +func applyBinlogs(ctx context.Context, objectKeys []string, getObject getObjectFn, mysqlbinlogArgs []string, mysqlArgs []string, mysqlPass string) error { + mysqlCmd := exec.CommandContext(ctx, "mysql", mysqlArgs...) + mysqlCmd.Env = append(os.Environ(), fmt.Sprintf("MYSQL_PWD=%s", mysqlPass)) + mysqlStdin, err := mysqlCmd.StdinPipe() + if err != nil { + return fmt.Errorf("create mysql stdin pipe: %w", err) + } + + var mysqlStderr bytes.Buffer + mysqlCmd.Stderr = &mysqlStderr + + if err := mysqlCmd.Start(); err != nil { + return fmt.Errorf("start mysql: %w", err) + } + + for _, objectKey := range objectKeys { + log.Printf("streaming binlog %s", objectKey) + + obj, err := getObject(ctx, objectKey) + if err != nil { + if closeErr := mysqlStdin.Close(); closeErr != nil { + log.Printf("close mysql stdin: %v", closeErr) + } + if waitErr := mysqlCmd.Wait(); waitErr != nil { + log.Printf("wait for mysql: %v", waitErr) + } + return fmt.Errorf("fetch binlog %s: %w", objectKey, err) + } + + args := append(mysqlbinlogArgs, "-") + binlogCmd := exec.CommandContext(ctx, "mysqlbinlog", args...) + binlogCmd.Stdin = obj + + var binlogStderr bytes.Buffer + binlogCmd.Stdout = mysqlStdin + binlogCmd.Stderr = &binlogStderr + + if err := binlogCmd.Run(); err != nil { + if closeErr := obj.Close(); closeErr != nil { + log.Printf("close object %s: %v", objectKey, closeErr) + } + if closeErr := mysqlStdin.Close(); closeErr != nil { + log.Printf("close mysql stdin: %v", closeErr) + } + if waitErr := mysqlCmd.Wait(); waitErr != nil { + log.Printf("wait for mysql: %v", waitErr) + } + return fmt.Errorf("mysqlbinlog %s failed: %w, stderr: %s", objectKey, err, binlogStderr.String()) + } + if err := obj.Close(); err != nil { + log.Printf("close object %s: %v", objectKey, err) + } + } + + if err := mysqlStdin.Close(); err != nil { + log.Printf("close mysql stdin: %v", err) + } + + if err := mysqlCmd.Wait(); err != nil { + return fmt.Errorf("mysql failed: %w, stderr: %s", err, mysqlStderr.String()) + } + + return nil +} + +// objectKeyFromURI extracts the S3 object key from a full URI. +// e.g. "https://minio-service:9000/bucket/binlogs/binlog.000001" -> "binlogs/binlog.000001" +// e.g. 
"s3://bucket/prefix/binlog.000001" -> "prefix/binlog.000001" +func objectKeyFromURI(uri, bucket string) (string, error) { + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf("parse URL: %w", err) + } + if u.Scheme == "s3" { + return strings.TrimPrefix(u.Path, "/"), nil + } + key := strings.TrimPrefix(u.Path, "/"+bucket+"/") + return key, nil +} diff --git a/cmd/pitr/main_test.go b/cmd/pitr/main_test.go new file mode 100644 index 000000000..dc9079753 --- /dev/null +++ b/cmd/pitr/main_test.go @@ -0,0 +1,312 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "io" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/cmd/internal/db" + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" + "github.com/percona/percona-server-mysql-operator/pkg/xtrabackup/storage" +) + +type fakeStorage struct { + objects map[string]string // key -> content + getErr error +} + +func (f *fakeStorage) GetObject(_ context.Context, objectName string) (io.ReadCloser, error) { + if f.getErr != nil { + return nil, f.getErr + } + content, ok := f.objects[objectName] + if !ok { + return nil, storage.ErrObjectNotFound + } + return io.NopCloser(strings.NewReader(content)), nil +} + +func (f *fakeStorage) PutObject(_ context.Context, _ string, _ io.Reader, _ int64) error { return nil } +func (f *fakeStorage) ListObjects(_ context.Context, _ string) ([]string, error) { return nil, nil } +func (f *fakeStorage) DeleteObject(_ context.Context, _ string) error { return nil } +func (f *fakeStorage) SetPrefix(_ string) {} +func (f *fakeStorage) GetPrefix() string { return "" } + +type fakeDB struct { + getGTIDExecutedResult string + getGTIDExecutedErr error + calls []string +} + +func (f *fakeDB) GetGTIDExecuted(_ context.Context) (string, error) { + f.calls = append(f.calls, "GetGTIDExecuted") + return f.getGTIDExecutedResult, f.getGTIDExecutedErr +} + +func (f *fakeDB) Close() error { return nil } + +func writeBinlogsFile(t *testing.T, entries []binlogserver.BinlogEntry) string { + t.Helper() + data, err := json.Marshal(entries) + require.NoError(t, err) + f, err := os.CreateTemp(t.TempDir(), "binlogs-*.json") + require.NoError(t, err) + _, err = f.Write(data) + require.NoError(t, err) + require.NoError(t, f.Close()) + return f.Name() +} + +type applyCall struct { + objectKeys []string + mysqlbinlogArgs []string + mysqlArgs []string +} + +func TestRun(t *testing.T) { + bucket := "mybucket" + + defaultEntries := []binlogserver.BinlogEntry{ + {URI: "s3://mybucket/binlogs/binlog.000001"}, + {URI: "s3://mybucket/binlogs/binlog.000002"}, + } + + defaultS3 := func(fake *fakeStorage) newStorageFn { + fake.objects = map[string]string{ + "binlogs/binlog.000001": "binlogdata1", + "binlogs/binlog.000002": "binlogdata2", + } + return func(_ context.Context, _, _, _, _, _, _ string, _ bool) (storage.Storage, error) { + return fake, nil + } + } + + tests := map[string]struct { + entries []binlogserver.BinlogEntry + rawContent string + pitrType string + pitrGTID string + pitrDate string + pitrForce string + db *fakeDB + newDB func(ctx context.Context, params db.DBParams) (Database, error) + newS3 func(*fakeStorage) newStorageFn + getSecret func(apiv1.SystemUser) (string, error) + applyErr error + expectedError string + checkApply func(t *testing.T, call applyCall) + }{ + "missing BINLOGS_PATH": { + expectedError: 
"BINLOGS_PATH", + }, + "invalid JSON in binlogs file": { + rawContent: "not-json", + expectedError: "parse binlogs json", + }, + "empty binlog entries": { + entries: []binlogserver.BinlogEntry{}, + expectedError: "no binlog entries found", + }, + "get secret error": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "uuid:1", + getSecret: func(apiv1.SystemUser) (string, error) { return "", errors.New("secret not found") }, + expectedError: "get operator password", + }, + "DB connect error": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "uuid:1", + newDB: func(_ context.Context, _ db.DBParams) (Database, error) { + return nil, errors.New("connection refused") + }, + expectedError: "connect to MySQL", + }, + "GetGTIDExecuted error": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "uuid:1", + db: &fakeDB{getGTIDExecutedErr: errors.New("query failed")}, + expectedError: "get GTID_EXECUTED", + }, + "S3 client creation error": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "uuid:1", + db: &fakeDB{getGTIDExecutedResult: "uuid:1-5"}, + newS3: func(_ *fakeStorage) newStorageFn { + return func(_ context.Context, _, _, _, _, _, _ string, _ bool) (storage.Storage, error) { + return nil, errors.New("s3 unavailable") + } + }, + expectedError: "create S3 client", + }, + "S3 download error": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "uuid:1", + db: &fakeDB{getGTIDExecutedResult: "uuid:1-5"}, + applyErr: errors.New("fetch binlog binlogs/binlog.000001: download failed"), + expectedError: "apply binlogs", + }, + "unknown PITR type": { + entries: defaultEntries, + pitrType: "unknown", + db: &fakeDB{getGTIDExecutedResult: "uuid:1-5"}, + expectedError: "unknown PITR_TYPE", + }, + "apply error": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "uuid:1-10", + db: &fakeDB{getGTIDExecutedResult: "uuid:1-5"}, + applyErr: errors.New("mysql failed"), + expectedError: "apply binlogs", + }, + "GTID mode success": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "aaaaaaaa-0000-0000-0000-000000000001:1-10", + db: &fakeDB{getGTIDExecutedResult: "aaaaaaaa-0000-0000-0000-000000000001:1-5"}, + checkApply: func(t *testing.T, call applyCall) { + assert.Len(t, call.objectKeys, 2) + assert.Contains(t, call.mysqlbinlogArgs, "--disable-log-bin") + assert.Contains(t, call.mysqlbinlogArgs, "--exclude-gtids=aaaaaaaa-0000-0000-0000-000000000001:1-5") + assert.Contains(t, call.mysqlbinlogArgs, "--include-gtids=aaaaaaaa-0000-0000-0000-000000000001:1-10") + assert.NotContains(t, call.mysqlbinlogArgs, "--stop-datetime") + assert.NotContains(t, call.mysqlArgs, "--force") + }, + }, + "GTID mode with force": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "aaaaaaaa-0000-0000-0000-000000000001:1-10", + pitrForce: "true", + db: &fakeDB{getGTIDExecutedResult: "aaaaaaaa-0000-0000-0000-000000000001:1-5"}, + checkApply: func(t *testing.T, call applyCall) { + assert.Contains(t, call.mysqlArgs, "--force") + }, + }, + "date mode success": { + entries: defaultEntries, + pitrType: "date", + pitrDate: "2024-01-15 12:00:00", + db: &fakeDB{getGTIDExecutedResult: "bbbbbbbb-0000-0000-0000-000000000002:1-5"}, + checkApply: func(t *testing.T, call applyCall) { + assert.Len(t, call.objectKeys, 2) + assert.Contains(t, call.mysqlbinlogArgs, "--disable-log-bin") + assert.Contains(t, call.mysqlbinlogArgs, "--exclude-gtids=bbbbbbbb-0000-0000-0000-000000000002:1-5") + assert.Contains(t, call.mysqlbinlogArgs, "--stop-datetime=2024-01-15 12:00:00") + // Should 
not contain --include-gtids for date mode + for _, arg := range call.mysqlbinlogArgs { + assert.False(t, strings.HasPrefix(arg, "--include-gtids"), "date mode should not have --include-gtids") + } + assert.NotContains(t, call.mysqlArgs, "--force") + }, + }, + "date mode with force": { + entries: defaultEntries, + pitrType: "date", + pitrDate: "2024-01-15 12:00:00", + pitrForce: "true", + db: &fakeDB{getGTIDExecutedResult: "bbbbbbbb-0000-0000-0000-000000000002:1-5"}, + checkApply: func(t *testing.T, call applyCall) { + assert.Contains(t, call.mysqlArgs, "--force") + }, + }, + "empty GTID_EXECUTED": { + entries: defaultEntries, + pitrType: "gtid", + pitrGTID: "aaaaaaaa-0000-0000-0000-000000000001:1-10", + db: &fakeDB{getGTIDExecutedResult: ""}, + checkApply: func(t *testing.T, call applyCall) { + assert.Contains(t, call.mysqlbinlogArgs, "--disable-log-bin") + assert.Contains(t, call.mysqlbinlogArgs, "--include-gtids=aaaaaaaa-0000-0000-0000-000000000001:1-10") + // No --exclude-gtids when GTID_EXECUTED is empty + for _, arg := range call.mysqlbinlogArgs { + assert.False(t, strings.HasPrefix(arg, "--exclude-gtids"), "should not have --exclude-gtids when GTID_EXECUTED is empty") + } + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // Set up binlogs file. + var binlogsPath string + if tc.rawContent != "" { + f, err := os.CreateTemp(t.TempDir(), "binlogs-*.json") + require.NoError(t, err) + _, err = f.WriteString(tc.rawContent) + require.NoError(t, err) + require.NoError(t, f.Close()) + binlogsPath = f.Name() + } else if tc.entries != nil { + binlogsPath = writeBinlogsFile(t, tc.entries) + } + + if binlogsPath != "" { + t.Setenv("BINLOGS_PATH", binlogsPath) + } else { + t.Setenv("BINLOGS_PATH", "") + } + t.Setenv("PITR_TYPE", tc.pitrType) + t.Setenv("PITR_GTID", tc.pitrGTID) + t.Setenv("PITR_DATE", tc.pitrDate) + t.Setenv("PITR_FORCE", tc.pitrForce) + t.Setenv("S3_BUCKET", bucket) + + fakeDatabase := tc.db + + newDB := tc.newDB + if newDB == nil { + newDB = func(_ context.Context, _ db.DBParams) (Database, error) { + return fakeDatabase, nil + } + } + + getSecret := tc.getSecret + if getSecret == nil { + getSecret = func(apiv1.SystemUser) (string, error) { return "testpass", nil } + } + + fake := &fakeStorage{} + var newS3 newStorageFn + if tc.newS3 != nil { + newS3 = tc.newS3(fake) + } else { + newS3 = defaultS3(fake) + } + + var captured applyCall + apply := func(_ context.Context, objectKeys []string, _ getObjectFn, mysqlbinlogArgs []string, mysqlArgs []string, _ string) error { + captured = applyCall{ + objectKeys: objectKeys, + mysqlbinlogArgs: mysqlbinlogArgs, + mysqlArgs: mysqlArgs, + } + return tc.applyErr + } + + err := run(t.Context(), newS3, newDB, getSecret, apply) + + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + return + } + require.NoError(t, err) + if tc.checkApply != nil { + tc.checkApply(t, captured) + } + }) + } +} diff --git a/config/crd/bases/ps.percona.com_perconaservermysqlrestores.yaml b/config/crd/bases/ps.percona.com_perconaservermysqlrestores.yaml index 3cbd73ad3..86267fca6 100644 --- a/config/crd/bases/ps.percona.com_perconaservermysqlrestores.yaml +++ b/config/crd/bases/ps.percona.com_perconaservermysqlrestores.yaml @@ -1176,6 +1176,22 @@ spec: type: object type: array type: object + pitr: + properties: + date: + type: string + force: + type: boolean + gtid: + type: string + type: + enum: + - gtid + - date + type: string + required: + - type + type: object required: - clusterName type: object diff 
--git a/config/crd/bases/ps.percona.com_perconaservermysqls.yaml b/config/crd/bases/ps.percona.com_perconaservermysqls.yaml index 7df401b2f..7aac26b5d 100644 --- a/config/crd/bases/ps.percona.com_perconaservermysqls.yaml +++ b/config/crd/bases/ps.percona.com_perconaservermysqls.yaml @@ -709,9 +709,16 @@ spec: additionalProperties: type: string type: object + checkpointInterval: + default: 30s + type: string + checkpointSize: + default: 16M + type: string configuration: type: string connectTimeout: + default: 30 format: int32 type: integer containerSecurityContext: @@ -898,6 +905,7 @@ spec: format: int64 type: integer idleTime: + default: 30 format: int32 type: integer image: @@ -1209,6 +1217,7 @@ spec: priorityClassName: type: string readTimeout: + default: 30 format: int32 type: integer readinessProbe: @@ -1325,6 +1334,9 @@ spec: x-kubernetes-int-or-string: true type: object type: object + rewriteFileSize: + default: 128M + type: string runtimeClassName: type: string schedulerName: @@ -1337,6 +1349,9 @@ spec: size: format: int32 type: integer + sslMode: + default: verify_identity + type: string startupProbe: properties: exec: @@ -1507,22 +1522,35 @@ spec: - whenUnsatisfiable type: object type: array + verifyChecksum: + default: true + type: boolean writeTimeout: + default: 30 format: int32 type: integer - required: - - connectTimeout - - idleTime - - image - - readTimeout - - serverId - - size - - storage - - writeTimeout type: object + x-kubernetes-validations: + - message: binlogServer size cannot be more than 1 + rule: '!has(self.size) || self.size <= 1' enabled: type: boolean type: object + x-kubernetes-validations: + - message: binlogServer is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || has(self.binlogServer)' + - message: binlogServer.image is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.image) && self.binlogServer.image + != '''')' + - message: binlogServer.size is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.size) && self.binlogServer.size + > 0)' + - message: binlogServer.serverId is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.serverId) && self.binlogServer.serverId + > 0)' resources: properties: claims: @@ -5709,10 +5737,12 @@ spec: type: string type: object type: object - required: - - image - - size type: object + x-kubernetes-validations: + - message: mysql.image is required + rule: has(self.image) && self.image != '' + - message: mysql.size must be greater than 0 + rule: has(self.size) && self.size > 0 orchestrator: properties: affinity: @@ -6953,10 +6983,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: orchestrator.image is required when orchestrator is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: orchestrator.size must be greater than 0 when orchestrator + is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) && + self.size > 0)' pause: type: boolean pmm: @@ -7238,9 +7273,11 @@ spec: type: object serverHost: type: string - required: - - image type: object + x-kubernetes-validations: + - message: pmm.image is required when pmm is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + 
&& self.image != '''')' proxy: properties: haproxy: @@ -8483,10 +8520,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: haproxy.image is required when haproxy is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: haproxy.size must be greater than 0 when haproxy is + enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' router: properties: affinity: @@ -9752,10 +9794,14 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: router.image is required when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: router.size must be greater than 0 when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' type: object secretsName: type: string @@ -10250,9 +10296,10 @@ spec: format: int32 type: integer type: object - required: - - image type: object + x-kubernetes-validations: + - message: toolkit.image is required + rule: has(self.image) && self.image != '' unsafeFlags: properties: backupNonReadyCluster: @@ -10335,6 +10382,21 @@ spec: properties: backupVersion: type: string + binlogServer: + properties: + imageID: + type: string + ready: + format: int32 + type: integer + size: + format: int32 + type: integer + state: + type: string + version: + type: string + type: object conditions: items: properties: diff --git a/deploy/backup/restore.yaml b/deploy/backup/restore.yaml index b7fe248eb..d1f6480b0 100644 --- a/deploy/backup/restore.yaml +++ b/deploy/backup/restore.yaml @@ -5,6 +5,11 @@ metadata: spec: clusterName: ps-cluster1 backupName: backup1 +# pitr: +# date: "2024-11-18T11:10:48Z" +# force: false +# gtid: a3e5ff70-83e2-11ef-8e57-7a62caf7e1e3:1-36 +# type: date # containerOptions: # env: # - name: CUSTOM_VAR diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index 2498fbd36..55ada54ac 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -2400,6 +2400,22 @@ spec: type: object type: array type: object + pitr: + properties: + date: + type: string + force: + type: boolean + gtid: + type: string + type: + enum: + - gtid + - date + type: string + required: + - type + type: object required: - clusterName type: object @@ -3134,9 +3150,16 @@ spec: additionalProperties: type: string type: object + checkpointInterval: + default: 30s + type: string + checkpointSize: + default: 16M + type: string configuration: type: string connectTimeout: + default: 30 format: int32 type: integer containerSecurityContext: @@ -3323,6 +3346,7 @@ spec: format: int64 type: integer idleTime: + default: 30 format: int32 type: integer image: @@ -3634,6 +3658,7 @@ spec: priorityClassName: type: string readTimeout: + default: 30 format: int32 type: integer readinessProbe: @@ -3750,6 +3775,9 @@ spec: x-kubernetes-int-or-string: true type: object type: object + rewriteFileSize: + default: 128M + type: string runtimeClassName: type: string schedulerName: @@ -3762,6 +3790,9 @@ spec: size: format: int32 type: integer + sslMode: + default: verify_identity + type: string startupProbe: properties: exec: @@ -3932,22 +3963,35 @@ spec: - whenUnsatisfiable type: object type: array + verifyChecksum: + default: true + type: boolean writeTimeout: + default: 30 format: int32 type: integer - required: - - connectTimeout - - idleTime - - image - - 
readTimeout - - serverId - - size - - storage - - writeTimeout type: object + x-kubernetes-validations: + - message: binlogServer size cannot be more than 1 + rule: '!has(self.size) || self.size <= 1' enabled: type: boolean type: object + x-kubernetes-validations: + - message: binlogServer is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || has(self.binlogServer)' + - message: binlogServer.image is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.image) && self.binlogServer.image + != '''')' + - message: binlogServer.size is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.size) && self.binlogServer.size + > 0)' + - message: binlogServer.serverId is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.serverId) && self.binlogServer.serverId + > 0)' resources: properties: claims: @@ -8134,10 +8178,12 @@ spec: type: string type: object type: object - required: - - image - - size type: object + x-kubernetes-validations: + - message: mysql.image is required + rule: has(self.image) && self.image != '' + - message: mysql.size must be greater than 0 + rule: has(self.size) && self.size > 0 orchestrator: properties: affinity: @@ -9378,10 +9424,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: orchestrator.image is required when orchestrator is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: orchestrator.size must be greater than 0 when orchestrator + is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) && + self.size > 0)' pause: type: boolean pmm: @@ -9663,9 +9714,11 @@ spec: type: object serverHost: type: string - required: - - image type: object + x-kubernetes-validations: + - message: pmm.image is required when pmm is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' proxy: properties: haproxy: @@ -10908,10 +10961,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: haproxy.image is required when haproxy is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: haproxy.size must be greater than 0 when haproxy is + enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' router: properties: affinity: @@ -12177,10 +12235,14 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: router.image is required when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: router.size must be greater than 0 when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' type: object secretsName: type: string @@ -12675,9 +12737,10 @@ spec: format: int32 type: integer type: object - required: - - image type: object + x-kubernetes-validations: + - message: toolkit.image is required + rule: has(self.image) && self.image != '' unsafeFlags: properties: backupNonReadyCluster: @@ -12760,6 +12823,21 @@ spec: properties: backupVersion: type: string + 
binlogServer: + properties: + imageID: + type: string + ready: + format: int32 + type: integer + size: + format: int32 + type: integer + state: + type: string + version: + type: string + type: object conditions: items: properties: diff --git a/deploy/cr.yaml b/deploy/cr.yaml index 4349f8e76..618ee437a 100644 --- a/deploy/cr.yaml +++ b/deploy/cr.yaml @@ -668,6 +668,106 @@ spec: enabled: true pitr: enabled: false +# binlogServer: +# size: 1 +# image: perconalab/percona-binlog-server:0.2.1 +# imagePullPolicy: Always +# imagePullSecrets: +# - name: my-secret-1 +# - name: my-secret-2 +# serverId: 100 +# storage: +# s3: +# bucket: S3-BACKUP-BUCKET-NAME-HERE +# credentialsSecret: ps-cluster1-s3-credentials +# endpointUrl: https://s3.amazonaws.com +# prefix: PREFIX_NAME +# region: us-west-2 +# connectTimeout: 30 +# readTimeout: 30 +# writeTimeout: 30 +# idleTime: 30 +# affinity: +# advanced: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/e2e-az-name +# operator: In +# values: +# - e2e-az1 +# - e2e-az2 +# antiAffinityTopologyKey: kubernetes.io/hostname +# checkpointInterval: 30s +# checkpointSize: 16M +# configuration: "" +# containerSecurityContext: +# privileged: false +# runAsGroup: 1001 +# runAsUser: 1001 +# env: [] +# envFrom: [] +# initContainer: +# containerSecurityContext: +# privileged: false +# runAsGroup: 1001 +# runAsUser: 1001 +# image: perconalab/percona-server-mysql-operator:main +# resources: +# limits: +# cpu: 100m +# memory: 100M +# requests: +# cpu: 200m +# memory: 200M +# initImage: "" +# livenessProbe: +# failureThreshold: 3 +# periodSeconds: 5 +# successThreshold: 1 +# timeoutSeconds: 3 +# podDisruptionBudget: +# maxUnavailable: 1 +# minAvailable: 0 +# podSecurityContext: +# fsGroup: 1001 +# supplementalGroups: +# - 1001 +# - 1002 +# - 1003 +# readinessProbe: +# failureThreshold: 3 +# periodSeconds: 5 +# successThreshold: 1 +# timeoutSeconds: 3 +# resources: +# limits: +# cpu: 100m +# memory: 100M +# requests: +# cpu: 200m +# memory: 200M +# rewriteFileSize: "" +# sslMode: "" +# startupProbe: +# failureThreshold: 3 +# periodSeconds: 5 +# successThreshold: 1 +# timeoutSeconds: 3 +# tolerations: +# - effect: NoExecute +# key: node.alpha.kubernetes.io/unreachable +# operator: Exists +# tolerationSeconds: 6000 +# topologySpreadConstraints: +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: percona-server +# maxSkew: 1 +# topologyKey: kubernetes.io/hostname +# whenUnsatisfiable: DoNotSchedule +# verifyChecksum: null # sourcePod: ps-cluster1-mysql-1 image: perconalab/percona-server-mysql-operator:main-backup8.4 imagePullPolicy: Always diff --git a/deploy/crd.yaml b/deploy/crd.yaml index 3a81b1119..2cea0803f 100644 --- a/deploy/crd.yaml +++ b/deploy/crd.yaml @@ -2400,6 +2400,22 @@ spec: type: object type: array type: object + pitr: + properties: + date: + type: string + force: + type: boolean + gtid: + type: string + type: + enum: + - gtid + - date + type: string + required: + - type + type: object required: - clusterName type: object @@ -3134,9 +3150,16 @@ spec: additionalProperties: type: string type: object + checkpointInterval: + default: 30s + type: string + checkpointSize: + default: 16M + type: string configuration: type: string connectTimeout: + default: 30 format: int32 type: integer containerSecurityContext: @@ -3323,6 +3346,7 @@ spec: format: int64 type: integer idleTime: + default: 30 format: int32 type: integer image: @@ -3634,6 +3658,7 @@ spec: priorityClassName: type: 
string readTimeout: + default: 30 format: int32 type: integer readinessProbe: @@ -3750,6 +3775,9 @@ spec: x-kubernetes-int-or-string: true type: object type: object + rewriteFileSize: + default: 128M + type: string runtimeClassName: type: string schedulerName: @@ -3762,6 +3790,9 @@ spec: size: format: int32 type: integer + sslMode: + default: verify_identity + type: string startupProbe: properties: exec: @@ -3932,22 +3963,35 @@ spec: - whenUnsatisfiable type: object type: array + verifyChecksum: + default: true + type: boolean writeTimeout: + default: 30 format: int32 type: integer - required: - - connectTimeout - - idleTime - - image - - readTimeout - - serverId - - size - - storage - - writeTimeout type: object + x-kubernetes-validations: + - message: binlogServer size cannot be more than 1 + rule: '!has(self.size) || self.size <= 1' enabled: type: boolean type: object + x-kubernetes-validations: + - message: binlogServer is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || has(self.binlogServer)' + - message: binlogServer.image is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.image) && self.binlogServer.image + != '''')' + - message: binlogServer.size is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.size) && self.binlogServer.size + > 0)' + - message: binlogServer.serverId is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.serverId) && self.binlogServer.serverId + > 0)' resources: properties: claims: @@ -8134,10 +8178,12 @@ spec: type: string type: object type: object - required: - - image - - size type: object + x-kubernetes-validations: + - message: mysql.image is required + rule: has(self.image) && self.image != '' + - message: mysql.size must be greater than 0 + rule: has(self.size) && self.size > 0 orchestrator: properties: affinity: @@ -9378,10 +9424,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: orchestrator.image is required when orchestrator is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: orchestrator.size must be greater than 0 when orchestrator + is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) && + self.size > 0)' pause: type: boolean pmm: @@ -9663,9 +9714,11 @@ spec: type: object serverHost: type: string - required: - - image type: object + x-kubernetes-validations: + - message: pmm.image is required when pmm is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' proxy: properties: haproxy: @@ -10908,10 +10961,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: haproxy.image is required when haproxy is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: haproxy.size must be greater than 0 when haproxy is + enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' router: properties: affinity: @@ -12177,10 +12235,14 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: router.image is required when 
router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: router.size must be greater than 0 when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' type: object secretsName: type: string @@ -12675,9 +12737,10 @@ spec: format: int32 type: integer type: object - required: - - image type: object + x-kubernetes-validations: + - message: toolkit.image is required + rule: has(self.image) && self.image != '' unsafeFlags: properties: backupNonReadyCluster: @@ -12760,6 +12823,21 @@ spec: properties: backupVersion: type: string + binlogServer: + properties: + imageID: + type: string + ready: + format: int32 + type: integer + size: + format: int32 + type: integer + state: + type: string + version: + type: string + type: object conditions: items: properties: diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index 271556278..c54ff87f1 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -2400,6 +2400,22 @@ spec: type: object type: array type: object + pitr: + properties: + date: + type: string + force: + type: boolean + gtid: + type: string + type: + enum: + - gtid + - date + type: string + required: + - type + type: object required: - clusterName type: object @@ -3134,9 +3150,16 @@ spec: additionalProperties: type: string type: object + checkpointInterval: + default: 30s + type: string + checkpointSize: + default: 16M + type: string configuration: type: string connectTimeout: + default: 30 format: int32 type: integer containerSecurityContext: @@ -3323,6 +3346,7 @@ spec: format: int64 type: integer idleTime: + default: 30 format: int32 type: integer image: @@ -3634,6 +3658,7 @@ spec: priorityClassName: type: string readTimeout: + default: 30 format: int32 type: integer readinessProbe: @@ -3750,6 +3775,9 @@ spec: x-kubernetes-int-or-string: true type: object type: object + rewriteFileSize: + default: 128M + type: string runtimeClassName: type: string schedulerName: @@ -3762,6 +3790,9 @@ spec: size: format: int32 type: integer + sslMode: + default: verify_identity + type: string startupProbe: properties: exec: @@ -3932,22 +3963,35 @@ spec: - whenUnsatisfiable type: object type: array + verifyChecksum: + default: true + type: boolean writeTimeout: + default: 30 format: int32 type: integer - required: - - connectTimeout - - idleTime - - image - - readTimeout - - serverId - - size - - storage - - writeTimeout type: object + x-kubernetes-validations: + - message: binlogServer size cannot be more than 1 + rule: '!has(self.size) || self.size <= 1' enabled: type: boolean type: object + x-kubernetes-validations: + - message: binlogServer is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || has(self.binlogServer)' + - message: binlogServer.image is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.image) && self.binlogServer.image + != '''')' + - message: binlogServer.size is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.size) && self.binlogServer.size + > 0)' + - message: binlogServer.serverId is required when pitr is enabled + rule: '!(has(self.enabled) && self.enabled) || !has(self.binlogServer) + || (has(self.binlogServer.serverId) && self.binlogServer.serverId + > 0)' resources: properties: claims: @@ -8134,10 +8178,12 @@ spec: type: string type: object type: object - required: 
- - image - - size type: object + x-kubernetes-validations: + - message: mysql.image is required + rule: has(self.image) && self.image != '' + - message: mysql.size must be greater than 0 + rule: has(self.size) && self.size > 0 orchestrator: properties: affinity: @@ -9378,10 +9424,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: orchestrator.image is required when orchestrator is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: orchestrator.size must be greater than 0 when orchestrator + is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) && + self.size > 0)' pause: type: boolean pmm: @@ -9663,9 +9714,11 @@ spec: type: object serverHost: type: string - required: - - image type: object + x-kubernetes-validations: + - message: pmm.image is required when pmm is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' proxy: properties: haproxy: @@ -10908,10 +10961,15 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: haproxy.image is required when haproxy is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: haproxy.size must be greater than 0 when haproxy is + enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' router: properties: affinity: @@ -12177,10 +12235,14 @@ spec: - whenUnsatisfiable type: object type: array - required: - - image - - size type: object + x-kubernetes-validations: + - message: router.image is required when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.image) + && self.image != '''')' + - message: router.size must be greater than 0 when router is enabled + rule: '!(has(self.enabled) && self.enabled) || (has(self.size) + && self.size > 0)' type: object secretsName: type: string @@ -12675,9 +12737,10 @@ spec: format: int32 type: integer type: object - required: - - image type: object + x-kubernetes-validations: + - message: toolkit.image is required + rule: has(self.image) && self.image != '' unsafeFlags: properties: backupNonReadyCluster: @@ -12760,6 +12823,21 @@ spec: properties: backupVersion: type: string + binlogServer: + properties: + imageID: + type: string + ready: + format: int32 + type: integer + size: + format: int32 + type: integer + state: + type: string + version: + type: string + type: object conditions: items: properties: diff --git a/e2e-tests/functions b/e2e-tests/functions index 0910b6aaa..acb481761 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -728,7 +728,8 @@ get_cr() { local image_toolkit=${6:-${IMAGE_TOOLKIT}} local image_haproxy=${7:-${IMAGE_HAPROXY}} local image_pmm_client=${8:-${IMAGE_PMM_CLIENT}} - local cr_file=${9:-${DEPLOY_DIR}/cr.yaml} + local image_binlog_server=${9:-${IMAGE_BINLOG_SERVER}} + local cr_file=${10:-${DEPLOY_DIR}/cr.yaml} local platform="$(detect_k8s_platform)" local cr_name="${test_name}${name_suffix:+-$name_suffix}" @@ -756,6 +757,7 @@ get_cr() { .spec.toolkit.image = "'"$image_toolkit"'" | .spec.proxy.haproxy.image = "'"$image_haproxy"'" | .spec.pmm.image = "'"$image_pmm_client"'" | + .spec.backup.pitr.binlogServer.image="'"${image_binlog_server}"'" | (.. 
| select(tag == "!!str")) |= sub(""; "'"${NAMESPACE}"'") ' "${crs[@]}" \ | if [[ $platform == "minikube" ]]; then @@ -1657,6 +1659,7 @@ get_cr_with_latest_versions_in_vs() { ${image_toolkit} \ ${image_haproxy} \ ${image_pmm_client} \ + ${image_binlog_server} \ ${TEMP_DIR}/cr.yaml } @@ -1946,7 +1949,7 @@ verify_all_backups_deletion() { count_backups_in_schedule() { local schedule_name="$1" local cluster_name="$2" - + local prefix=$(printf '%s' "${NAMESPACE}-${cluster_name}" | sha1sum | awk '{print $1}' | head -c 5) local bcp_count=$(kubectl get ps-backup -n ${NAMESPACE} -l percona.com/backup-ancestor=${prefix}-${schedule_name} -o yaml | yq '.items | length') echo $bcp_count diff --git a/e2e-tests/run-distro.csv b/e2e-tests/run-distro.csv index c71c3b776..e52e05abf 100644 --- a/e2e-tests/run-distro.csv +++ b/e2e-tests/run-distro.csv @@ -1,7 +1,10 @@ +async-upgrade auto-config config config-router demand-backup +gr-pitr-minio +async-pitr-minio demand-backup-cloud demand-backup-retry gr-demand-backup @@ -19,7 +22,6 @@ gr-self-healing gr-tls-cert-manager gr-users gr-upgrade -async-upgrade haproxy init-deploy one-pod diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index c3ff923bc..06d0c9d66 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -8,6 +8,8 @@ config-router,8.0 config-router,8.4 demand-backup,8.0 demand-backup,8.4 +gr-pitr-minio,8.4 +async-pitr-minio,8.4 demand-backup-cloud,8.4 demand-backup-retry,8.4 demand-backup-incremental,8.0 diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv index f8b4e9c1f..16df9239e 100644 --- a/e2e-tests/run-release.csv +++ b/e2e-tests/run-release.csv @@ -1,10 +1,12 @@ -version-service async-ignore-annotations async-global-metadata +async-upgrade auto-config config config-router demand-backup +gr-pitr-minio +async-pitr-minio demand-backup-cloud demand-backup-retry demand-backup-incremental @@ -29,13 +31,13 @@ gr-self-healing gr-tls-cert-manager gr-users gr-upgrade -async-upgrade haproxy init-deploy limits monitoring one-pod operator-self-healing +pvc-resize recreate scaling scheduled-backup @@ -47,4 +49,4 @@ storage telemetry tls-cert-manager users -pvc-resize +version-service diff --git a/e2e-tests/tests/async-pitr-minio/00-assert.yaml b/e2e-tests/tests/async-pitr-minio/00-assert.yaml new file mode 100644 index 000000000..fecdb222e --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/00-assert.yaml @@ -0,0 +1,9 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 150 +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-secret +type: Opaque diff --git a/e2e-tests/tests/async-pitr-minio/00-minio-secret.yaml b/e2e-tests/tests/async-pitr-minio/00-minio-secret.yaml new file mode 100644 index 000000000..3c797f054 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/00-minio-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: minio-secret +stringData: + AWS_ACCESS_KEY_ID: some-access$\n"-key + AWS_SECRET_ACCESS_KEY: some-$\n"secret-key diff --git a/e2e-tests/tests/async-pitr-minio/01-assert.yaml b/e2e-tests/tests/async-pitr-minio/01-assert.yaml new file mode 100644 index 000000000..5f346cb51 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/01-assert.yaml @@ -0,0 +1,26 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 150 +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: perconaservermysqls.ps.percona.com +spec: + group: ps.percona.com + names: + kind: PerconaServerMySQL + listKind: PerconaServerMySQLList + plural: 
perconaservermysqls + shortNames: + - ps + singular: perconaservermysql + scope: Namespaced +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +metadata: + name: check-operator-deploy-status +timeout: 120 +commands: + - script: kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1 diff --git a/e2e-tests/tests/async-pitr-minio/01-deploy-operator.yaml b/e2e-tests/tests/async-pitr-minio/01-deploy-operator.yaml new file mode 100644 index 000000000..6ab1b37f9 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/01-deploy-operator.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + init_temp_dir # do this only in the first TestStep + + apply_s3_storage_secrets + deploy_operator + deploy_client + deploy_minio + timeout: 300 diff --git a/e2e-tests/tests/async-pitr-minio/02-assert.yaml b/e2e-tests/tests/async-pitr-minio/02-assert.yaml new file mode 100644 index 000000000..a5da7bf0e --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/02-assert.yaml @@ -0,0 +1,70 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: async-pitr-minio-mysql +status: + observedGeneration: 1 + replicas: 3 + readyReplicas: 3 + currentReplicas: 3 + updatedReplicas: 3 + collisionCount: 0 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: async-pitr-minio-haproxy +status: + observedGeneration: 1 + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: async-pitr-minio-orc +status: + observedGeneration: 1 + replicas: 3 + readyReplicas: 3 + currentReplicas: 3 + updatedReplicas: 3 + collisionCount: 0 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: async-pitr-minio-binlog-server +status: + observedGeneration: 1 + replicas: 1 + readyReplicas: 1 + currentReplicas: 1 + updatedReplicas: 1 + collisionCount: 0 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: async-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + mysql: + ready: 3 + size: 3 + state: ready + haproxy: + ready: 3 + size: 3 + state: ready + orchestrator: + ready: 3 + size: 3 + state: ready + state: ready diff --git a/e2e-tests/tests/async-pitr-minio/02-create-cluster.yaml b/e2e-tests/tests/async-pitr-minio/02-create-cluster.yaml new file mode 100644 index 000000000..ffd6f4b93 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/02-create-cluster.yaml @@ -0,0 +1,38 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 10 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + get_cr \ + | yq eval '.spec.mysql.clusterType="async"' - \ + | yq eval ".spec.mysql.size=3" - \ + | yq eval ".spec.orchestrator.enabled=true" - \ + | yq eval ".spec.proxy.haproxy.enabled=true" - \ + | yq eval ".spec.proxy.haproxy.size=3" - \ + | yq eval ".spec.proxy.router.enabled=false" - \ + | yq eval ".spec.backup.backoffLimit=3" - \ + | yq eval '.spec.backup.storages.minio.type="s3"' - \ + | yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - \ + | yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - \ + | yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \ + | yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - \ + | yq eval 
'.spec.backup.storages.minio.containerOptions.env[0].name="VERIFY_TLS"' - \ + | yq eval '.spec.backup.storages.minio.containerOptions.env[0].value="false"' - \ + | yq eval '.spec.backup.pitr.enabled=true' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.bucket="operator-testing"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.prefix="binlogs"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.credentialsSecret="minio-secret"' - \ + | yq eval ".spec.backup.pitr.binlogServer.storage.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.region="us-east-1"' - \ + | yq eval '.spec.backup.pitr.binlogServer.size=1' - \ + | yq eval '.spec.backup.pitr.binlogServer.serverId=100' - \ + | yq eval '.spec.backup.pitr.binlogServer.connectTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.readTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.writeTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.idleTime=3' - \ + | kubectl -n "${NAMESPACE}" apply -f - diff --git a/e2e-tests/tests/async-pitr-minio/03-write-data.yaml b/e2e-tests/tests/async-pitr-minio/03-write-data.yaml new file mode 100644 index 000000000..d490db677 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/03-write-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + run_mysql \ + "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ + "-h $(get_haproxy_svc $(get_cluster_name))" + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100500)" \ + "-h $(get_haproxy_svc $(get_cluster_name))" diff --git a/e2e-tests/tests/async-pitr-minio/04-assert.yaml b/e2e-tests/tests/async-pitr-minio/04-assert.yaml new file mode 100644 index 000000000..f6f17bee2 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/04-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- +kind: PerconaServerMySQLBackup +apiVersion: ps.percona.com/v1 +metadata: + name: async-pitr-minio-backup +status: + state: Succeeded diff --git a/e2e-tests/tests/async-pitr-minio/04-create-backup.yaml b/e2e-tests/tests/async-pitr-minio/04-create-backup.yaml new file mode 100644 index 000000000..9c42eacf9 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/04-create-backup.yaml @@ -0,0 +1,7 @@ +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQLBackup +metadata: + name: async-pitr-minio-backup +spec: + clusterName: async-pitr-minio + storageName: minio diff --git a/e2e-tests/tests/async-pitr-minio/05-write-more-data.yaml b/e2e-tests/tests/async-pitr-minio/05-write-more-data.yaml new file mode 100644 index 000000000..ec30f75d5 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/05-write-more-data.yaml @@ -0,0 +1,12 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100501)" \ + "-h $(get_haproxy_svc $(get_cluster_name))" diff --git a/e2e-tests/tests/async-pitr-minio/06-assert.yaml b/e2e-tests/tests/async-pitr-minio/06-assert.yaml new file mode 100644 index 000000000..90ee0b7eb --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/06-assert.yaml @@ -0,0 +1,35 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: + - type: pod + selector: "app.kubernetes.io/component=pitr" + tail: 1000 +--- +apiVersion: 
ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: async-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + mysql: + ready: 3 + size: 3 + state: ready + haproxy: + ready: 3 + size: 3 + state: ready + orchestrator: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLRestore +apiVersion: ps.percona.com/v1 +metadata: + name: async-pitr-minio-restore +status: + state: Succeeded diff --git a/e2e-tests/tests/async-pitr-minio/06-create-pitr-restore.yaml b/e2e-tests/tests/async-pitr-minio/06-create-pitr-restore.yaml new file mode 100644 index 000000000..654e7503c --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/06-create-pitr-restore.yaml @@ -0,0 +1,39 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 120 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + GTID_BEFORE=$(run_mysql "SELECT @@GLOBAL.gtid_executed" "-h $(get_haproxy_svc ${cluster_name})" | tr -d '\n') + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100502)" \ + "-h $(get_haproxy_svc ${cluster_name})" + + PITR_GTID=$(run_mysql "SELECT GTID_SUBTRACT(@@GLOBAL.gtid_executed, '${GTID_BEFORE}')" "-h $(get_haproxy_svc ${cluster_name})" | tr -d '\n') + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100503)" \ + "-h $(get_haproxy_svc ${cluster_name})" + + sleep 10 + PITR_DATE=$(date -u '+%Y-%m-%d %H:%M:%S') + kubectl create configmap -n "${NAMESPACE}" pitr-date --from-literal=date="${PITR_DATE}" + + sleep 60 + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLRestore"' - \ + | yq eval '.metadata.name = "async-pitr-minio-restore"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.backupName = "async-pitr-minio-backup"' - \ + | yq eval '.spec.pitr.type = "gtid"' - \ + | yq eval ".spec.pitr.gtid = \"${PITR_GTID}\"" - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/async-pitr-minio/07-assert.yaml b/e2e-tests/tests/async-pitr-minio/07-assert.yaml new file mode 100644 index 000000000..c2d3d8132 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/07-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 07-read-data-0 +data: + max_id: "100502" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 07-read-data-1 +data: + max_id: "100502" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 07-read-data-2 +data: + max_id: "100502" diff --git a/e2e-tests/tests/async-pitr-minio/07-read-data.yaml b/e2e-tests/tests/async-pitr-minio/07-read-data.yaml new file mode 100644 index 000000000..68ba007c9 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/07-read-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 30 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + for i in 0 1 2; do + max_id=$(run_mysql "SELECT MAX(id) FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") + kubectl create configmap -n "${NAMESPACE}" 07-read-data-${i} --from-literal=max_id="${max_id}" + done diff --git a/e2e-tests/tests/async-pitr-minio/08-assert.yaml b/e2e-tests/tests/async-pitr-minio/08-assert.yaml new file mode 100644 index 000000000..25082063c --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/08-assert.yaml @@ -0,0 +1,31 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert 
+timeout: 600 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: async-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + mysql: + ready: 3 + size: 3 + state: ready + haproxy: + ready: 3 + size: 3 + state: ready + orchestrator: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLRestore +apiVersion: ps.percona.com/v1 +metadata: + name: async-pitr-minio-restore-date +status: + state: Succeeded diff --git a/e2e-tests/tests/async-pitr-minio/08-create-date-restore.yaml b/e2e-tests/tests/async-pitr-minio/08-create-date-restore.yaml new file mode 100644 index 000000000..485147a9a --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/08-create-date-restore.yaml @@ -0,0 +1,23 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 90 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + PITR_DATE=$(kubectl get configmap -n "${NAMESPACE}" pitr-date -o jsonpath='{.data.date}') + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLRestore"' - \ + | yq eval '.metadata.name = "async-pitr-minio-restore-date"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.backupName = "async-pitr-minio-backup"' - \ + | yq eval '.spec.pitr.type = "date"' - \ + | yq eval ".spec.pitr.date = \"${PITR_DATE}\"" - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/async-pitr-minio/09-assert.yaml b/e2e-tests/tests/async-pitr-minio/09-assert.yaml new file mode 100644 index 000000000..8f6ce88d6 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/09-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 09-read-data-0 +data: + max_id: "100503" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 09-read-data-1 +data: + max_id: "100503" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 09-read-data-2 +data: + max_id: "100503" diff --git a/e2e-tests/tests/async-pitr-minio/09-read-data.yaml b/e2e-tests/tests/async-pitr-minio/09-read-data.yaml new file mode 100644 index 000000000..8d79b4d3f --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/09-read-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 30 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + for i in 0 1 2; do + max_id=$(run_mysql "SELECT MAX(id) FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") + kubectl create configmap -n "${NAMESPACE}" 09-read-data-${i} --from-literal=max_id="${max_id}" + done diff --git a/e2e-tests/tests/async-pitr-minio/10-failover.yaml b/e2e-tests/tests/async-pitr-minio/10-failover.yaml new file mode 100644 index 000000000..276518152 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/10-failover.yaml @@ -0,0 +1,37 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 600 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + primary_before=$(get_primary_from_label) + + kubectl -n "${NAMESPACE}" delete pod "${primary_before}" + + wait_cluster_consistency_async "${cluster_name}" 3 3 + + primary_after=$(get_primary_from_label) + + if [[ "${primary_before}" == "${primary_after}" ]]; then + echo "Primary pod did not change after failover: was ${primary_before}, still 
${primary_after}" + exit 1 + fi + echo "Primary changed from ${primary_before} to ${primary_after}" + + retry=0 + until [[ "$(kubectl -n "${NAMESPACE}" get endpoints "${cluster_name}-mysql-primary" \ + -o jsonpath='{.subsets[0].addresses[0].targetRef.name}' 2>/dev/null)" == "${primary_after}" ]]; do + sleep 5 + retry=$((retry + 1)) + if [ $retry -ge 24 ]; then + echo "Primary service endpoint did not update to ${primary_after} after 2 minutes" + kubectl -n "${NAMESPACE}" get endpoints "${cluster_name}-mysql-primary" -o yaml + exit 1 + fi + done + echo "Primary service endpoint correctly points to new primary: ${primary_after}" diff --git a/e2e-tests/tests/async-pitr-minio/11-assert.yaml b/e2e-tests/tests/async-pitr-minio/11-assert.yaml new file mode 100644 index 000000000..3d0975a28 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/11-assert.yaml @@ -0,0 +1,31 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: async-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + mysql: + ready: 3 + size: 3 + state: ready + haproxy: + ready: 3 + size: 3 + state: ready + orchestrator: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLBackup +apiVersion: ps.percona.com/v1 +metadata: + name: async-pitr-minio-backup-2 +status: + state: Succeeded diff --git a/e2e-tests/tests/async-pitr-minio/11-write-and-backup.yaml b/e2e-tests/tests/async-pitr-minio/11-write-and-backup.yaml new file mode 100644 index 000000000..010ba1368 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/11-write-and-backup.yaml @@ -0,0 +1,23 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 60 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (200500)" \ + "-h $(get_haproxy_svc ${cluster_name})" + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLBackup"' - \ + | yq eval '.metadata.name = "async-pitr-minio-backup-2"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.storageName = "minio"' - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/async-pitr-minio/12-assert.yaml b/e2e-tests/tests/async-pitr-minio/12-assert.yaml new file mode 100644 index 000000000..abb266d09 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/12-assert.yaml @@ -0,0 +1,35 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: + - type: pod + selector: "app.kubernetes.io/component=pitr" + tail: 1000 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: async-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + mysql: + ready: 3 + size: 3 + state: ready + haproxy: + ready: 3 + size: 3 + state: ready + orchestrator: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLRestore +apiVersion: ps.percona.com/v1 +metadata: + name: async-pitr-minio-restore-post-failover +status: + state: Succeeded diff --git a/e2e-tests/tests/async-pitr-minio/12-write-and-restore.yaml b/e2e-tests/tests/async-pitr-minio/12-write-and-restore.yaml new file mode 100644 index 000000000..93a6157c1 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/12-write-and-restore.yaml @@ -0,0 +1,30 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 600 + script: |- + set -o errexit + 
set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (200501)" \ + "-h $(get_haproxy_svc ${cluster_name})" + + sleep 120 + + PITR_DATE_POST=$(date -u '+%Y-%m-%d %H:%M:%S') + kubectl create configmap -n "${NAMESPACE}" pitr-date-post --from-literal=date="${PITR_DATE_POST}" + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLRestore"' - \ + | yq eval '.metadata.name = "async-pitr-minio-restore-post-failover"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.backupName = "async-pitr-minio-backup-2"' - \ + | yq eval '.spec.pitr.type = "date"' - \ + | yq eval ".spec.pitr.date = \"${PITR_DATE_POST}\"" - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/async-pitr-minio/13-assert.yaml b/e2e-tests/tests/async-pitr-minio/13-assert.yaml new file mode 100644 index 000000000..a771c8bac --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/13-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 13-read-data-0 +data: + max_id: "200501" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 13-read-data-1 +data: + max_id: "200501" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 13-read-data-2 +data: + max_id: "200501" diff --git a/e2e-tests/tests/async-pitr-minio/13-read-data.yaml b/e2e-tests/tests/async-pitr-minio/13-read-data.yaml new file mode 100644 index 000000000..1034b3210 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/13-read-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 30 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + for i in 0 1 2; do + max_id=$(run_mysql "SELECT MAX(id) FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") + kubectl create configmap -n "${NAMESPACE}" 13-read-data-${i} --from-literal=max_id="${max_id}" + done diff --git a/e2e-tests/tests/async-pitr-minio/98-drop-finalizer.yaml b/e2e-tests/tests/async-pitr-minio/98-drop-finalizer.yaml new file mode 100644 index 000000000..ce815f41f --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/98-drop-finalizer.yaml @@ -0,0 +1,5 @@ +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: async-pitr-minio + finalizers: [] diff --git a/e2e-tests/tests/async-pitr-minio/99-remove-cluster-gracefully.yaml b/e2e-tests/tests/async-pitr-minio/99-remove-cluster-gracefully.yaml new file mode 100644 index 000000000..b54d33c84 --- /dev/null +++ b/e2e-tests/tests/async-pitr-minio/99-remove-cluster-gracefully.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: ps.percona.com/v1 + kind: PerconaServerMySQL + metadata: + name: async-pitr-minio +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + destroy_operator + timeout: 180 diff --git a/e2e-tests/tests/async-upgrade/01-create-cluster.yaml b/e2e-tests/tests/async-upgrade/01-create-cluster.yaml index 195a7c86a..bd94986a5 100644 --- a/e2e-tests/tests/async-upgrade/01-create-cluster.yaml +++ b/e2e-tests/tests/async-upgrade/01-create-cluster.yaml @@ -15,9 +15,23 @@ commands: exit 1 fi + # binlog server options were required in v1.0.0 + # we can delete them after v1.1.0 is released get_cr_with_latest_versions_in_vs \ | yq eval "$(printf '.spec.initImage="%s"' "${init_image}")" - \ | yq 
eval "$(printf '.spec.crVersion="%s"' "${version}")" - \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.secretsName="async-upgrade-secrets"' - \ + | yq eval '.spec.backup.pitr.enabled=false' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.bucket="operator-testing"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.prefix="binlogs"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.credentialsSecret="minio-secret"' - \ + | yq eval ".spec.backup.pitr.binlogServer.storage.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.region="us-east-1"' - \ + | yq eval '.spec.backup.pitr.binlogServer.size=1' - \ + | yq eval '.spec.backup.pitr.binlogServer.serverId=100' - \ + | yq eval '.spec.backup.pitr.binlogServer.connectTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.readTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.writeTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.idleTime=3' - \ | kubectl -n "${NAMESPACE}" apply -f - diff --git a/e2e-tests/tests/gr-pitr-minio/00-assert.yaml b/e2e-tests/tests/gr-pitr-minio/00-assert.yaml new file mode 100644 index 000000000..fecdb222e --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/00-assert.yaml @@ -0,0 +1,9 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 150 +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-secret +type: Opaque diff --git a/e2e-tests/tests/gr-pitr-minio/00-minio-secret.yaml b/e2e-tests/tests/gr-pitr-minio/00-minio-secret.yaml new file mode 100644 index 000000000..3c797f054 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/00-minio-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: minio-secret +stringData: + AWS_ACCESS_KEY_ID: some-access$\n"-key + AWS_SECRET_ACCESS_KEY: some-$\n"secret-key diff --git a/e2e-tests/tests/gr-pitr-minio/01-assert.yaml b/e2e-tests/tests/gr-pitr-minio/01-assert.yaml new file mode 100644 index 000000000..5f346cb51 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/01-assert.yaml @@ -0,0 +1,26 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 150 +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: perconaservermysqls.ps.percona.com +spec: + group: ps.percona.com + names: + kind: PerconaServerMySQL + listKind: PerconaServerMySQLList + plural: perconaservermysqls + shortNames: + - ps + singular: perconaservermysql + scope: Namespaced +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +metadata: + name: check-operator-deploy-status +timeout: 120 +commands: + - script: kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1 diff --git a/e2e-tests/tests/gr-pitr-minio/01-deploy-operator.yaml b/e2e-tests/tests/gr-pitr-minio/01-deploy-operator.yaml new file mode 100644 index 000000000..6ab1b37f9 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/01-deploy-operator.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + init_temp_dir # do this only in the first TestStep + + apply_s3_storage_secrets + deploy_operator + deploy_client + deploy_minio + timeout: 300 diff --git a/e2e-tests/tests/gr-pitr-minio/02-assert.yaml b/e2e-tests/tests/gr-pitr-minio/02-assert.yaml new file mode 100644 index 000000000..b17d34c55 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/02-assert.yaml @@ -0,0 +1,54 @@ 
+apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: gr-pitr-minio-mysql +status: + observedGeneration: 1 + replicas: 3 + readyReplicas: 3 + currentReplicas: 3 + updatedReplicas: 3 + collisionCount: 0 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: gr-pitr-minio-router +status: + observedGeneration: 1 + replicas: 3 + readyReplicas: 3 + updatedReplicas: 3 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: gr-pitr-minio-binlog-server +status: + observedGeneration: 1 + replicas: 1 + readyReplicas: 1 + currentReplicas: 1 + updatedReplicas: 1 + collisionCount: 0 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: gr-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + router: + ready: 3 + size: 3 + state: ready + mysql: + ready: 3 + size: 3 + state: ready + state: ready diff --git a/e2e-tests/tests/gr-pitr-minio/02-create-cluster.yaml b/e2e-tests/tests/gr-pitr-minio/02-create-cluster.yaml new file mode 100644 index 000000000..18d12a7af --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/02-create-cluster.yaml @@ -0,0 +1,37 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 10 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + get_cr \ + | yq eval '.spec.mysql.clusterType="group-replication"' - \ + | yq eval ".spec.mysql.size=3" - \ + | yq eval ".spec.proxy.haproxy.enabled=false" - \ + | yq eval ".spec.proxy.router.enabled=true" - \ + | yq eval ".spec.proxy.router.size=3" - \ + | yq eval ".spec.backup.backoffLimit=3" - \ + | yq eval '.spec.backup.storages.minio.type="s3"' - \ + | yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - \ + | yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - \ + | yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \ + | yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - \ + | yq eval '.spec.backup.storages.minio.containerOptions.env[0].name="VERIFY_TLS"' - \ + | yq eval '.spec.backup.storages.minio.containerOptions.env[0].value="false"' - \ + | yq eval '.spec.backup.pitr.enabled=true' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.bucket="operator-testing"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.prefix="binlogs"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.credentialsSecret="minio-secret"' - \ + | yq eval ".spec.backup.pitr.binlogServer.storage.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.region="us-east-1"' - \ + | yq eval '.spec.backup.pitr.binlogServer.size=1' - \ + | yq eval '.spec.backup.pitr.binlogServer.serverId=100' - \ + | yq eval '.spec.backup.pitr.binlogServer.connectTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.readTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.writeTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.idleTime=3' - \ + | kubectl -n "${NAMESPACE}" apply -f - diff --git a/e2e-tests/tests/gr-pitr-minio/03-write-data.yaml b/e2e-tests/tests/gr-pitr-minio/03-write-data.yaml new file mode 100644 index 000000000..6bc2f5f72 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/03-write-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + run_mysql \ + "CREATE DATABASE IF NOT EXISTS myDB; CREATE 
TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ + "-h $(get_router_service $(get_cluster_name))" + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100500)" \ + "-h $(get_router_service $(get_cluster_name))" diff --git a/e2e-tests/tests/gr-pitr-minio/04-assert.yaml b/e2e-tests/tests/gr-pitr-minio/04-assert.yaml new file mode 100644 index 000000000..1f80a179f --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/04-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- +kind: PerconaServerMySQLBackup +apiVersion: ps.percona.com/v1 +metadata: + name: gr-pitr-minio-backup +status: + state: Succeeded diff --git a/e2e-tests/tests/gr-pitr-minio/04-create-backup.yaml b/e2e-tests/tests/gr-pitr-minio/04-create-backup.yaml new file mode 100644 index 000000000..e4b69900e --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/04-create-backup.yaml @@ -0,0 +1,7 @@ +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQLBackup +metadata: + name: gr-pitr-minio-backup +spec: + clusterName: gr-pitr-minio + storageName: minio diff --git a/e2e-tests/tests/gr-pitr-minio/05-write-more-data.yaml b/e2e-tests/tests/gr-pitr-minio/05-write-more-data.yaml new file mode 100644 index 000000000..31811988b --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/05-write-more-data.yaml @@ -0,0 +1,12 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100501)" \ + "-h $(get_router_service $(get_cluster_name))" diff --git a/e2e-tests/tests/gr-pitr-minio/06-assert.yaml b/e2e-tests/tests/gr-pitr-minio/06-assert.yaml new file mode 100644 index 000000000..841aefa9e --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/06-assert.yaml @@ -0,0 +1,31 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: + - type: pod + selector: "app.kubernetes.io/component=pitr" + tail: 1000 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: gr-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + router: + ready: 3 + size: 3 + state: ready + mysql: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLRestore +apiVersion: ps.percona.com/v1 +metadata: + name: gr-pitr-minio-restore +status: + state: Succeeded diff --git a/e2e-tests/tests/gr-pitr-minio/06-create-pitr-restore.yaml b/e2e-tests/tests/gr-pitr-minio/06-create-pitr-restore.yaml new file mode 100644 index 000000000..034372f2d --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/06-create-pitr-restore.yaml @@ -0,0 +1,39 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 90 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + GTID_BEFORE=$(run_mysql "SELECT @@GLOBAL.gtid_executed" "-h $(get_router_service ${cluster_name})" | tr -d '\n') + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100502)" \ + "-h $(get_router_service ${cluster_name})" + + PITR_GTID=$(run_mysql "SELECT GTID_SUBTRACT(@@GLOBAL.gtid_executed, '${GTID_BEFORE}')" "-h $(get_router_service ${cluster_name})" | tr -d '\n') + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (100503)" \ + "-h $(get_router_service ${cluster_name})" + + sleep 10 + PITR_DATE=$(date -u '+%Y-%m-%d %H:%M:%S') + kubectl create configmap -n "${NAMESPACE}" pitr-date --from-literal=date="${PITR_DATE}" + + sleep 60 + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = 
"PerconaServerMySQLRestore"' - \ + | yq eval '.metadata.name = "gr-pitr-minio-restore"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.backupName = "gr-pitr-minio-backup"' - \ + | yq eval '.spec.pitr.type = "gtid"' - \ + | yq eval ".spec.pitr.gtid = \"${PITR_GTID}\"" - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/gr-pitr-minio/07-assert.yaml b/e2e-tests/tests/gr-pitr-minio/07-assert.yaml new file mode 100644 index 000000000..c2d3d8132 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/07-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 07-read-data-0 +data: + max_id: "100502" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 07-read-data-1 +data: + max_id: "100502" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 07-read-data-2 +data: + max_id: "100502" diff --git a/e2e-tests/tests/gr-pitr-minio/07-read-data.yaml b/e2e-tests/tests/gr-pitr-minio/07-read-data.yaml new file mode 100644 index 000000000..68ba007c9 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/07-read-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 30 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + for i in 0 1 2; do + max_id=$(run_mysql "SELECT MAX(id) FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") + kubectl create configmap -n "${NAMESPACE}" 07-read-data-${i} --from-literal=max_id="${max_id}" + done diff --git a/e2e-tests/tests/gr-pitr-minio/08-assert.yaml b/e2e-tests/tests/gr-pitr-minio/08-assert.yaml new file mode 100644 index 000000000..f83683ead --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/08-assert.yaml @@ -0,0 +1,27 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: gr-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + router: + ready: 3 + size: 3 + state: ready + mysql: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLRestore +apiVersion: ps.percona.com/v1 +metadata: + name: gr-pitr-minio-restore-date +status: + state: Succeeded diff --git a/e2e-tests/tests/gr-pitr-minio/08-create-date-restore.yaml b/e2e-tests/tests/gr-pitr-minio/08-create-date-restore.yaml new file mode 100644 index 000000000..4d59a212f --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/08-create-date-restore.yaml @@ -0,0 +1,23 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 90 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + PITR_DATE=$(kubectl get configmap -n "${NAMESPACE}" pitr-date -o jsonpath='{.data.date}') + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLRestore"' - \ + | yq eval '.metadata.name = "gr-pitr-minio-restore-date"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.backupName = "gr-pitr-minio-backup"' - \ + | yq eval '.spec.pitr.type = "date"' - \ + | yq eval ".spec.pitr.date = \"${PITR_DATE}\"" - \ + | kubectl apply -n "${NAMESPACE}" -f - \ No newline at end of file diff --git a/e2e-tests/tests/gr-pitr-minio/09-assert.yaml b/e2e-tests/tests/gr-pitr-minio/09-assert.yaml new file mode 100644 index 000000000..8f6ce88d6 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/09-assert.yaml @@ 
-0,0 +1,24 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 09-read-data-0 +data: + max_id: "100503" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 09-read-data-1 +data: + max_id: "100503" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 09-read-data-2 +data: + max_id: "100503" diff --git a/e2e-tests/tests/gr-pitr-minio/09-read-data.yaml b/e2e-tests/tests/gr-pitr-minio/09-read-data.yaml new file mode 100644 index 000000000..8d79b4d3f --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/09-read-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 30 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + for i in 0 1 2; do + max_id=$(run_mysql "SELECT MAX(id) FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") + kubectl create configmap -n "${NAMESPACE}" 09-read-data-${i} --from-literal=max_id="${max_id}" + done diff --git a/e2e-tests/tests/gr-pitr-minio/10-failover.yaml b/e2e-tests/tests/gr-pitr-minio/10-failover.yaml new file mode 100644 index 000000000..b5ac51568 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/10-failover.yaml @@ -0,0 +1,37 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 600 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + primary_before=$(get_primary_from_label) + + kubectl -n "${NAMESPACE}" delete pod "${primary_before}" + + wait_cluster_consistency_gr "${cluster_name}" 3 3 + + primary_after=$(get_primary_from_label) + + if [[ "${primary_before}" == "${primary_after}" ]]; then + echo "Primary pod did not change after failover: was ${primary_before}, still ${primary_after}" + exit 1 + fi + echo "Primary changed from ${primary_before} to ${primary_after}" + + retry=0 + until [[ "$(kubectl -n "${NAMESPACE}" get endpoints "${cluster_name}-mysql-primary" \ + -o jsonpath='{.subsets[0].addresses[0].targetRef.name}' 2>/dev/null)" == "${primary_after}" ]]; do + sleep 5 + retry=$((retry + 1)) + if [ $retry -ge 24 ]; then + echo "Primary service endpoint did not update to ${primary_after} after 2 minutes" + kubectl -n "${NAMESPACE}" get endpoints "${cluster_name}-mysql-primary" -o yaml + exit 1 + fi + done + echo "Primary service endpoint correctly points to new primary: ${primary_after}" diff --git a/e2e-tests/tests/gr-pitr-minio/11-assert.yaml b/e2e-tests/tests/gr-pitr-minio/11-assert.yaml new file mode 100644 index 000000000..27c3468e6 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/11-assert.yaml @@ -0,0 +1,27 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: gr-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + router: + ready: 3 + size: 3 + state: ready + mysql: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLBackup +apiVersion: ps.percona.com/v1 +metadata: + name: gr-pitr-minio-backup-2 +status: + state: Succeeded \ No newline at end of file diff --git a/e2e-tests/tests/gr-pitr-minio/11-write-and-backup.yaml b/e2e-tests/tests/gr-pitr-minio/11-write-and-backup.yaml new file mode 100644 index 000000000..b3dccd2d3 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/11-write-and-backup.yaml @@ -0,0 +1,23 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 60 +commands: + - script: |- + set -o errexit + set -o xtrace 
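+ # Write a marker row and take a second base backup so the post-failover PITR restore in the next step has a fresh base to replay from.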
+ + source ../../functions + + cluster_name=$(get_cluster_name) + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (200500)" \ + "-h $(get_router_service ${cluster_name})" + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLBackup"' - \ + | yq eval '.metadata.name = "gr-pitr-minio-backup-2"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.storageName = "minio"' - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/gr-pitr-minio/12-assert.yaml b/e2e-tests/tests/gr-pitr-minio/12-assert.yaml new file mode 100644 index 000000000..f51749937 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/12-assert.yaml @@ -0,0 +1,31 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +collectors: + - type: pod + selector: "app.kubernetes.io/component=pitr" + tail: 1000 +--- +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: gr-pitr-minio + finalizers: + - percona.com/delete-mysql-pods-in-order +status: + router: + ready: 3 + size: 3 + state: ready + mysql: + ready: 3 + size: 3 + state: ready + state: ready +--- +kind: PerconaServerMySQLRestore +apiVersion: ps.percona.com/v1 +metadata: + name: gr-pitr-minio-restore-post-failover +status: + state: Succeeded \ No newline at end of file diff --git a/e2e-tests/tests/gr-pitr-minio/12-write-and-restore.yaml b/e2e-tests/tests/gr-pitr-minio/12-write-and-restore.yaml new file mode 100644 index 000000000..fe2e9760e --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/12-write-and-restore.yaml @@ -0,0 +1,31 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - timeout: 600 + script: |- + set -o errexit + set -o xtrace + + source ../../functions + + cluster_name=$(get_cluster_name) + + run_mysql \ + "INSERT myDB.myTable (id) VALUES (200501)" \ + "-h $(get_router_service ${cluster_name})" + + sleep 120 + + PITR_DATE_POST=$(date -u '+%Y-%m-%d %H:%M:%S') + kubectl create configmap -n "${NAMESPACE}" pitr-date-post --from-literal=date="${PITR_DATE_POST}" + + echo '{}' \ + | yq eval '.apiVersion = "ps.percona.com/v1"' - \ + | yq eval '.kind = "PerconaServerMySQLRestore"' - \ + | yq eval '.metadata.name = "gr-pitr-minio-restore-post-failover"' - \ + | yq eval ".spec.clusterName = \"${cluster_name}\"" - \ + | yq eval '.spec.backupName = "gr-pitr-minio-backup-2"' - \ + | yq eval '.spec.pitr.force = true' - \ + | yq eval '.spec.pitr.type = "date"' - \ + | yq eval ".spec.pitr.date = \"${PITR_DATE_POST}\"" - \ + | kubectl apply -n "${NAMESPACE}" -f - diff --git a/e2e-tests/tests/gr-pitr-minio/13-assert.yaml b/e2e-tests/tests/gr-pitr-minio/13-assert.yaml new file mode 100644 index 000000000..a771c8bac --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/13-assert.yaml @@ -0,0 +1,24 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 30 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 13-read-data-0 +data: + max_id: "200501" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 13-read-data-1 +data: + max_id: "200501" +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: 13-read-data-2 +data: + max_id: "200501" diff --git a/e2e-tests/tests/gr-pitr-minio/13-read-data.yaml b/e2e-tests/tests/gr-pitr-minio/13-read-data.yaml new file mode 100644 index 000000000..1034b3210 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/13-read-data.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +timeout: 30 +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + 
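# Read MAX(id) from each mysql pod directly (not through the router) to confirm the restore converged on every replica. + 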
cluster_name=$(get_cluster_name) + + for i in 0 1 2; do + max_id=$(run_mysql "SELECT MAX(id) FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") + kubectl create configmap -n "${NAMESPACE}" 13-read-data-${i} --from-literal=max_id="${max_id}" + done diff --git a/e2e-tests/tests/gr-pitr-minio/98-drop-finalizer.yaml b/e2e-tests/tests/gr-pitr-minio/98-drop-finalizer.yaml new file mode 100644 index 000000000..70c1c6db6 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/98-drop-finalizer.yaml @@ -0,0 +1,5 @@ +apiVersion: ps.percona.com/v1 +kind: PerconaServerMySQL +metadata: + name: gr-pitr-minio + finalizers: [] diff --git a/e2e-tests/tests/gr-pitr-minio/99-remove-cluster-gracefully.yaml b/e2e-tests/tests/gr-pitr-minio/99-remove-cluster-gracefully.yaml new file mode 100644 index 000000000..5f3cc2863 --- /dev/null +++ b/e2e-tests/tests/gr-pitr-minio/99-remove-cluster-gracefully.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: + - apiVersion: ps.percona.com/v1 + kind: PerconaServerMySQL + metadata: + name: gr-pitr-minio +commands: + - script: |- + set -o errexit + set -o xtrace + + source ../../functions + + destroy_operator + timeout: 180 diff --git a/e2e-tests/tests/gr-tls-cert-manager/03-check-cert.yaml b/e2e-tests/tests/gr-tls-cert-manager/03-check-cert.yaml index 7a44e42ab..43935ad40 100644 --- a/e2e-tests/tests/gr-tls-cert-manager/03-check-cert.yaml +++ b/e2e-tests/tests/gr-tls-cert-manager/03-check-cert.yaml @@ -12,6 +12,9 @@ commands: "*.gr-tls-cert-manager-mysql", "*.gr-tls-cert-manager-mysql.'"${NAMESPACE}"'", "*.gr-tls-cert-manager-mysql.'"${NAMESPACE}"'.svc", + "gr-tls-cert-manager-mysql-primary", + "gr-tls-cert-manager-mysql-primary.'"${NAMESPACE}"'", + "gr-tls-cert-manager-mysql-primary.'"${NAMESPACE}"'.svc", "*.gr-tls-cert-manager-orchestrator", "*.gr-tls-cert-manager-orchestrator.'"${NAMESPACE}"'", "*.gr-tls-cert-manager-orchestrator.'"${NAMESPACE}"'.svc", diff --git a/e2e-tests/tests/gr-tls-cert-manager/05-check-cert.yaml b/e2e-tests/tests/gr-tls-cert-manager/05-check-cert.yaml index 35ca29679..1542b9f86 100644 --- a/e2e-tests/tests/gr-tls-cert-manager/05-check-cert.yaml +++ b/e2e-tests/tests/gr-tls-cert-manager/05-check-cert.yaml @@ -12,6 +12,9 @@ commands: "*.gr-tls-cert-manager-mysql", "*.gr-tls-cert-manager-mysql.'"${NAMESPACE}"'", "*.gr-tls-cert-manager-mysql.'"${NAMESPACE}"'.svc", + "gr-tls-cert-manager-mysql-primary", + "gr-tls-cert-manager-mysql-primary.'"${NAMESPACE}"'", + "gr-tls-cert-manager-mysql-primary.'"${NAMESPACE}"'.svc", "*.gr-tls-cert-manager-orchestrator", "*.gr-tls-cert-manager-orchestrator.'"${NAMESPACE}"'", "*.gr-tls-cert-manager-orchestrator.'"${NAMESPACE}"'.svc", diff --git a/e2e-tests/tests/gr-upgrade/01-create-cluster.yaml b/e2e-tests/tests/gr-upgrade/01-create-cluster.yaml index f35d312c0..fd603a0dc 100644 --- a/e2e-tests/tests/gr-upgrade/01-create-cluster.yaml +++ b/e2e-tests/tests/gr-upgrade/01-create-cluster.yaml @@ -15,6 +15,8 @@ commands: exit 1 fi + # binlog server options were required in v1.0.0 + # we can delete them after v1.1.0 is released get_cr_with_latest_versions_in_vs \ | yq eval "$(printf '.spec.initImage="%s"' "${init_image}")" - \ | yq eval "$(printf '.spec.crVersion="%s"' "${version}")" - \ @@ -22,4 +24,16 @@ | yq eval '.spec.mysql.clusterType="group-replication"' - \ | yq eval '.spec.proxy.router.enabled=false' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ + | yq eval '.spec.backup.pitr.enabled=false' - \ + | yq eval 
'.spec.backup.pitr.binlogServer.storage.s3.bucket="operator-testing"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.prefix="binlogs"' - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.credentialsSecret="minio-secret"' - \ + | yq eval ".spec.backup.pitr.binlogServer.storage.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \ + | yq eval '.spec.backup.pitr.binlogServer.storage.s3.region="us-east-1"' - \ + | yq eval '.spec.backup.pitr.binlogServer.size=1' - \ + | yq eval '.spec.backup.pitr.binlogServer.serverId=100' - \ + | yq eval '.spec.backup.pitr.binlogServer.connectTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.readTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.writeTimeout=10' - \ + | yq eval '.spec.backup.pitr.binlogServer.idleTime=3' - \ | kubectl -n "${NAMESPACE}" apply -f - diff --git a/e2e-tests/tests/tls-cert-manager/03-check-cert.yaml b/e2e-tests/tests/tls-cert-manager/03-check-cert.yaml index b65c07694..909f92d02 100644 --- a/e2e-tests/tests/tls-cert-manager/03-check-cert.yaml +++ b/e2e-tests/tests/tls-cert-manager/03-check-cert.yaml @@ -12,6 +12,9 @@ commands: "*.tls-cert-manager-mysql", "*.tls-cert-manager-mysql.'"${NAMESPACE}"'", "*.tls-cert-manager-mysql.'"${NAMESPACE}"'.svc", + "tls-cert-manager-mysql-primary", + "tls-cert-manager-mysql-primary.'"${NAMESPACE}"'", + "tls-cert-manager-mysql-primary.'"${NAMESPACE}"'.svc", "*.tls-cert-manager-orchestrator", "*.tls-cert-manager-orchestrator.'"${NAMESPACE}"'", "*.tls-cert-manager-orchestrator.'"${NAMESPACE}"'.svc", diff --git a/e2e-tests/tests/tls-cert-manager/05-check-cert.yaml b/e2e-tests/tests/tls-cert-manager/05-check-cert.yaml index d1b5b786c..3a44eac24 100644 --- a/e2e-tests/tests/tls-cert-manager/05-check-cert.yaml +++ b/e2e-tests/tests/tls-cert-manager/05-check-cert.yaml @@ -12,6 +12,9 @@ commands: "*.tls-cert-manager-mysql", "*.tls-cert-manager-mysql.'"${NAMESPACE}"'", "*.tls-cert-manager-mysql.'"${NAMESPACE}"'.svc", + "tls-cert-manager-mysql-primary", + "tls-cert-manager-mysql-primary.'"${NAMESPACE}"'", + "tls-cert-manager-mysql-primary.'"${NAMESPACE}"'.svc", "*.tls-cert-manager-orchestrator", "*.tls-cert-manager-orchestrator.'"${NAMESPACE}"'", "*.tls-cert-manager-orchestrator.'"${NAMESPACE}"'.svc", diff --git a/e2e-tests/vars.sh b/e2e-tests/vars.sh index 9bbfed85d..33e273a15 100755 --- a/e2e-tests/vars.sh +++ b/e2e-tests/vars.sh @@ -23,6 +23,7 @@ export IMAGE_ORCHESTRATOR=${IMAGE_ORCHESTRATOR:-"perconalab/percona-server-mysql export IMAGE_ROUTER=${IMAGE_ROUTER:-"perconalab/percona-server-mysql-operator:main-router${MYSQL_VERSION}"} export IMAGE_TOOLKIT=${IMAGE_TOOLKIT:-"perconalab/percona-server-mysql-operator:main-toolkit"} export IMAGE_HAPROXY=${IMAGE_HAPROXY:-"perconalab/percona-server-mysql-operator:main-haproxy"} +export IMAGE_BINLOG_SERVER=${IMAGE_BINLOG_SERVER:-"perconalab/percona-binlog-server:0.2.1"} export PMM_SERVER_VERSION=${PMM_SERVER_VERSION:-"1.4.3"} export IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"perconalab/pmm-client:3-dev-latest"} export IMAGE_PMM_SERVER=${IMAGE_PMM_SERVER:-"perconalab/pmm-server:3-dev-latest"} diff --git a/pkg/binlogserver/binlog_server.go b/pkg/binlogserver/binlog_server.go index a45ba5b04..1d99aa8da 100644 --- a/pkg/binlogserver/binlog_server.go +++ b/pkg/binlogserver/binlog_server.go @@ -6,6 +6,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" 
"github.com/percona/percona-server-mysql-operator/pkg/k8s" @@ -18,7 +19,9 @@ const ( credsVolumeName = "users" CredsMountPath = "/etc/mysql/mysql-users-secret" tlsVolumeName = "tls" - tlsMountPath = "/etc/mysql/mysql-tls-secret" + TLSMountPath = "/etc/mysql/mysql-tls-secret" + bufferVolumeName = "buffer" + BufferMountPath = "/var/lib/binlogsrv" configVolumeName = "config" configMountPath = "/etc/binlog_server/config" storageCredsVolumeName = "storage" @@ -67,7 +70,7 @@ func StatefulSet(cr *apiv1.PerconaServerMySQL, initImage, configHash string) *ap Annotations: cr.GlobalAnnotations(), }, Spec: appsv1.StatefulSetSpec{ - Replicas: &spec.Size, + Replicas: ptr.To(int32(1)), Selector: &metav1.LabelSelector{ MatchLabels: labels, }, @@ -98,6 +101,10 @@ func StatefulSet(cr *apiv1.PerconaServerMySQL, initImage, configHash string) *ap } } +func sslDisabled(cr *apiv1.PerconaServerMySQL) bool { + return cr.Spec.Backup.PiTR.BinlogServer.SSLMode == "disabled" +} + func volumes(cr *apiv1.PerconaServerMySQL) []corev1.Volume { t := true @@ -105,13 +112,19 @@ func volumes(cr *apiv1.PerconaServerMySQL) []corev1.Volume { conf := Configurable(*cr) - return []corev1.Volume{ + vols := []corev1.Volume{ { Name: apiv1.BinVolumeName, VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }, + { + Name: bufferVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, { Name: credsVolumeName, VolumeSource: corev1.VolumeSource{ @@ -120,15 +133,21 @@ func volumes(cr *apiv1.PerconaServerMySQL) []corev1.Volume { }, }, }, - { + } + + if !sslDisabled(cr) { + vols = append(vols, corev1.Volume{ Name: tlsVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: cr.Spec.SSLSecretName, }, }, - }, - { + }) + } + + vols = append(vols, + corev1.Volume{ Name: storageCredsVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ @@ -136,7 +155,7 @@ func volumes(cr *apiv1.PerconaServerMySQL) []corev1.Volume { }, }, }, - { + corev1.Volume{ Name: configVolumeName, VolumeSource: corev1.VolumeSource{ Projected: &corev1.ProjectedVolumeSource{ @@ -172,7 +191,9 @@ func volumes(cr *apiv1.PerconaServerMySQL) []corev1.Volume { }, }, }, - } + ) + + return vols } func containers(cr *apiv1.PerconaServerMySQL) []corev1.Container { @@ -194,6 +215,33 @@ func binlogServerContainer(cr *apiv1.PerconaServerMySQL) corev1.Container { } env = append(env, spec.Env...) 
+ mounts := []corev1.VolumeMount{ + { + Name: apiv1.BinVolumeName, + MountPath: apiv1.BinVolumePath, + }, + { + Name: credsVolumeName, + MountPath: CredsMountPath, + }, + } + if !sslDisabled(cr) { + mounts = append(mounts, corev1.VolumeMount{ + Name: tlsVolumeName, + MountPath: TLSMountPath, + }) + } + mounts = append(mounts, + corev1.VolumeMount{ + Name: configVolumeName, + MountPath: configMountPath, + }, + corev1.VolumeMount{ + Name: bufferVolumeName, + MountPath: BufferMountPath, + }, + ) + return corev1.Container{ Name: AppName, Image: spec.Image, @@ -201,26 +249,9 @@ func binlogServerContainer(cr *apiv1.PerconaServerMySQL) corev1.Container { Resources: spec.Resources, Env: env, EnvFrom: spec.EnvFrom, - VolumeMounts: []corev1.VolumeMount{ - { - Name: apiv1.BinVolumeName, - MountPath: apiv1.BinVolumePath, - }, - { - Name: credsVolumeName, - MountPath: CredsMountPath, - }, - { - Name: tlsVolumeName, - MountPath: tlsMountPath, - }, - { - Name: configVolumeName, - MountPath: configMountPath, - }, - }, + VolumeMounts: mounts, Command: []string{"/opt/percona/binlog-server-entrypoint.sh"}, - Args: []string{"/usr/local/bin/binlog_server", "pull", path.Join(configMountPath, ConfigKey)}, + Args: []string{binlogServerBinary, "pull", path.Join(configMountPath, ConfigKey)}, TerminationMessagePath: "/dev/termination-log", TerminationMessagePolicy: corev1.TerminationMessageReadFile, SecurityContext: spec.ContainerSecurityContext, diff --git a/pkg/binlogserver/binlog_server_test.go b/pkg/binlogserver/binlog_server_test.go new file mode 100644 index 000000000..3077e8c95 --- /dev/null +++ b/pkg/binlogserver/binlog_server_test.go @@ -0,0 +1,328 @@ +package binlogserver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/naming" +) + +func newTestCR(name, namespace string) *apiv1.PerconaServerMySQL { + return &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: apiv1.PerconaServerMySQLSpec{ + SSLSecretName: name + "-ssl", + SecretsName: name + "-secrets", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{ + Storage: apiv1.BinlogServerStorageSpec{ + S3: &apiv1.BackupStorageS3Spec{ + CredentialsSecret: "s3-creds-secret", + }, + }, + }, + }, + }, + }, + } +} + +func TestStatefulSet(t *testing.T) { + tests := map[string]struct { + cr *apiv1.PerconaServerMySQL + initImage string + configHash string + verify func(t *testing.T, cr *apiv1.PerconaServerMySQL) + }{ + "object meta": { + cr: newTestCR("my-cluster", "test-ns"), + initImage: "init:latest", + configHash: "abc123", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "abc123") + + assert.Equal(t, "apps/v1", sts.APIVersion) + assert.Equal(t, "StatefulSet", sts.Kind) + assert.Equal(t, "my-cluster-binlog-server", sts.Name) + assert.Equal(t, "test-ns", sts.Namespace) + }, + }, + "labels": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + + expectedLabels := MatchLabels(cr) + assert.Equal(t, expectedLabels, sts.Labels) + assert.Equal(t, expectedLabels, sts.Spec.Selector.MatchLabels) + assert.Equal(t, expectedLabels, sts.Spec.Template.Labels) + }, + }, + 
"replicas is always 1": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + assert.Equal(t, ptr.To(int32(1)), sts.Spec.Replicas) + }, + }, + "config hash annotation in pod template": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + configHash: "2pacisnotdead", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "2pacisnotdead") + assert.Equal(t, "2pacisnotdead", sts.Spec.Template.Annotations[string(naming.AnnotationConfigHash)]) + }, + }, + "empty config hash produces no annotation": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + assert.NotContains(t, sts.Spec.Template.Annotations, string(naming.AnnotationConfigHash)) + }, + }, + "init container uses provided image": { + cr: newTestCR("cluster", "ns"), + initImage: "percona/init:1.2.3", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "percona/init:1.2.3", "") + assert.Len(t, sts.Spec.Template.Spec.InitContainers, 1) + assert.Equal(t, "percona/init:1.2.3", sts.Spec.Template.Spec.InitContainers[0].Image) + }, + }, + "binlog server container present with correct name": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + assert.Len(t, sts.Spec.Template.Spec.Containers, 1) + assert.Equal(t, AppName, sts.Spec.Template.Spec.Containers[0].Name) + }, + }, + "container command and args": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + container := sts.Spec.Template.Spec.Containers[0] + assert.Equal(t, []string{"/opt/percona/binlog-server-entrypoint.sh"}, container.Command) + assert.Equal(t, []string{ + binlogServerBinary, + "pull", + configMountPath + "/" + ConfigKey, + }, container.Args) + }, + }, + "binlog server container image from spec": { + cr: func() *apiv1.PerconaServerMySQL { + cr := newTestCR("cluster", "ns") + cr.Spec.Backup.PiTR.BinlogServer.Image = "percona/binlog-server:2.0" + return cr + }(), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + container := sts.Spec.Template.Spec.Containers[0] + assert.Equal(t, "percona/binlog-server:2.0", container.Image) + }, + }, + "volumes include all expected volumes when ssl enabled": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + volumeNames := map[string]bool{} + for _, v := range sts.Spec.Template.Spec.Volumes { + volumeNames[v.Name] = true + } + assert.True(t, volumeNames[apiv1.BinVolumeName], "missing bin volume") + assert.True(t, volumeNames[bufferVolumeName], "missing buffer volume") + assert.True(t, volumeNames[credsVolumeName], "missing creds volume") + assert.True(t, volumeNames[tlsVolumeName], "missing tls volume") + assert.True(t, volumeNames[storageCredsVolumeName], "missing storage volume") + assert.True(t, volumeNames[configVolumeName], "missing config volume") + }, + }, + "tls volume and mount absent when ssl disabled": { + cr: func() *apiv1.PerconaServerMySQL { + cr := newTestCR("cluster", "ns") + 
cr.Spec.Backup.PiTR.BinlogServer.SSLMode = "disabled" + return cr + }(), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + for _, v := range sts.Spec.Template.Spec.Volumes { + assert.NotEqual(t, tlsVolumeName, v.Name, "tls volume should be absent when ssl is disabled") + } + container := sts.Spec.Template.Spec.Containers[0] + for _, m := range container.VolumeMounts { + assert.NotEqual(t, tlsVolumeName, m.Name, "tls volume mount should be absent when ssl is disabled") + } + }, + }, + "creds volume uses internal secret name": { + cr: newTestCR("mycluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + for _, v := range sts.Spec.Template.Spec.Volumes { + if v.Name == credsVolumeName { + assert.Equal(t, cr.InternalSecretName(), v.Secret.SecretName) + return + } + } + t.Error("creds volume not found") + }, + }, + "tls volume uses ssl secret name": { + cr: newTestCR("mycluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + for _, v := range sts.Spec.Template.Spec.Volumes { + if v.Name == tlsVolumeName { + assert.Equal(t, "mycluster-ssl", v.Secret.SecretName) + return + } + } + t.Error("tls volume not found") + }, + }, + "storage volume uses s3 credentials secret": { + cr: func() *apiv1.PerconaServerMySQL { + cr := newTestCR("cluster", "ns") + cr.Spec.Backup.PiTR.BinlogServer.Storage.S3.CredentialsSecret = "my-s3-creds" + return cr + }(), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + for _, v := range sts.Spec.Template.Spec.Volumes { + if v.Name == storageCredsVolumeName { + assert.Equal(t, "my-s3-creds", v.Secret.SecretName) + return + } + } + t.Error("storage volume not found") + }, + }, + "config volume references config secret": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + for _, v := range sts.Spec.Template.Spec.Volumes { + if v.Name == configVolumeName { + assert.NotNil(t, v.Projected) + var hasConfigSecret bool + for _, src := range v.Projected.Sources { + if src.Secret != nil && src.Secret.Name == ConfigSecretName(cr) { + hasConfigSecret = true + } + } + assert.True(t, hasConfigSecret, "config volume should reference config secret") + return + } + } + t.Error("config volume not found") + }, + }, + "global annotations propagated to statefulset": { + cr: func() *apiv1.PerconaServerMySQL { + cr := newTestCR("cluster", "ns") + cr.Spec.Metadata = &apiv1.Metadata{ + Annotations: map[string]string{"team": "dba"}, + } + return cr + }(), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + assert.Equal(t, "dba", sts.Annotations["team"]) + }, + }, + "global labels propagated to statefulset": { + cr: func() *apiv1.PerconaServerMySQL { + cr := newTestCR("cluster", "ns") + cr.Spec.Metadata = &apiv1.Metadata{ + Labels: map[string]string{"env": "prod"}, + } + return cr + }(), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + assert.Equal(t, "prod", sts.Labels["env"]) + }, + }, + "container volume mounts include all expected mounts when ssl enabled": { + 
cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + container := sts.Spec.Template.Spec.Containers[0] + mountNames := map[string]bool{} + for _, m := range container.VolumeMounts { + mountNames[m.Name] = true + } + assert.True(t, mountNames[apiv1.BinVolumeName], "missing bin volume mount") + assert.True(t, mountNames[credsVolumeName], "missing creds volume mount") + assert.True(t, mountNames[tlsVolumeName], "missing tls volume mount") + assert.True(t, mountNames[configVolumeName], "missing config volume mount") + assert.True(t, mountNames[bufferVolumeName], "missing buffer volume mount") + }, + }, + "container env includes CONFIG_PATH and CUSTOM_CONFIG_PATH": { + cr: newTestCR("cluster", "ns"), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + container := sts.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, e := range container.Env { + envMap[e.Name] = e.Value + } + assert.Contains(t, envMap, "CONFIG_PATH") + assert.Contains(t, envMap, "CUSTOM_CONFIG_PATH") + }, + }, + "custom env vars from spec are appended": { + cr: func() *apiv1.PerconaServerMySQL { + cr := newTestCR("cluster", "ns") + cr.Spec.Backup.PiTR.BinlogServer.Env = []corev1.EnvVar{ + {Name: "MY_CUSTOM_VAR", Value: "custom-value"}, + } + return cr + }(), + initImage: "init:latest", + verify: func(t *testing.T, cr *apiv1.PerconaServerMySQL) { + sts := StatefulSet(cr, "init:latest", "") + container := sts.Spec.Template.Spec.Containers[0] + envMap := make(map[string]string) + for _, e := range container.Env { + envMap[e.Name] = e.Value + } + assert.Equal(t, "custom-value", envMap["MY_CUSTOM_VAR"]) + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + tt.verify(t, tt.cr) + }) + } +} diff --git a/pkg/binlogserver/config.go b/pkg/binlogserver/config.go index 0042bd532..f0cb65739 100644 --- a/pkg/binlogserver/config.go +++ b/pkg/binlogserver/config.go @@ -12,23 +12,59 @@ type Logger struct { File string `json:"file,omitempty"` } +type ConnectionSSL struct { + Mode string `json:"mode,omitempty"` + CA string `json:"ca,omitempty"` + CAPath string `json:"capath,omitempty"` + CRL string `json:"crl,omitempty"` + CRLPath string `json:"crlpath,omitempty"` + Cert string `json:"cert,omitempty"` + Key string `json:"key,omitempty"` + Cipher string `json:"cipher,omitempty"` +} + +type ConnectionTLS struct { + CipherSuites string `json:"ciphersuites,omitempty"` + Version string `json:"version,omitempty"` +} + type Connection struct { - Host string `json:"host,omitempty"` - Port int32 `json:"port,omitempty"` - User string `json:"user,omitempty"` - Password string `json:"password,omitempty"` - ConnectTimeout int32 `json:"connect_timeout,omitempty"` - ReadTimeout int32 `json:"read_timeout,omitempty"` - WriteTimeout int32 `json:"write_timeout,omitempty"` + Host string `json:"host,omitempty"` + Port int32 `json:"port,omitempty"` + User string `json:"user,omitempty"` + Password string `json:"password,omitempty"` + ConnectTimeout int32 `json:"connect_timeout,omitempty"` + ReadTimeout int32 `json:"read_timeout,omitempty"` + WriteTimeout int32 `json:"write_timeout,omitempty"` + SSL *ConnectionSSL `json:"ssl,omitempty"` + TLS *ConnectionTLS `json:"tls,omitempty"` +} + +type ReplicationMode string + +const ( + ReplicationModeGTID ReplicationMode = "gtid" +) + +type Rewrite struct { + BaseFileName string 
`json:"base_file_name,omitempty"` + FileSize string `json:"file_size,omitempty"` } type Replication struct { - ServerID int32 `json:"server_id,omitempty"` - IdleTime int32 `json:"idle_time,omitempty"` + ServerID int32 `json:"server_id,omitempty"` + IdleTime int32 `json:"idle_time,omitempty"` + VerifyChecksum bool `json:"verify_checksum,omitempty"` + Mode ReplicationMode `json:"mode,omitempty"` + Rewrite Rewrite `json:"rewrite,omitempty"` } type Storage struct { - URI string `json:"uri,omitempty"` + Backend string `json:"backend,omitempty"` + URI string `json:"uri,omitempty"` + FsBufferDirectory string `json:"fs_buffer_directory,omitempty"` + CheckpointSize string `json:"checkpoint_size,omitempty"` + CheckpointInterval string `json:"checkpoint_interval,omitempty"` } type Configuration struct { diff --git a/pkg/binlogserver/search.go b/pkg/binlogserver/search.go new file mode 100644 index 000000000..2db2e1fc6 --- /dev/null +++ b/pkg/binlogserver/search.go @@ -0,0 +1,83 @@ +package binlogserver + +import ( + "bytes" + "context" + "encoding/json" + "path" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/clientcmd" + "github.com/percona/percona-server-mysql-operator/pkg/k8s" +) + +const binlogServerBinary = "/usr/bin/binlog_server" + +type SearchResponse struct { + Version int `json:"version"` + Status string `json:"status"` + Result []BinlogEntry `json:"result"` +} + +type BinlogEntry struct { + Name string `json:"name"` + Size int64 `json:"size"` + URI string `json:"uri"` + PreviousGTIDs string `json:"previous_gtids"` + AddedGTIDs string `json:"added_gtids"` + MinTimestamp string `json:"min_timestamp"` + MaxTimestamp string `json:"max_timestamp"` +} + +func SearchByGTID(ctx context.Context, cl client.Client, cliCmd clientcmd.Client, cr *apiv1.PerconaServerMySQL, gtidSet string) (*SearchResponse, error) { + return execSearch(ctx, cl, cliCmd, cr, "search_by_gtid_set", gtidSet) +} + +func SearchByTimestamp(ctx context.Context, cl client.Client, cliCmd clientcmd.Client, cr *apiv1.PerconaServerMySQL, timestamp string) (*SearchResponse, error) { + return execSearch(ctx, cl, cliCmd, cr, "search_by_timestamp", timestamp) +} + +func execSearch(ctx context.Context, cl client.Client, cliCmd clientcmd.Client, cr *apiv1.PerconaServerMySQL, subcommand, arg string) (*SearchResponse, error) { + pod, err := getBinlogServerPod(ctx, cl, cr) + if err != nil { + return nil, errors.Wrap(err, "get binlog server pod") + } + + configPath := path.Join(configMountPath, ConfigKey) + cmd := []string{binlogServerBinary, subcommand, configPath, arg} + + var stdout, stderr bytes.Buffer + if err := cliCmd.Exec(ctx, pod, AppName, cmd, nil, &stdout, &stderr, false); err != nil { + return nil, errors.Wrapf(err, "exec binlog_server %s: stdout: %s stderr: %s", subcommand, stdout.String(), stderr.String()) + } + + var resp SearchResponse + if err := json.Unmarshal(stdout.Bytes(), &resp); err != nil { + return nil, errors.Wrapf(err, "unmarshal response: %s", stdout.String()) + } + + return &resp, nil +} + +func getBinlogServerPod(ctx context.Context, cl client.Client, cr *apiv1.PerconaServerMySQL) (*corev1.Pod, error) { + nn := types.NamespacedName{ + Namespace: cr.Namespace, + Name: Name(cr) + "-0", + } + + pod := &corev1.Pod{} + if err := cl.Get(ctx, nn, pod); err != nil { + return nil, errors.Wrapf(err, "get pod %s", 
nn) + } + + if !k8s.IsPodReady(*pod) { + return nil, errors.Errorf("binlog server pod %s is not ready", nn) + } + + return pod, nil +} diff --git a/pkg/binlogserver/search_test.go b/pkg/binlogserver/search_test.go new file mode 100644 index 000000000..6e6262c6e --- /dev/null +++ b/pkg/binlogserver/search_test.go @@ -0,0 +1,235 @@ +package binlogserver + +import ( + "context" + "encoding/json" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/clientcmd" +) + +type fakeExecClient struct { + response *SearchResponse + execErr error + capturedCmd []string +} + +var _ clientcmd.Client = (*fakeExecClient)(nil) + +func (f *fakeExecClient) Exec(_ context.Context, _ *corev1.Pod, _ string, cmd []string, _ io.Reader, stdout, _ io.Writer, _ bool) error { + f.capturedCmd = cmd + if f.execErr != nil { + return f.execErr + } + if stdout != nil && f.response != nil { + data, _ := json.Marshal(f.response) + _, _ = stdout.Write(data) + } + return nil +} + +func (f *fakeExecClient) REST() restclient.Interface { + return nil +} + +func newReadyBinlogServerPod(cr *apiv1.PerconaServerMySQL) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: Name(cr) + "-0", + Namespace: cr.Namespace, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } +} + +func newSearchTestClient(t *testing.T, pod *corev1.Pod) *fake.ClientBuilder { + t.Helper() + scheme := runtime.NewScheme() + require.NoError(t, clientgoscheme.AddToScheme(scheme)) + require.NoError(t, apiv1.AddToScheme(scheme)) + cb := fake.NewClientBuilder().WithScheme(scheme) + if pod != nil { + cb = cb.WithObjects(pod) + } + return cb +} + +func TestSearchByGTID(t *testing.T) { + cr := newTestCR("my-cluster", "test-ns") + + successResponse := &SearchResponse{ + Version: 1, + Status: "OK", + Result: []BinlogEntry{ + { + Name: "binlog.000001", + PreviousGTIDs: "00000000-0000-0000-0000-000000000000:1-10", + AddedGTIDs: "00000000-0000-0000-0000-000000000000:11", + }, + }, + } + + tests := map[string]struct { + pod *corev1.Pod + cliCmd clientcmd.Client + gtidSet string + expectedResponse *SearchResponse + expectedError string + }{ + "success": { + pod: newReadyBinlogServerPod(cr), + cliCmd: &fakeExecClient{response: successResponse}, + gtidSet: "00000000-0000-0000-0000-000000000000:1-10", + expectedResponse: successResponse, + }, + "pod not found": { + cliCmd: &fakeExecClient{}, + gtidSet: "some-gtid", + expectedError: "get binlog server pod", + }, + "pod not ready": { + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: Name(cr) + "-0", + Namespace: cr.Namespace, + }, + Status: corev1.PodStatus{Phase: corev1.PodPending}, + }, + cliCmd: &fakeExecClient{}, + gtidSet: "some-gtid", + expectedError: "is not ready", + }, + "exec error": { + pod: newReadyBinlogServerPod(cr), + cliCmd: &fakeExecClient{execErr: fmt.Errorf("exec failed")}, + gtidSet: "some-gtid", + expectedError: "exec binlog_server search_by_gtid_set", + }, + "invalid json response": { + pod: newReadyBinlogServerPod(cr), + 
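+ // a nil response writes nothing to stdout, so json.Unmarshal fails on the empty buffer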
cliCmd: &fakeExecClient{response: nil}, + gtidSet: "some-gtid", + expectedError: "unmarshal response", + }, + } + + configPath := configMountPath + "/" + ConfigKey + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + cl := newSearchTestClient(t, tt.pod).Build() + + execClient, _ := tt.cliCmd.(*fakeExecClient) + resp, err := SearchByGTID(t.Context(), cl, tt.cliCmd, cr, tt.gtidSet) + if tt.expectedError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedError) + assert.Nil(t, resp) + return + } + require.NoError(t, err) + assert.Equal(t, tt.expectedResponse, resp) + assert.Equal(t, []string{binlogServerBinary, "search_by_gtid_set", configPath, tt.gtidSet}, execClient.capturedCmd) + }) + } +} + +func TestSearchByTimestamp(t *testing.T) { + cr := newTestCR("my-cluster", "test-ns") + + successResponse := &SearchResponse{ + Version: 1, + Status: "OK", + Result: []BinlogEntry{ + { + Name: "binlog.000002", + MinTimestamp: "2024-01-01 00:00:00", + MaxTimestamp: "2024-01-01 01:00:00", + }, + }, + } + + tests := map[string]struct { + pod *corev1.Pod + cliCmd clientcmd.Client + timestamp string + expectedResponse *SearchResponse + expectedError string + }{ + "success": { + pod: newReadyBinlogServerPod(cr), + cliCmd: &fakeExecClient{response: successResponse}, + timestamp: "2024-01-01 00:30:00", + expectedResponse: successResponse, + }, + "pod not found": { + cliCmd: &fakeExecClient{}, + timestamp: "2024-01-01 00:30:00", + expectedError: "get binlog server pod", + }, + "pod not ready": { + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: Name(cr) + "-0", + Namespace: cr.Namespace, + }, + Status: corev1.PodStatus{Phase: corev1.PodPending}, + }, + cliCmd: &fakeExecClient{}, + timestamp: "2024-01-01 00:30:00", + expectedError: "is not ready", + }, + "exec error": { + pod: newReadyBinlogServerPod(cr), + cliCmd: &fakeExecClient{execErr: fmt.Errorf("exec failed")}, + timestamp: "2024-01-01 00:30:00", + expectedError: "exec binlog_server search_by_timestamp", + }, + "invalid json response": { + pod: newReadyBinlogServerPod(cr), + cliCmd: &fakeExecClient{response: nil}, + timestamp: "2024-01-01 00:30:00", + expectedError: "unmarshal response", + }, + } + + configPath := configMountPath + "/" + ConfigKey + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + cl := newSearchTestClient(t, tt.pod).Build() + + execClient, _ := tt.cliCmd.(*fakeExecClient) + resp, err := SearchByTimestamp(t.Context(), cl, tt.cliCmd, cr, tt.timestamp) + if tt.expectedError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedError) + assert.Nil(t, resp) + return + } + require.NoError(t, err) + assert.Equal(t, tt.expectedResponse, resp) + assert.Equal(t, []string{binlogServerBinary, "search_by_timestamp", configPath, tt.timestamp}, execClient.capturedCmd) + }) + } +} diff --git a/pkg/controller/ps/binlogserver_test.go b/pkg/controller/ps/binlogserver_test.go new file mode 100644 index 000000000..6257e3b8d --- /dev/null +++ b/pkg/controller/ps/binlogserver_test.go @@ -0,0 +1,59 @@ +package ps + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" +) + +func TestBinlogServerSSLConfig(t *testing.T) { + tests := map[string]struct { + sslMode string + wantMode string + wantCerts bool + }{ + "disabled mode skips certificates": { + sslMode: "disabled", + wantMode: "disabled", + wantCerts: false, + }, + "verify_identity 
includes certificates": { + sslMode: "verify_identity", + wantMode: "verify_identity", + wantCerts: true, + }, + "verify_ca includes certificates": { + sslMode: "verify_ca", + wantMode: "verify_ca", + wantCerts: true, + }, + "required includes certificates": { + sslMode: "required", + wantMode: "required", + wantCerts: true, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := binlogServerSSLConfig(tt.sslMode) + require.NotNil(t, got) + + assert.Equal(t, tt.wantMode, got.Mode) + + if tt.wantCerts { + assert.Equal(t, path.Join(binlogserver.TLSMountPath, "ca.crt"), got.CA) + assert.Equal(t, path.Join(binlogserver.TLSMountPath, "tls.crt"), got.Cert) + assert.Equal(t, path.Join(binlogserver.TLSMountPath, "tls.key"), got.Key) + } else { + assert.Empty(t, got.CA) + assert.Empty(t, got.Cert) + assert.Empty(t, got.Key) + } + }) + } +} \ No newline at end of file diff --git a/pkg/controller/ps/controller.go b/pkg/controller/ps/controller.go index 0891b5669..f726668ec 100644 --- a/pkg/controller/ps/controller.go +++ b/pkg/controller/ps/controller.go @@ -21,6 +21,8 @@ import ( "crypto/md5" "encoding/json" "fmt" + "net/url" + "path" "slices" "strconv" "strings" @@ -825,7 +827,7 @@ func (r *PerconaServerMySQLReconciler) reconcileHAProxy(ctx context.Context, cr return errors.Wrapf(err, "check if pod %s ready", nn.String()) } - if !firstMySQLPodReady { + if !firstMySQLPodReady && !cr.Spec.Pause { log.V(1).Info("Waiting for pod to be ready", "pod", nn.Name) return nil } @@ -984,7 +986,7 @@ func (r *PerconaServerMySQLReconciler) reconcileGroupReplication(ctx context.Con } func (r *PerconaServerMySQLReconciler) reconcileBootstrapStatus(ctx context.Context, cr *apiv1.PerconaServerMySQL) error { - log := logf.FromContext(ctx) + log := logf.FromContext(ctx).WithName("Bootstrap") if cr.Status.MySQL.Ready == 0 || cr.Status.MySQL.Ready != cr.Spec.MySQL.Size { log.V(1).Info("Waiting for all MySQL pods to be ready", "ready", cr.Status.MySQL.Ready, "expected", cr.Spec.MySQL.Size) @@ -1282,17 +1284,22 @@ func (r *PerconaServerMySQLReconciler) reconcileMySQLRouter(ctx context.Context, return nil } +func binlogServerSSLConfig(sslMode string) *binlogserver.ConnectionSSL { + ssl := &binlogserver.ConnectionSSL{Mode: sslMode} + if sslMode != "disabled" { + ssl.CA = path.Join(binlogserver.TLSMountPath, "ca.crt") + ssl.Cert = path.Join(binlogserver.TLSMountPath, "tls.crt") + ssl.Key = path.Join(binlogserver.TLSMountPath, "tls.key") + } + return ssl +} + func (r *PerconaServerMySQLReconciler) reconcileBinlogServer(ctx context.Context, cr *apiv1.PerconaServerMySQL) error { if !cr.Spec.Backup.PiTR.Enabled { return nil } - logger := logf.FromContext(ctx) - - if len(cr.Status.Host) == 0 { - logger.V(1).Info("Waiting for .status.host to be populated") - return nil - } + logger := logf.FromContext(ctx).WithName("BinlogServer") s3 := cr.Spec.Backup.PiTR.BinlogServer.Storage.S3 @@ -1314,9 +1321,9 @@ func (r *PerconaServerMySQLReconciler) reconcileBinlogServer(ctx context.Context accessKey := s3Secret.Data[secret.CredentialsAWSAccessKey] secretKey := s3Secret.Data[secret.CredentialsAWSSecretKey] - s3Uri := fmt.Sprintf("s3://%s:%s@%s.%s", accessKey, secretKey, s3.Bucket, s3.Region) - if len(s3.Prefix) > 0 { - s3Uri += fmt.Sprintf("/%s", s3.Prefix) + s3Uri, err := s3URI(*s3, accessKey, secretKey) + if err != nil { + return errors.Wrap(err, "get s3 uri") } replPass, err := k8s.UserPassword(ctx, r.Client, cr, apiv1.UserReplication) @@ -1334,26 +1341,42 @@ func (r *PerconaServerMySQLReconciler) 
reconcileBinlogServer(ctx context.Context } configSecret.Data = make(map[string][]byte) + verifyChecksum := true + if cr.Spec.Backup.PiTR.BinlogServer.VerifyChecksum != nil { + verifyChecksum = *cr.Spec.Backup.PiTR.BinlogServer.VerifyChecksum + } + config := binlogserver.Configuration{ Logger: binlogserver.Logger{ Level: "debug", File: "/dev/stdout", }, Connection: binlogserver.Connection{ - Host: mysql.FQDN(cr, 0), + Host: fmt.Sprintf("%s.%s", mysql.PrimaryServiceName(cr), cr.Namespace), Port: 3306, User: string(apiv1.UserReplication), Password: replPass, ConnectTimeout: cr.Spec.Backup.PiTR.BinlogServer.ConnectTimeout, WriteTimeout: cr.Spec.Backup.PiTR.BinlogServer.WriteTimeout, ReadTimeout: cr.Spec.Backup.PiTR.BinlogServer.ReadTimeout, + SSL: binlogServerSSLConfig(cr.Spec.Backup.PiTR.BinlogServer.SSLMode), }, Replication: binlogserver.Replication{ - ServerID: cr.Spec.Backup.PiTR.BinlogServer.ServerID, - IdleTime: cr.Spec.Backup.PiTR.BinlogServer.IdleTime, + Mode: binlogserver.ReplicationModeGTID, + ServerID: cr.Spec.Backup.PiTR.BinlogServer.ServerID, + IdleTime: cr.Spec.Backup.PiTR.BinlogServer.IdleTime, + VerifyChecksum: verifyChecksum, + Rewrite: binlogserver.Rewrite{ + BaseFileName: "binlog", + FileSize: cr.Spec.Backup.PiTR.BinlogServer.RewriteFileSize, + }, }, Storage: binlogserver.Storage{ - URI: s3Uri, + Backend: "s3", + URI: s3Uri, + CheckpointSize: cr.Spec.Backup.PiTR.BinlogServer.CheckpointSize, + CheckpointInterval: cr.Spec.Backup.PiTR.BinlogServer.CheckpointInterval, + FsBufferDirectory: binlogserver.BufferMountPath, }, } @@ -1367,6 +1390,16 @@ func (r *PerconaServerMySQLReconciler) reconcileBinlogServer(ctx context.Context return errors.Wrap(err, "reconcile secret") } + if cr.Status.MySQL.Ready < 1 { + logger.V(1).Info("Waiting for at least one MySQL pod to be ready") + return nil + } + + if len(cr.Status.Host) == 0 { + logger.V(1).Info("Waiting for .status.host to be populated") + return nil + } + initImage, err := k8s.InitImage(ctx, r.Client, cr, &cr.Spec.Backup.PiTR.BinlogServer.PodSpec) if err != nil { return errors.Wrap(err, "get init image") @@ -1380,6 +1413,32 @@ func (r *PerconaServerMySQLReconciler) reconcileBinlogServer(ctx context.Context return nil } +func (r *PerconaServerMySQLReconciler) cleanupBinlogServer(ctx context.Context, cr *apiv1.PerconaServerMySQL) error { + if cr.Spec.Backup.PiTR.Enabled { + return nil + } + + if err := r.Delete(ctx, &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: binlogserver.Name(cr), + Namespace: cr.Namespace, + }, + }); err != nil && !k8serrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete binlog server statefulset") + } + + if err := r.Delete(ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: binlogserver.ConfigSecretName(cr), + Namespace: cr.Namespace, + }, + }); err != nil && !k8serrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete binlog server config secret") + } + + return nil +} + func (r *PerconaServerMySQLReconciler) cleanupOutdated(ctx context.Context, cr *apiv1.PerconaServerMySQL) error { if err := r.cleanupMysql(ctx, cr); err != nil { return errors.Wrap(err, "cleanup mysql") @@ -1393,6 +1452,10 @@ func (r *PerconaServerMySQLReconciler) cleanupOutdated(ctx context.Context, cr * return errors.Wrap(err, "cleanup proxies") } + if err := r.cleanupBinlogServer(ctx, cr); err != nil { + return errors.Wrap(err, "cleanup binlog server") + } + return nil } @@ -1630,3 +1693,25 @@ func getPodIndexFromHostname(hostname string) (int, error) { return idx, nil } + +func s3URI(s3 
apiv1.BackupStorageS3Spec, accessKey, secretKey []byte) (string, error) { + bucket := string(s3.Bucket) + if len(s3.Region) > 0 { + bucket = fmt.Sprintf("%s.%s", s3.Bucket, s3.Region) + } + encodedAccessKey := url.QueryEscape(string(accessKey)) + encodedSecretKey := url.QueryEscape(string(secretKey)) + uri := fmt.Sprintf("s3://%s:%s@%s", encodedAccessKey, encodedSecretKey, bucket) + if len(s3.EndpointURL) != 0 { + protocol, host, err := parseEndpointURL(s3.EndpointURL) + if err != nil { + return "", errors.Wrap(err, "parse endpoint URL") + } + uri = fmt.Sprintf("%s://%s:%s@%s/%s", protocol, encodedAccessKey, encodedSecretKey, host, s3.Bucket) + } + if len(s3.Prefix) > 0 { + uri += fmt.Sprintf("/%s", s3.Prefix) + } + + return uri, nil +} diff --git a/pkg/controller/ps/controller_test.go b/pkg/controller/ps/controller_test.go index 7ebe70c17..dd93cae59 100644 --- a/pkg/controller/ps/controller_test.go +++ b/pkg/controller/ps/controller_test.go @@ -18,6 +18,7 @@ package ps import ( "context" + "encoding/json" "fmt" "strconv" "strings" @@ -26,6 +27,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" gs "github.com/onsi/gomega/gstruct" + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" + "github.com/percona/percona-server-mysql-operator/pkg/secret" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -593,10 +596,8 @@ var _ = Describe("CR validations", Ordered, func() { It("should fail the creation of cr", func() { err := k8sClient.Create(ctx, cr) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("spec.mysql.size: Required value")) - Expect(err.Error()).To(ContainSubstring("spec.proxy.haproxy.size: Required value")) - Expect(err.Error()).To(ContainSubstring("spec.proxy.router.size")) - Expect(err.Error()).To(ContainSubstring("spec.orchestrator.size")) + Expect(err.Error()).To(ContainSubstring("mysql.image is required")) + Expect(err.Error()).To(ContainSubstring("mysql.size must be greater than 0")) }) }) When("group-replication cluster", Ordered, func() { @@ -614,6 +615,7 @@ var _ = Describe("CR validations", Ordered, func() { cr.Spec.Proxy.Router.Enabled = false cr.Spec.MySQL.Image = "mysql-image" + cr.Spec.Toolkit.Image = "toolkit-image" cr.Spec.Proxy.HAProxy.Image = "haproxy-image" cr.Spec.Orchestrator.Image = "orc-image" @@ -947,6 +949,189 @@ var _ = Describe("CR validations", Ordered, func() { Expect(createErr.Error()).To(ContainSubstring("Invalid configuration: MySQL Router and HAProxy can't be enabled at the same time")) }) }) + + When("component image/size is missing but component is disabled", Ordered, func() { + cr, err := readDefaultCR("cr-validations-comp-disabled", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.MySQL.ClusterType = psv1.ClusterTypeGR + cr.Spec.Proxy.HAProxy.Enabled = true + cr.Spec.Proxy.Router.Enabled = false + cr.Spec.Proxy.Router.Image = "" + cr.Spec.Proxy.Router.Size = 0 + cr.Spec.Orchestrator.Enabled = false + cr.Spec.Orchestrator.Image = "" + cr.Spec.Orchestrator.Size = 0 + It("should create the cluster successfully", func() { + Expect(k8sClient.Create(ctx, cr)).Should(Succeed()) + }) + }) + + When("haproxy is enabled but image is missing", Ordered, func() { + cr, err := readDefaultCR("cr-validations-haproxy-no-image", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Proxy.HAProxy.Enabled = true + cr.Spec.Proxy.HAProxy.Image = "" + It("should fail with image required error", func() { + createErr := k8sClient.Create(ctx, cr) + 
Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("haproxy.image is required when haproxy is enabled")) + }) + }) + + When("haproxy is enabled but size is 0", Ordered, func() { + cr, err := readDefaultCR("cr-validations-haproxy-no-size", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Proxy.HAProxy.Enabled = true + cr.Spec.Proxy.HAProxy.Size = 0 + It("should fail with size required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("haproxy.size must be greater than 0 when haproxy is enabled")) + }) + }) + + When("orchestrator is enabled but image is missing", Ordered, func() { + cr, err := readDefaultCR("cr-validations-orc-no-image", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.MySQL.ClusterType = psv1.ClusterTypeAsync + cr.Spec.Orchestrator.Enabled = true + cr.Spec.Orchestrator.Image = "" + It("should fail with image required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("orchestrator.image is required when orchestrator is enabled")) + }) + }) + + When("router is enabled but image is missing", Ordered, func() { + cr, err := readDefaultCR("cr-validations-router-no-image", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.MySQL.ClusterType = psv1.ClusterTypeGR + cr.Spec.Proxy.Router.Enabled = true + cr.Spec.Proxy.Router.Image = "" + It("should fail with image required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("router.image is required when router is enabled")) + }) + }) + }) + + Context("PITR validation rules", Ordered, func() { + ns := "validate-pitr" + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + } + + BeforeAll(func() { + By("Creating the Namespace to perform the tests") + err := k8sClient.Create(ctx, namespace) + Expect(err).To(Not(HaveOccurred())) + }) + + AfterAll(func() { + By("Deleting the Namespace") + _ = k8sClient.Delete(ctx, namespace) + }) + + When("pitr is disabled, no binlogServer required", Ordered, func() { + cr, err := readDefaultCR("pitr-disabled-no-binlog", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = false + It("should create successfully without any binlogServer fields", func() { + Expect(k8sClient.Create(ctx, cr)).Should(Succeed()) + }) + }) + + When("pitr is disabled, binlogServer provided without image and size", Ordered, func() { + cr, err := readDefaultCR("pitr-disabled-empty-binlog", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = false + cr.Spec.Backup.PiTR.BinlogServer = &psv1.BinlogServerSpec{} + It("should create successfully since pitr is disabled", func() { + Expect(k8sClient.Create(ctx, cr)).Should(Succeed()) + }) + }) + + When("pitr is enabled but binlogServer is missing", Ordered, func() { + cr, err := readDefaultCR("pitr-enabled-no-binlog", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = true + cr.Spec.Backup.PiTR.BinlogServer = nil + It("should fail with binlogServer required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("binlogServer is required when pitr is enabled")) + }) + }) + + When("pitr is enabled but binlogServer image is missing", Ordered, func() { + cr, err := 
readDefaultCR("pitr-enabled-no-image", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = true + cr.Spec.Backup.PiTR.BinlogServer = &psv1.BinlogServerSpec{} + cr.Spec.Backup.PiTR.BinlogServer.Size = 1 + It("should fail with image required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("binlogServer.image is required when pitr is enabled")) + }) + }) + + When("pitr is enabled but binlogServer size is 0", Ordered, func() { + cr, err := readDefaultCR("pitr-enabled-no-size", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = true + cr.Spec.Backup.PiTR.BinlogServer = &psv1.BinlogServerSpec{} + cr.Spec.Backup.PiTR.BinlogServer.Image = "binlog-server-image" + It("should fail with size required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("binlogServer.size is required when pitr is enabled")) + }) + }) + + When("pitr is enabled but binlogServer serverId is 0", Ordered, func() { + cr, err := readDefaultCR("pitr-enabled-no-serverid", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = true + cr.Spec.Backup.PiTR.BinlogServer = &psv1.BinlogServerSpec{} + cr.Spec.Backup.PiTR.BinlogServer.Image = "binlog-server-image" + cr.Spec.Backup.PiTR.BinlogServer.Size = 1 + It("should fail with serverId required error", func() { + createErr := k8sClient.Create(ctx, cr) + Expect(createErr).To(HaveOccurred()) + Expect(createErr.Error()).To(ContainSubstring("binlogServer.serverId is required when pitr is enabled")) + }) + }) + + When("pitr is enabled with all required fields set", Ordered, func() { + cr, err := readDefaultCR("pitr-enabled-valid", ns) + Expect(err).NotTo(HaveOccurred()) + + cr.Spec.Backup.PiTR.Enabled = true + cr.Spec.Backup.PiTR.BinlogServer = &psv1.BinlogServerSpec{} + cr.Spec.Backup.PiTR.BinlogServer.Image = "binlog-server-image" + cr.Spec.Backup.PiTR.BinlogServer.Size = 1 + cr.Spec.Backup.PiTR.BinlogServer.ServerID = 100 + It("should create successfully", func() { + Expect(k8sClient.Create(ctx, cr)).Should(Succeed()) + }) + }) }) }) @@ -989,7 +1174,7 @@ var _ = Describe("Reconcile Binlog Server", Ordered, func() { S3: &psv1.BackupStorageS3Spec{ Bucket: "s3-test-bucket", Region: "us-west-1", - EndpointURL: "s3.amazonaws.com", + EndpointURL: "https://s3.amazonaws.com", CredentialsSecret: "s3-test-credentials", }, }, @@ -1021,6 +1206,14 @@ var _ = Describe("Reconcile Binlog Server", Ordered, func() { Expect(err).NotTo(HaveOccurred()) }) + It("should set MySQL status as ready", func() { + fetchedCR := cr.DeepCopy() + Expect(k8sClient.Get(ctx, crNamespacedName, fetchedCR)).Should(Succeed()) + fetchedCR.Status.MySQL.Ready = 1 + fetchedCR.Status.Host = mysql.FQDN(fetchedCR, 0) + Expect(k8sClient.Status().Update(ctx, fetchedCR)).Should(Succeed()) + }) + It("should create secret for Binlog Server configuration", func() { _, err = reconciler().Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) Expect(err).NotTo(HaveOccurred()) @@ -1926,3 +2119,122 @@ var _ = Describe("Global labels and annotations", Ordered, func() { }) }) }) + +var _ = Describe("BinlogServer", Ordered, func() { + ctx := context.Background() + + const crName = "pitr-test" + const ns = crName + crNamespacedName := types.NamespacedName{Name: crName, Namespace: ns} + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + } + + 
BeforeAll(func() { + By("Creating the Namespace") + Expect(k8sClient.Create(ctx, namespace)).To(Succeed()) + }) + + AfterAll(func() { + _ = k8sClient.Delete(ctx, namespace) + }) + + cr, err := readDefaultCR(crName, ns) + It("should read default cr.yaml", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + It("should configure PiTR and create the CR", func() { + cr.Spec.Backup.PiTR.Enabled = true + cr.Spec.Backup.PiTR.BinlogServer = &psv1.BinlogServerSpec{ + Storage: psv1.BinlogServerStorageSpec{ + S3: &psv1.BackupStorageS3Spec{ + Bucket: "test-bucket", + Region: "us-east-1", + EndpointURL: "s3://s3.amazonaws.com", + CredentialsSecret: "s3-secret", + }, + }, + ServerID: 1, + PodSpec: psv1.PodSpec{ + Size: 1, + ContainerSpec: psv1.ContainerSpec{ + Image: "perconalab/percona-binlog-server:0.2.0", + }, + }, + } + Expect(k8sClient.Create(ctx, cr)).To(Succeed()) + }) + + It("should create the S3 credentials secret", func() { + s3Secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3-secret", + Namespace: ns, + }, + Data: map[string][]byte{ + secret.CredentialsAWSAccessKey: []byte("access-key"), + secret.CredentialsAWSSecretKey: []byte("secret-key"), + }, + } + Expect(k8sClient.Create(ctx, s3Secret)).To(Succeed()) + }) + + It("should create the internal secret with the replication user password", func() { + internalSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.InternalSecretName(), + Namespace: ns, + }, + Data: map[string][]byte{ + string(psv1.UserReplication): []byte("repl-password"), + }, + } + Expect(k8sClient.Create(ctx, internalSecret)).To(Succeed()) + }) + + It("should set the binlog server connection host to the primary service", func() { + Expect(k8sClient.Get(ctx, crNamespacedName, cr)).To(Succeed()) + + Expect(reconciler().reconcileBinlogServer(ctx, cr)).To(Succeed()) + + configSecret := &corev1.Secret{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: binlogserver.ConfigSecretName(cr), + Namespace: ns, + }, configSecret) + return err == nil + }, time.Second*15, time.Millisecond*250).Should(BeTrue()) + + var config binlogserver.Configuration + Expect(json.Unmarshal(configSecret.Data[binlogserver.ConfigKey], &config)).To(Succeed()) + + Expect(config.Connection.Host).To(Equal(fmt.Sprintf("%s.%s", mysql.PrimaryServiceName(cr), ns))) + }) + + It("should create the binlog server StatefulSet once MySQL is ready", func() { + Expect(k8sClient.Get(ctx, crNamespacedName, cr)).To(Succeed()) + + cr.Status.MySQL.Ready = 1 + cr.Status.Host = "pitr-test-haproxy.pitr-test" + Expect(k8sClient.Status().Update(ctx, cr)).To(Succeed()) + + Expect(k8sClient.Get(ctx, crNamespacedName, cr)).To(Succeed()) + Expect(reconciler().reconcileBinlogServer(ctx, cr)).To(Succeed()) + + sts := &appsv1.StatefulSet{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: binlogserver.Name(cr), + Namespace: ns, + }, sts) + return err == nil + }, time.Second*15, time.Millisecond*250).Should(BeTrue()) + + Expect(sts.Spec.Replicas).To(gs.PointTo(BeEquivalentTo(1))) + }) +}) diff --git a/pkg/controller/ps/endpoint.go b/pkg/controller/ps/endpoint.go new file mode 100644 index 000000000..20f7e42ba --- /dev/null +++ b/pkg/controller/ps/endpoint.go @@ -0,0 +1,19 @@ +package ps + +import ( + "fmt" + "net/url" +) + +// parseEndpointURL extracts the protocol and host from an endpoint URL. 
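+// Any path component after the host is discarded; only the scheme and host are returned.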
+// Expected formats: "s3://s3.amazonaws.com", "https://minio-service:9000" +func parseEndpointURL(endpointURL string) (protocol, host string, err error) { + u, err := url.Parse(endpointURL) + if err != nil { + return "", "", fmt.Errorf("parse endpoint URL %q: %w", endpointURL, err) + } + if u.Host == "" { + return "", "", fmt.Errorf("endpoint URL %q must include protocol and host (e.g. s3://... or https://...)", endpointURL) + } + return u.Scheme, u.Host, nil +} diff --git a/pkg/controller/ps/endpoint_test.go b/pkg/controller/ps/endpoint_test.go new file mode 100644 index 000000000..9c553d29d --- /dev/null +++ b/pkg/controller/ps/endpoint_test.go @@ -0,0 +1,86 @@ +package ps + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseEndpointURL(t *testing.T) { + tests := []struct { + name string + input string + wantProtocol string + wantHost string + wantErr string + }{ + { + name: "https with host and port", + input: "https://minio-service:9000", + wantProtocol: "https", + wantHost: "minio-service:9000", + }, + { + name: "s3 scheme", + input: "s3://s3.amazonaws.com", + wantProtocol: "s3", + wantHost: "s3.amazonaws.com", + }, + { + name: "http scheme", + input: "http://localhost:9000", + wantProtocol: "http", + wantHost: "localhost:9000", + }, + { + name: "trailing slash is stripped", + input: "https://minio:9000/", + wantProtocol: "https", + wantHost: "minio:9000", + }, + { + name: "path after host is stripped", + input: "https://host/path/to/something", + wantProtocol: "https", + wantHost: "host", + }, + { + name: "host with port and path is stripped", + input: "https://minio:9000/bucket/prefix", + wantProtocol: "https", + wantHost: "minio:9000", + }, + { + name: "no scheme", + input: "minio-service:9000", + wantErr: "must include protocol and host", + }, + { + name: "empty string", + input: "", + wantErr: "must include protocol and host", + }, + { + name: "scheme only", + input: "https://", + wantErr: "must include protocol and host", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + protocol, host, err := parseEndpointURL(tc.input) + + if tc.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErr) + return + } + + require.NoError(t, err) + assert.Equal(t, tc.wantProtocol, protocol) + assert.Equal(t, tc.wantHost, host) + }) + } +} diff --git a/pkg/controller/ps/status.go b/pkg/controller/ps/status.go index 0e5a8bea6..028dd6beb 100644 --- a/pkg/controller/ps/status.go +++ b/pkg/controller/ps/status.go @@ -19,6 +19,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" database "github.com/percona/percona-server-mysql-operator/pkg/db" "github.com/percona/percona-server-mysql-operator/pkg/haproxy" "github.com/percona/percona-server-mysql-operator/pkg/innodbcluster" @@ -138,6 +139,15 @@ func (r *PerconaServerMySQLReconciler) reconcileCRStatus(ctx context.Context, cr } status.HAProxy = haproxyStatus + binlogServerStatus := apiv1.StatefulAppStatus{} + if cr.Spec.Backup.PiTR.Enabled { + binlogServerStatus, err = r.appStatus(ctx, cr, binlogserver.Name(cr), 1, binlogserver.MatchLabels(cr), status.BinlogServer.Version) + if err != nil { + return errors.Wrap(err, "get binlog server status") + } + } + status.BinlogServer = binlogServerStatus + status.State = apiv1.StateReady if cr.Spec.MySQL.IsAsync() { if cr.OrchestratorEnabled() && 
status.Orchestrator.State != apiv1.StateReady { @@ -155,6 +165,10 @@ func (r *PerconaServerMySQLReconciler) reconcileCRStatus(ctx context.Context, cr } } + if cr.Spec.Backup.PiTR.Enabled && status.BinlogServer.State != apiv1.StateReady { + status.State = apiv1.StateInitializing + } + if status.MySQL.State != apiv1.StateReady { status.State = status.MySQL.State } diff --git a/pkg/controller/ps/status_test.go b/pkg/controller/ps/status_test.go index 8c2b379cf..ab83de3a5 100644 --- a/pkg/controller/ps/status_test.go +++ b/pkg/controller/ps/status_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -28,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" "github.com/percona/percona-server-mysql-operator/pkg/clientcmd" "github.com/percona/percona-server-mysql-operator/pkg/haproxy" "github.com/percona/percona-server-mysql-operator/pkg/innodbcluster" @@ -1255,6 +1257,9 @@ func makeFakeReadyPods(cr *apiv1.PerconaServerMySQL, amount int, podType string) case "router": pod.Name = router.PodName(cr, i) pod.Labels = router.Labels(cr) + case "binlogserver": + pod.Name = fmt.Sprintf("%s-%d", binlogserver.Name(cr), i) + pod.Labels = binlogserver.MatchLabels(cr) } pod.Namespace = cr.Namespace pods = append(pods, pod) @@ -1262,6 +1267,183 @@ func makeFakeReadyPods(cr *apiv1.PerconaServerMySQL, amount int, podType string) return pods } +func TestReconcileStatusBinlogServer(t *testing.T) { + cr, err := readDefaultCR("ps-cluster1", "status-1") + require.NoError(t, err) + cr.Spec.MySQL.ClusterType = apiv1.ClusterTypeAsync + cr.Spec.UpdateStrategy = appsv1.OnDeleteStatefulSetStrategyType + cr.Spec.Backup.PiTR.Enabled = true + + scheme := runtime.NewScheme() + require.NoError(t, clientgoscheme.AddToScheme(scheme)) + require.NoError(t, apiv1.AddToScheme(scheme)) + + allReadyObjects := appendSlices( + makeFakeReadyPods(cr, 3, "mysql"), + makeFakeReadyPods(cr, 3, "haproxy"), + makeFakeReadyPods(cr, 3, "orchestrator"), + ) + + tests := map[string]struct { + cr *apiv1.PerconaServerMySQL + objects []client.Object + expected apiv1.PerconaServerMySQLStatus + }{ + "pitr enabled, binlog server pod not ready": { + cr: cr, + objects: allReadyObjects, + expected: apiv1.PerconaServerMySQLStatus{ + MySQL: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + Orchestrator: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + HAProxy: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + BinlogServer: apiv1.StatefulAppStatus{ + Size: 1, + State: apiv1.StateInitializing, + }, + State: apiv1.StateInitializing, + Host: cr.Name + "-haproxy." 
+ cr.Namespace, + Conditions: []metav1.Condition{ + { + Type: apiv1.StateInitializing.String(), + Status: metav1.ConditionTrue, + Reason: apiv1.StateInitializing.String(), + }, + { + Type: apiv1.StateReady.String(), + Status: metav1.ConditionFalse, + Reason: apiv1.StateReady.String(), + }, + }, + }, + }, + "pitr enabled, binlog server pod ready": { + cr: cr, + objects: appendSlices( + allReadyObjects, + makeFakeReadyPods(cr, 1, "binlogserver"), + ), + expected: apiv1.PerconaServerMySQLStatus{ + MySQL: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + Orchestrator: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + HAProxy: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + BinlogServer: apiv1.StatefulAppStatus{ + Size: 1, + Ready: 1, + State: apiv1.StateReady, + }, + State: apiv1.StateReady, + Host: cr.Name + "-haproxy." + cr.Namespace, + Conditions: []metav1.Condition{ + { + Type: apiv1.StateInitializing.String(), + Status: metav1.ConditionFalse, + Reason: apiv1.StateInitializing.String(), + }, + { + Type: apiv1.StateReady.String(), + Status: metav1.ConditionTrue, + Reason: apiv1.StateReady.String(), + }, + }, + }, + }, + "pitr disabled, binlog server pod not ready does not affect cluster state": { + cr: updateResource(cr.DeepCopy(), func(cr *apiv1.PerconaServerMySQL) { + cr.Spec.Backup.PiTR.Enabled = false + }), + objects: allReadyObjects, + expected: apiv1.PerconaServerMySQLStatus{ + MySQL: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + Orchestrator: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + HAProxy: apiv1.StatefulAppStatus{ + Size: 3, + Ready: 3, + State: apiv1.StateReady, + }, + State: apiv1.StateReady, + Host: cr.Name + "-haproxy." + cr.Namespace, + Conditions: []metav1.Condition{ + { + Type: apiv1.StateInitializing.String(), + Status: metav1.ConditionFalse, + Reason: apiv1.StateInitializing.String(), + }, + { + Type: apiv1.StateReady.String(), + Status: metav1.ConditionTrue, + Reason: apiv1.StateReady.String(), + }, + }, + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + cr := tt.cr.DeepCopy() + cb := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cr).WithStatusSubresource(cr).WithObjects(tt.objects...).WithStatusSubresource(tt.objects...) 
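+ // The CR and the fake pods are registered with the status subresource so that status writes from reconcileCRStatus go through the status subresource on the fake client.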
+ + cliCmd, err := getFakeOrchestratorClient(cr) + require.NoError(t, err) + + r := &PerconaServerMySQLReconciler{ + Client: cb.Build(), + Scheme: scheme, + ClientCmd: cliCmd, + Recorder: new(record.FakeRecorder), + ServerVersion: &platform.ServerVersion{ + Platform: platform.PlatformKubernetes, + }, + } + + cr = &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Name, + Namespace: cr.Namespace, + }, + } + + require.NoError(t, r.reconcileCRStatus(t.Context(), cr, nil)) + require.NoError(t, r.Get(t.Context(), types.NamespacedName{Namespace: cr.Namespace, Name: cr.Name}, cr)) + + opt := cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message") + assert.Empty(t, cmp.Diff(cr.Status, tt.expected, opt)) + }) + } +} + func updateResource[T any](obj *T, updateFuncs ...func(obj *T)) *T { for _, f := range updateFuncs { f(obj) diff --git a/pkg/controller/ps/tls_test.go b/pkg/controller/ps/tls_test.go index 6450fbf99..6a86c129a 100644 --- a/pkg/controller/ps/tls_test.go +++ b/pkg/controller/ps/tls_test.go @@ -78,6 +78,9 @@ var _ = Describe("TLS secrets without cert-manager", Ordered, func() { "*.ps-cluster1-mysql", "*.ps-cluster1-mysql.tls-1", "*.ps-cluster1-mysql.tls-1.svc", + "ps-cluster1-mysql-primary", + "ps-cluster1-mysql-primary.tls-1", + "ps-cluster1-mysql-primary.tls-1.svc", "*.ps-cluster1-orchestrator", "*.ps-cluster1-orchestrator.tls-1", "*.ps-cluster1-orchestrator.tls-1.svc", @@ -126,6 +129,9 @@ var _ = Describe("TLS secrets without cert-manager", Ordered, func() { "*.ps-cluster1-mysql", "*.ps-cluster1-mysql.tls-1", "*.ps-cluster1-mysql.tls-1.svc", + "ps-cluster1-mysql-primary", + "ps-cluster1-mysql-primary.tls-1", + "ps-cluster1-mysql-primary.tls-1.svc", "*.ps-cluster1-orchestrator", "*.ps-cluster1-orchestrator.tls-1", "*.ps-cluster1-orchestrator.tls-1.svc", diff --git a/pkg/controller/psrestore/controller.go b/pkg/controller/psrestore/controller.go index 2d155a59b..5d76de8f7 100644 --- a/pkg/controller/psrestore/controller.go +++ b/pkg/controller/psrestore/controller.go @@ -18,7 +18,9 @@ package psrestore import ( "context" + "encoding/json" "fmt" + "strings" "sync" "time" @@ -34,13 +36,17 @@ import ( k8sretry "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/binlogserver" + "github.com/percona/percona-server-mysql-operator/pkg/clientcmd" "github.com/percona/percona-server-mysql-operator/pkg/haproxy" "github.com/percona/percona-server-mysql-operator/pkg/k8s" "github.com/percona/percona-server-mysql-operator/pkg/mysql" "github.com/percona/percona-server-mysql-operator/pkg/orchestrator" + "github.com/percona/percona-server-mysql-operator/pkg/pitr" "github.com/percona/percona-server-mysql-operator/pkg/platform" "github.com/percona/percona-server-mysql-operator/pkg/router" "github.com/percona/percona-server-mysql-operator/pkg/xtrabackup" @@ -52,6 +58,7 @@ type PerconaServerMySQLRestoreReconciler struct { client.Client Scheme *runtime.Scheme ServerVersion *platform.ServerVersion + ClientCmd clientcmd.Client NewStorageClient storage.NewClientFunc sm sync.Map @@ -74,7 +81,7 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req log := logf.FromContext(ctx).WithName("PerconaServerMySQLRestore").WithValues("name", req.Name, "namespace", 
req.Namespace) cr := &apiv1.PerconaServerMySQLRestore{} - err := r.Client.Get(ctx, req.NamespacedName, cr) + err := r.Get(ctx, req.NamespacedName, cr) if err != nil { return ctrl.Result{}, errors.Wrapf(err, "get CR %s", req.NamespacedName) } @@ -95,18 +102,18 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req } err := k8sretry.OnError(k8sretry.DefaultRetry, retriable, func() error { cr := &apiv1.PerconaServerMySQLRestore{} - if err := r.Client.Get(ctx, req.NamespacedName, cr); err != nil { - return errors.Wrapf(err, "get %v", req.NamespacedName.String()) + if err := r.Get(ctx, req.NamespacedName, cr); err != nil { + return errors.Wrapf(err, "get %v", req.String()) } cr.Status = status log.Info("Updating status", "state", cr.Status.State) - if err := r.Client.Status().Update(ctx, cr); err != nil { + if err := r.Status().Update(ctx, cr); err != nil { return errors.Wrap(err, "update status") } - if err := r.Client.Get(ctx, req.NamespacedName, cr); err != nil { - return errors.Wrapf(err, "get %v", req.NamespacedName.String()) + if err := r.Get(ctx, req.NamespacedName, cr); err != nil { + return errors.Wrapf(err, "get %v", req.String()) } if cr.Status.State != status.State { return errors.Errorf("status %s was not updated to %s", cr.Status.State, status.State) @@ -128,7 +135,7 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req cluster := &apiv1.PerconaServerMySQL{} nn := types.NamespacedName{Name: cr.Spec.ClusterName, Namespace: cr.Namespace} - if err := r.Client.Get(ctx, nn, cluster); err != nil { + if err := r.Get(ctx, nn, cluster); err != nil { if k8serrors.IsNotFound(err) { status.State = apiv1.RestoreError status.StateDesc = fmt.Sprintf("PerconaServerMySQL %s in namespace %s is not found", cr.Spec.ClusterName, cr.Namespace) @@ -173,6 +180,22 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req } defer r.sm.Delete(cr.Spec.ClusterName) + if cr.Spec.PITR != nil { + if !cluster.Spec.Backup.PiTR.Enabled || cluster.Spec.Backup.PiTR.BinlogServer == nil { + status.State = apiv1.RestoreError + status.StateDesc = "Binlog server is not enabled for the cluster" + return ctrl.Result{}, nil + } + + status.State = apiv1.RestoreStarting + if err := r.reconcilePITRConfig(ctx, cr, cluster); err != nil { + status.State = apiv1.RestoreError + status.StateDesc = errors.Wrap(err, "reconcile pitr config").Error() + return ctrl.Result{}, nil + } + status.StateDesc = "" + } + log.Info("Pausing cluster", "cluster", cluster.Name) if err := r.pauseCluster(ctx, cluster); err != nil { if errors.Is(err, ErrWaitingTermination) { @@ -190,7 +213,7 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req job := &batchv1.Job{} nn = types.NamespacedName{Name: xtrabackup.RestoreJobName(cluster, cr), Namespace: req.Namespace} - err = r.Client.Get(ctx, nn, job) + err = r.Get(ctx, nn, job) if client.IgnoreNotFound(err) != nil { return ctrl.Result{}, errors.Wrapf(err, "get job %s", nn) } @@ -232,7 +255,15 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req case batchv1.JobFailed: status.State = apiv1.RestoreFailed case batchv1.JobComplete: - status.State = apiv1.RestoreSucceeded + if cr.Spec.PITR != nil { + pitrState, err := r.reconcilePITRJob(ctx, cr, cluster) + if err != nil { + return ctrl.Result{}, errors.Wrap(err, "reconcile pitr job") + } + status.State = pitrState + } else { + status.State = apiv1.RestoreSucceeded + } } } case apiv1.RestoreFailed, apiv1.RestoreSucceeded: 
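Taken together, the restore controller now handles PITR in two phases: reconcilePITRConfig (below) materializes the binlog list into a ConfigMap before the cluster is paused, and reconcilePITRJob launches a dedicated restore Job once the base XtraBackup restore completes. For illustration, a restore object that exercises this path might be built like the following minimal sketch; the field names match the apiv1 types used in this change, while the object, cluster, and namespace names are placeholders:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/percona/percona-server-mysql-operator/api/v1"
)

func main() {
	// Hypothetical restore targeting a timestamp. The controller reads
	// Spec.ClusterName and Spec.PITR; other spec fields (the backup
	// source, storage overrides, etc.) are omitted from this sketch.
	restore := &apiv1.PerconaServerMySQLRestore{
		ObjectMeta: metav1.ObjectMeta{Name: "restore1", Namespace: "ns"},
		Spec: apiv1.PerconaServerMySQLRestoreSpec{
			ClusterName: "ps-cluster1",
			PITR: &apiv1.RestorePITRSpec{
				Type: apiv1.PITRDate,
				// searchBinlogs converts the space to "T" before querying
				// the binlog server.
				Date: "2024-01-15 10:00:00",
			},
		},
	}
	fmt.Println(restore.Name, string(restore.Spec.PITR.Type))
}
```

The resulting Job (see pkg/pitr below) mounts the first MySQL pod's data PVC and reads the binlog list from the pitr-binlogs-<restore-name> ConfigMap created by reconcilePITRConfig.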
@@ -256,6 +287,133 @@ func (r *PerconaServerMySQLRestoreReconciler) Reconcile(ctx context.Context, req return ctrl.Result{}, nil } +func (r *PerconaServerMySQLRestoreReconciler) reconcilePITRConfig( + ctx context.Context, + cr *apiv1.PerconaServerMySQLRestore, + cluster *apiv1.PerconaServerMySQL, +) error { + cm := pitr.BinlogsConfigMap(cluster, cr) + if err := r.Get(ctx, client.ObjectKeyFromObject(cm), new(corev1.ConfigMap)); err == nil { + return nil + } + + binlogs, err := r.searchBinlogs(ctx, cr, cluster) + if err != nil { + return errors.Wrap(err, "search binlogs") + } + if len(binlogs) == 0 { + return errors.New("no binlogs found for the given PITR target") + } + + data, err := json.Marshal(binlogs) + if err != nil { + return errors.Wrap(err, "marshal binlog entries") + } + + cm.Data = make(map[string]string) + cm.Data[pitr.BinlogsConfigKey] = string(data) + + if err := controllerutil.SetControllerReference(cr, cm, r.Scheme); err != nil { + return errors.Wrapf(err, "set controller reference to ConfigMap %s/%s", cm.Namespace, cm.Name) + } + if err := r.Create(ctx, cm); err != nil { + return errors.Wrapf(err, "create binlogs configmap %s/%s", cm.Namespace, cm.Name) + } + + return nil +} + +func (r *PerconaServerMySQLRestoreReconciler) reconcilePITRJob( + ctx context.Context, + cr *apiv1.PerconaServerMySQLRestore, + cluster *apiv1.PerconaServerMySQL, +) (apiv1.RestoreState, error) { + log := logf.FromContext(ctx) + + pitrJob := &batchv1.Job{} + nn := types.NamespacedName{Name: pitr.JobName(cr), Namespace: cr.Namespace} + err := r.Get(ctx, nn, pitrJob) + if err != nil { + if !k8serrors.IsNotFound(err) { + return "", errors.Wrapf(err, "get pitr job %s", nn) + } + + log.Info("Creating PITR restore job", "jobName", nn.Name) + + bcp, err := getBackup(ctx, r.Client, cr, cluster) + if err != nil { + return "", errors.Wrap(err, "get backup") + } + + initImage, err := k8s.InitImage(ctx, r.Client, cluster, cluster.Spec.Backup) + if err != nil { + return "", errors.Wrap(err, "get operator image") + } + + job := pitr.RestoreJob(cluster, cr, bcp.Status.Storage, initImage) + if err := controllerutil.SetControllerReference(cr, job, r.Scheme); err != nil { + return "", errors.Wrapf(err, "set controller reference to Job %s/%s", job.Namespace, job.Name) + } + + if err := r.Create(ctx, job); err != nil { + return "", errors.Wrapf(err, "create pitr job %s/%s", job.Namespace, job.Name) + } + + return apiv1.RestoreRunning, nil + } + + if pitrJob.Status.Active > 0 { + return apiv1.RestoreRunning, nil + } + + for _, cond := range pitrJob.Status.Conditions { + if cond.Status != corev1.ConditionTrue { + continue + } + + switch cond.Type { + case batchv1.JobFailed: + return apiv1.RestoreFailed, nil + case batchv1.JobComplete: + return apiv1.RestoreSucceeded, nil + } + } + + return apiv1.RestoreRunning, nil +} + +func (r *PerconaServerMySQLRestoreReconciler) searchBinlogs( + ctx context.Context, + cr *apiv1.PerconaServerMySQLRestore, + cluster *apiv1.PerconaServerMySQL, +) ([]binlogserver.BinlogEntry, error) { + if cr.Spec.PITR == nil { + return nil, errors.New("pitr spec is not set") + } + + var resp *binlogserver.SearchResponse + var err error + + switch cr.Spec.PITR.Type { + case apiv1.PITRDate: + ts := strings.Replace(cr.Spec.PITR.Date, " ", "T", 1) + resp, err = binlogserver.SearchByTimestamp(ctx, r.Client, r.ClientCmd, cluster, ts) + case apiv1.PITRGtid: + resp, err = binlogserver.SearchByGTID(ctx, r.Client, r.ClientCmd, cluster, cr.Spec.PITR.GTID) + default: + return nil, errors.Errorf("unknown PITR type: 
%s", cr.Spec.PITR.Type) + } + if err != nil { + return nil, errors.Wrap(err, "search binlogs") + } + + if resp.Status != "success" { + return nil, errors.Errorf("binlog search failed with status: %s", resp.Status) + } + + return resp.Result, nil +} + func (r *PerconaServerMySQLRestoreReconciler) deletePVCs(ctx context.Context, cluster *apiv1.PerconaServerMySQL) error { log := logf.FromContext(ctx) @@ -269,7 +427,7 @@ func (r *PerconaServerMySQLRestoreReconciler) deletePVCs(ctx context.Context, cl continue } - if err := r.Client.Delete(ctx, &pvc); err != nil { + if err := r.Delete(ctx, &pvc); err != nil { if !k8serrors.IsNotFound(err) { log.Error(err, "failed to delete PVC") } @@ -288,7 +446,7 @@ func (r *PerconaServerMySQLRestoreReconciler) removeBootstrapCondition(ctx conte err := k8sretry.RetryOnConflict(k8sretry.DefaultRetry, func() error { c := &apiv1.PerconaServerMySQL{} nn := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, nn, c); err != nil { + if err := r.Get(ctx, nn, c); err != nil { return err } @@ -311,13 +469,13 @@ func (r *PerconaServerMySQLRestoreReconciler) pauseCluster(ctx context.Context, err := k8sretry.RetryOnConflict(k8sretry.DefaultRetry, func() error { c := &apiv1.PerconaServerMySQL{} nn := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, nn, c); err != nil { + if err := r.Get(ctx, nn, c); err != nil { return err } c.Spec.Pause = true - if err := r.Client.Patch(ctx, c, client.MergeFrom(cluster)); err != nil { + if err := r.Patch(ctx, c, client.MergeFrom(cluster)); err != nil { return err } @@ -329,7 +487,7 @@ func (r *PerconaServerMySQLRestoreReconciler) pauseCluster(ctx context.Context, sts := &appsv1.StatefulSet{} nn := types.NamespacedName{Name: mysql.Name(cluster), Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, nn, sts); err != nil { + if err := r.Get(ctx, nn, sts); err != nil { return errors.Wrapf(err, "get statefulset %s", nn) } @@ -340,7 +498,7 @@ func (r *PerconaServerMySQLRestoreReconciler) pauseCluster(ctx context.Context, switch cluster.Spec.MySQL.ClusterType { case apiv1.ClusterTypeAsync: nn = types.NamespacedName{Name: orchestrator.Name(cluster), Namespace: cluster.Namespace} - err := r.Client.Get(ctx, nn, sts) + err := r.Get(ctx, nn, sts) if client.IgnoreNotFound(err) != nil { return errors.Wrapf(err, "get statefulset %s", nn) } @@ -351,7 +509,7 @@ func (r *PerconaServerMySQLRestoreReconciler) pauseCluster(ctx context.Context, if cluster.HAProxyEnabled() { sts := new(appsv1.StatefulSet) nn = types.NamespacedName{Name: haproxy.Name(cluster), Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, nn, sts); err != nil { + if err := r.Get(ctx, nn, sts); err != nil { return errors.Wrapf(err, "get deployment %s", nn) } if sts.Status.Replicas != 0 { @@ -362,7 +520,7 @@ func (r *PerconaServerMySQLRestoreReconciler) pauseCluster(ctx context.Context, if cluster.RouterEnabled() { deployment := new(appsv1.Deployment) nn = types.NamespacedName{Name: router.Name(cluster), Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, nn, deployment); err != nil { + if err := r.Get(ctx, nn, deployment); err != nil { return errors.Wrapf(err, "get deployment %s", nn) } if deployment.Status.Replicas != 0 { @@ -378,13 +536,13 @@ func (r *PerconaServerMySQLRestoreReconciler) unpauseCluster(ctx context.Context return k8sretry.RetryOnConflict(k8sretry.DefaultRetry, func() error { c := &apiv1.PerconaServerMySQL{} nn := types.NamespacedName{Name: cluster.Name, 
Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, nn, c); err != nil { + if err := r.Get(ctx, nn, c); err != nil { return err } c.Spec.Pause = false - if err := r.Client.Patch(ctx, c, client.MergeFrom(cluster)); err != nil { + if err := r.Patch(ctx, c, client.MergeFrom(cluster)); err != nil { return err } diff --git a/pkg/pitr/pitr.go b/pkg/pitr/pitr.go new file mode 100644 index 000000000..e1f766f53 --- /dev/null +++ b/pkg/pitr/pitr.go @@ -0,0 +1,295 @@ +package pitr + +import ( + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/k8s" + "github.com/percona/percona-server-mysql-operator/pkg/mysql" + "github.com/percona/percona-server-mysql-operator/pkg/naming" + "github.com/percona/percona-server-mysql-operator/pkg/secret" + "github.com/percona/percona-server-mysql-operator/pkg/util" +) + +const ( + appName = "pitr" + dataVolumeName = "datadir" + dataMountPath = "/var/lib/mysql" + credsVolumeName = "users" + credsMountPath = "/etc/mysql/mysql-users-secret" + tlsVolumeName = "tls" + tlsMountPath = "/etc/mysql/mysql-tls-secret" + binlogsVolumeName = "binlogs" + binlogsMountPath = "/etc/pitr" + BinlogsConfigKey = "binlogs.json" +) + +func JobName(restore *apiv1.PerconaServerMySQLRestore) string { + return fmt.Sprintf("pitr-restore-%s", restore.Name) +} + +func BinlogsConfigMap(cluster *apiv1.PerconaServerMySQL, restore *apiv1.PerconaServerMySQLRestore) *corev1.ConfigMap { + labels := util.SSMapMerge(cluster.GlobalLabels(), restore.Labels(appName, naming.ComponentPITR)) + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: binlogsConfigMapName(restore), + Namespace: cluster.Namespace, + Labels: labels, + Annotations: cluster.GlobalAnnotations(), + }, + } +} + +func RestoreJob( + cluster *apiv1.PerconaServerMySQL, + restore *apiv1.PerconaServerMySQLRestore, + storage *apiv1.BackupStorageSpec, + initImage string, +) *batchv1.Job { + labels := util.SSMapMerge(cluster.GlobalLabels(), storage.Labels, restore.Labels(appName, naming.ComponentPITR)) + + pvcName := fmt.Sprintf("%s-%s-mysql-0", mysql.DataVolumeName, cluster.Name) + + return &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "batch/v1", + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: JobName(restore), + Namespace: cluster.Namespace, + Labels: labels, + Annotations: util.SSMapMerge(cluster.GlobalAnnotations(), restore.Annotations, storage.Annotations), + }, + Spec: batchv1.JobSpec{ + Parallelism: ptr.To(int32(1)), + Completions: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: util.SSMapMerge(cluster.GlobalAnnotations(), restore.Annotations, storage.Annotations), + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + ImagePullSecrets: cluster.Spec.Backup.ImagePullSecrets, + InitContainers: []corev1.Container{ + k8s.InitContainer( + cluster, + appName, + initImage, + cluster.Spec.Backup.InitContainer, + cluster.Spec.Backup.ImagePullPolicy, + storage.ContainerSecurityContext, + cluster.Spec.Backup.Resources, + []corev1.VolumeMount{ + { + Name: dataVolumeName, + MountPath: dataMountPath, + }, + { + Name: credsVolumeName, + MountPath: credsMountPath, + }, + { + Name: tlsVolumeName, + MountPath: tlsMountPath, + }, + }, + ), + 
}, + Containers: []corev1.Container{ + restoreContainer(cluster, restore, storage), + }, + Affinity: storage.Affinity, + TopologySpreadConstraints: storage.TopologySpreadConstraints, + Tolerations: storage.Tolerations, + NodeSelector: storage.NodeSelector, + SchedulerName: storage.SchedulerName, + PriorityClassName: storage.PriorityClassName, + RuntimeClassName: storage.RuntimeClassName, + DNSPolicy: corev1.DNSClusterFirst, + SecurityContext: storage.PodSecurityContext, + Volumes: []corev1.Volume{ + { + Name: apiv1.BinVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: dataVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }, + { + Name: credsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: cluster.Spec.SecretsName, + }, + }, + }, + { + Name: tlsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: cluster.Spec.SSLSecretName, + }, + }, + }, + { + Name: binlogsVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: binlogsConfigMapName(restore), + }, + }, + }, + }, + }, + }, + }, + BackoffLimit: cluster.Spec.Backup.BackoffLimit, + }, + } +} + +func restoreContainer( + cluster *apiv1.PerconaServerMySQL, + restore *apiv1.PerconaServerMySQLRestore, + storage *apiv1.BackupStorageSpec, +) corev1.Container { + binlogServer := cluster.Spec.Backup.PiTR.BinlogServer + + envs := []corev1.EnvVar{ + { + Name: "RESTORE_NAME", + Value: restore.Name, + }, + { + Name: "BINLOGS_PATH", + Value: fmt.Sprintf("%s/%s", binlogsMountPath, BinlogsConfigKey), + }, + } + + if _, ok := restore.Annotations["percona.com/pitr-sleep-forever"]; ok { + envs = append(envs, corev1.EnvVar{ + Name: "SLEEP_FOREVER", + Value: "true", + }) + } + + if restore.Spec.PITR != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PITR_TYPE", + Value: string(restore.Spec.PITR.Type), + }) + switch restore.Spec.PITR.Type { + case apiv1.PITRDate: + envs = append(envs, corev1.EnvVar{ + Name: "PITR_DATE", + Value: restore.Spec.PITR.Date, + }) + case apiv1.PITRGtid: + envs = append(envs, corev1.EnvVar{ + Name: "PITR_GTID", + Value: restore.Spec.PITR.GTID, + }) + } + if restore.Spec.PITR.Force { + envs = append(envs, corev1.EnvVar{ + Name: "PITR_FORCE", + Value: "true", + }) + } + } + + if binlogServer.Storage.S3 != nil { + s3 := binlogServer.Storage.S3 + bucket, _ := s3.BucketAndPrefix() + envs = append(envs, + corev1.EnvVar{ + Name: "STORAGE_TYPE", + Value: "s3", + }, + corev1.EnvVar{ + Name: "AWS_ACCESS_KEY_ID", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: k8s.SecretKeySelector(s3.CredentialsSecret, secret.CredentialsAWSAccessKey), + }, + }, + corev1.EnvVar{ + Name: "AWS_SECRET_ACCESS_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: k8s.SecretKeySelector(s3.CredentialsSecret, secret.CredentialsAWSSecretKey), + }, + }, + corev1.EnvVar{ + Name: "AWS_DEFAULT_REGION", + Value: s3.Region, + }, + corev1.EnvVar{ + Name: "AWS_ENDPOINT", + Value: s3.EndpointURL, + }, + corev1.EnvVar{ + Name: "S3_BUCKET", + Value: bucket, + }, + ) + } + + envs = append(envs, restore.GetContainerOptions(storage).GetEnv()...) 
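+ // Env from the restore's container options (resolved against the storage spec) is appended last, after the PITR and S3 defaults assembled above.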
+ + return corev1.Container{ + Name: appName, + Image: cluster.Spec.MySQL.Image, + ImagePullPolicy: cluster.Spec.MySQL.ImagePullPolicy, + Env: envs, + VolumeMounts: []corev1.VolumeMount{ + { + Name: apiv1.BinVolumeName, + MountPath: apiv1.BinVolumePath, + }, + { + Name: dataVolumeName, + MountPath: dataMountPath, + }, + { + Name: credsVolumeName, + MountPath: credsMountPath, + }, + { + Name: tlsVolumeName, + MountPath: tlsMountPath, + }, + { + Name: binlogsVolumeName, + MountPath: binlogsMountPath, + }, + }, + Command: []string{"/opt/percona/run-pitr-restore.sh"}, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + SecurityContext: storage.ContainerSecurityContext, + Resources: storage.Resources, + } +} + +func binlogsConfigMapName(restore *apiv1.PerconaServerMySQLRestore) string { + return fmt.Sprintf("pitr-binlogs-%s", restore.Name) +} diff --git a/pkg/pitr/pitr_test.go b/pkg/pitr/pitr_test.go new file mode 100644 index 000000000..a264e1f18 --- /dev/null +++ b/pkg/pitr/pitr_test.go @@ -0,0 +1,718 @@ +package pitr + +import ( + "testing" + + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" + "github.com/percona/percona-server-mysql-operator/pkg/mysql" +) + +func TestRestoreJob(t *testing.T) { + tests := map[string]struct { + cluster *apiv1.PerconaServerMySQL + restore *apiv1.PerconaServerMySQLRestore + storage *apiv1.BackupStorageSpec + initImage string + verify func(t *testing.T, job *batchv1.Job) + }{ + "basic job metadata": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "test-ns", + }, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "my-cluster-secrets", + SSLSecretName: "my-cluster-ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-restore", + Namespace: "test-ns", + }, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + assert.Equal(t, "pitr-restore-my-restore", job.Name) + assert.Equal(t, "test-ns", job.Namespace) + assert.Equal(t, "batch/v1", job.APIVersion) + assert.Equal(t, "Job", job.Kind) + }, + }, + "job spec parallelism and completions": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "cluster-secrets", + SSLSecretName: "cluster-ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + assert.Equal(t, ptr.To(int32(1)), job.Spec.Parallelism) + assert.Equal(t, ptr.To(int32(1)), job.Spec.Completions) + assert.Equal(t, corev1.RestartPolicyNever, job.Spec.Template.Spec.RestartPolicy) + }, + }, + "backoff limit from cluster spec": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "cluster-secrets", + SSLSecretName: "cluster-ssl", + 
Backup: &apiv1.BackupSpec{ + BackoffLimit: ptr.To(int32(5)), + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + assert.Equal(t, ptr.To(int32(5)), job.Spec.BackoffLimit) + }, + }, + "pvc name uses cluster name": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "mycluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "mycluster-secrets", + SSLSecretName: "mycluster-ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + volumes := job.Spec.Template.Spec.Volumes + expectedPVCName := mysql.DataVolumeName + "-mycluster-mysql-0" + var found bool + for _, v := range volumes { + if v.Name == dataVolumeName && v.PersistentVolumeClaim != nil { + assert.Equal(t, expectedPVCName, v.PersistentVolumeClaim.ClaimName) + found = true + } + } + assert.True(t, found, "datadir volume with expected PVC not found") + }, + }, + "volumes include all expected volumes": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "my-secrets", + SSLSecretName: "my-ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + volumeNames := map[string]bool{} + for _, v := range job.Spec.Template.Spec.Volumes { + volumeNames[v.Name] = true + } + assert.True(t, volumeNames[apiv1.BinVolumeName], "missing bin volume") + assert.True(t, volumeNames[dataVolumeName], "missing datadir volume") + assert.True(t, volumeNames[credsVolumeName], "missing creds volume") + assert.True(t, volumeNames[tlsVolumeName], "missing tls volume") + assert.True(t, volumeNames[binlogsVolumeName], "missing binlogs volume") + }, + }, + "secrets volume uses cluster secrets name": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "custom-secrets", + SSLSecretName: "custom-ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + for _, v := range job.Spec.Template.Spec.Volumes { + switch v.Name { + case credsVolumeName: + assert.Equal(t, "custom-secrets", v.Secret.SecretName) + case tlsVolumeName: + assert.Equal(t, "custom-ssl", v.Secret.SecretName) + } + } + }, + }, + "binlogs configmap volume references restore name": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: 
apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + for _, v := range job.Spec.Template.Spec.Volumes { + if v.Name == binlogsVolumeName { + assert.Equal(t, "pitr-binlogs-my-restore", v.ConfigMap.Name) + return + } + } + t.Error("binlogs volume not found") + }, + }, + "storage scheduling fields propagated to pod spec": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{ + NodeSelector: map[string]string{"disktype": "ssd"}, + SchedulerName: "my-scheduler", + PriorityClassName: "high-priority", + Tolerations: []corev1.Toleration{ + {Key: "dedicated", Operator: corev1.TolerationOpEqual, Value: "mysql", Effect: corev1.TaintEffectNoSchedule}, + }, + }, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + spec := job.Spec.Template.Spec + assert.Equal(t, map[string]string{"disktype": "ssd"}, spec.NodeSelector) + assert.Equal(t, "my-scheduler", spec.SchedulerName) + assert.Equal(t, "high-priority", spec.PriorityClassName) + assert.Len(t, spec.Tolerations, 1) + assert.Equal(t, "dedicated", spec.Tolerations[0].Key) + }, + }, + "image pull secrets from backup spec": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "registry-secret"}}, + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + assert.Equal(t, []corev1.LocalObjectReference{{Name: "registry-secret"}}, job.Spec.Template.Spec.ImagePullSecrets) + }, + }, + "sleep forever annotation sets SLEEP_FOREVER env var": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-restore", + Namespace: "ns", + Annotations: map[string]string{"percona.com/pitr-sleep-forever": "true"}, + }, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.Equal(t, "true", envMap["SLEEP_FOREVER"]) + }, + }, + "no sleep forever annotation omits SLEEP_FOREVER env var": { + cluster: 
&apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.NotContains(t, envMap, "SLEEP_FOREVER") + }, + }, + "restore container has correct env vars without pitr spec": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLRestoreSpec{}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.Equal(t, "my-restore", envMap["RESTORE_NAME"]) + assert.Equal(t, binlogsMountPath+"/"+BinlogsConfigKey, envMap["BINLOGS_PATH"]) + assert.NotContains(t, envMap, "PITR_TYPE") + assert.NotContains(t, envMap, "PITR_DATE") + assert.NotContains(t, envMap, "PITR_GTID") + }, + }, + "restore container has pitr date env vars": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLRestoreSpec{ + PITR: &apiv1.RestorePITRSpec{ + Type: apiv1.PITRDate, + Date: "2024-01-15 10:00:00", + }, + }, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.Equal(t, "date", envMap["PITR_TYPE"]) + assert.Equal(t, "2024-01-15 10:00:00", envMap["PITR_DATE"]) + assert.NotContains(t, envMap, "PITR_GTID") + }, + }, + "restore container has pitr gtid env vars": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLRestoreSpec{ + PITR: &apiv1.RestorePITRSpec{ + Type: apiv1.PITRGtid, + GTID: "abc123:1-100", + }, + }, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.Equal(t, "gtid", 
envMap["PITR_TYPE"]) + assert.Equal(t, "abc123:1-100", envMap["PITR_GTID"]) + assert.NotContains(t, envMap, "PITR_DATE") + assert.NotContains(t, envMap, "PITR_FORCE") + }, + }, + "restore container has PITR_FORCE env var when force is true": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLRestoreSpec{ + PITR: &apiv1.RestorePITRSpec{ + Type: apiv1.PITRDate, + Date: "2024-01-15 10:00:00", + Force: true, + }, + }, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.Equal(t, "true", envMap["PITR_FORCE"]) + }, + }, + "restore container omits PITR_FORCE env var when force is false": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLRestoreSpec{ + PITR: &apiv1.RestorePITRSpec{ + Type: apiv1.PITRGtid, + GTID: "abc123:1-100", + }, + }, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.NotContains(t, envMap, "PITR_FORCE") + }, + }, + "restore container has s3 env vars when binlog server has s3 storage": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{ + Storage: apiv1.BinlogServerStorageSpec{ + S3: &apiv1.BackupStorageS3Spec{ + Bucket: "my-bucket", + CredentialsSecret: "s3-creds", + Region: "us-east-1", + EndpointURL: "https://s3.example.com", + }, + }, + }, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + container := job.Spec.Template.Spec.Containers[0] + envMap := envToMap(container.Env) + assert.Equal(t, "s3", envMap["STORAGE_TYPE"]) + assert.Equal(t, "my-bucket", envMap["S3_BUCKET"]) + assert.Equal(t, "us-east-1", envMap["AWS_DEFAULT_REGION"]) + assert.Equal(t, "https://s3.example.com", envMap["AWS_ENDPOINT"]) + + envByName := envByNameMap(container.Env) + accessKey := envByName["AWS_ACCESS_KEY_ID"] + assert.NotNil(t, accessKey.ValueFrom) + assert.Equal(t, "s3-creds", accessKey.ValueFrom.SecretKeyRef.Name) + assert.Equal(t, "AWS_ACCESS_KEY_ID", accessKey.ValueFrom.SecretKeyRef.Key) + + secretKey := envByName["AWS_SECRET_ACCESS_KEY"] + assert.NotNil(t, secretKey.ValueFrom) + assert.Equal(t, "s3-creds", 
secretKey.ValueFrom.SecretKeyRef.Name) + assert.Equal(t, "AWS_SECRET_ACCESS_KEY", secretKey.ValueFrom.SecretKeyRef.Key) + }, + }, + "restore container command and volume mounts": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + MySQL: apiv1.MySQLSpec{}, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "init:latest", + verify: func(t *testing.T, job *batchv1.Job) { + assert.Len(t, job.Spec.Template.Spec.Containers, 1) + container := job.Spec.Template.Spec.Containers[0] + assert.Equal(t, appName, container.Name) + assert.Equal(t, []string{"/opt/percona/run-pitr-restore.sh"}, container.Command) + + mountNames := map[string]bool{} + for _, m := range container.VolumeMounts { + mountNames[m.Name] = true + } + assert.True(t, mountNames[apiv1.BinVolumeName]) + assert.True(t, mountNames[dataVolumeName]) + assert.True(t, mountNames[credsVolumeName]) + assert.True(t, mountNames[tlsVolumeName]) + assert.True(t, mountNames[binlogsVolumeName]) + }, + }, + "one init container present": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + SecretsName: "secrets", + SSLSecretName: "ssl", + Backup: &apiv1.BackupSpec{ + PiTR: apiv1.PiTRSpec{ + BinlogServer: &apiv1.BinlogServerSpec{}, + }, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore", Namespace: "ns"}, + }, + storage: &apiv1.BackupStorageSpec{}, + initImage: "percona/init:1.0", + verify: func(t *testing.T, job *batchv1.Job) { + assert.Len(t, job.Spec.Template.Spec.InitContainers, 1) + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + job := RestoreJob(tt.cluster, tt.restore, tt.storage, tt.initImage) + tt.verify(t, job) + }) + } +} + +func envToMap(envs []corev1.EnvVar) map[string]string { + m := make(map[string]string, len(envs)) + for _, e := range envs { + m[e.Name] = e.Value + } + return m +} + +func envByNameMap(envs []corev1.EnvVar) map[string]corev1.EnvVar { + m := make(map[string]corev1.EnvVar, len(envs)) + for _, e := range envs { + m[e.Name] = e + } + return m +} + +func TestJobName(t *testing.T) { + tests := map[string]struct { + restoreName string + expected string + }{ + "simple name": {restoreName: "my-restore", expected: "pitr-restore-my-restore"}, + "name with numbers": {restoreName: "restore-123", expected: "pitr-restore-restore-123"}, + "single word name": {restoreName: "restore", expected: "pitr-restore-restore"}, + "name with dots": {restoreName: "restore.v1", expected: "pitr-restore-restore.v1"}, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + restore := &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: tt.restoreName}, + } + assert.Equal(t, tt.expected, JobName(restore)) + }) + } +} + +func TestBinlogsConfigMap(t *testing.T) { + tests := map[string]struct { + cluster *apiv1.PerconaServerMySQL + restore *apiv1.PerconaServerMySQLRestore + verify func(t *testing.T, cm *corev1.ConfigMap) + }{ + "basic metadata": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cluster", Namespace: "test-ns"}, + }, 
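+ // BinlogsConfigMap takes its name from the restore and its namespace from the cluster.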
+ restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "my-restore"}, + }, + verify: func(t *testing.T, cm *corev1.ConfigMap) { + assert.Equal(t, "pitr-binlogs-my-restore", cm.Name) + assert.Equal(t, "test-ns", cm.Namespace) + assert.Equal(t, "v1", cm.APIVersion) + assert.Equal(t, "ConfigMap", cm.Kind) + }, + }, + "no global labels or annotations": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore"}, + }, + verify: func(t *testing.T, cm *corev1.ConfigMap) { + assert.Nil(t, cm.Annotations) + }, + }, + "global labels merged into configmap labels": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + Metadata: &apiv1.Metadata{ + Labels: map[string]string{"env": "prod"}, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore"}, + }, + verify: func(t *testing.T, cm *corev1.ConfigMap) { + assert.Equal(t, "prod", cm.Labels["env"]) + }, + }, + "global annotations propagated": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + Spec: apiv1.PerconaServerMySQLSpec{ + Metadata: &apiv1.Metadata{ + Annotations: map[string]string{"team": "dba"}, + }, + }, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "restore"}, + }, + verify: func(t *testing.T, cm *corev1.ConfigMap) { + assert.Equal(t, "dba", cm.Annotations["team"]) + }, + }, + "name derived from restore name": { + cluster: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "ns"}, + }, + restore: &apiv1.PerconaServerMySQLRestore{ + ObjectMeta: metav1.ObjectMeta{Name: "weekly-restore"}, + }, + verify: func(t *testing.T, cm *corev1.ConfigMap) { + assert.Equal(t, "pitr-binlogs-weekly-restore", cm.Name) + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + cm := BinlogsConfigMap(tt.cluster, tt.restore) + tt.verify(t, cm) + }) + } +} diff --git a/pkg/tls/tls.go b/pkg/tls/tls.go index aa4d63d99..78d4b266c 100644 --- a/pkg/tls/tls.go +++ b/pkg/tls/tls.go @@ -29,6 +29,9 @@ func DNSNames(cr *apiv1.PerconaServerMySQL) []string { fmt.Sprintf("*.%s-mysql", cr.Name), fmt.Sprintf("*.%s-mysql.%s", cr.Name, cr.Namespace), fmt.Sprintf("*.%s-mysql.%s.svc", cr.Name, cr.Namespace), + fmt.Sprintf("%s-mysql-primary", cr.Name), + fmt.Sprintf("%s-mysql-primary.%s", cr.Name, cr.Namespace), + fmt.Sprintf("%s-mysql-primary.%s.svc", cr.Name, cr.Namespace), fmt.Sprintf("*.%s-orchestrator", cr.Name), fmt.Sprintf("*.%s-orchestrator.%s", cr.Name, cr.Namespace), fmt.Sprintf("*.%s-orchestrator.%s.svc", cr.Name, cr.Namespace), diff --git a/pkg/tls/tls_test.go b/pkg/tls/tls_test.go new file mode 100644 index 000000000..caa6ed6a8 --- /dev/null +++ b/pkg/tls/tls_test.go @@ -0,0 +1,78 @@ +package tls + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + apiv1 "github.com/percona/percona-server-mysql-operator/api/v1" +) + +func TestDNSNames(t *testing.T) { + tests := map[string]struct { + cr *apiv1.PerconaServerMySQL + expected map[string]struct{} + }{ + "no extra SANs": { + cr: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + }, + expected: map[string]struct{}{ + "*.cluster1-mysql": 
{}, + "*.cluster1-mysql.default": {}, + "*.cluster1-mysql.default.svc": {}, + "cluster1-mysql-primary": {}, + "cluster1-mysql-primary.default": {}, + "cluster1-mysql-primary.default.svc": {}, + "*.cluster1-orchestrator": {}, + "*.cluster1-orchestrator.default": {}, + "*.cluster1-orchestrator.default.svc": {}, + "*.cluster1-router": {}, + "*.cluster1-router.default": {}, + "*.cluster1-router.default.svc": {}, + }, + }, + "with extra SANs": { + cr: &apiv1.PerconaServerMySQL{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: apiv1.PerconaServerMySQLSpec{ + TLS: &apiv1.TLSSpec{ + SANs: []string{"extra.example.com"}, + }, + }, + }, + expected: map[string]struct{}{ + "*.cluster1-mysql": {}, + "*.cluster1-mysql.default": {}, + "*.cluster1-mysql.default.svc": {}, + "cluster1-mysql-primary": {}, + "cluster1-mysql-primary.default": {}, + "cluster1-mysql-primary.default.svc": {}, + "*.cluster1-orchestrator": {}, + "*.cluster1-orchestrator.default": {}, + "*.cluster1-orchestrator.default.svc": {}, + "*.cluster1-router": {}, + "*.cluster1-router.default": {}, + "*.cluster1-router.default.svc": {}, + "extra.example.com": {}, + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + actual := make(map[string]struct{}) + for _, n := range DNSNames(tt.cr) { + actual[n] = struct{}{} + } + assert.Equal(t, tt.expected, actual) + }) + } +}
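Since everything returned by DNSNames ends up in the certificate SANs, the effect of the tls.go change is easiest to see by dumping the list for a cluster; a minimal sketch, with placeholder cluster name and namespace:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/percona/percona-server-mysql-operator/api/v1"
	"github.com/percona/percona-server-mysql-operator/pkg/tls"
)

func main() {
	cr := &apiv1.PerconaServerMySQL{
		ObjectMeta: metav1.ObjectMeta{Name: "ps-cluster1", Namespace: "tls-1"},
	}
	// Prints the wildcard per-pod entries plus the ps-cluster1-mysql-primary
	// service names added by this change.
	for _, san := range tls.DNSNames(cr) {
		fmt.Println(san)
	}
}
```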