diff --git a/Makefile b/Makefile index 2f15b07b6e..aa6509f4cd 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,6 @@ check-kuttl: ## example command: make check-kuttl KUTTL_TEST=' --config testing/kuttl/kuttl-test.yaml .PHONY: generate-kuttl -generate-kuttl: export KUTTL_PGCLUSTER_API_VERSION ?= v1 generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 16 generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 17 generate-kuttl: export KUTTL_PG_VERSION ?= 16 @@ -212,7 +211,6 @@ generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated bash -ceu ' \ render() { envsubst '"'"' \ - $$KUTTL_PGCLUSTER_API_VERSION \ $$KUTTL_PG_UPGRADE_FROM_VERSION $$KUTTL_PG_UPGRADE_TO_VERSION \ $$KUTTL_PG_VERSION $$KUTTL_POSTGIS_VERSION $$KUTTL_PSQL_IMAGE \ $$KUTTL_TEST_DELETE_NAMESPACE'"'"'; }; \ diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index aa0e626e25..48a6a25d43 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -9,6 +9,7 @@ import ( "crypto/tls" "errors" "fmt" + "net/http" "os" "os/signal" "strconv" @@ -23,6 +24,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/bridge/crunchybridgecluster" "github.com/crunchydata/postgres-operator/internal/controller/pgupgrade" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -238,10 +241,24 @@ func main() { manager := need(runtime.NewManager(config, options)) must(manager.Add(k8s)) + bridgeURL := os.Getenv("PGO_BRIDGE_URL") + bridgeClient := func() *bridge.Client { + client := bridge.NewClient(bridgeURL, versionString) + client.Transport = otelTransportWrapper()(http.DefaultTransport) + return client + } + // add all PostgreSQL Operator controllers to 
the runtime manager must(pgupgrade.ManagedReconciler(manager)) must(postgrescluster.ManagedReconciler(manager)) must(standalone_pgadmin.ManagedReconciler(manager)) + must(crunchybridgecluster.ManagedReconciler(manager, func() bridge.ClientInterface { + return bridgeClient() + })) + + if features.Enabled(feature.BridgeIdentifiers) { + must(bridge.ManagedInstallationReconciler(manager, bridgeClient)) + } // Enable health probes must(manager.AddHealthzCheck("health", healthz.Ping)) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml new file mode 100644 index 0000000000..5313e686e1 --- /dev/null +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -0,0 +1,285 @@ +--- +# controller-gen.kubebuilder.io/version: v0.18.0 +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crunchybridgeclusters.postgres-operator.crunchydata.com +spec: + group: postgres-operator.crunchydata.com + names: + kind: CrunchyBridgeCluster + listKind: CrunchyBridgeClusterList + plural: crunchybridgeclusters + singular: crunchybridgecluster + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CrunchyBridgeCluster is the Schema for the crunchybridgeclusters + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster + to be managed by Crunchy Data Bridge + properties: + clusterName: + description: The name of the cluster + maxLength: 50 + minLength: 5 + pattern: ^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$ + type: string + isHa: + description: |- + Whether the cluster is high availability, + meaning that it has a secondary it can fail over to quickly + in case the primary becomes unavailable. + type: boolean + isProtected: + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed + type: boolean + majorVersion: + description: |- + The ID of the cluster's major Postgres version. + Currently Bridge offers 13-17 + maximum: 17 + minimum: 13 + type: integer + metadata: + description: Metadata contains metadata for custom resources + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + plan: + description: The ID of the cluster's plan. Determines instance, CPU, + and memory. + type: string + provider: + description: |- + The cloud provider where the cluster is located. + Currently Bridge offers aws, azure, and gcp only + enum: + - aws + - azure + - gcp + maxLength: 5 + type: string + x-kubernetes-validations: + - message: immutable + rule: self == oldSelf + region: + description: The provider region where the cluster is located. + type: string + x-kubernetes-validations: + - message: immutable + rule: self == oldSelf + roles: + description: |- + Roles for which to create Secrets that contain their credentials which + are retrieved from the Bridge API. An empty list creates no role secrets. 
+ Removing a role from this list does NOT drop the role nor revoke their + access, but it will delete that role's secret from the kube cluster. + items: + properties: + name: + description: |- + Name of the role within Crunchy Bridge. + More info: https://docs.crunchybridge.com/concepts/users + type: string + secretName: + description: The name of the Secret that will hold the role + credentials. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + - secretName + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + secret: + description: The name of the secret containing the API key and team + id + type: string + storage: + description: |- + The amount of storage available to the cluster in gigabytes. + The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + If the amount is given in Gi, we round to the nearest G value. + The minimum value allowed by Bridge is 10 GB. + The maximum value allowed by Bridge is 65535 GB. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - clusterName + - isHa + - majorVersion + - plan + - provider + - region + - secret + - storage + type: object + status: + description: CrunchyBridgeClusterStatus defines the observed state of + CrunchyBridgeCluster + properties: + conditions: + description: conditions represent the observations of postgres cluster's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + maxLength: 7 + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + host: + description: The Hostname of the postgres cluster in Bridge, provided + by Bridge API and null until then. + type: string + id: + description: The ID of the postgres cluster in Bridge, provided by + Bridge API and null until then. 
+ type: string + isHa: + description: |- + Whether the cluster is high availability, meaning that it has a secondary it can fail + over to quickly in case the primary becomes unavailable. + type: boolean + isProtected: + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed + type: boolean + majorVersion: + description: The cluster's major Postgres version. + type: integer + name: + description: The name of the cluster in Bridge. + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + on which the status was based. + format: int64 + minimum: 0 + type: integer + ongoingUpgrade: + description: The cluster upgrade as represented by Bridge + items: + properties: + flavor: + type: string + starting_from: + type: string + state: + type: string + required: + - flavor + - starting_from + - state + type: object + type: array + plan: + description: The ID of the cluster's plan. Determines instance, CPU, + and memory. + type: string + responses: + description: Most recent, raw responses from Bridge API + type: object + x-kubernetes-preserve-unknown-fields: true + state: + description: State of cluster in Bridge. + type: string + storage: + description: The amount of storage available to the cluster. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index ac24fd6055..85b7cbdf29 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,6 +1,7 @@ kind: Kustomization resources: +- bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml - bases/postgres-operator.crunchydata.com_postgresclusters.yaml - bases/postgres-operator.crunchydata.com_pgupgrades.yaml - bases/postgres-operator.crunchydata.com_pgadmins.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 416eac3ada..aa19cdacbf 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -111,6 +111,24 @@ rules: - list - patch - watch +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - crunchybridgeclusters + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - postgres-operator.crunchydata.com + resources: + - crunchybridgeclusters/finalizers + - crunchybridgeclusters/status + verbs: + - patch + - update - apiGroups: - postgres-operator.crunchydata.com resources: diff --git a/internal/bridge/client.go b/internal/bridge/client.go new file mode 100644 index 0000000000..272bf67b07 --- /dev/null +++ b/internal/bridge/client.go @@ -0,0 +1,816 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const defaultAPI = "https://api.crunchybridge.com" + +var errAuthentication = errors.New("authentication failed") + +type ClientInterface interface { + ListClusters(ctx context.Context, apiKey, teamId string) ([]*ClusterApiResource, error) + CreateCluster(ctx context.Context, apiKey string, clusterRequestPayload *PostClustersRequestPayload) (*ClusterApiResource, error) + DeleteCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, bool, error) + GetCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, error) + GetClusterStatus(ctx context.Context, apiKey, id string) (*ClusterStatusApiResource, error) + GetClusterUpgrade(ctx context.Context, apiKey, id string) (*ClusterUpgradeApiResource, error) + UpgradeCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *PostClustersUpgradeRequestPayload) (*ClusterUpgradeApiResource, error) + UpgradeClusterHA(ctx context.Context, apiKey, id, action string) (*ClusterUpgradeApiResource, error) + UpdateCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *PatchClustersRequestPayload) (*ClusterApiResource, error) + GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*ClusterRoleApiResource, error) +} + +type Client struct { + http.Client + wait.Backoff + + BaseURL url.URL + Version string +} + +// BRIDGE API RESPONSE OBJECTS + +// ClusterApiResource is used to hold cluster information received in Bridge API response. 
+type ClusterApiResource struct { + ID string `json:"id,omitempty"` + ClusterGroup *ClusterGroupApiResource `json:"cluster_group,omitempty"` + PrimaryClusterID string `json:"cluster_id,omitempty"` + CPU int64 `json:"cpu,omitempty"` + CreatedAt string `json:"created_at,omitempty"` + DiskUsage *ClusterDiskUsageApiResource `json:"disk_usage,omitempty"` + Environment string `json:"environment,omitempty"` + Host string `json:"host,omitempty"` + IsHA *bool `json:"is_ha,omitempty"` + IsProtected *bool `json:"is_protected,omitempty"` + IsSuspended *bool `json:"is_suspended,omitempty"` + Keychain string `json:"keychain_id,omitempty"` + MaintenanceWindowStart int64 `json:"maintenance_window_start,omitempty"` + MajorVersion int `json:"major_version,omitempty"` + Memory float64 `json:"memory,omitempty"` + ClusterName string `json:"name,omitempty"` + Network string `json:"network_id,omitempty"` + Parent string `json:"parent_id,omitempty"` + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitzero"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Replicas []*ClusterApiResource `json:"replicas,omitempty"` + Storage int64 `json:"storage,omitempty"` + Tailscale *bool `json:"tailscale_active,omitempty"` + Team string `json:"team_id,omitempty"` + LastUpdate string `json:"updated_at,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.ClusterName = c.ClusterName + cluster.Status.Host = c.Host + cluster.Status.ID = c.ID + cluster.Status.IsHA = c.IsHA + cluster.Status.IsProtected = c.IsProtected + cluster.Status.MajorVersion = c.MajorVersion + cluster.Status.Plan = c.Plan + cluster.Status.Storage = FromGibibytes(c.Storage) + cluster.Status.Responses.Cluster = c.ResponsePayload +} + +type ClusterList struct { + Clusters []*ClusterApiResource `json:"clusters"` 
+} + +// ClusterDiskUsageApiResource hold information on disk usage for a particular cluster. +type ClusterDiskUsageApiResource struct { + DiskAvailableMB int64 `json:"disk_available_mb,omitempty"` + DiskTotalSizeMB int64 `json:"disk_total_size_mb,omitempty"` + DiskUsedMB int64 `json:"disk_used_mb,omitempty"` +} + +// ClusterGroupApiResource holds information on a ClusterGroup +type ClusterGroupApiResource struct { + ID string `json:"id,omitempty"` + Clusters []*ClusterApiResource `json:"clusters,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` + Network string `json:"network_id,omitempty"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Team string `json:"team_id,omitempty"` +} + +type ClusterStatusApiResource struct { + DiskUsage *ClusterDiskUsageApiResource `json:"disk_usage,omitempty"` + OldestBackup string `json:"oldest_backup_at,omitempty"` + OngoingUpgrade *ClusterUpgradeApiResource `json:"ongoing_upgrade,omitempty"` + State string `json:"state,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterStatusApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.State = c.State + cluster.Status.Responses.Status = c.ResponsePayload +} + +type ClusterUpgradeApiResource struct { + ClusterID string `json:"cluster_id,omitempty"` + Operations []*v1beta1.UpgradeOperation `json:"operations,omitempty"` + Team string `json:"team_id,omitempty"` + ResponsePayload v1beta1.SchemalessObject `json:""` +} + +func (c *ClusterUpgradeApiResource) AddDataToClusterStatus(cluster *v1beta1.CrunchyBridgeCluster) { + cluster.Status.OngoingUpgrade = c.Operations + cluster.Status.Responses.Upgrade = c.ResponsePayload +} + +type ClusterUpgradeOperationApiResource struct { + Flavor string `json:"flavor,omitempty"` + StartingFrom string `json:"starting_from,omitempty"` + State string `json:"state,omitempty"` +} + +// 
ClusterRoleApiResource is used for retrieving details on ClusterRole from the Bridge API +type ClusterRoleApiResource struct { + AccountEmail string `json:"account_email"` + AccountId string `json:"account_id"` + ClusterId string `json:"cluster_id"` + Flavor string `json:"flavor"` + Name string `json:"name"` + Password string `json:"password"` + Team string `json:"team_id"` + URI string `json:"uri"` +} + +// ClusterRoleList holds a slice of ClusterRoleApiResource +type ClusterRoleList struct { + Roles []*ClusterRoleApiResource `json:"roles"` +} + +// BRIDGE API REQUEST PAYLOADS + +// PatchClustersRequestPayload is used for updating various properties of an existing cluster. +type PatchClustersRequestPayload struct { + ClusterGroup string `json:"cluster_group_id,omitempty"` + // DashboardSettings *ClusterDashboardSettings `json:"dashboard_settings,omitempty"` + // TODO (dsessler7): Find docs for DashboardSettings and create appropriate struct + Environment string `json:"environment,omitempty"` + IsProtected *bool `json:"is_protected,omitempty"` + MaintenanceWindowStart int64 `json:"maintenance_window_start,omitempty"` + Name string `json:"name,omitempty"` +} + +// PostClustersRequestPayload is used for creating a new cluster. 
+type PostClustersRequestPayload struct { + Name string `json:"name"` + Plan string `json:"plan_id"` + Team string `json:"team_id"` + ClusterGroup string `json:"cluster_group_id,omitempty"` + Environment string `json:"environment,omitempty"` + IsHA bool `json:"is_ha,omitempty"` + Keychain string `json:"keychain_id,omitempty"` + Network string `json:"network_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitzero"` + Provider string `json:"provider_id,omitempty"` + Region string `json:"region_id,omitempty"` + Storage int64 `json:"storage,omitempty"` +} + +// PostClustersUpgradeRequestPayload is used for creating a new cluster upgrade which may include +// changing its plan, upgrading its major version, or increasing its storage size. +type PostClustersUpgradeRequestPayload struct { + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitzero"` + UpgradeStartTime string `json:"starting_from,omitempty"` + Storage int64 `json:"storage,omitempty"` +} + +// PutClustersUpgradeRequestPayload is used for updating an ongoing or scheduled upgrade. +// TODO: Implement the ability to update an upgrade (this isn't currently being used) +type PutClustersUpgradeRequestPayload struct { + Plan string `json:"plan_id,omitempty"` + PostgresVersion intstr.IntOrString `json:"postgres_version_id,omitzero"` + UpgradeStartTime string `json:"starting_from,omitempty"` + Storage int64 `json:"storage,omitempty"` + UseMaintenanceWindow *bool `json:"use_cluster_maintenance_window,omitempty"` +} + +// BRIDGE CLIENT FUNCTIONS AND METHODS + +// NewClient creates a Client with backoff settings that amount to +// ~10 attempts over ~2 minutes. A default is used when apiURL is not +// an acceptable URL. +func NewClient(apiURL, version string) *Client { + // Use the default URL when the argument (1) does not parse at all, or + // (2) has the wrong scheme, or (3) has no hostname. 
+ base, err := url.Parse(apiURL) + if err != nil || (base.Scheme != "http" && base.Scheme != "https") || base.Hostname() == "" { + base, _ = url.Parse(defaultAPI) + } + + return &Client{ + Backoff: wait.Backoff{ + Duration: time.Second, + Factor: 1.6, + Jitter: 0.2, + Steps: 10, + Cap: time.Minute, + }, + BaseURL: *base, + Version: version, + } +} + +// doWithBackoff performs HTTP requests until: +// 1. ctx is cancelled, +// 2. the server returns a status code below 500, "Internal Server Error", or +// 3. the backoff is exhausted. +// +// Be sure to close the [http.Response] Body when the returned error is nil. +// See [http.Client.Do] for more details. +func (c *Client) doWithBackoff( + ctx context.Context, method, path string, params url.Values, body []byte, headers http.Header, +) ( + *http.Response, error, +) { + var response *http.Response + + // Prepare a copy of the passed in headers so we can manipulate them. + if headers = headers.Clone(); headers == nil { + headers = make(http.Header) + } + + // Send a value that identifies this PATCH or POST request so it is safe to + // retry when the server does not respond. + // - https://docs.crunchybridge.com/api-concepts/idempotency/ + if method == http.MethodPatch || method == http.MethodPost { + headers.Set("Idempotency-Key", string(uuid.NewUUID())) + } + + headers.Set("User-Agent", "PGO/"+c.Version) + url := c.BaseURL.JoinPath(path) + if params != nil { + url.RawQuery = params.Encode() + } + urlString := url.String() + + err := wait.ExponentialBackoff(c.Backoff, func() (bool, error) { + // NOTE: The [net/http] package treats an empty [bytes.Reader] the same as nil. + request, err := http.NewRequestWithContext(ctx, method, urlString, bytes.NewReader(body)) + + if err == nil { + request.Header = headers.Clone() + + //nolint:bodyclose // This response is returned to the caller. 
+ response, err = c.Do(request) + } + + // An error indicates there was no response from the server, and the + // request may not have finished. The "Idempotency-Key" header above + // makes it safe to retry in this case. + finished := err == nil + + // When the request finishes with a server error, discard the body and retry. + // - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes + if finished && response.StatusCode >= 500 { + _ = response.Body.Close() + finished = false + } + + // Stop when the context is cancelled. + return finished, ctx.Err() + }) + + // Discard the response body when there is a timeout from backoff. + if response != nil && err != nil { + _ = response.Body.Close() + } + + // Return the last response, if any. + // Return the cancellation or timeout from backoff, if any. + return response, err +} + +// doWithRetry performs HTTP requests until: +// 1. ctx is cancelled, +// 2. the server returns a status code below 500, "Internal Server Error", +// that is not 429, "Too many requests", or +// 3. the backoff is exhausted. +// +// Be sure to close the [http.Response] Body when the returned error is nil. +// See [http.Client.Do] for more details. +func (c *Client) doWithRetry( + ctx context.Context, method, path string, params url.Values, body []byte, headers http.Header, +) ( + *http.Response, error, +) { + response, err := c.doWithBackoff(ctx, method, path, params, body, headers) + + // Retry the request when the server responds with "Too many requests". + // - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes + // - https://docs.crunchybridge.com/api-concepts/getting-started/#rate-limiting + for err == nil && response.StatusCode == http.StatusTooManyRequests { + seconds, _ := strconv.Atoi(response.Header.Get("Retry-After")) + + // Only retry when the response indicates how long to wait. + if seconds <= 0 { + break + } + + // Discard the "Too many requests" response body, and retry. 
+ _ = response.Body.Close() + + // Create a channel that sends after the delay indicated by the API. + timer := time.NewTimer(time.Duration(seconds) * time.Second) + defer timer.Stop() + + // Wait for the delay or context cancellation, whichever comes first. + select { + case <-timer.C: + // Try the request again. Check it in the loop condition. + response, err = c.doWithBackoff(ctx, method, path, params, body, headers) + timer.Stop() + + case <-ctx.Done(): + // Exit the loop and return the context cancellation. + err = ctx.Err() + } + } + + return response, err +} + +func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthObject, error) { + var result AuthObject + + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/auth-objects", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + authn.Secret}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + // 401, Unauthorized + case response.StatusCode == http.StatusUnauthorized: + err = fmt.Errorf("%w: %s", errAuthentication, body) + + default: + //nolint:err113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { + var result Installation + + response, err := c.doWithRetry(ctx, "POST", "/vendor/operator/installations", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// CRUNCHYBRIDGECLUSTER CRUD METHODS + +// ListClusters makes a GET request to the "/clusters" endpoint to retrieve a list of all clusters +// in Bridge that are owned by the team specified by the provided team id. +func (c *Client) ListClusters(ctx context.Context, apiKey, teamId string) ([]*ClusterApiResource, error) { + result := &ClusterList{} + + params := url.Values{} + if len(teamId) > 0 { + params.Add("team_id", teamId) + } + response, err := c.doWithRetry(ctx, "GET", "/clusters", params, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result.Clusters, err +} + +// CreateCluster makes a POST request to the "/clusters" endpoint thereby creating a cluster +// in Bridge with the settings specified in the request payload. +func (c *Client) CreateCluster( + ctx context.Context, apiKey string, clusterRequestPayload *PostClustersRequestPayload, +) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "POST", "/clusters", nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// DeleteCluster calls the delete endpoint, returning +// +// the cluster, +// whether the cluster is deleted already, +// and an error. 
+func (c *Client) DeleteCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, bool, error) { + result := &ClusterApiResource{} + var deletedAlready bool + + response, err := c.doWithRetry(ctx, "DELETE", "/clusters/"+id, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + // Already deleted + // Bridge API returns 410 Gone for previously deleted clusters + // --https://docs.crunchybridge.com/api-concepts/idempotency#delete-semantics + // But also, if we can't find it... + // Maybe if no ID we return already deleted? + case response.StatusCode == http.StatusGone: + fallthrough + case response.StatusCode == http.StatusNotFound: + deletedAlready = true + err = nil + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, deletedAlready, err +} + +// GetCluster makes a GET request to the "/clusters/" endpoint, thereby retrieving details +// for a given cluster in Bridge specified by the provided cluster id. 
+func (c *Client) GetCluster(ctx context.Context, apiKey, id string) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterStatus makes a GET request to the "/clusters//status" endpoint, thereby retrieving details +// for a given cluster's status in Bridge, specified by the provided cluster id. +func (c *Client) GetClusterStatus(ctx context.Context, apiKey, id string) (*ClusterStatusApiResource, error) { + result := &ClusterStatusApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/status", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterUpgrade makes a GET request to the "/clusters//upgrade" endpoint, thereby retrieving details +// for a given cluster's upgrade status in Bridge, specified by the provided cluster id. +func (c *Client) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/upgrade", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpgradeCluster makes a POST request to the "/clusters//upgrade" endpoint, thereby attempting +// to upgrade certain settings for a given cluster in Bridge. 
+func (c *Client) UpgradeCluster( + ctx context.Context, apiKey, id string, clusterRequestPayload *PostClustersUpgradeRequestPayload, +) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + clusterbyte, err := json.Marshal(clusterRequestPayload) + if err != nil { + return result, err + } + + response, err := c.doWithRetry(ctx, "POST", "/clusters/"+id+"/upgrade", nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpgradeClusterHA makes a PUT request to the "/clusters//actions/" endpoint, +// where is either "enable-ha" or "disable-ha", thereby attempting to change the +// HA setting for a given cluster in Bridge. 
+func (c *Client) UpgradeClusterHA(ctx context.Context, apiKey, id, action string) (*ClusterUpgradeApiResource, error) { + result := &ClusterUpgradeApiResource{} + + response, err := c.doWithRetry(ctx, "PUT", "/clusters/"+id+"/actions/"+action, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// UpdateCluster makes a PATCH request to the "/clusters/" endpoint, thereby attempting to +// update certain settings for a given cluster in Bridge. 
+func (c *Client) UpdateCluster( + ctx context.Context, apiKey, id string, clusterRequestPayload *PatchClustersRequestPayload, +) (*ClusterApiResource, error) { + result := &ClusterApiResource{} + + clusterbyte, _ := json.Marshal(clusterRequestPayload) + + response, err := c.doWithRetry(ctx, "PATCH", "/clusters/"+id, nil, clusterbyte, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + return result, err + } + if err = json.Unmarshal(body, &result.ResponsePayload); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// GetClusterRole sends a GET request to the "/clusters//roles/" endpoint, thereby retrieving +// Role information for a specific role from a specific cluster in Bridge. +func (c *Client) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*ClusterRoleApiResource, error) { + result := &ClusterRoleApiResource{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+clusterId+"/roles/"+roleName, nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. 
+ err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result, err +} + +// ListClusterRoles sends a GET request to the "/clusters//roles" endpoint thereby retrieving +// a list of all cluster roles for a specific cluster in Bridge. +func (c *Client) ListClusterRoles(ctx context.Context, apiKey, id string) ([]*ClusterRoleApiResource, error) { + result := ClusterRoleList{} + + response, err := c.doWithRetry(ctx, "GET", "/clusters/"+id+"/roles", nil, nil, http.Header{ + "Accept": []string{"application/json"}, + "Authorization": []string{"Bearer " + apiKey}, + }) + + if err == nil { + defer response.Body.Close() + body, _ := io.ReadAll(response.Body) + + switch { + // 2xx, Successful + case response.StatusCode >= 200 && response.StatusCode < 300: + if err = json.Unmarshal(body, &result); err != nil { + err = fmt.Errorf("%w: %s", err, body) + } + + default: + //nolint:err113 // This is intentionally dynamic. + err = fmt.Errorf("%v: %s", response.Status, body) + } + } + + return result.Roles, err +} diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go new file mode 100644 index 0000000000..f1aa1c8ddd --- /dev/null +++ b/internal/bridge/client_test.go @@ -0,0 +1,1355 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + gocmp "github.com/google/go-cmp/cmp" + gocmpopts "github.com/google/go-cmp/cmp/cmpopts" + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/initialize" +) + +var testApiKey = "9012" +var testTeamId = "5678" + +// TestClientBackoff logs the backoff timing chosen by [NewClient] for use +// with `go test -v`. 
+func TestClientBackoff(t *testing.T) { + client := NewClient("", "") + var total time.Duration + + for i := 1; i <= 50 && client.Steps > 0; i++ { + step := client.Step() + total += step + + t.Logf("%02d:%20v%20v", i, step, total) + } +} + +func TestClientURL(t *testing.T) { + assert.Equal(t, defaultAPI, NewClient("", "").BaseURL.String(), + "expected the API constant to parse correctly") + + assert.Equal(t, defaultAPI, NewClient("/path", "").BaseURL.String()) + assert.Equal(t, defaultAPI, NewClient("http://:9999", "").BaseURL.String()) + assert.Equal(t, defaultAPI, NewClient("postgres://localhost", "").BaseURL.String()) + assert.Equal(t, defaultAPI, NewClient("postgres://localhost:5432", "").BaseURL.String()) + + assert.Equal(t, + "http://localhost:12345", NewClient("http://localhost:12345", "").BaseURL.String()) +} + +func TestClientDoWithBackoff(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var bodies []string + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`some-response`)) + })) + t.Cleanup(server.Close) + + // Client with one attempt, i.e. no backoff. + client := NewClient(server.URL, "xyz") + client.Steps = 1 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + params := url.Values{} + params.Add("foo", "bar") + response, err := client.doWithBackoff(ctx, + "ANY", "/some/path", params, []byte(`the-body`), + http.Header{"Some": []string{"header"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + t.Cleanup(func() { _ = response.Body.Close() }) + + // Arguments became Request fields, including the client version. 
+ assert.Equal(t, len(requests), 1) + assert.Equal(t, bodies[0], "the-body") + assert.Equal(t, requests[0].Method, "ANY") + assert.Equal(t, requests[0].URL.String(), "/some/path?foo=bar") + assert.DeepEqual(t, requests[0].Header.Values("Some"), []string{"header"}) + assert.DeepEqual(t, requests[0].Header.Values("User-Agent"), []string{"PGO/xyz"}) + + body, _ := io.ReadAll(response.Body) + assert.Equal(t, string(body), "some-response") + }) + + t.Run("Idempotency", func(t *testing.T) { + var bodies []string + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + + switch len(requests) { + case 1, 2: + w.WriteHeader(http.StatusBadGateway) + default: + w.WriteHeader(http.StatusNotAcceptable) + } + })) + t.Cleanup(server.Close) + + // Client with brief backoff. + client := NewClient(server.URL, "") + client.Duration = time.Millisecond + client.Steps = 5 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + response, err := client.doWithBackoff(ctx, + "POST", "/anything", nil, []byte(`any-body`), + http.Header{"Any": []string{"thing"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + assert.NilError(t, response.Body.Close()) + + assert.Equal(t, len(requests), 3, "expected multiple requests") + + // Headers include an Idempotency-Key. + assert.Equal(t, bodies[0], "any-body") + assert.Equal(t, requests[0].Header.Get("Any"), "thing") + assert.Assert(t, requests[0].Header.Get("Idempotency-Key") != "") + + // Requests are identical, including the Idempotency-Key. 
+ assert.Equal(t, bodies[0], bodies[1]) + assert.DeepEqual(t, requests[0], requests[1], + gocmpopts.IgnoreFields(http.Request{}, "Body"), + gocmpopts.IgnoreUnexported(http.Request{})) + + assert.Equal(t, bodies[1], bodies[2]) + assert.DeepEqual(t, requests[1], requests[2], + gocmpopts.IgnoreFields(http.Request{}, "Body"), + gocmpopts.IgnoreUnexported(http.Request{})) + + // Another, identical request gets a new Idempotency-Key. + response, err = client.doWithBackoff(ctx, + "POST", "/anything", nil, []byte(`any-body`), + http.Header{"Any": []string{"thing"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + assert.NilError(t, response.Body.Close()) + + prior := requests[0].Header.Get("Idempotency-Key") + assert.Assert(t, len(requests) > 3) + assert.Assert(t, requests[3].Header.Get("Idempotency-Key") != "") + assert.Assert(t, requests[3].Header.Get("Idempotency-Key") != prior, + "expected a new idempotency key") + }) + + t.Run("Backoff", func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(server.Close) + + // Client with brief backoff. + client := NewClient(server.URL, "") + client.Duration = time.Millisecond + client.Steps = 5 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose + assert.ErrorContains(t, err, "timed out waiting") + assert.Assert(t, requests > 0, "expected multiple requests") + }) + + t.Run("Cancellation", func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.WriteHeader(http.StatusServiceUnavailable) + })) + t.Cleanup(server.Close) + + // Client with lots of brief backoff. 
+ client := NewClient(server.URL, "") + client.Duration = time.Millisecond + client.Steps = 100 + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + t.Cleanup(cancel) + + _, err := client.doWithBackoff(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Assert(t, requests > 0, "expected multiple requests") + }) +} + +func TestClientDoWithRetry(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var bodies []string + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`some-response`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "xyz") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + params := url.Values{} + params.Add("foo", "bar") + response, err := client.doWithRetry(ctx, + "ANY", "/some/path", params, []byte(`the-body`), + http.Header{"Some": []string{"header"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + t.Cleanup(func() { _ = response.Body.Close() }) + + // Arguments became Request fields, including the client version. 
+ assert.Equal(t, len(requests), 1) + assert.Equal(t, bodies[0], "the-body") + assert.Equal(t, requests[0].Method, "ANY") + assert.Equal(t, requests[0].URL.String(), "/some/path?foo=bar") + assert.DeepEqual(t, requests[0].Header.Values("Some"), []string{"header"}) + assert.DeepEqual(t, requests[0].Header.Values("User-Agent"), []string{"PGO/xyz"}) + + body, _ := io.ReadAll(response.Body) + assert.Equal(t, string(body), "some-response") + }) + + t.Run("Throttling", func(t *testing.T) { + var bodies []string + var requests []http.Request + var times []time.Time + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + bodies = append(bodies, string(body)) + requests = append(requests, *r) + times = append(times, time.Now()) + + switch len(requests) { + case 1: + w.Header().Set("Retry-After", "1") + w.WriteHeader(http.StatusTooManyRequests) + default: + w.WriteHeader(http.StatusOK) + } + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + response, err := client.doWithRetry(ctx, + "POST", "/anything", nil, []byte(`any-body`), + http.Header{"Any": []string{"thing"}}) + + assert.NilError(t, err) + assert.Assert(t, response != nil) + assert.NilError(t, response.Body.Close()) + + assert.Equal(t, len(requests), 2, "expected multiple requests") + + // Headers include an Idempotency-Key. + assert.Equal(t, bodies[0], "any-body") + assert.Equal(t, requests[0].Header.Get("Any"), "thing") + assert.Assert(t, requests[0].Header.Get("Idempotency-Key") != "") + + // Requests are identical, except for the Idempotency-Key. 
+ assert.Equal(t, bodies[0], bodies[1]) + assert.DeepEqual(t, requests[0], requests[1], + gocmpopts.IgnoreFields(http.Request{}, "Body"), + gocmpopts.IgnoreUnexported(http.Request{}), + gocmp.FilterPath( + func(p gocmp.Path) bool { return p.String() == "Header" }, + gocmpopts.IgnoreMapEntries( + func(k string, v []string) bool { return k == "Idempotency-Key" }, + ), + ), + ) + + prior := requests[0].Header.Get("Idempotency-Key") + assert.Assert(t, requests[1].Header.Get("Idempotency-Key") != "") + assert.Assert(t, requests[1].Header.Get("Idempotency-Key") != prior, + "expected a new idempotency key") + + // Requests are delayed according the server's response. + // TODO: Mock the clock for faster tests. + assert.Assert(t, times[0].Add(time.Second).Before(times[1]), + "expected the second request over 1sec after the first") + }) + + t.Run("Cancellation", func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.Header().Set("Retry-After", "5") + w.WriteHeader(http.StatusTooManyRequests) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + t.Cleanup(cancel) + + start := time.Now() + _, err := client.doWithRetry(ctx, "POST", "/any", nil, nil, nil) //nolint:bodyclose + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Assert(t, time.Since(start) < time.Second) + assert.Equal(t, requests, 1, "expected one request") + }) + + t.Run("UnexpectedResponse", func(t *testing.T) { + for _, tt := range []struct { + Name string + Send func(http.ResponseWriter) + Expect func(testing.TB, http.Response) + }{ + { + Name: "NoHeader", + Send: func(w http.ResponseWriter) { + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.StatusCode, 
http.StatusTooManyRequests) + }, + }, + { + Name: "ZeroHeader", + Send: func(w http.ResponseWriter) { + w.Header().Set("Retry-After", "0") + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.Header.Get("Retry-After"), "0") + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + { + Name: "NegativeHeader", + Send: func(w http.ResponseWriter) { + w.Header().Set("Retry-After", "-10") + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.Header.Get("Retry-After"), "-10") + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + { + Name: "TextHeader", + Send: func(w http.ResponseWriter) { + w.Header().Set("Retry-After", "bogus") + w.WriteHeader(http.StatusTooManyRequests) + }, + Expect: func(t testing.TB, r http.Response) { + t.Helper() + assert.Equal(t, r.Header.Get("Retry-After"), "bogus") + assert.Equal(t, r.StatusCode, http.StatusTooManyRequests) + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + requests := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + tt.Send(w) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + response, err := client.doWithRetry(ctx, "POST", "/any", nil, nil, nil) + assert.NilError(t, err) + assert.Assert(t, response != nil) + t.Cleanup(func() { _ = response.Body.Close() }) + + tt.Expect(t, *response) + + assert.Equal(t, requests, 1, "expected no retries") + }) + } + }) +} + +func TestClientCreateAuthObject(t *testing.T) { + t.Run("Arguments", func(t *testing.T) { + var requests []http.Request + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + assert.Equal(t, len(body), 0) + requests = append(requests, *r) + 
})) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + ctx := context.Background() + _, _ = client.CreateAuthObject(ctx, AuthObject{Secret: "sesame"}) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer sesame") + }) + + t.Run("Unauthorized", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`some info`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "authentication") + assert.ErrorContains(t, err, "some info") + assert.ErrorIs(t, err, errAuthentication) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`some message`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "404 Not Found") + assert.ErrorContains(t, err, "some message") + }) + + t.Run("NoResponseBody", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "unexpected end") + assert.ErrorContains(t, err, "JSON") + }) + + t.Run("ResponseNotJSON", func(t *testing.T) { + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`asdf`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateAuthObject(context.Background(), AuthObject{}) + assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "asdf") + }) +} + +func TestClientCreateInstallation(t *testing.T) { + t.Run("ErrorResponse", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`any content, any format`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateInstallation(context.Background()) + assert.ErrorContains(t, err, "404 Not Found") + assert.ErrorContains(t, err, "any content, any format") + }) + + t.Run("NoResponseBody", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateInstallation(context.Background()) + assert.ErrorContains(t, err, "unexpected end") + assert.ErrorContains(t, err, "JSON") + }) + + t.Run("ResponseNotJSON", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`asdf`)) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err := client.CreateInstallation(context.Background()) + assert.ErrorContains(t, err, "invalid") + assert.ErrorContains(t, err, "asdf") + }) +} + +func TestListClusters(t 
*testing.T) { + responsePayload := &ClusterList{ + Clusters: []*ClusterApiResource{}, + } + firstClusterApiResource := &ClusterApiResource{ + ID: "1234", + } + secondClusterApiResource := &ClusterApiResource{ + ID: "2345", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "GET", "Expected GET method") + assert.Equal(t, r.URL.Path, "/clusters", "Expected path to be '/clusters'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, r.URL.Query()["team_id"][0], testTeamId, "Expected query params to contain team id.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + }) + + t.Run("OkResponseNoClusters", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 0) + }) + + t.Run("OkResponseOneCluster", func(t *testing.T) { + responsePayload.Clusters = append(responsePayload.Clusters, firstClusterApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 1) + assert.Equal(t, clusters[0].ID, responsePayload.Clusters[0].ID) + }) + + t.Run("OkResponseTwoClusters", func(t *testing.T) { + responsePayload.Clusters = append(responsePayload.Clusters, secondClusterApiResource) + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + clusters, err := client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.NilError(t, err) + assert.Equal(t, len(clusters), 2) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(responsePayload) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.ListClusters(context.Background(), testApiKey, testTeamId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestCreateCluster(t *testing.T) { + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + clusterRequestPayload := &PostClustersRequestPayload{ + Name: "test-cluster1", + } + + 
t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var receivedPayload PostClustersRequestPayload + dec := json.NewDecoder(r.Body) + err = dec.Decode(&receivedPayload) + assert.NilError(t, err) + assert.Equal(t, r.Method, "POST", "Expected POST method") + assert.Equal(t, r.URL.Path, "/clusters", "Expected path to be '/clusters'") + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + assert.Equal(t, receivedPayload, *clusterRequestPayload) + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + newCluster, err := client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.NilError(t, err) + assert.Equal(t, newCluster.ClusterName, clusterApiResource.ClusterName) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + 
client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, err = client.CreateCluster(context.Background(), testApiKey, clusterRequestPayload) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestDeleteCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Method, "DELETE", "Expected DELETE method") + assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId) + assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.") + + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, _, err = client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + }) + + t.Run("OkResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + deletedCluster, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedCluster.ClusterName, clusterApiResource.ClusterName) + assert.Equal(t, deletedAlready, false) + }) + + t.Run("GoneResponse", func(t *testing.T) { + 
responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusGone) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedAlready, true) + }) + + t.Run("NotFoundResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, deletedAlready, err := client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.NilError(t, err) + assert.Equal(t, deletedAlready, true) + }) + + t.Run("ErrorResponse", func(t *testing.T) { + responsePayloadJson, err := json.Marshal(clusterApiResource) + assert.NilError(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write(responsePayloadJson) + })) + t.Cleanup(server.Close) + + client := NewClient(server.URL, "") + assert.Equal(t, client.BaseURL.String(), server.URL) + + _, _, err = client.DeleteCluster(context.Background(), testApiKey, clusterId) + assert.Check(t, err != nil) + assert.ErrorContains(t, err, "400 Bad Request") + }) +} + +func TestGetCluster(t *testing.T) { + clusterId := "1234" + clusterApiResource := &ClusterApiResource{ + ClusterName: "test-cluster1", + } + + t.Run("WeSendCorrectData", func(t *testing.T) { + responsePayloadJson, err := 
			json.Marshal(clusterApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, r.Method, "GET", "Expected GET method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId)
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetCluster(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the cluster resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		cluster, err := client.GetCluster(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
		assert.Equal(t, cluster.ClusterName, clusterApiResource.ClusterName)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetCluster(context.Background(), testApiKey, clusterId)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestGetClusterStatus exercises Client.GetClusterStatus against a stub
// Bridge API server.
func TestGetClusterStatus(t *testing.T) {
	clusterId := "1234"
	state := "Ready"

	clusterStatusApiResource := &ClusterStatusApiResource{
		State: state,
	}

	// Verify the HTTP method, path, and Authorization header we send.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterStatusApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, r.Method, "GET", "Expected GET method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/status", "Expected path to be /clusters/"+clusterId+"/status")
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetClusterStatus(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the status resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterStatusApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterStatus, err := client.GetClusterStatus(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
		assert.Equal(t, clusterStatus.State, state)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterStatusApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t,
			client.BaseURL.String(), server.URL)

		_, err = client.GetClusterStatus(context.Background(), testApiKey, clusterId)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestGetClusterUpgrade exercises Client.GetClusterUpgrade against a stub
// Bridge API server.
func TestGetClusterUpgrade(t *testing.T) {
	clusterId := "1234"
	clusterUpgradeApiResource := &ClusterUpgradeApiResource{
		ClusterID: clusterId,
	}

	// Verify the HTTP method, path, and Authorization header we send.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, r.Method, "GET", "Expected GET method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/upgrade", "Expected path to be /clusters/"+clusterId+"/upgrade")
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetClusterUpgrade(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the upgrade resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterUpgrade, err := client.GetClusterUpgrade(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
		assert.Equal(t, clusterUpgrade.ClusterID, clusterId)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server :=
			httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetClusterUpgrade(context.Background(), testApiKey, clusterId)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestUpgradeCluster exercises Client.UpgradeCluster against a stub Bridge
// API server, including the request payload we POST.
func TestUpgradeCluster(t *testing.T) {
	clusterId := "1234"
	clusterUpgradeApiResource := &ClusterUpgradeApiResource{
		ClusterID: clusterId,
	}
	clusterUpgradeRequestPayload := &PostClustersUpgradeRequestPayload{
		Plan:             "standard-8",
		PostgresVersion:  intstr.FromInt(15),
		UpgradeStartTime: "start-time",
		Storage:          10,
	}

	// Verify the HTTP method, path, Authorization header, and that the
	// decoded request body matches the payload we sent.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			var receivedPayload PostClustersUpgradeRequestPayload
			dec := json.NewDecoder(r.Body)
			err = dec.Decode(&receivedPayload)
			assert.NilError(t, err)
			assert.Equal(t, r.Method, "POST", "Expected POST method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/upgrade", "Expected path to be /clusters/"+clusterId+"/upgrade")
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")
			assert.Equal(t, receivedPayload, *clusterUpgradeRequestPayload)

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the upgrade resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err :=
			json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterUpgrade, err := client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload)
		assert.NilError(t, err)
		assert.Equal(t, clusterUpgrade.ClusterID, clusterId)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.UpgradeCluster(context.Background(), testApiKey, clusterId, clusterUpgradeRequestPayload)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestUpgradeClusterHA exercises Client.UpgradeClusterHA against a stub
// Bridge API server using the "enable-ha" action.
func TestUpgradeClusterHA(t *testing.T) {
	clusterId := "1234"
	action := "enable-ha"
	clusterUpgradeApiResource := &ClusterUpgradeApiResource{
		ClusterID: clusterId,
	}

	// Verify the HTTP method, action path, and Authorization header we send.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, r.Method, "PUT", "Expected PUT method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/actions/"+action,
				"Expected path to be /clusters/"+clusterId+"/actions/"+action)
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")

			w.WriteHeader(http.StatusOK)
			_, _ =
				w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the upgrade resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterUpgrade, err := client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action)
		assert.NilError(t, err)
		assert.Equal(t, clusterUpgrade.ClusterID, clusterId)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterUpgradeApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.UpgradeClusterHA(context.Background(), testApiKey, clusterId, action)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestUpdateCluster exercises Client.UpdateCluster against a stub Bridge API
// server, including the PATCH payload we send.
func TestUpdateCluster(t *testing.T) {
	clusterId := "1234"
	clusterApiResource := &ClusterApiResource{
		ClusterName: "new-cluster-name",
	}
	clusterUpdateRequestPayload := &PatchClustersRequestPayload{
		IsProtected: initialize.Bool(true),
	}

	// Verify the HTTP method, path, Authorization header, and that the
	// decoded request body matches the payload we sent.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			var receivedPayload PatchClustersRequestPayload
			dec := json.NewDecoder(r.Body)
			err = dec.Decode(&receivedPayload)
			assert.NilError(t, err)
			assert.Equal(t, r.Method, "PATCH", "Expected PATCH method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId, "Expected path to be /clusters/"+clusterId)
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")
			assert.Equal(t, *receivedPayload.IsProtected, *clusterUpdateRequestPayload.IsProtected)

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the updated cluster resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterUpdate, err := client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload)
		assert.NilError(t, err)
		assert.Equal(t, clusterUpdate.ClusterName, clusterApiResource.ClusterName)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err =
			client.UpdateCluster(context.Background(), testApiKey, clusterId, clusterUpdateRequestPayload)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestGetClusterRole exercises Client.GetClusterRole against a stub Bridge
// API server.
func TestGetClusterRole(t *testing.T) {
	clusterId := "1234"
	roleName := "application"
	clusterRoleApiResource := &ClusterRoleApiResource{
		Name: roleName,
	}

	// Verify the HTTP method, role path, and Authorization header we send.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterRoleApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, r.Method, "GET", "Expected GET method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/roles/"+roleName,
				"Expected path to be /clusters/"+clusterId+"/roles/"+roleName)
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName)
		assert.NilError(t, err)
	})

	// 200 OK: the response body decodes into the role resource.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterRoleApiResource)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterRole, err := client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName)
		assert.NilError(t, err)
		assert.Equal(t, clusterRole.Name, roleName)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(clusterRoleApiResource)
		assert.NilError(t, err)

		server :=
			httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.GetClusterRole(context.Background(), testApiKey, clusterId, roleName)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}

// TestListClusterRoles exercises Client.ListClusterRoles against a stub
// Bridge API server.
func TestListClusterRoles(t *testing.T) {
	clusterId := "1234"
	responsePayload := &ClusterRoleList{
		Roles: []*ClusterRoleApiResource{},
	}
	applicationClusterRoleApiResource := &ClusterRoleApiResource{}
	postgresClusterRoleApiResource := &ClusterRoleApiResource{}

	// Verify the HTTP method, path, and Authorization header we send.
	t.Run("WeSendCorrectData", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(responsePayload)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, r.Method, "GET", "Expected GET method")
			assert.Equal(t, r.URL.Path, "/clusters/"+clusterId+"/roles", "Expected path to be '/clusters/%s/roles'")
			assert.Equal(t, r.Header.Get("Authorization"), "Bearer "+testApiKey, "Expected Authorization header to contain api key.")

			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.ListClusterRoles(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
	})

	// 200 OK: both roles appended to the shared payload come back.
	// NOTE(review): this subtest mutates the shared responsePayload, so
	// subtest order matters here.
	t.Run("OkResponse", func(t *testing.T) {
		responsePayload.Roles = append(responsePayload.Roles, applicationClusterRoleApiResource, postgresClusterRoleApiResource)
		responsePayloadJson, err := json.Marshal(responsePayload)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		clusterRoles, err := client.ListClusterRoles(context.Background(), testApiKey, clusterId)
		assert.NilError(t, err)
		assert.Equal(t, len(clusterRoles), 2)
	})

	// Non-success status surfaces as an error with the status text.
	t.Run("ErrorResponse", func(t *testing.T) {
		responsePayloadJson, err := json.Marshal(responsePayload)
		assert.NilError(t, err)

		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusBadRequest)
			_, _ = w.Write(responsePayloadJson)
		}))
		t.Cleanup(server.Close)

		client := NewClient(server.URL, "")
		assert.Equal(t, client.BaseURL.String(), server.URL)

		_, err = client.ListClusterRoles(context.Background(), testApiKey, clusterId)
		assert.Check(t, err != nil)
		assert.ErrorContains(t, err, "400 Bad Request")
	})
}
diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go
new file mode 100644
index 0000000000..8a3280f512
--- /dev/null
+++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go
@@ -0,0 +1,711 @@
// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
//
// SPDX-License-Identifier: Apache-2.0

// Package crunchybridgecluster contains the controller that reconciles
// CrunchyBridgeCluster objects against the Crunchy Bridge API.
package crunchybridgecluster

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/crunchydata/postgres-operator/internal/bridge"
	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
	"github.com/crunchydata/postgres-operator/internal/initialize"
	"github.com/crunchydata/postgres-operator/internal/logging"
	"github.com/crunchydata/postgres-operator/internal/naming"
	"github.com/crunchydata/postgres-operator/internal/tracing"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// CrunchyBridgeClusterReconciler reconciles a CrunchyBridgeCluster object
type CrunchyBridgeClusterReconciler struct {
	// NewClient is called each time a new bridge.Client is needed.
	NewClient func() bridge.ClientInterface

	// Reader is the subset of the Kubernetes client used for reads.
	Reader interface {
		Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error
		List(context.Context, client.ObjectList, ...client.ListOption) error
	}
	// Writer is the subset of the Kubernetes client used for writes.
	Writer interface {
		Delete(context.Context, client.Object, ...client.DeleteOption) error
		Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error
		Update(context.Context, client.Object, ...client.UpdateOption) error
	}
	// StatusWriter patches the status subresource.
	StatusWriter interface {
		Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error
	}
}

//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,list,watch}
//+kubebuilder:rbac:groups="",resources="secrets",verbs={get,list,watch}

// ManagedReconciler creates a [CrunchyBridgeClusterReconciler] and adds it to m.
func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) error {
	kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerCrunchyBridgeCluster)

	reconciler := &CrunchyBridgeClusterReconciler{
		NewClient:    newClient,
		Reader:       kubernetes,
		StatusWriter: kubernetes.Status(),
		Writer:       kubernetes,
	}

	return ctrl.NewControllerManagedBy(m).
		For(&v1beta1.CrunchyBridgeCluster{}).
		Owns(&corev1.Secret{}).
		// Wake periodically to check Bridge API for all CrunchyBridgeClusters.
		// Potentially replace with different requeue times
		// Smarter: retry after a certain time for each cluster
		WatchesRawSource(
			runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{},
				handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request {
					var list v1beta1.CrunchyBridgeClusterList
					// List errors are deliberately ignored here; the next tick retries.
					_ = reconciler.Reader.List(ctx, &list)
					return runtime.Requests(initialize.Pointers(list.Items...)...)
				}),
			),
		).
		// Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters
		Watches(
			&corev1.Secret{},
			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []reconcile.Request {
				return runtime.Requests(reconciler.findCrunchyBridgeClustersForSecret(ctx, client.ObjectKeyFromObject(secret))...)
			}),
		).
		Complete(reconcile.AsReconciler(kubernetes, reconciler))
}

// The owner reference created by controllerutil.SetControllerReference blocks
// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the
// creator of such a reference have either "delete" permission on the owner or
// "update" permission on the owner's "finalizers" subresource.
// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={update}

// setControllerReference sets owner as a Controller OwnerReference on controlled.
// Only one OwnerReference can be a controller, so it returns an error if another
// is already set.
func (r *CrunchyBridgeClusterReconciler) setControllerReference(
	owner *v1beta1.CrunchyBridgeCluster, controlled client.Object,
) error {
	return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme)
}

//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={patch,update}
//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/status",verbs={patch,update}
//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={patch,update}
//+kubebuilder:rbac:groups="",resources="secrets",verbs={get}

// Reconcile does the work to move the current state of the world toward the
// desired state described in crunchybridgecluster.
func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster) (ctrl.Result, error) {
	var err error
	ctx, span := tracing.Start(ctx, "reconcile-crunchybridgecluster")
	log := logging.FromContext(ctx)
	defer span.End()

	// Write any changes to the crunchybridgecluster status on the way out.
	before := crunchybridgecluster.DeepCopy()
	defer func() {
		if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) {
			status := r.StatusWriter.Patch(ctx, crunchybridgecluster, client.MergeFrom(before))

			// Prefer the reconcile error; only log the patch error when
			// both occurred so neither is silently lost.
			if err == nil && status != nil {
				err = status
			} else if status != nil {
				log.Error(status, "Patching CrunchyBridgeCluster status")
			}
		}
	}()

	// Get and validate connection secret for requests
	key, team, err := r.reconcileBridgeConnectionSecret(ctx, crunchybridgecluster)
	if err != nil {
		log.Error(err, "issue reconciling bridge connection secret")

		// Don't automatically requeue Secret issues. We are watching for
		// related secrets, so will requeue when a related secret is touched.
		// lint:ignore nilerr Return err as status, no requeue needed
		return ctrl.Result{}, nil
	}

	// Check for and handle deletion of cluster. Return early if it is being
	// deleted or there was an error. Make sure finalizer is added if cluster
	// is not being deleted.
	if result, err := r.handleDelete(ctx, crunchybridgecluster, key); err != nil {
		log.Error(err, "deleting")
		return ctrl.Result{}, tracing.Escape(span, err)
	} else if result != nil {
		if log := log.V(1); log.Enabled() {
			log.Info("deleting", "result", fmt.Sprintf("%+v", *result))
		}
		return *result, tracing.Escape(span, err)
	}

	// Wonder if there's a better way to handle adding/checking/removing statuses
	// We did something in the upgrade controller
	// Exit early if we can't create from this K8s object
	// unless this K8s object has been changed (compare ObservedGeneration)
	invalid := meta.FindStatusCondition(crunchybridgecluster.Status.Conditions,
		v1beta1.ConditionReady)
	if invalid != nil &&
		invalid.Status == metav1.ConditionFalse &&
		invalid.Reason == "ClusterInvalid" &&
		invalid.ObservedGeneration == crunchybridgecluster.GetGeneration() {
		return ctrl.Result{}, nil
	}

	// check for an upgrade error and return until observedGeneration has
	// been incremented.
	invalidUpgrade := meta.FindStatusCondition(crunchybridgecluster.Status.Conditions,
		v1beta1.ConditionUpgrading)
	if invalidUpgrade != nil &&
		invalidUpgrade.Status == metav1.ConditionFalse &&
		invalidUpgrade.Reason == "UpgradeError" &&
		invalidUpgrade.ObservedGeneration == crunchybridgecluster.GetGeneration() {
		return ctrl.Result{}, nil
	}

	// We should only be missing the ID if no create has been issued
	// or the create was interrupted and we haven't received the ID.
	if crunchybridgecluster.Status.ID == "" {
		// Check if a cluster with the same name already exists
		// NOTE(review): dereferencing controllerResult relies on
		// handleDuplicateClusterName returning a non-nil result whenever
		// err is non-nil.
		controllerResult, err := r.handleDuplicateClusterName(ctx, key, team, crunchybridgecluster)
		if err != nil || controllerResult != nil {
			return *controllerResult, tracing.Escape(span, err)
		}

		// if we've gotten here then no cluster exists with that name and we're missing the ID, ergo, create cluster
		return r.handleCreateCluster(ctx, key, team, crunchybridgecluster), nil
	}

	// If we reach this point, our CrunchyBridgeCluster object has an ID, so we want
	// to fill in the details for the cluster, cluster status, and cluster upgrades
	// from the Bridge API.

	// Get Cluster
	err = r.handleGetCluster(ctx, key, crunchybridgecluster)
	if err != nil {
		return ctrl.Result{}, tracing.Escape(span, err)
	}

	// Get Cluster Status
	err = r.handleGetClusterStatus(ctx, key, crunchybridgecluster)
	if err != nil {
		return ctrl.Result{}, tracing.Escape(span, err)
	}

	// Get Cluster Upgrade
	err = r.handleGetClusterUpgrade(ctx, key, crunchybridgecluster)
	if err != nil {
		return ctrl.Result{}, tracing.Escape(span, err)
	}

	// Reconcile roles and their secrets
	err = r.reconcilePostgresRoles(ctx, key, crunchybridgecluster)
	if err != nil {
		log.Error(err, "issue reconciling postgres user roles/secrets")
		return ctrl.Result{}, tracing.Escape(span, err)
	}

	// For now, we skip updating until the upgrade status is cleared.
	// For the future, we may want to update in-progress upgrades,
	// and for that we will need a way tell that an upgrade in progress
	// is the one we want to update.
	// Consider: Perhaps add `generation` field to upgrade status?
	// Checking this here also means that if an upgrade is requested through the GUI/API
	// then we will requeue and wait for it to be done.
	// TODO(crunchybridgecluster): Do we want the operator to interrupt
	// upgrades created through the GUI/API?
	if len(crunchybridgecluster.Status.OngoingUpgrade) != 0 {
		return runtime.RequeueWithoutBackoff(3 * time.Minute), nil
	}

	// Check if there's an upgrade difference for the three upgradeable fields that hit the upgrade endpoint
	// Why PostgresVersion and MajorVersion? Because MajorVersion in the Status is sure to be
	// an int of the major version, whereas Status.Responses.Cluster.PostgresVersion might be the ID
	// NOTE(review): the Status pointer fields dereferenced below appear to be
	// populated by the handleGet* calls above — confirm they can never be nil here.
	if (crunchybridgecluster.Spec.Storage != *crunchybridgecluster.Status.Storage) ||
		crunchybridgecluster.Spec.Plan != crunchybridgecluster.Status.Plan ||
		crunchybridgecluster.Spec.PostgresVersion != crunchybridgecluster.Status.MajorVersion {
		return r.handleUpgrade(ctx, key, crunchybridgecluster), nil
	}

	// Are there diffs between the cluster response from the Bridge API and the spec?
	// HA diffs are sent to /clusters/{cluster_id}/actions/[enable|disable]-ha
	// so have to know (a) to send and (b) which to send to
	if crunchybridgecluster.Spec.IsHA != *crunchybridgecluster.Status.IsHA {
		return r.handleUpgradeHA(ctx, key, crunchybridgecluster), nil
	}

	// Check if there's a difference in is_protected, name, maintenance_window_start, etc.
	// see https://docs.crunchybridge.com/api/cluster#update-cluster
	// updates to these fields that hit the PATCH `clusters/` endpoint
	if crunchybridgecluster.Spec.IsProtected != *crunchybridgecluster.Status.IsProtected ||
		crunchybridgecluster.Spec.ClusterName != crunchybridgecluster.Status.ClusterName {
		return r.handleUpdate(ctx, key, crunchybridgecluster), nil
	}

	log.Info("Reconciled")
	// TODO(crunchybridgecluster): do we always want to requeue? Does the Watch mean we
	// don't need this, or do we want both?
	return runtime.RequeueWithoutBackoff(3 * time.Minute), nil
}

// reconcileBridgeConnectionSecret looks for the Bridge connection secret specified by the cluster,
// and returns the API key and Team ID found in the secret, or sets conditions and returns an error
// if the secret is invalid.
func (r *CrunchyBridgeClusterReconciler) reconcileBridgeConnectionSecret(
	ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster,
) (string, string, error) {
	key, team, err := r.GetSecretKeys(ctx, crunchybridgecluster)
	if err != nil {
		// Mark both the Ready and Upgrading conditions Unknown; without a
		// valid secret we cannot talk to the Bridge API at all.
		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
			ObservedGeneration: crunchybridgecluster.GetGeneration(),
			Type:               v1beta1.ConditionReady,
			Status:             metav1.ConditionUnknown,
			Reason:             "SecretInvalid",
			Message: fmt.Sprintf(
				"The condition of the cluster is unknown because the secret is invalid: %v", err),
		})
		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
			Type:               v1beta1.ConditionUpgrading,
			Status:             metav1.ConditionUnknown,
			ObservedGeneration: crunchybridgecluster.GetGeneration(),
			LastTransitionTime: metav1.Time{},
			Reason:             "SecretInvalid",
			Message: fmt.Sprintf(
				"The condition of the upgrade(s) is unknown because the secret is invalid: %v", err),
		})

		return "", "", err
	}

	return key, team, err
}

// handleDuplicateClusterName checks Bridge for any already existing clusters that
// have the same name. It returns (nil, nil) when no cluster is found with the same
// name. It returns a controller result, indicating we should exit the reconcile loop,
// if a cluster with a duplicate name is found. The caller is responsible for
// returning controller result objects and errors to controller-runtime.
+func (r *CrunchyBridgeClusterReconciler) handleDuplicateClusterName(ctx context.Context, + apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) (*ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + clusters, err := r.NewClient().ListClusters(ctx, apiKey, teamId) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue listing existing clusters in Bridge: %v", err), + }) + log.Error(err, "issue listing existing clusters in Bridge") + return &ctrl.Result{}, err + } + + for _, cluster := range clusters { + if crunchybridgecluster.Spec.ClusterName == cluster.ClusterName { + // Cluster with the same name exists so check for adoption annotation + adoptionID, annotationExists := crunchybridgecluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] + if annotationExists && strings.EqualFold(adoptionID, cluster.ID) { + // Annotation is present with correct ID value; adopt cluster by assigning ID to status. + crunchybridgecluster.Status.ID = cluster.ID + // Requeue now that we have a cluster ID assigned + return &ctrl.Result{Requeue: true}, nil + } + + // If we made it here, the adoption annotation either doesn't exist or its value is incorrect. + // The user must either add it or change the name on the CR. + + // Set invalid status condition and create log message. + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: "DuplicateClusterName", + Message: fmt.Sprintf("A cluster with the same name already exists for this team (Team ID: %v). 
"+ + "Give the CrunchyBridgeCluster CR a unique name, or if you would like to take control of the "+ + "existing cluster, add the 'postgres-operator.crunchydata.com/adopt-bridge-cluster' "+ + "annotation and set its value to the existing cluster's ID (Cluster ID: %v).", teamId, cluster.ID), + }) + + log.Info(fmt.Sprintf("A cluster with the same name already exists for this team (Team ID: %v). "+ + "Give the CrunchyBridgeCluster CR a unique name, or if you would like to take control "+ + "of the existing cluster, add the 'postgres-operator.crunchydata.com/adopt-bridge-cluster' "+ + "annotation and set its value to the existing cluster's ID (Cluster ID: %v).", teamId, cluster.ID)) + + // We have an invalid cluster spec so we don't want to requeue + return &ctrl.Result{}, nil + } + } + + return nil, nil +} + +// handleCreateCluster handles creating new Crunchy Bridge Clusters +func (r *CrunchyBridgeClusterReconciler) handleCreateCluster(ctx context.Context, + apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) ctrl.Result { + log := ctrl.LoggerFrom(ctx) + + createClusterRequestPayload := &bridge.PostClustersRequestPayload{ + IsHA: crunchybridgecluster.Spec.IsHA, + Name: crunchybridgecluster.Spec.ClusterName, + Plan: crunchybridgecluster.Spec.Plan, + PostgresVersion: intstr.FromInt(crunchybridgecluster.Spec.PostgresVersion), + Provider: crunchybridgecluster.Spec.Provider, + Region: crunchybridgecluster.Spec.Region, + Storage: bridge.ToGibibytes(crunchybridgecluster.Spec.Storage), + Team: teamId, + } + cluster, err := r.NewClient().CreateCluster(ctx, apiKey, createClusterRequestPayload) + if err != nil { + log.Error(err, "issue creating cluster in Bridge") + // TODO(crunchybridgecluster): probably shouldn't set this condition unless response from Bridge + // indicates the payload is wrong + // Otherwise want a different condition + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: 
crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: "ClusterInvalid", + Message: fmt.Sprintf( + "Cannot create from spec: %v", err), + }) + + // TODO(crunchybridgecluster): If the payload is wrong, we don't want to requeue, so pass nil error + // If the transmission hit a transient problem, we do want to requeue + return ctrl.Result{} + } + crunchybridgecluster.Status.ID = cluster.ID + + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: "The condition of the cluster is unknown.", + }) + + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionUnknown, + Reason: "UnknownUpgradeState", + Message: "The condition of the upgrade(s) is unknown.", + }) + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// handleGetCluster handles getting the cluster details from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetCluster(ctx context.Context, + apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterDetails, err := r.NewClient().GetCluster(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue getting cluster information from Bridge: %v", err), + }) + log.Error(err, "issue getting cluster information from Bridge") + return err + } + 
clusterDetails.AddDataToClusterStatus(crunchybridgecluster) + + return nil +} + +// handleGetClusterStatus handles getting the cluster status from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetClusterStatus(ctx context.Context, + apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, +) error { + log := ctrl.LoggerFrom(ctx) + + clusterStatus, err := r.NewClient().GetClusterStatus(ctx, apiKey, crunchybridgecluster.Status.ID) + if err != nil { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "UnknownClusterState", + Message: fmt.Sprintf("Issue getting cluster status from Bridge: %v", err), + }) + crunchybridgecluster.Status.State = "unknown" + log.Error(err, "issue getting cluster status from Bridge") + return err + } + clusterStatus.AddDataToClusterStatus(crunchybridgecluster) + + if clusterStatus.State == "ready" { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionTrue, + Reason: clusterStatus.State, + Message: fmt.Sprintf("Bridge cluster state is %v.", clusterStatus.State), + }) + } else { + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionReady, + Status: metav1.ConditionFalse, + Reason: clusterStatus.State, + Message: fmt.Sprintf("Bridge cluster state is %v.", clusterStatus.State), + }) + } + + return nil +} + +// handleGetClusterUpgrade handles getting the ongoing upgrade operations from Bridge and +// updating the cluster CR's Status accordingly +func (r *CrunchyBridgeClusterReconciler) handleGetClusterUpgrade(ctx context.Context, + apiKey string, 
+	crunchybridgecluster *v1beta1.CrunchyBridgeCluster,
+) error {
+	log := ctrl.LoggerFrom(ctx)
+
+	clusterUpgradeDetails, err := r.NewClient().GetClusterUpgrade(ctx, apiKey, crunchybridgecluster.Status.ID)
+	if err != nil {
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionUnknown,
+			Reason:             "UnknownUpgradeState",
+			Message:            fmt.Sprintf("Issue getting cluster upgrade from Bridge: %v", err),
+		})
+		log.Error(err, "issue getting cluster upgrade from Bridge")
+		return err
+	}
+	clusterUpgradeDetails.AddDataToClusterStatus(crunchybridgecluster)
+
+	if len(clusterUpgradeDetails.Operations) != 0 {
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionTrue,
+			Reason:             clusterUpgradeDetails.Operations[0].Flavor,
+			Message: fmt.Sprintf(
+				"Performing an upgrade of type %v with a state of %v.",
+				clusterUpgradeDetails.Operations[0].Flavor, clusterUpgradeDetails.Operations[0].State),
+		})
+	} else {
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionFalse,
+			Reason:             "NoUpgradesInProgress",
+			Message:            "No upgrades being performed",
+		})
+	}
+
+	return nil
+}
+
+// handleUpgrade handles upgrades that hit the "POST /clusters/{cluster_id}/upgrade" endpoint
+func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context,
+	apiKey string,
+	crunchybridgecluster *v1beta1.CrunchyBridgeCluster,
+) ctrl.Result {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.Info("Handling upgrade request")
+
+	upgradeRequest := &bridge.PostClustersUpgradeRequestPayload{
+		Plan:            crunchybridgecluster.Spec.Plan,
+		PostgresVersion:
intstr.FromInt(crunchybridgecluster.Spec.PostgresVersion),
+		Storage:         bridge.ToGibibytes(crunchybridgecluster.Spec.Storage),
+	}
+
+	clusterUpgrade, err := r.NewClient().UpgradeCluster(ctx, apiKey,
+		crunchybridgecluster.Status.ID, upgradeRequest)
+	if err != nil {
+		// TODO(crunchybridgecluster): consider what errors we might get
+		// and what different results/requeue times we want to return.
+		// Currently: don't requeue and wait for user to change spec.
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionFalse,
+			Reason:             "UpgradeError",
+			Message: fmt.Sprintf(
+				"Error performing an upgrade: %s", err),
+		})
+		log.Error(err, "Error while attempting cluster upgrade")
+		return ctrl.Result{}
+	}
+	clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster)
+
+	if len(clusterUpgrade.Operations) != 0 {
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionTrue,
+			Reason:             clusterUpgrade.Operations[0].Flavor,
+			Message: fmt.Sprintf(
+				"Performing an upgrade of type %v with a state of %v.",
+				clusterUpgrade.Operations[0].Flavor, clusterUpgrade.Operations[0].State),
+		})
+	}
+
+	return runtime.RequeueWithoutBackoff(3 * time.Minute)
+}
+
+// handleUpgradeHA handles upgrades that hit the
+// "PUT /clusters/{cluster_id}/actions/[enable|disable]-ha" endpoint
+func (r *CrunchyBridgeClusterReconciler) handleUpgradeHA(ctx context.Context,
+	apiKey string,
+	crunchybridgecluster *v1beta1.CrunchyBridgeCluster,
+) ctrl.Result {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.Info("Handling HA change request")
+
+	action := "enable-ha"
+	if !crunchybridgecluster.Spec.IsHA {
+		action = "disable-ha"
+	}
+
+	clusterUpgrade, err := r.NewClient().UpgradeClusterHA(ctx, apiKey,
crunchybridgecluster.Status.ID, action)
+	if err != nil {
+		// TODO(crunchybridgecluster): consider what errors we might get
+		// and what different results/requeue times we want to return.
+		// Currently: don't requeue and wait for user to change spec.
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionFalse,
+			Reason:             "UpgradeError",
+			Message: fmt.Sprintf(
+				"Error performing an HA upgrade: %s", err),
+		})
+		log.Error(err, "Error while attempting cluster HA change")
+		return ctrl.Result{}
+	}
+	clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster)
+	if len(clusterUpgrade.Operations) != 0 {
+		meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{
+			ObservedGeneration: crunchybridgecluster.GetGeneration(),
+			Type:               v1beta1.ConditionUpgrading,
+			Status:             metav1.ConditionTrue,
+			Reason:             clusterUpgrade.Operations[0].Flavor,
+			Message: fmt.Sprintf(
+				"Performing an upgrade of type %v with a state of %v.",
+				clusterUpgrade.Operations[0].Flavor, clusterUpgrade.Operations[0].State),
+		})
+	}
+
+	return runtime.RequeueWithoutBackoff(3 * time.Minute)
+}
+
+// handleUpdate handles updates that hit the "PATCH /clusters/{cluster_id}" endpoint
+func (r *CrunchyBridgeClusterReconciler) handleUpdate(ctx context.Context,
+	apiKey string,
+	crunchybridgecluster *v1beta1.CrunchyBridgeCluster,
+) ctrl.Result {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.Info("Handling update request")
+
+	updateRequest := &bridge.PatchClustersRequestPayload{
+		IsProtected: &crunchybridgecluster.Spec.IsProtected,
+		Name:        crunchybridgecluster.Spec.ClusterName,
+	}
+
+	clusterUpdate, err := r.NewClient().UpdateCluster(ctx, apiKey,
+		crunchybridgecluster.Status.ID, updateRequest)
+	if err != nil {
+		// TODO(crunchybridgecluster): consider what errors we might get
+		// and what different results/requeue times we want to return.
+ // Currently: don't requeue and wait for user to change spec. + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionFalse, + Reason: "UpgradeError", + Message: fmt.Sprintf( + "Error performing an upgrade: %s", err), + }) + log.Error(err, "Error while attempting cluster update") + return ctrl.Result{} + } + clusterUpdate.AddDataToClusterStatus(crunchybridgecluster) + meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ + ObservedGeneration: crunchybridgecluster.GetGeneration(), + Type: v1beta1.ConditionUpgrading, + Status: metav1.ConditionTrue, + Reason: "ClusterUpgrade", + Message: fmt.Sprintf( + "An upgrade is occurring, the clusters name is %v and the cluster is protected is %v.", + clusterUpdate.ClusterName, *clusterUpdate.IsProtected), + }) + + return runtime.RequeueWithoutBackoff(3 * time.Minute) +} + +// GetSecretKeys gets the secret and returns the expected API key and team id +// or an error if either of those fields or the Secret are missing +func (r *CrunchyBridgeClusterReconciler) GetSecretKeys( + ctx context.Context, crunchyBridgeCluster *v1beta1.CrunchyBridgeCluster, +) (string, string, error) { + + existing := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: crunchyBridgeCluster.GetNamespace(), + Name: crunchyBridgeCluster.Spec.Secret, + }} + + err := errors.WithStack( + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + + if err == nil { + if existing.Data["key"] != nil && existing.Data["team"] != nil { + return string(existing.Data["key"]), string(existing.Data["team"]), nil + } + err = fmt.Errorf("error handling secret; expected to find a key and a team: found key %t, found team %t", + existing.Data["key"] != nil, + existing.Data["team"] != nil) + } + + return "", "", err +} + +// deleteControlled safely deletes object when it is 
controlled by cluster. +func (r *CrunchyBridgeClusterReconciler) deleteControlled( + ctx context.Context, crunchyBridgeCluster *v1beta1.CrunchyBridgeCluster, object client.Object, +) error { + if metav1.IsControlledBy(object, crunchyBridgeCluster) { + uid := object.GetUID() + version := object.GetResourceVersion() + exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} + + return r.Writer.Delete(ctx, object, exactly) + } + + return nil +} diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go new file mode 100644 index 0000000000..955282f61d --- /dev/null +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go @@ -0,0 +1,755 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "strings" + "testing" + "time" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +var testTeamId = "5678" +var testApiKey = "9012" + +func TestReconcileBridgeConnectionSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Reader: 
tClient, + } + + ns := setupNamespace(t, tClient).Name + cluster := testCluster() + cluster.Namespace = ns + + t.Run("Failure", func(t *testing.T) { + key, team, err := reconciler.reconcileBridgeConnectionSecret(ctx, cluster) + assert.Equal(t, key, "") + assert.Equal(t, team, "") + assert.Check(t, err != nil) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "SecretInvalid") + assert.Check(t, cmp.Contains(readyCondition.Message, + "The condition of the cluster is unknown because the secret is invalid:")) + } + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "SecretInvalid") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "The condition of the upgrade(s) is unknown because the secret is invalid:")) + } + }) + + t.Run("ValidSecretFound", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crunchy-bridge-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + key, team, err := reconciler.reconcileBridgeConnectionSecret(ctx, cluster) + assert.Equal(t, key, "asdf") + assert.Equal(t, team, "jkl;") + assert.NilError(t, err) + }) +} + +func TestHandleDuplicateClusterName(t *testing.T) { + ctx := context.Background() + + clusterInBridge := testClusterApiResource() + clusterInBridge.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, 
+ TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("FailureToListClusters", func(t *testing.T) { + cluster := testCluster() + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, "bad_api_key", testTeamId, cluster) + assert.Check(t, err != nil) + assert.Equal(t, *controllerResult, ctrl.Result{}) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue listing existing clusters in Bridge:")) + } + }) + + t.Run("NoDuplicateFound", func(t *testing.T) { + cluster := testCluster() + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + }) + + t.Run("DuplicateFoundAdoptionAnnotationNotPresent", func(t *testing.T) { + cluster := testCluster() + cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{}) + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "DuplicateClusterName") + assert.Check(t, cmp.Contains(readyCondition.Message, + "A cluster with the same name already exists for this team (Team ID: ")) + } + }) + + t.Run("DuplicateFoundAdoptionAnnotationPresent", func(t *testing.T) { + cluster := testCluster() + cluster.Spec.ClusterName = "bridge-cluster-1" // originally "hippo-cluster" + cluster.Annotations = 
map[string]string{} + cluster.Annotations[naming.CrunchyBridgeClusterAdoptionAnnotation] = "1234" + + controllerResult, err := reconciler.handleDuplicateClusterName(ctx, testApiKey, testTeamId, cluster) + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{Requeue: true}) + assert.Equal(t, cluster.Status.ID, "1234") + }) +} + +func TestHandleCreateCluster(t *testing.T) { + ctx := context.Background() + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{}, + } + } + + t.Run("SuccessfulCreate", func(t *testing.T) { + cluster := testCluster() + + controllerResult := reconciler.handleCreateCluster(ctx, testApiKey, testTeamId, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + assert.Equal(t, cluster.Status.ID, "0") + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "The condition of the cluster is unknown.")) + } + + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "UnknownUpgradeState") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "The condition of the upgrade(s) is unknown.")) + } + }) + + t.Run("UnsuccessfulCreate", func(t *testing.T) { + cluster := testCluster() + + controllerResult := reconciler.handleCreateCluster(ctx, "bad_api_key", testTeamId, cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + assert.Equal(t, cluster.Status.ID, "") + + 
readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "ClusterInvalid") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Cannot create from spec:")) + } + + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + assert.Check(t, upgradingCondition == nil) + }) +} + +func TestHandleGetCluster(t *testing.T) { + ctx := context.Background() + + firstClusterInBridge := testClusterApiResource() + secondClusterInBridge := testClusterApiResource() + secondClusterInBridge.ID = "2345" // originally "1234" + secondClusterInBridge.ClusterName = "hippo-cluster-2" // originally "hippo-cluster" + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{firstClusterInBridge, secondClusterInBridge}, + } + } + + t.Run("SuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + + err := reconciler.handleGetCluster(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.ClusterName, firstClusterInBridge.ClusterName) + assert.Equal(t, cluster.Status.Host, firstClusterInBridge.Host) + assert.Equal(t, cluster.Status.ID, firstClusterInBridge.ID) + assert.Equal(t, cluster.Status.IsHA, firstClusterInBridge.IsHA) + assert.Equal(t, cluster.Status.IsProtected, firstClusterInBridge.IsProtected) + assert.Equal(t, cluster.Status.MajorVersion, firstClusterInBridge.MajorVersion) + assert.Equal(t, cluster.Status.Plan, firstClusterInBridge.Plan) + assert.Equal(t, *cluster.Status.Storage, *bridge.FromGibibytes(firstClusterInBridge.Storage)) + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + 
cluster.Status.ID = "bad_cluster_id" + + err := reconciler.handleGetCluster(ctx, testApiKey, cluster) + assert.Check(t, err != nil) + + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue getting cluster information from Bridge:")) + } + }) +} + +func TestHandleGetClusterStatus(t *testing.T) { + ctx := context.Background() + + readyClusterId := "1234" + creatingClusterId := "7890" + readyClusterStatusInBridge := testClusterStatusApiResource(readyClusterId) + creatingClusterStatusInBridge := testClusterStatusApiResource(creatingClusterId) + creatingClusterStatusInBridge.State = "creating" // originally "ready" + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + ClusterStatuses: map[string]*bridge.ClusterStatusApiResource{ + readyClusterId: readyClusterStatusInBridge, + creatingClusterId: creatingClusterStatusInBridge, + }, + } + } + + t.Run("SuccessReadyState", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = readyClusterId + + err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.State, "ready") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionTrue) + assert.Equal(t, readyCondition.Reason, "ready") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Bridge cluster state is ready")) + } + }) + + t.Run("SuccessNonReadyState", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = creatingClusterId + + 
err := reconciler.handleGetClusterStatus(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, cluster.Status.State, "creating") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionFalse) + assert.Equal(t, readyCondition.Reason, "creating") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Bridge cluster state is creating")) + } + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = creatingClusterId + + err := reconciler.handleGetClusterStatus(ctx, "bad_api_key", cluster) + assert.Check(t, err != nil) + assert.Equal(t, cluster.Status.State, "unknown") + readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) + if assert.Check(t, readyCondition != nil) { + assert.Equal(t, readyCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, readyCondition.Reason, "UnknownClusterState") + assert.Check(t, cmp.Contains(readyCondition.Message, + "Issue getting cluster status from Bridge:")) + } + }) +} + +func TestHandleGetClusterUpgrade(t *testing.T) { + ctx := context.Background() + + upgradingClusterId := "1234" + notUpgradingClusterId := "7890" + upgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(upgradingClusterId) + notUpgradingClusterUpgradeInBridge := testClusterUpgradeApiResource(notUpgradingClusterId) + notUpgradingClusterUpgradeInBridge.Operations = []*v1beta1.UpgradeOperation{} + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + ClusterUpgrades: map[string]*bridge.ClusterUpgradeApiResource{ + upgradingClusterId: upgradingClusterUpgradeInBridge, + notUpgradingClusterId: notUpgradingClusterUpgradeInBridge, + }, + } + } + + t.Run("SuccessUpgrading", func(t *testing.T) 
{ + cluster := testCluster() + cluster.Status.ID = upgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "resize") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type resize with a state of in_progress.")) + } + }) + + t.Run("SuccessNotUpgrading", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = notUpgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, testApiKey, cluster) + assert.NilError(t, err) + assert.Equal(t, len(cluster.Status.OngoingUpgrade), 0) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "NoUpgradesInProgress") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "No upgrades being performed")) + } + }) + + t.Run("UnsuccessfulGet", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = notUpgradingClusterId + + err := reconciler.handleGetClusterUpgrade(ctx, "bad_api_key", cluster) + assert.Check(t, err != nil) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionUnknown) + assert.Equal(t, upgradingCondition.Reason, "UnknownUpgradeState") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Issue getting 
cluster upgrade from Bridge:")) + } + }) +} + +func TestHandleUpgrade(t *testing.T) { + ctx := context.Background() + clusterInBridge := testClusterApiResource() + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("UpgradePlan", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.Plan = "standard-16" // originally "standard-8" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "maintenance") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type maintenance with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "maintenance", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradePostgres", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.PostgresVersion = 16 // originally "15" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "major_version_upgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type major_version_upgrade 
with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "major_version_upgrade", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradeStorage", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" + + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "resize") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type resize with a state of in_progress.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }) + } + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" + + controllerResult := reconciler.handleUpgrade(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Error performing an upgrade: boom")) + } + }) +} + +func TestHandleUpgradeHA(t *testing.T) { + ctx := context.Background() + + clusterInBridgeWithHaDisabled := testClusterApiResource() + clusterInBridgeWithHaEnabled := testClusterApiResource() + 
clusterInBridgeWithHaEnabled.ID = "2345" // originally "1234" + clusterInBridgeWithHaEnabled.IsHA = initialize.Bool(true) // originally "false" + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridgeWithHaDisabled, + clusterInBridgeWithHaEnabled}, + } + } + + t.Run("EnableHA", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.IsHA = true // originally "false" + + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ha_change") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type ha_change with a state of enabling_ha.")) + assert.Equal(t, *cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "ha_change", + StartingFrom: "", + State: "enabling_ha", + }) + } + }) + + t.Run("DisableHA", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "2345" + + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ha_change") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Performing an upgrade of type ha_change with a state of disabling_ha.")) + assert.Equal(t, 
*cluster.Status.OngoingUpgrade[0], v1beta1.UpgradeOperation{ + Flavor: "ha_change", + StartingFrom: "", + State: "disabling_ha", + }) + } + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + + controllerResult := reconciler.handleUpgradeHA(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "Error performing an HA upgrade: boom")) + } + }) +} + +func TestHandleUpdate(t *testing.T) { + ctx := context.Background() + clusterInBridge := testClusterApiResource() + + reconciler := &CrunchyBridgeClusterReconciler{} + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: testApiKey, + TeamId: testTeamId, + Clusters: []*bridge.ClusterApiResource{clusterInBridge}, + } + } + + t.Run("UpdateName", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.ClusterName = "new-cluster-name" // originally "hippo-cluster" + + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ClusterUpgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "An upgrade is occurring, the clusters name is new-cluster-name and the cluster is protected is false.")) + } + assert.Equal(t, cluster.Status.ClusterName, "new-cluster-name") + }) + + 
t.Run("UpdateIsProtected", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.IsProtected = true // originally "false" + + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) + assert.Equal(t, upgradingCondition.Reason, "ClusterUpgrade") + assert.Check(t, cmp.Contains(upgradingCondition.Message, + "An upgrade is occurring, the clusters name is hippo-cluster and the cluster is protected is true.")) + } + assert.Equal(t, *cluster.Status.IsProtected, true) + }) + + t.Run("UpgradeFailure", func(t *testing.T) { + cluster := testCluster() + cluster.Status.ID = "1234" + cluster.Spec.IsProtected = true // originally "false" + + controllerResult := reconciler.handleUpdate(ctx, "bad_api_key", cluster) + assert.Equal(t, controllerResult, ctrl.Result{}) + upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) + if assert.Check(t, upgradingCondition != nil) { + assert.Equal(t, upgradingCondition.Status, metav1.ConditionFalse) + assert.Equal(t, upgradingCondition.Reason, "UpgradeError") + assert.Check(t, cmp.Contains(upgradingCondition.Message, "Error performing an upgrade: boom")) + } + }) +} + +func TestGetSecretKeys(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + reconciler := &CrunchyBridgeClusterReconciler{ + Reader: tClient, + } + + ns := setupNamespace(t, tClient).Name + cluster := testCluster() + cluster.Namespace = ns + + t.Run("NoSecret", func(t *testing.T) { + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "secrets 
\"crunchy-bridge-api-key\" not found") + }) + + t.Run("SecretMissingApiKey", func(t *testing.T) { + cluster.Spec.Secret = "secret-missing-api-key" // originally "crunchy-bridge-api-key" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-missing-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "error handling secret; expected to find a key and a team: found key false, found team true") + + assert.NilError(t, tClient.Delete(ctx, secret)) + }) + + t.Run("SecretMissingTeamId", func(t *testing.T) { + cluster.Spec.Secret = "secret-missing-team-id" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret-missing-team-id", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "") + assert.Equal(t, team, "") + assert.ErrorContains(t, err, "error handling secret; expected to find a key and a team: found key true, found team false") + }) + + t.Run("GoodSecret", func(t *testing.T) { + cluster.Spec.Secret = "crunchy-bridge-api-key" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "crunchy-bridge-api-key", + Namespace: ns, + }, + Data: map[string][]byte{ + "key": []byte(`asdf`), + "team": []byte(`jkl;`), + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + apiKey, team, err := reconciler.GetSecretKeys(ctx, cluster) + assert.Equal(t, apiKey, "asdf") + assert.Equal(t, team, "jkl;") + assert.NilError(t, err) + }) +} + +func TestDeleteControlled(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + ns := setupNamespace(t, tClient) + 
reconciler := &CrunchyBridgeClusterReconciler{ + Writer: client.WithFieldOwner(tClient, t.Name()), + } + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = strings.ToLower(t.Name()) // originally "hippo-cr" + assert.NilError(t, tClient.Create(ctx, cluster)) + + t.Run("NotControlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "solo" + + assert.NilError(t, tClient.Create(ctx, secret)) + + // No-op when there's no ownership + assert.NilError(t, reconciler.deleteControlled(ctx, cluster, secret)) + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(secret), secret)) + }) + + t.Run("Controlled", func(t *testing.T) { + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "controlled" + + assert.NilError(t, reconciler.setControllerReference(cluster, secret)) + assert.NilError(t, tClient.Create(ctx, secret)) + + // Deletes when controlled by cluster. + assert.NilError(t, reconciler.deleteControlled(ctx, cluster, secret)) + + err := tClient.Get(ctx, client.ObjectKeyFromObject(secret), secret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + }) +} diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go new file mode 100644 index 0000000000..cf5a320f54 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/delete.go @@ -0,0 +1,70 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "time" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +const finalizer = "crunchybridgecluster.postgres-operator.crunchydata.com/finalizer" + +// handleDelete sets a finalizer on cluster and performs the finalization of +// cluster when it is being deleted. It returns (nil, nil) when cluster is +// not being deleted and there are no errors patching the CrunchyBridgeCluster. +// The caller is responsible for returning other values to controller-runtime. +func (r *CrunchyBridgeClusterReconciler) handleDelete( + ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, key string, +) (*ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // If the CrunchyBridgeCluster isn't being deleted, add the finalizer + if crunchybridgecluster.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { + controllerutil.AddFinalizer(crunchybridgecluster, finalizer) + if err := r.Writer.Update(ctx, crunchybridgecluster); err != nil { + return nil, err + } + } + // If the CrunchyBridgeCluster is being deleted, + // handle the deletion, and remove the finalizer + } else { + if controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { + log.Info("deleting cluster", "clusterName", crunchybridgecluster.Spec.ClusterName) + + // TODO(crunchybridgecluster): If is_protected is true, maybe skip this call, but allow the deletion of the K8s object? 
+ _, deletedAlready, err := r.NewClient().DeleteCluster(ctx, key, crunchybridgecluster.Status.ID) + // Requeue if error + if err != nil { + return &ctrl.Result{}, err + } + + if !deletedAlready { + return initialize.Pointer(runtime.RequeueWithoutBackoff(time.Second)), err + } + + // Remove finalizer if deleted already + if deletedAlready { + log.Info("cluster deleted", "clusterName", crunchybridgecluster.Spec.ClusterName) + + controllerutil.RemoveFinalizer(crunchybridgecluster, finalizer) + if err := r.Writer.Update(ctx, crunchybridgecluster); err != nil { + return &ctrl.Result{}, err + } + } + } + // Stop reconciliation as the item is being deleted + return &ctrl.Result{}, nil + } + + return nil, nil +} diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go new file mode 100644 index 0000000000..508c87c5c9 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/delete_test.go @@ -0,0 +1,132 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + "time" + + "gotest.tools/v3/assert" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestHandleDeleteCluster(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient).Name + + firstClusterInBridge := testClusterApiResource() + firstClusterInBridge.ClusterName = "bridge-cluster-1" + secondClusterInBridge := testClusterApiResource() + secondClusterInBridge.ClusterName = "bridge-cluster-2" + secondClusterInBridge.ID = "2345" + + reconciler := &CrunchyBridgeClusterReconciler{ + Writer: client.WithFieldOwner(tClient, t.Name()), + } + testBridgeClient := &TestBridgeClient{ + ApiKey: "9012", + TeamId: "5678", + Clusters: []*bridge.ClusterApiResource{firstClusterInBridge, secondClusterInBridge}, + } + reconciler.NewClient = func() bridge.ClientInterface { + return testBridgeClient + } + + t.Run("SuccessfulDeletion", func(t *testing.T) { + // Create test cluster in kubernetes + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + cluster.Spec.ClusterName = "bridge-cluster-1" + assert.NilError(t, tClient.Create(ctx, cluster)) + + // Run handleDelete + controllerResult, err := reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + + // Make sure that finalizer was added + assert.Check(t, controllerutil.ContainsFinalizer(cluster, finalizer)) + + // Send delete request to kubernetes + assert.NilError(t, tClient.Delete(ctx, cluster)) + + // Get cluster from kubernetes and assert that the deletion timestamp was added + assert.NilError(t, 
tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) + assert.Check(t, !cluster.DeletionTimestamp.IsZero()) + + // Note: We must run handleDelete multiple times because we don't want to remove the + // finalizer until we're sure that the cluster has been deleted from Bridge, so we + // have to do multiple calls/reconcile loops. + // Run handleDelete again to delete from Bridge + cluster.Status.ID = "1234" + controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Equal(t, controllerResult.RequeueAfter, 1*time.Second) + assert.Equal(t, len(testBridgeClient.Clusters), 1) + assert.Equal(t, testBridgeClient.Clusters[0].ClusterName, "bridge-cluster-2") + + // Run handleDelete one last time to remove finalizer + controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Equal(t, *controllerResult, ctrl.Result{}) + + // Make sure that finalizer was removed + assert.Check(t, !controllerutil.ContainsFinalizer(cluster, finalizer)) + }) + + t.Run("UnsuccessfulDeletion", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "2345" + cluster.Spec.ClusterName = "bridge-cluster-2" + assert.NilError(t, tClient.Create(ctx, cluster)) + + // Run handleDelete + controllerResult, err := reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + assert.Check(t, controllerResult == nil) + + // Make sure that finalizer was added + assert.Check(t, controllerutil.ContainsFinalizer(cluster, finalizer)) + + // Send delete request to kubernetes + assert.NilError(t, tClient.Delete(ctx, cluster)) + + // Get cluster from kubernetes and assert that the deletion timestamp was added + assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) + assert.Check(t, !cluster.DeletionTimestamp.IsZero()) + + // Run handleDelete again to attempt to delete from Bridge, but provide bad api key + cluster.Status.ID = "2345" + 
controllerResult, err = reconciler.handleDelete(ctx, cluster, "bad_api_key") + assert.ErrorContains(t, err, "boom") + assert.Equal(t, *controllerResult, ctrl.Result{}) + + // Run handleDelete a couple times with good api key so test can cleanup properly. + // Note: We must run handleDelete multiple times because we don't want to remove the + // finalizer until we're sure that the cluster has been deleted from Bridge, so we + // have to do multiple calls/reconcile loops. + // delete from bridge + _, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + + // remove finalizer + _, err = reconciler.handleDelete(ctx, cluster, "9012") + assert.NilError(t, err) + + // Make sure that finalizer was removed + assert.Check(t, !controllerutil.ContainsFinalizer(cluster, finalizer)) + }) +} diff --git a/internal/bridge/crunchybridgecluster/helpers_test.go b/internal/bridge/crunchybridgecluster/helpers_test.go new file mode 100644 index 0000000000..f8bc4295f6 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/helpers_test.go @@ -0,0 +1,178 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "os" + "strconv" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// Scale extends d according to PGO_TEST_TIMEOUT_SCALE. +var Scale = func(d time.Duration) time.Duration { return d } + +// This function was duplicated from the postgrescluster package. 
+// TODO: Pull these duplicated functions out into a separate, shared package. +func init() { + setting := os.Getenv("PGO_TEST_TIMEOUT_SCALE") + factor, _ := strconv.ParseFloat(setting, 64) + + if setting != "" { + if factor <= 0 { + panic("PGO_TEST_TIMEOUT_SCALE must be a fractional number greater than zero") + } + + Scale = func(d time.Duration) time.Duration { + return time.Duration(factor * float64(d)) + } + } +} + +// setupKubernetes starts or connects to a Kubernetes API and returns a client +// that uses it. See [require.Kubernetes] for more details. +func setupKubernetes(t testing.TB) client.Client { + t.Helper() + + // Start and/or connect to a Kubernetes API, or Skip when that's not configured. + cc := require.Kubernetes(t) + + // Log the status of any test namespaces after this test fails. + t.Cleanup(func() { + if t.Failed() { + var namespaces corev1.NamespaceList + _ = cc.List(context.Background(), &namespaces, client.HasLabels{"postgres-operator-test"}) + + type shaped map[string]corev1.NamespaceStatus + result := make([]shaped, len(namespaces.Items)) + + for i, ns := range namespaces.Items { + result[i] = shaped{ns.Labels["postgres-operator-test"]: ns.Status} + } + + formatted, _ := yaml.Marshal(result) + t.Logf("Test Namespaces:\n%s", formatted) + } + }) + + return cc +} + +// setupNamespace creates a random namespace that will be deleted by t.Cleanup. +// +// Deprecated: Use [require.Namespace] instead. 
+func setupNamespace(t testing.TB, cc client.Client) *corev1.Namespace { + t.Helper() + return require.Namespace(t, cc) +} + +// testCluster defines a base cluster spec that can be used by tests to +// generate a CrunchyBridgeCluster CR +func testCluster() *v1beta1.CrunchyBridgeCluster { + cluster := v1beta1.CrunchyBridgeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo-cr", + }, + Spec: v1beta1.CrunchyBridgeClusterSpec{ + ClusterName: "hippo-cluster", + IsHA: false, + PostgresVersion: 15, + Plan: "standard-8", + Provider: "aws", + Region: "us-east-2", + Secret: "crunchy-bridge-api-key", + Storage: resource.MustParse("10Gi"), + }, + } + return cluster.DeepCopy() +} + +func testClusterApiResource() *bridge.ClusterApiResource { + cluster := bridge.ClusterApiResource{ + ID: "1234", + Host: "example.com", + IsHA: initialize.Bool(false), + IsProtected: initialize.Bool(false), + MajorVersion: 15, + ClusterName: "hippo-cluster", + Plan: "standard-8", + Provider: "aws", + Region: "us-east-2", + Storage: 10, + Team: "5678", + } + return &cluster +} + +func testClusterStatusApiResource(clusterId string) *bridge.ClusterStatusApiResource { + teamId := "5678" + state := "ready" + + clusterStatus := bridge.ClusterStatusApiResource{ + DiskUsage: &bridge.ClusterDiskUsageApiResource{ + DiskAvailableMB: 16, + DiskTotalSizeMB: 16, + DiskUsedMB: 0, + }, + OldestBackup: "oldbackup", + OngoingUpgrade: &bridge.ClusterUpgradeApiResource{ + ClusterID: clusterId, + Operations: []*v1beta1.UpgradeOperation{}, + Team: teamId, + }, + State: state, + } + + return &clusterStatus +} + +func testClusterUpgradeApiResource(clusterId string) *bridge.ClusterUpgradeApiResource { + teamId := "5678" + + clusterUpgrade := bridge.ClusterUpgradeApiResource{ + ClusterID: clusterId, + Operations: []*v1beta1.UpgradeOperation{ + { + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }, + }, + Team: teamId, + } + + return &clusterUpgrade +} + +func testClusterRoleApiResource() 
*bridge.ClusterRoleApiResource { + clusterId := "1234" + teamId := "5678" + roleName := "application" + + clusterRole := bridge.ClusterRoleApiResource{ + AccountEmail: "test@email.com", + AccountId: "12345678", + ClusterId: clusterId, + Flavor: "chocolate", + Name: roleName, + Password: "application-password", + Team: teamId, + URI: "connection-string", + } + + return &clusterRole +} diff --git a/internal/bridge/crunchybridgecluster/mock_bridge_api.go b/internal/bridge/crunchybridgecluster/mock_bridge_api.go new file mode 100644 index 0000000000..f0439531d1 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/mock_bridge_api.go @@ -0,0 +1,246 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +type TestBridgeClient struct { + ApiKey string `json:"apiKey,omitempty"` + TeamId string `json:"teamId,omitempty"` + Clusters []*bridge.ClusterApiResource `json:"clusters,omitempty"` + ClusterRoles []*bridge.ClusterRoleApiResource `json:"clusterRoles,omitempty"` + ClusterStatuses map[string]*bridge.ClusterStatusApiResource `json:"clusterStatuses,omitempty"` + ClusterUpgrades map[string]*bridge.ClusterUpgradeApiResource `json:"clusterUpgrades,omitempty"` +} + +func (tbc *TestBridgeClient) ListClusters(ctx context.Context, apiKey, teamId string) ([]*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey && teamId == tbc.TeamId { + return tbc.Clusters, nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpgradeCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *bridge.PostClustersUpgradeRequestPayload, +) 
(*bridge.ClusterUpgradeApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + result := &bridge.ClusterUpgradeApiResource{ + ClusterID: id, + Team: tbc.TeamId, + } + if clusterRequestPayload.Plan != desiredCluster.Plan { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "maintenance", + StartingFrom: "", + State: "in_progress", + }, + } + } else if clusterRequestPayload.PostgresVersion != intstr.FromInt(desiredCluster.MajorVersion) { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "major_version_upgrade", + StartingFrom: "", + State: "in_progress", + }, + } + } else if clusterRequestPayload.Storage != desiredCluster.Storage { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "resize", + StartingFrom: "", + State: "in_progress", + }, + } + } + return result, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpgradeClusterHA(ctx context.Context, apiKey, id, action string, +) (*bridge.ClusterUpgradeApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + result := &bridge.ClusterUpgradeApiResource{ + ClusterID: id, + Team: tbc.TeamId, + } + if action == "enable-ha" && !*desiredCluster.IsHA { + result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "ha_change", + StartingFrom: "", + State: "enabling_ha", + }, + } + } else if action == "disable-ha" && *desiredCluster.IsHA { + 
result.Operations = []*v1beta1.UpgradeOperation{ + { + Flavor: "ha_change", + StartingFrom: "", + State: "disabling_ha", + }, + } + } else { + return nil, errors.New("no change detected") + } + return result, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) UpdateCluster(ctx context.Context, apiKey, id string, clusterRequestPayload *bridge.PatchClustersRequestPayload, +) (*bridge.ClusterApiResource, error) { + // look for cluster + var desiredCluster *bridge.ClusterApiResource + clusterFound := false + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + desiredCluster = cluster + clusterFound = true + } + } + if !clusterFound { + return nil, errors.New("cluster not found") + } + + // happy path + if apiKey == tbc.ApiKey { + desiredCluster.ClusterName = clusterRequestPayload.Name + desiredCluster.IsProtected = clusterRequestPayload.IsProtected + return desiredCluster, nil + } + // sad path + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) CreateCluster(ctx context.Context, apiKey string, + clusterRequestPayload *bridge.PostClustersRequestPayload) (*bridge.ClusterApiResource, error) { + + if apiKey == tbc.ApiKey && clusterRequestPayload.Team == tbc.TeamId && clusterRequestPayload.Name != "" && + clusterRequestPayload.Plan != "" { + cluster := &bridge.ClusterApiResource{ + ID: fmt.Sprint(len(tbc.Clusters)), + Host: "example.com", + IsHA: initialize.Bool(clusterRequestPayload.IsHA), + MajorVersion: clusterRequestPayload.PostgresVersion.IntValue(), + ClusterName: clusterRequestPayload.Name, + Plan: clusterRequestPayload.Plan, + Provider: clusterRequestPayload.Provider, + Region: clusterRequestPayload.Region, + Storage: clusterRequestPayload.Storage, + } + tbc.Clusters = append(tbc.Clusters, cluster) + + return cluster, nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetCluster(ctx context.Context, apiKey, id string) (*bridge.ClusterApiResource, error) { + + if apiKey == 
tbc.ApiKey { + for _, cluster := range tbc.Clusters { + if cluster.ID == id { + return cluster, nil + } + } + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterStatus(ctx context.Context, apiKey, id string) (*bridge.ClusterStatusApiResource, error) { + + if apiKey == tbc.ApiKey { + return tbc.ClusterStatuses[id], nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*bridge.ClusterUpgradeApiResource, error) { + + if apiKey == tbc.ApiKey { + return tbc.ClusterUpgrades[id], nil + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName string) (*bridge.ClusterRoleApiResource, error) { + + if apiKey == tbc.ApiKey { + for _, clusterRole := range tbc.ClusterRoles { + if clusterRole.ClusterId == clusterId && clusterRole.Name == roleName { + return clusterRole, nil + } + } + } + + return nil, errors.New("boom") +} + +func (tbc *TestBridgeClient) DeleteCluster(ctx context.Context, apiKey, clusterId string) (*bridge.ClusterApiResource, bool, error) { + alreadyDeleted := true + var cluster *bridge.ClusterApiResource + + if apiKey == tbc.ApiKey { + for i := len(tbc.Clusters) - 1; i >= 0; i-- { + if tbc.Clusters[i].ID == clusterId { + cluster = tbc.Clusters[i] + alreadyDeleted = false + tbc.Clusters = append(tbc.Clusters[:i], tbc.Clusters[i+1:]...) + return cluster, alreadyDeleted, nil + } + } + } else { + return nil, alreadyDeleted, errors.New("boom") + } + + return nil, alreadyDeleted, nil +} diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go new file mode 100644 index 0000000000..f8b8bf6b12 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -0,0 +1,164 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// generatePostgresRoleSecret returns a Secret containing a password and +// connection details for the appropriate database. +func (r *CrunchyBridgeClusterReconciler) generatePostgresRoleSecret( + cluster *v1beta1.CrunchyBridgeCluster, roleSpec *v1beta1.CrunchyBridgeClusterRoleSpec, + clusterRole *bridge.ClusterRoleApiResource, +) (*corev1.Secret, error) { + roleName := roleSpec.Name + secretName := roleSpec.SecretName + intent := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: secretName, + }} + intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + intent.StringData = map[string]string{ + "name": clusterRole.Name, + "password": clusterRole.Password, + "uri": clusterRole.URI, + } + + intent.Annotations = cluster.Spec.Metadata.GetAnnotationsOrNil() + intent.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleCrunchyBridgeClusterPostgresRole, + naming.LabelCrunchyBridgeClusterPostgresRole: roleName, + }) + + err := errors.WithStack(r.setControllerReference(cluster, intent)) + + return intent, err +} + +// reconcilePostgresRoles writes the objects necessary to manage roles and their +// passwords in PostgreSQL. 
+func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoles( + ctx context.Context, apiKey string, cluster *v1beta1.CrunchyBridgeCluster, +) error { + _, _, err := r.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + + // TODO: If we ever add a PgAdmin feature to CrunchyBridgeCluster, we will + // want to add the role credentials to PgAdmin here + + return err +} + +func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( + ctx context.Context, apiKey string, cluster *v1beta1.CrunchyBridgeCluster, +) ( + []*v1beta1.CrunchyBridgeClusterRoleSpec, map[string]*corev1.Secret, error, +) { + log := ctrl.LoggerFrom(ctx) + specRoles := cluster.Spec.Roles + + // Index role specifications by PostgreSQL role name and make sure that none of the + // secretNames are identical in the spec + secretNames := make(map[string]bool) + roleSpecs := make(map[string]*v1beta1.CrunchyBridgeClusterRoleSpec, len(specRoles)) + for i := range specRoles { + if secretNames[specRoles[i].SecretName] { + // Duplicate secretName found, return early with error + err := errors.New("Two or more of the Roles in the CrunchyBridgeCluster spec " + + "have the same SecretName. 
Role SecretNames must be unique.") + return nil, nil, err + } + secretNames[specRoles[i].SecretName] = true + + roleSpecs[specRoles[i].Name] = specRoles[i] + } + + // Make sure that this cluster's role secret names are not being used by any other + // secrets in the namespace + allSecretsInNamespace := &corev1.SecretList{} + err := errors.WithStack(r.Reader.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) + if err != nil { + return nil, nil, err + } + for _, secret := range allSecretsInNamespace.Items { + if secretNames[secret.Name] { + existingSecretLabels := secret.GetLabels() + if existingSecretLabels[naming.LabelCluster] != cluster.Name || + existingSecretLabels[naming.LabelRole] != naming.RoleCrunchyBridgeClusterPostgresRole { + err = errors.New( + fmt.Sprintf("There is already an existing Secret in this namespace with the name %v. "+ + "Please choose a different name for this role's Secret.", secret.Name), + ) + return nil, nil, err + } + } + } + + // Gather existing role secrets + secrets := &corev1.SecretList{} + selector, err := naming.AsSelector(naming.CrunchyBridgeClusterPostgresRoles(cluster.Name)) + if err == nil { + err = errors.WithStack( + r.Reader.List(ctx, secrets, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selector}, + )) + } + + // Index secrets by PostgreSQL role name and delete any that are not in the + // cluster spec. + roleSecrets := make(map[string]*corev1.Secret, len(secrets.Items)) + if err == nil { + for i := range secrets.Items { + secret := &secrets.Items[i] + secretRoleName := secret.Labels[naming.LabelCrunchyBridgeClusterPostgresRole] + + roleSpec, specified := roleSpecs[secretRoleName] + if specified && roleSpec.SecretName == secret.Name { + roleSecrets[secretRoleName] = secret + } else if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, secret)) + } + } + } + + // Reconcile each PostgreSQL role in the cluster spec. 
+ for roleName, role := range roleSpecs { + // Get ClusterRole from Bridge API + clusterRole, err := r.NewClient().GetClusterRole(ctx, apiKey, cluster.Status.ID, roleName) + // If issue with getting ClusterRole, log error and move on to next role + if err != nil { + // TODO (dsessler7): Emit event here? + log.Error(err, "issue retrieving cluster role from Bridge") + continue + } + if err == nil { + roleSecrets[roleName], err = r.generatePostgresRoleSecret(cluster, role, clusterRole) + } + if err == nil { + err = errors.WithStack(runtime.Apply(ctx, r.Writer, roleSecrets[roleName])) + } + if err != nil { + log.Error(err, "Issue creating role secret.") + } + } + + return specRoles, roleSecrets, err +} diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go new file mode 100644 index 0000000000..f2594bbba4 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/postgres_test.go @@ -0,0 +1,232 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGeneratePostgresRoleSecret(t *testing.T) { + reconciler := &CrunchyBridgeClusterReconciler{} + + cluster := testCluster() + cluster.Namespace = "asdf" + + spec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + role := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "password", + URI: "postgres://application:password@example.com:5432/postgres", + } + t.Run("ObjectMeta", func(t *testing.T) { + secret, err := reconciler.generatePostgresRoleSecret(cluster, spec, role) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, secret.Namespace, cluster.Namespace) + assert.Assert(t, metav1.IsControlledBy(secret, cluster)) + assert.DeepEqual(t, secret.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-cr", + "postgres-operator.crunchydata.com/role": "cbc-pgrole", + "postgres-operator.crunchydata.com/cbc-pgrole": "application", + }) + } + }) + + t.Run("Data", func(t *testing.T) { + secret, err := reconciler.generatePostgresRoleSecret(cluster, spec, role) + assert.NilError(t, err) + + if assert.Check(t, secret != nil) { + assert.Equal(t, secret.StringData["name"], "application") + assert.Equal(t, secret.StringData["password"], "password") + assert.Equal(t, secret.StringData["uri"], + "postgres://application:password@example.com:5432/postgres") + } + }) +} + +func 
TestReconcilePostgresRoleSecrets(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + apiKey := "9012" + ns := setupNamespace(t, tClient).Name + + reconciler := &CrunchyBridgeClusterReconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } + + t.Run("DuplicateSecretNameInSpec", func(t *testing.T) { + cluster := testCluster() + cluster.Namespace = ns + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "role-secret", + } + spec2 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "postgres", + SecretName: "role-secret", + } + cluster.Spec.Roles = append(cluster.Spec.Roles, spec1, spec2) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice == nil) + assert.Check(t, secretMap == nil) + assert.ErrorContains(t, err, "Two or more of the Roles in the CrunchyBridgeCluster spec have "+ + "the same SecretName. Role SecretNames must be unique.", "expected duplicate secret name error") + }) + + t.Run("DuplicateSecretNameInNamespace", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "role-secret", + Namespace: ns, + }, + StringData: map[string]string{ + "path": "stuff", + }, + } + assert.NilError(t, tClient.Create(ctx, secret)) + + cluster := testCluster() + cluster.Namespace = ns + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "role-secret", + } + + cluster.Spec.Roles = append(cluster.Spec.Roles, spec1) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice == nil) + assert.Check(t, secretMap == nil) + assert.ErrorContains(t, err, "There is already an existing Secret in this namespace with the name role-secret. 
"+ + "Please choose a different name for this role's Secret.", "expected duplicate secret name error") + }) + + t.Run("UnusedSecretsGetRemoved", func(t *testing.T) { + applicationRoleInBridge := testClusterRoleApiResource() + postgresRoleInBridge := testClusterRoleApiResource() + postgresRoleInBridge.Name = "postgres" + postgresRoleInBridge.Password = "postgres-password" + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: apiKey, + TeamId: "5678", + ClusterRoles: []*bridge.ClusterRoleApiResource{applicationRoleInBridge, postgresRoleInBridge}, + } + } + + applicationSpec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + postgresSpec := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "postgres", + SecretName: "postgres-role-secret", + } + + cluster := testCluster() + cluster.Namespace = ns + cluster.Status.ID = "1234" + // Add one role to cluster spec + cluster.Spec.Roles = append(cluster.Spec.Roles, applicationSpec) + assert.NilError(t, tClient.Create(ctx, cluster)) + + applicationRole := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "application-password", + URI: "connection-string", + } + postgresRole := &bridge.ClusterRoleApiResource{ + Name: "postgres", + Password: "postgres-password", + URI: "connection-string", + } + + // Generate secrets + applicationSecret, err := reconciler.generatePostgresRoleSecret(cluster, applicationSpec, applicationRole) + assert.NilError(t, err) + postgresSecret, err := reconciler.generatePostgresRoleSecret(cluster, postgresSpec, postgresRole) + assert.NilError(t, err) + + // Create secrets in k8s + assert.NilError(t, tClient.Create(ctx, applicationSecret)) + assert.NilError(t, tClient.Create(ctx, postgresSecret)) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice != nil) + assert.Check(t, secretMap != nil) + assert.NilError(t, err) 
+ + // Assert that postgresSecret was deleted since its associated role is not in the spec + err = tClient.Get(ctx, client.ObjectKeyFromObject(postgresSecret), postgresSecret) + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) + + // Assert that applicationSecret is still there + err = tClient.Get(ctx, client.ObjectKeyFromObject(applicationSecret), applicationSecret) + assert.NilError(t, err) + }) + + t.Run("SecretsGetUpdated", func(t *testing.T) { + clusterRoleInBridge := testClusterRoleApiResource() + clusterRoleInBridge.Password = "different-password" + reconciler.NewClient = func() bridge.ClientInterface { + return &TestBridgeClient{ + ApiKey: apiKey, + TeamId: "5678", + ClusterRoles: []*bridge.ClusterRoleApiResource{clusterRoleInBridge}, + } + } + + cluster := testCluster() + cluster.Namespace = ns + err := tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster) + assert.NilError(t, err) + cluster.Status.ID = "1234" + + spec1 := &v1beta1.CrunchyBridgeClusterRoleSpec{ + Name: "application", + SecretName: "application-role-secret", + } + role1 := &bridge.ClusterRoleApiResource{ + Name: "application", + Password: "test", + URI: "connection-string", + } + // Generate secret + secret1, err := reconciler.generatePostgresRoleSecret(cluster, spec1, role1) + assert.NilError(t, err) + + roleSpecSlice, secretMap, err := reconciler.reconcilePostgresRoleSecrets(ctx, apiKey, cluster) + assert.Check(t, roleSpecSlice != nil) + assert.Check(t, secretMap != nil) + assert.NilError(t, err) + + // Assert that secret1 was updated + err = tClient.Get(ctx, client.ObjectKeyFromObject(secret1), secret1) + assert.NilError(t, err) + assert.Equal(t, string(secret1.Data["password"]), "different-password") + }) +} diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go new file mode 100644 index 0000000000..ac9b59b429 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/watches.go @@ -0,0 +1,39 
@@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} + +// findCrunchyBridgeClustersForSecret returns CrunchyBridgeClusters +// that are connected to the Secret +func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret( + ctx context.Context, secret client.ObjectKey, +) []*v1beta1.CrunchyBridgeCluster { + var matching []*v1beta1.CrunchyBridgeCluster + var clusters v1beta1.CrunchyBridgeClusterList + + // NOTE: If this becomes slow due to a large number of CrunchyBridgeClusters in a single + // namespace, we can configure the [manager.Manager] field indexer and pass a + // [fields.Selector] here. + // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if err := r.Reader.List(ctx, &clusters, &client.ListOptions{ + Namespace: secret.Namespace, + }); err == nil { + for i := range clusters.Items { + if clusters.Items[i].Spec.Secret == secret.Name { + matching = append(matching, &clusters.Items[i]) + } + } + } + return matching +} diff --git a/internal/bridge/crunchybridgecluster/watches_test.go b/internal/bridge/crunchybridgecluster/watches_test.go new file mode 100644 index 0000000000..b7e6f67f31 --- /dev/null +++ b/internal/bridge/crunchybridgecluster/watches_test.go @@ -0,0 +1,84 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package crunchybridgecluster + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestFindCrunchyBridgeClustersForSecret(t *testing.T) { + ctx := context.Background() + tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + ns := setupNamespace(t, tClient) + reconciler := &CrunchyBridgeClusterReconciler{Reader: tClient} + + secret := &corev1.Secret{} + secret.Namespace = ns.Name + secret.Name = "crunchy-bridge-api-key" + + assert.NilError(t, tClient.Create(ctx, secret)) + secretObjectKey := client.ObjectKeyFromObject(secret) + + t.Run("NoClusters", func(t *testing.T) { + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 0) + }) + + t.Run("OneCluster", func(t *testing.T) { + cluster1 := testCluster() + cluster1.Namespace = ns.Name + cluster1.Name = "first-cluster" + assert.NilError(t, tClient.Create(ctx, cluster1)) + + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 1) + assert.Equal(t, clusters[0].Name, "first-cluster") + }) + + t.Run("TwoClusters", func(t *testing.T) { + cluster2 := testCluster() + cluster2.Namespace = ns.Name + cluster2.Name = "second-cluster" + assert.NilError(t, tClient.Create(ctx, cluster2)) + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 2) + clusterCount := map[string]int{} + for _, cluster := range clusters { + clusterCount[cluster.Name] += 1 + } + assert.Equal(t, clusterCount["first-cluster"], 1) + assert.Equal(t, clusterCount["second-cluster"], 1) + }) + + t.Run("ClusterWithDifferentSecretNameNotIncluded", func(t *testing.T) { + cluster3 := testCluster() + cluster3.Namespace = ns.Name + cluster3.Name = 
"third-cluster" + cluster3.Spec.Secret = "different-secret-name" + assert.NilError(t, tClient.Create(ctx, cluster3)) + clusters := reconciler.findCrunchyBridgeClustersForSecret(ctx, secretObjectKey) + + assert.Equal(t, len(clusters), 2) + clusterCount := map[string]int{} + for _, cluster := range clusters { + clusterCount[cluster.Name] += 1 + } + assert.Equal(t, clusterCount["first-cluster"], 1) + assert.Equal(t, clusterCount["second-cluster"], 1) + assert.Equal(t, clusterCount["third-cluster"], 0) + }) +} diff --git a/internal/bridge/installation.go b/internal/bridge/installation.go new file mode 100644 index 0000000000..3464c6f020 --- /dev/null +++ b/internal/bridge/installation.go @@ -0,0 +1,280 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "context" + "encoding/json" + "errors" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + corev1apply "k8s.io/client-go/applyconfigurations/core/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" +) + +// self is a singleton Installation. See [InstallationReconciler]. 
+var self = new(struct { + Installation + sync.RWMutex +}) + +type AuthObject struct { + ID string `json:"id"` + ExpiresAt time.Time `json:"expires_at"` + Secret string `json:"secret"` +} + +type Installation struct { + ID string `json:"id"` + AuthObject AuthObject `json:"auth_object"` +} + +type InstallationReconciler struct { + Owner client.FieldOwner + Reader interface { + Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + } + Writer interface { + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + } + + // Refresh is the frequency at which AuthObjects should be renewed. + Refresh time.Duration + + // SecretRef is the name of the corev1.Secret in which to store Bridge tokens. + SecretRef client.ObjectKey + + // NewClient is called each time a new Client is needed. + NewClient func() *Client +} + +// ManagedInstallationReconciler creates an [InstallationReconciler] and adds it to m. +func ManagedInstallationReconciler(m manager.Manager, newClient func() *Client) error { + kubernetes := m.GetClient() + reconciler := &InstallationReconciler{ + Owner: naming.ControllerBridge, + Reader: kubernetes, + Writer: kubernetes, + Refresh: 2 * time.Hour, + SecretRef: naming.AsObjectKey(naming.OperatorConfigurationSecret()), + NewClient: newClient, + } + + // NOTE: This name was selected to show something interesting in the logs. + // The default is "secret". + // TODO: Pick this name considering metrics and other controllers. + return builder.ControllerManagedBy(m).Named("installation"). + // + // Reconcile the one Secret that holds Bridge tokens. + For(&corev1.Secret{}, builder.WithPredicates( + predicate.NewPredicateFuncs(func(secret client.Object) bool { + return client.ObjectKeyFromObject(secret) == reconciler.SecretRef + }), + )). + // + // Wake periodically even when that Secret does not exist. 
+ WatchesRawSource( + runtime.NewTickerImmediate(time.Hour, event.GenericEvent{}, + handler.EnqueueRequestsFromMapFunc( + func(context.Context, client.Object) []reconcile.Request { + return []reconcile.Request{{NamespacedName: reconciler.SecretRef}} + }, + ), + ), + ). + // + Complete(reconciler) +} + +func (r *InstallationReconciler) Reconcile( + ctx context.Context, request reconcile.Request) (reconcile.Result, error, +) { + result := reconcile.Result{} + secret := &corev1.Secret{} + err := client.IgnoreNotFound(r.Reader.Get(ctx, request.NamespacedName, secret)) + + if err == nil { + // It is easier later to treat a missing Secret the same as one that exists + // and is empty. Fill in the metadata with information from the request to + // make it so. + secret.Namespace, secret.Name = request.Namespace, request.Name + + result.RequeueAfter, err = r.reconcile(ctx, secret) + } + + // Nothing can be written to a deleted namespace. + if err != nil && apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause) { + return runtime.ErrorWithoutBackoff(err) + } + + // Write conflicts are returned as errors; log and retry with backoff. + if err != nil && apierrors.IsConflict(err) { + logging.FromContext(ctx).Info("Requeue", "reason", err) + return runtime.RequeueWithBackoff(), nil + } + + return result, err +} + +// reconcile looks for an Installation in read and stores it or another in +// the [self] singleton after a successful response from the Bridge API. +func (r *InstallationReconciler) reconcile( + ctx context.Context, read *corev1.Secret) (next time.Duration, err error, +) { + write, err := corev1apply.ExtractSecret(read, string(r.Owner)) + if err != nil { + return 0, err + } + + // We GET-extract-PATCH the Secret and do not build it up from scratch. + // Send the ResourceVersion from the GET in the body of every PATCH. 
+ if len(read.ResourceVersion) != 0 { + write.WithResourceVersion(read.ResourceVersion) + } + + // Read the Installation from the Secret, if any. + var installation Installation + if yaml.Unmarshal(read.Data[KeyBridgeToken], &installation) != nil { + installation = Installation{} + } + + // When the Secret lacks an Installation, write the one we have in memory + // or register with the API for a new one. In both cases, we write to the + // Secret which triggers another reconcile. + if len(installation.ID) == 0 { + if len(self.ID) == 0 { + return 0, r.register(ctx, write) + } + + data := map[string][]byte{} + data[KeyBridgeToken], _ = json.Marshal(self.Installation) //nolint:errchkjson + + return 0, r.persist(ctx, write.WithData(data)) + } + + // Read the timestamp from the Secret, if any. + var touched time.Time + if yaml.Unmarshal(read.Data[KeyBridgeLocalTime], &touched) != nil { + touched = time.Time{} + } + + // Refresh the AuthObject when there is no Installation in memory, + // there is no timestamp, or the timestamp is far away. This writes to + // the Secret which triggers another reconcile. + if len(self.ID) == 0 || time.Since(touched) > r.Refresh || time.Until(touched) > r.Refresh { + return 0, r.refresh(ctx, installation, write) + } + + // Trigger another reconcile one interval after the stored timestamp. + return wait.Jitter(time.Until(touched.Add(r.Refresh)), 0.1), nil +} + +// persist uses Server-Side Apply to write config to Kubernetes. The Name and +// Namespace fields cannot be nil. +func (r *InstallationReconciler) persist( + ctx context.Context, config *corev1apply.SecretApplyConfiguration, +) error { + data, err := json.Marshal(config) + apply := client.RawPatch(client.Apply.Type(), data) + + // [client.Client] decides where to write by looking at the underlying type, + // namespace, and name of its [client.Object] argument. That is also where + // it stores the API response. 
+ target := corev1.Secret{} + target.Namespace, target.Name = *config.Namespace, *config.Name + + if err == nil { + err = r.Writer.Patch(ctx, &target, apply, r.Owner, client.ForceOwnership) + } + + return err +} + +// refresh calls the Bridge API to refresh the AuthObject of installation. It +// combines the result with installation and stores that in the [self] singleton +// and the write object in Kubernetes. The Name and Namespace fields of the +// latter cannot be nil. +func (r *InstallationReconciler) refresh( + ctx context.Context, installation Installation, + write *corev1apply.SecretApplyConfiguration, +) error { + result, err := r.NewClient().CreateAuthObject(ctx, installation.AuthObject) + + // An authentication error means the installation is irrecoverably expired. + // Remove it from the singleton and move it to a dated entry in the Secret. + if err != nil && errors.Is(err, errAuthentication) { + self.Lock() + self.Installation = Installation{} + self.Unlock() + + keyExpiration := KeyBridgeToken + + installation.AuthObject.ExpiresAt.UTC().Format("--2006-01-02") + + data := make(map[string][]byte, 2) + data[KeyBridgeToken] = nil + data[keyExpiration], _ = json.Marshal(installation) //nolint:errchkjson + + return r.persist(ctx, write.WithData(data)) + } + + if err == nil { + installation.AuthObject = result + + // Store the new value in the singleton. + self.Lock() + self.Installation = installation + self.Unlock() + + // Store the new value in the Secret along with the current time. + data := make(map[string][]byte, 2) + data[KeyBridgeLocalTime], _ = metav1.Now().MarshalJSON() + data[KeyBridgeToken], _ = json.Marshal(installation) //nolint:errchkjson + + err = r.persist(ctx, write.WithData(data)) + } + + return err +} + +// register calls the Bridge API to register a new Installation. It stores the +// result in the [self] singleton and the write object in Kubernetes. The Name +// and Namespace fields of the latter cannot be nil. 
+func (r *InstallationReconciler) register( + ctx context.Context, write *corev1apply.SecretApplyConfiguration, +) error { + installation, err := r.NewClient().CreateInstallation(ctx) + + if err == nil { + // Store the new value in the singleton. + self.Lock() + self.Installation = installation + self.Unlock() + + // Store the new value in the Secret along with the current time. + data := make(map[string][]byte, 2) + data[KeyBridgeLocalTime], _ = metav1.Now().MarshalJSON() + data[KeyBridgeToken], _ = json.Marshal(installation) //nolint:errchkjson + + err = r.persist(ctx, write.WithData(data)) + } + + return err +} diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go new file mode 100644 index 0000000000..f7a86e2d3a --- /dev/null +++ b/internal/bridge/installation_test.go @@ -0,0 +1,491 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + "gotest.tools/v3/assert" + cmpopt "gotest.tools/v3/assert/opt" + corev1 "k8s.io/api/core/v1" + corev1apply "k8s.io/client-go/applyconfigurations/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" +) + +func TestExtractSecretContract(t *testing.T) { + // We expect ExtractSecret to populate GVK, Namespace, and Name. 
+ + t.Run("GVK", func(t *testing.T) { + empty := &corev1.Secret{} + + extracted, err := corev1apply.ExtractSecret(empty, "") + assert.NilError(t, err) + + if assert.Check(t, extracted.APIVersion != nil) { + assert.Equal(t, *extracted.APIVersion, "v1") + } + if assert.Check(t, extracted.Kind != nil) { + assert.Equal(t, *extracted.Kind, "Secret") + } + }) + + t.Run("Name", func(t *testing.T) { + named := &corev1.Secret{} + named.Namespace, named.Name = "ns1", "s2" + + extracted, err := corev1apply.ExtractSecret(named, "") + assert.NilError(t, err) + + if assert.Check(t, extracted.Namespace != nil) { + assert.Equal(t, *extracted.Namespace, "ns1") + } + if assert.Check(t, extracted.Name != nil) { + assert.Equal(t, *extracted.Name, "s2") + } + }) + + t.Run("ResourceVersion", func(t *testing.T) { + versioned := &corev1.Secret{} + versioned.ResourceVersion = "asdf" + + extracted, err := corev1apply.ExtractSecret(versioned, "") + assert.NilError(t, err) + + // ResourceVersion is not copied from the original. + assert.Assert(t, extracted.ResourceVersion == nil) + }) +} + +func TestInstallationReconcile(t *testing.T) { + // Scenario: + // When there is no Secret and no Installation in memory, + // Then Reconcile should register with the API. + // + t.Run("FreshStart", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func() { + reconciler = new(InstallationReconciler) + secret = new(corev1.Secret) + self.Installation = Installation{} + } + + t.Run("ItRegisters", func(t *testing.T) { + beforeEach() + + // API double; spy on requests. 
+ var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{ + "id": "abc", "auth_object": map[string]any{"secret": "xyz"}, + }) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + // It calls the API. + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/installations") + + // It stores the result in memory. + assert.Equal(t, self.ID, "abc") + assert.Equal(t, self.AuthObject.Secret, "xyz") + + // It stores the result in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + require.UnmarshalInto(t, &decoded, applies[0]) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"abc"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"xyz"`)) + }) + + t.Run("KubernetesError", func(t *testing.T) { + beforeEach() + + // API double; successful. 
+ { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _ = json.NewEncoder(w).Encode(map[string]any{ + "id": "123", "auth_object": map[string]any{"secret": "456"}, + }) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; failure. + expected := errors.New("boom") + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return expected + }) + } + + ctx := context.Background() + _, err := reconciler.reconcile(ctx, secret) + assert.Equal(t, err, expected, "expected a Kubernetes error") + + // It stores the API result in memory. + assert.Equal(t, self.ID, "123") + assert.Equal(t, self.AuthObject.Secret, "456") + }) + }) + + // Scenario: + // When there is no Secret but an Installation exists in memory, + // Then Reconcile should store it in Kubernetes. + // + t.Run("LostSecret", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func(token []byte) { + reconciler = new(InstallationReconciler) + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: token, + } + self.Installation = Installation{ID: "asdf"} + } + + for _, tt := range []struct { + Name string + Token []byte + }{ + {Name: "NoToken", Token: nil}, + {Name: "BadToken", Token: []byte(`asdf`)}, + } { + t.Run(tt.Name, func(t *testing.T) { + beforeEach(tt.Token) + + // Kubernetes double; spy on SSA patches. 
+ var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, self.ID, "asdf", "expected no change to memory") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + require.UnmarshalInto(t, &decoded, applies[0]) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"asdf"`)) + }) + } + + t.Run("KubernetesError", func(t *testing.T) { + beforeEach(nil) + + // Kubernetes double; failure. + expected := errors.New("boom") + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return expected + }) + } + + ctx := context.Background() + _, err := reconciler.reconcile(ctx, secret) + assert.Equal(t, err, expected, "expected a Kubernetes error") + assert.Equal(t, self.ID, "asdf", "expected no change to memory") + }) + }) + + // Scenario: + // When there is a Secret but no Installation in memory, + // Then Reconcile should verify it in the API and store it in memory. 
+ // + t.Run("Restart", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func() { + reconciler = new(InstallationReconciler) + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: []byte(`{ + "id":"xyz", "auth_object":{ + "secret":"abc", + "expires_at":"2020-10-28T05:06:07Z" + } + }`), + } + self.Installation = Installation{} + } + + t.Run("ItVerifies", func(t *testing.T) { + beforeEach() + + // API double; spy on requests. + var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{"secret": "def"}) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer abc") + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/auth-objects") + + // It stores the result in memory. + assert.Equal(t, self.ID, "xyz") + assert.Equal(t, self.AuthObject.Secret, "def") + + // It stores the memory in Kubernetes. 
+ assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + require.UnmarshalInto(t, &decoded, applies[0]) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"xyz"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"def"`)) + }) + + t.Run("Expired", func(t *testing.T) { + beforeEach() + + // API double; authentication error. + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. + var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.DeepEqual(t, self.Installation, Installation{}) + + // It archives the expired one. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + require.UnmarshalInto(t, &decoded, applies[0]) + assert.Equal(t, len(decoded.Data["bridge-token"]), 0) + + archived := string(decoded.Data["bridge-token--2020-10-28"]) + assert.Assert(t, cmp.Contains(archived, `"id":"xyz"`)) + assert.Assert(t, cmp.Contains(archived, `"secret":"abc"`)) + }) + }) + + // Scenario: + // When there is an Installation in the Secret and in memory, + // Then Reconcile should refresh it periodically. 
+ // + t.Run("Refresh", func(t *testing.T) { + var reconciler *InstallationReconciler + var secret *corev1.Secret + + beforeEach := func(timestamp []byte) { + reconciler = new(InstallationReconciler) + reconciler.Refresh = time.Minute + + secret = new(corev1.Secret) + secret.Data = map[string][]byte{ + KeyBridgeToken: []byte(`{"id":"ddd", "auth_object":{"secret":"eee"}}`), + KeyBridgeLocalTime: timestamp, + } + + self.Installation = Installation{ID: "ddd"} + } + + for _, tt := range []struct { + Name string + Timestamp []byte + }{ + {Name: "NoTimestamp", Timestamp: nil}, + {Name: "BadTimestamp", Timestamp: []byte(`asdf`)}, + {Name: "OldTimestamp", Timestamp: []byte(`"2020-10-10T20:20:20Z"`)}, + {Name: "FutureTimestamp", Timestamp: []byte(`"2030-10-10T20:20:20Z"`)}, + } { + t.Run(tt.Name, func(t *testing.T) { + beforeEach(tt.Timestamp) + + // API double; spy on requests. + var requests []http.Request + { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, *r) + _ = json.NewEncoder(w).Encode(map[string]any{"secret": "fresh"}) + })) + t.Cleanup(server.Close) + + reconciler.NewClient = func() *Client { + c := NewClient(server.URL, "") + c.Steps = 1 + assert.Equal(t, c.BaseURL.String(), server.URL) + return c + } + } + + // Kubernetes double; spy on SSA patches. 
+ var applies []string + { + reconciler.Writer = runtime.ClientPatch(func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + assert.Equal(t, string(patch.Type()), "application/apply-patch+yaml") + + data, err := patch.Data(obj) + applies = append(applies, string(data)) + return err + }) + } + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + assert.Assert(t, next == 0) + + assert.Equal(t, len(requests), 1) + assert.Equal(t, requests[0].Header.Get("Authorization"), "Bearer eee") + assert.Equal(t, requests[0].Method, "POST") + assert.Equal(t, requests[0].URL.Path, "/vendor/operator/auth-objects") + + // It stores the result in memory. + assert.Equal(t, self.ID, "ddd") + assert.Equal(t, self.AuthObject.Secret, "fresh") + + // It stores the memory in Kubernetes. + assert.Equal(t, len(applies), 1) + assert.Assert(t, cmp.Contains(applies[0], `"kind":"Secret"`)) + + var decoded corev1.Secret + require.UnmarshalInto(t, &decoded, applies[0]) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"id":"ddd"`)) + assert.Assert(t, cmp.Contains(string(decoded.Data["bridge-token"]), `"secret":"fresh"`)) + }) + } + + t.Run("CurrentTimestamp", func(t *testing.T) { + current := time.Now().Add(-15 * time.Minute) + currentJSON, _ := current.UTC().MarshalJSON() + + beforeEach(currentJSON) + reconciler.Refresh = time.Hour + + // Any API calls would panic because no spies are configured here. + + ctx := context.Background() + next, err := reconciler.reconcile(ctx, secret) + assert.NilError(t, err) + + // The next reconcile is scheduled around (60 - 15 =) 45 minutes + // from now, plus or minus (60 * 10% =) 6 minutes of jitter. 
+ assert.DeepEqual(t, next, 45*time.Minute, + cmpopt.DurationWithThreshold(6*time.Minute)) + }) + }) +} diff --git a/internal/bridge/naming.go b/internal/bridge/naming.go new file mode 100644 index 0000000000..7b8b6a9223 --- /dev/null +++ b/internal/bridge/naming.go @@ -0,0 +1,10 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +const ( + KeyBridgeLocalTime = "bridge-local-time" + KeyBridgeToken = "bridge-token" +) diff --git a/internal/bridge/quantity.go b/internal/bridge/quantity.go new file mode 100644 index 0000000000..e4edd2a149 --- /dev/null +++ b/internal/bridge/quantity.go @@ -0,0 +1,44 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/resource" +) + +func FromCPU(n int64) *resource.Quantity { + // Assume the Bridge API returns numbers that can be parsed by the + // [resource] package. + if q, err := resource.ParseQuantity(fmt.Sprint(n)); err == nil { + return &q + } + + return resource.NewQuantity(0, resource.DecimalSI) +} + +// FromGibibytes returns n gibibytes as a [resource.Quantity]. +func FromGibibytes(n int64) *resource.Quantity { + // Assume the Bridge API returns numbers that can be parsed by the + // [resource] package. + if q, err := resource.ParseQuantity(fmt.Sprint(n) + "Gi"); err == nil { + return &q + } + + return resource.NewQuantity(0, resource.BinarySI) +} + +// ToGibibytes returns q rounded up to a non-negative gibibyte. +func ToGibibytes(q resource.Quantity) int64 { + v := q.Value() + + if v <= 0 { + return 0 + } + + // https://stackoverflow.com/a/2745086 + return 1 + ((v - 1) >> 30) +} diff --git a/internal/bridge/quantity_test.go b/internal/bridge/quantity_test.go new file mode 100644 index 0000000000..d0e914a9d1 --- /dev/null +++ b/internal/bridge/quantity_test.go @@ -0,0 +1,59 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package bridge + +import ( + "testing" + + "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestFromCPU(t *testing.T) { + zero := FromCPU(0) + assert.Assert(t, zero.IsZero()) + assert.Equal(t, zero.String(), "0") + + one := FromCPU(1) + assert.Equal(t, one.String(), "1") + + negative := FromCPU(-2) + assert.Equal(t, negative.String(), "-2") +} + +func TestFromGibibytes(t *testing.T) { + zero := FromGibibytes(0) + assert.Assert(t, zero.IsZero()) + assert.Equal(t, zero.String(), "0") + + one := FromGibibytes(1) + assert.Equal(t, one.String(), "1Gi") + + negative := FromGibibytes(-2) + assert.Equal(t, negative.String(), "-2Gi") +} + +func TestToGibibytes(t *testing.T) { + zero := resource.MustParse("0") + assert.Equal(t, ToGibibytes(zero), int64(0)) + + // Negative quantities become zero. + negative := resource.MustParse("-4G") + assert.Equal(t, ToGibibytes(negative), int64(0)) + + // Decimal quantities round up. + decimal := resource.MustParse("9000M") + assert.Equal(t, ToGibibytes(decimal), int64(9)) + + // Binary quantities round up. 
+ binary := resource.MustParse("8000Mi") + assert.Equal(t, ToGibibytes(binary), int64(8)) + + fourGi := resource.MustParse("4096Mi") + assert.Equal(t, ToGibibytes(fourGi), int64(4)) + + moreThanFourGi := resource.MustParse("4097Mi") + assert.Equal(t, ToGibibytes(moreThanFourGi), int64(5)) +} diff --git a/internal/crd/validation/postgrescluster/postgres_config_test.go b/internal/crd/validation/postgrescluster/postgres_config_test.go index 1971d71155..83b46e2437 100644 --- a/internal/crd/validation/postgrescluster/postgres_config_test.go +++ b/internal/crd/validation/postgrescluster/postgres_config_test.go @@ -71,24 +71,43 @@ func TestPostgresConfigParametersV1beta1(t *testing.T) { }) }) - t.Run("SSL Settings", func(t *testing.T) { - t.Run("Allowed", func(t *testing.T) { + t.Run("ssl_groups and ssl_ecdh_curve", func(t *testing.T) { + t.Run("ssl_groups not allowed for pg17", func(t *testing.T) { for _, tt := range []struct { - key string - value any - postgresVersion int + key string + value any }{ - // ssl_ecdh_curve is allowed for all supported Postgres versions - {key: "ssl_ecdh_curve", value: "anything", postgresVersion: 17}, - {key: "ssl_ecdh_curve", value: "anything", postgresVersion: 18}, + {key: "ssl_groups", value: "anything"}, + } { + t.Run(tt.key, func(t *testing.T) { + cluster := u.DeepCopy() + require.UnmarshalIntoField(t, cluster, + require.Value(yaml.Marshal(17)), + "spec", "postgresVersion") + require.UnmarshalIntoField(t, cluster, + require.Value(yaml.Marshal(tt.value)), + "spec", "config", "parameters", tt.key) - // ssl_groups is only supported for Postgres 18 and greater - {key: "ssl_groups", value: "anything", postgresVersion: 18}, + err := cc.Create(ctx, cluster, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + }) + } + }) + + t.Run("ssl_groups allowed for pg18", func(t *testing.T) { + for _, tt := range []struct { + key string + value 
any + }{ + {key: "ssl_groups", value: "anything"}, } { t.Run(tt.key, func(t *testing.T) { cluster := u.DeepCopy() require.UnmarshalIntoField(t, cluster, - require.Value(yaml.Marshal(tt.postgresVersion)), + require.Value(yaml.Marshal(18)), "spec", "postgresVersion") require.UnmarshalIntoField(t, cluster, require.Value(yaml.Marshal(tt.value)), @@ -99,39 +118,48 @@ func TestPostgresConfigParametersV1beta1(t *testing.T) { } }) - t.Run("Not Allowed", func(t *testing.T) { + t.Run("ssl_ecdh_curve allowed for both", func(t *testing.T) { for _, tt := range []struct { - key string - value any - postgresVersion int + key string + value any }{ - // setting "ssl" is not allowed for any Postgres version - {key: "ssl", value: "anything", postgresVersion: 17}, - {key: "ssl", value: "anything", postgresVersion: 18}, - - // setting any parameter with an "ssl_" prefix that is not - // "ssl_ecdh_curve" or "ssl_groups" is not allowed for any version - {key: "ssl_anything", value: "anything", postgresVersion: 17}, - {key: "ssl_anything", value: "anything", postgresVersion: 18}, - - // setting "ssl_ecdh_curve" with any additional suffix is not - // allowed for any version - {key: "ssl_ecdh_curve_bad", value: "anything", postgresVersion: 17}, - {key: "ssl_ecdh_curve_bad", value: "anything", postgresVersion: 18}, - - // setting "ssl_groups" is not allowed for Postgres versions 17 - // or earlier - {key: "ssl_groups", value: "anything", postgresVersion: 17}, - - // setting "ssl_groups" with any additional suffix is not - // allowed for any version - {key: "ssl_groups_bad", value: "anything", postgresVersion: 17}, - {key: "ssl_groups_bad", value: "anything", postgresVersion: 18}, + {key: "ssl_ecdh_curve", value: "anything"}, } { t.Run(tt.key, func(t *testing.T) { cluster := u.DeepCopy() require.UnmarshalIntoField(t, cluster, - require.Value(yaml.Marshal(tt.postgresVersion)), + require.Value(yaml.Marshal(17)), + "spec", "postgresVersion") + require.UnmarshalIntoField(t, cluster, + 
require.Value(yaml.Marshal(tt.value)), + "spec", "config", "parameters", tt.key) + + assert.NilError(t, cc.Create(ctx, cluster, client.DryRunAll)) + + cluster2 := u.DeepCopy() + require.UnmarshalIntoField(t, cluster2, + require.Value(yaml.Marshal(18)), + "spec", "postgresVersion") + require.UnmarshalIntoField(t, cluster2, + require.Value(yaml.Marshal(tt.value)), + "spec", "config", "parameters", tt.key) + + assert.NilError(t, cc.Create(ctx, cluster2, client.DryRunAll)) + }) + } + }) + + t.Run("other ssl_* parameters not allowed for any pg version", func(t *testing.T) { + for _, tt := range []struct { + key string + value any + }{ + {key: "ssl_anything", value: "anything"}, + } { + t.Run(tt.key, func(t *testing.T) { + cluster := u.DeepCopy() + require.UnmarshalIntoField(t, cluster, + require.Value(yaml.Marshal(17)), "spec", "postgresVersion") require.UnmarshalIntoField(t, cluster, require.Value(yaml.Marshal(tt.value)), @@ -142,6 +170,20 @@ func TestPostgresConfigParametersV1beta1(t *testing.T) { details := require.StatusErrorDetails(t, err) assert.Assert(t, cmp.Len(details.Causes, 1)) + + cluster1 := u.DeepCopy() + require.UnmarshalIntoField(t, cluster1, + require.Value(yaml.Marshal(18)), + "spec", "postgresVersion") + require.UnmarshalIntoField(t, cluster1, + require.Value(yaml.Marshal(tt.value)), + "spec", "config", "parameters", tt.key) + + err = cc.Create(ctx, cluster1, client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + + details = require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) }) } }) diff --git a/internal/feature/features.go b/internal/feature/features.go index ba51022c6d..36eb777ffa 100644 --- a/internal/feature/features.go +++ b/internal/feature/features.go @@ -75,6 +75,9 @@ const ( // Support automatically growing volumes AutoGrowVolumes = "AutoGrowVolumes" + // Deprecated + BridgeIdentifiers = "BridgeIdentifiers" + // Support custom sidecars for PostgreSQL instance Pods InstanceSidecars = "InstanceSidecars" 
@@ -105,6 +108,7 @@ func NewGate() MutableGate { AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, AutoCreateUserSchema: {Default: true, PreRelease: featuregate.Beta}, AutoGrowVolumes: {Default: true, PreRelease: featuregate.Alpha}, + BridgeIdentifiers: {Default: false, PreRelease: featuregate.Deprecated}, InstanceSidecars: {Default: true, PreRelease: featuregate.Alpha}, OpenTelemetryLogs: {Default: false, PreRelease: featuregate.Alpha}, OpenTelemetryMetrics: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go index dedabe9294..e87b698a02 100644 --- a/internal/feature/features_test.go +++ b/internal/feature/features_test.go @@ -19,6 +19,7 @@ func TestDefaults(t *testing.T) { assert.Assert(t, false == gate.Enabled(AppendCustomQueries)) assert.Assert(t, true == gate.Enabled(AutoCreateUserSchema)) assert.Assert(t, true == gate.Enabled(AutoGrowVolumes)) + assert.Assert(t, false == gate.Enabled(BridgeIdentifiers)) assert.Assert(t, true == gate.Enabled(InstanceSidecars)) assert.Assert(t, false == gate.Enabled(OpenTelemetryLogs)) assert.Assert(t, false == gate.Enabled(OpenTelemetryMetrics)) diff --git a/internal/naming/annotations.go b/internal/naming/annotations.go index 2d3d03560b..61a5438908 100644 --- a/internal/naming/annotations.go +++ b/internal/naming/annotations.go @@ -63,6 +63,13 @@ const ( // disables all postgres_exporter defaults. Disabling the defaults may cause errors in dashboards. PostgresExporterCollectorsAnnotation = annotationPrefix + "postgres-exporter-collectors" + // CrunchyBridgeClusterAdoptionAnnotation is an annotation used to allow users to "adopt" or take + // control over an existing Bridge Cluster with a CrunchyBridgeCluster CR. 
Essentially, if a + // CrunchyBridgeCluster CR does not have a status.ID, but the name matches the name of an existing + // bridge cluster, the user must add this annotation to the CR to allow the CR to take control of + // the Bridge Cluster. The Value assigned to the annotation must be the ID of existing cluster. + CrunchyBridgeClusterAdoptionAnnotation = annotationPrefix + "adopt-bridge-cluster" + // AutoCreateUserSchemaAnnotation is an annotation used to allow users to control whether the cluster // has schemas automatically created for the users defined in `spec.users` for all of the databases // listed for that user. diff --git a/internal/naming/annotations_test.go b/internal/naming/annotations_test.go index fcb6108525..9553e5e72a 100644 --- a/internal/naming/annotations_test.go +++ b/internal/naming/annotations_test.go @@ -14,6 +14,7 @@ import ( func TestAnnotationsValid(t *testing.T) { assert.Assert(t, nil == validation.IsQualifiedName(AuthorizeBackupRemovalAnnotation)) assert.Assert(t, nil == validation.IsQualifiedName(AutoCreateUserSchemaAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(CrunchyBridgeClusterAdoptionAnnotation)) assert.Assert(t, nil == validation.IsQualifiedName(Finalizer)) assert.Assert(t, nil == validation.IsQualifiedName(PatroniSwitchover)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackup)) diff --git a/internal/naming/controllers.go b/internal/naming/controllers.go index f3863e3a18..dd797282d1 100644 --- a/internal/naming/controllers.go +++ b/internal/naming/controllers.go @@ -5,7 +5,9 @@ package naming const ( - ControllerPGAdmin = "pgadmin-controller" - ControllerPGUpgrade = "pgupgrade-controller" - ControllerPostgresCluster = "postgrescluster-controller" + ControllerBridge = "bridge-controller" + ControllerCrunchyBridgeCluster = "crunchybridgecluster-controller" + ControllerPGAdmin = "pgadmin-controller" + ControllerPGUpgrade = "pgupgrade-controller" + ControllerPostgresCluster = 
"postgrescluster-controller" ) diff --git a/internal/naming/labels.go b/internal/naming/labels.go index 0056f56dd2..209af0367b 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -117,6 +117,14 @@ const ( RoleSnapshot = "snapshot" ) +const ( + // LabelCrunchyBridgeClusterPostgresRole identifies the PostgreSQL user an object is for or about. + LabelCrunchyBridgeClusterPostgresRole = labelPrefix + "cbc-pgrole" + + // RoleCrunchyBridgeClusterPostgresRole is the LabelRole applied to CBC PostgreSQL role secrets. + RoleCrunchyBridgeClusterPostgresRole = "cbc-pgrole" +) + const ( // DataPGAdmin is a LabelData value that indicates the object has pgAdmin data. DataPGAdmin = "pgadmin" diff --git a/internal/naming/labels_test.go b/internal/naming/labels_test.go index b8d7130697..552e38ceb5 100644 --- a/internal/naming/labels_test.go +++ b/internal/naming/labels_test.go @@ -35,6 +35,7 @@ func TestLabelsValid(t *testing.T) { assert.Assert(t, nil == validation.IsQualifiedName(LabelPostgresUser)) assert.Assert(t, nil == validation.IsQualifiedName(LabelStandalonePGAdmin)) assert.Assert(t, nil == validation.IsQualifiedName(LabelStartupInstance)) + assert.Assert(t, nil == validation.IsQualifiedName(LabelCrunchyBridgeClusterPostgresRole)) } func TestLabelValuesValid(t *testing.T) { @@ -54,6 +55,7 @@ func TestLabelValuesValid(t *testing.T) { assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupReplicaCreate))) assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupScheduled))) assert.Assert(t, nil == validation.IsValidLabelValue(RoleMonitoring)) + assert.Assert(t, nil == validation.IsValidLabelValue(RoleCrunchyBridgeClusterPostgresRole)) } func TestMerge(t *testing.T) { diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go index a14a12ec5f..c51f2d0262 100644 --- a/internal/naming/selectors.go +++ b/internal/naming/selectors.go @@ -158,3 +158,14 @@ func ClusterPrimary(cluster string) metav1.LabelSelector { 
s.MatchLabels[LabelRole] = RolePatroniLeader return s } + +// CrunchyBridgeClusterPostgresRoles selects things labeled for CrunchyBridgeCluster +// PostgreSQL roles in cluster. +func CrunchyBridgeClusterPostgresRoles(clusterName string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: clusterName, + LabelRole: RoleCrunchyBridgeClusterPostgresRole, + }, + } +} diff --git a/internal/naming/selectors_test.go b/internal/naming/selectors_test.go index f805b6b723..c8617bcb78 100644 --- a/internal/naming/selectors_test.go +++ b/internal/naming/selectors_test.go @@ -157,3 +157,15 @@ func TestClusterPrimary(t *testing.T) { "postgres-operator.crunchydata.com/role=master", }, ",")) } + +func TestCrunchyBridgeClusterPostgresRoles(t *testing.T) { + s, err := AsSelector(CrunchyBridgeClusterPostgresRoles("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/role=cbc-pgrole", + }, ",")) + + _, err = AsSelector(CrunchyBridgeClusterPostgresRoles("--nope--")) + assert.ErrorContains(t, err, "Invalid") +} diff --git a/internal/patroni/config.go b/internal/patroni/config.go index 58dd5920bd..8f99487969 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -484,6 +484,8 @@ func instanceYAML( } postgresql := map[string]any{ + // TODO(cbandy): "bin_dir" + // Missing here is "connect_address" which cannot be known until the // instance Pod is created. That value should be injected using the downward // API and the PATRONI_POSTGRESQL_CONNECT_ADDRESS environment variable. 
diff --git a/internal/patroni/reconcile.go b/internal/patroni/reconcile.go index c526295497..a8de99f028 100644 --- a/internal/patroni/reconcile.go +++ b/internal/patroni/reconcile.go @@ -105,14 +105,7 @@ func InstancePod(ctx context.Context, } } - // Start Patroni in an environment that prioritizes executables of the specified Postgres version. - // NOTE: Patroni ignores PATH when "postgresql.bin_dir" is set. - // - // https://patroni.readthedocs.io/en/latest/yaml_configuration.html#postgresql - container.Command = []string{ - "sh", "-c", "--", postgres.ShellPath(inCluster.Spec.PostgresVersion) + ` && exec "$@"`, "--", - "patroni", configDirectory, - } + container.Command = []string{"patroni", configDirectory} container.Env = append(container.Env, instanceEnvironment(inCluster, inClusterPodService, inPatroniLeaderService, diff --git a/internal/patroni/reconcile_test.go b/internal/patroni/reconcile_test.go index a4727a324f..729bd6573d 100644 --- a/internal/patroni/reconcile_test.go +++ b/internal/patroni/reconcile_test.go @@ -140,12 +140,6 @@ func TestInstancePod(t *testing.T) { assert.Assert(t, cmp.MarshalMatches(template.Spec, ` containers: - command: - - sh - - -c - - -- - - PATH="/usr/lib/postgresql/11/bin:/usr/libexec/postgresql11:/usr/pgsql-11/bin${PATH+:${PATH}}" - && exec "$@" - - -- - patroni - /etc/patroni env: diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 7cc96d8d01..ebefc9dd6c 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -405,7 +405,7 @@ func startupCommand( instance *v1beta1.PostgresInstanceSetSpec, parameters *ParameterSet, ) []string { - version := cluster.Spec.PostgresVersion + version := fmt.Sprint(cluster.Spec.PostgresVersion) dataDir := DataDirectory(cluster) logDir := parameters.Value("log_directory") walDir := WALDirectory(cluster, instance) @@ -463,7 +463,7 @@ chmod +x /tmp/pg_rewind_tde.sh ` } - args := []string{fmt.Sprint(version), walDir} + args := []string{version, walDir} 
script := strings.Join([]string{ `declare -r expected_major_version="$1" pgwal_directory="$2"`, @@ -485,9 +485,6 @@ chmod +x /tmp/pg_rewind_tde.sh // Function to change a directory symlink while keeping the directory contents. strings.TrimSpace(bashSafeLink), - // Prioritize executables for this major version of Postgres. - ShellPath(version), - // Log the effective user ID and all the group IDs. `echo Initializing ...`, `results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)"`, diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index f219d69051..73ac1125de 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -274,7 +274,6 @@ initContainers: set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) - PATH="/usr/lib/postgresql/11/bin:/usr/libexec/postgresql11:/usr/pgsql-11/bin${PATH+:${PATH}}" echo Initializing ... results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)" if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! -d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go new file mode 100644 index 0000000000..c8606d6e81 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -0,0 +1,247 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster +// to be managed by Crunchy Data Bridge +type CrunchyBridgeClusterSpec struct { + // +optional + Metadata *Metadata `json:"metadata,omitempty"` + + // Whether the cluster is high availability, + // meaning that it has a secondary it can fail over to quickly + // in case the primary becomes unavailable. + // +kubebuilder:validation:Required + IsHA bool `json:"isHa"` + + // Whether the cluster is protected. Protected clusters can't be destroyed until + // their protected flag is removed + // +kubebuilder:validation:Optional + IsProtected bool `json:"isProtected,omitempty"` + + // The name of the cluster + // --- + // According to Bridge API/GUI errors, + // "Field name should be between 5 and 50 characters in length, containing only unicode characters, unicode numbers, hyphens, spaces, or underscores, and starting with a character", and ending with a character or number. + // +kubebuilder:validation:MinLength=5 + // +kubebuilder:validation:MaxLength=50 + // +kubebuilder:validation:Pattern=`^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$` + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + ClusterName string `json:"clusterName"` + + // The ID of the cluster's plan. Determines instance, CPU, and memory. + // +kubebuilder:validation:Required + Plan string `json:"plan"` + + // The ID of the cluster's major Postgres version. + // Currently Bridge offers 13-17 + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=13 + // +kubebuilder:validation:Maximum=17 + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 + PostgresVersion int `json:"majorVersion"` + + // The cloud provider where the cluster is located. 
+ // Currently Bridge offers aws, azure, and gcp only + // --- + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum={aws,azure,gcp} + // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" + Provider string `json:"provider"` + + // The provider region where the cluster is located. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" + Region string `json:"region"` + + // Roles for which to create Secrets that contain their credentials which + // are retrieved from the Bridge API. An empty list creates no role secrets. + // Removing a role from this list does NOT drop the role nor revoke their + // access, but it will delete that role's secret from the kube cluster. + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Roles []*CrunchyBridgeClusterRoleSpec `json:"roles,omitempty"` + + // The name of the secret containing the API key and team id + // +kubebuilder:validation:Required + Secret string `json:"secret"` + + // The amount of storage available to the cluster in gigabytes. + // The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + // If the amount is given in Gi, we round to the nearest G value. + // The minimum value allowed by Bridge is 10 GB. + // The maximum value allowed by Bridge is 65535 GB. + // +kubebuilder:validation:Required + Storage resource.Quantity `json:"storage"` +} + +type CrunchyBridgeClusterRoleSpec struct { + // Name of the role within Crunchy Bridge. + // More info: https://docs.crunchybridge.com/concepts/users + // +kubebuilder:validation:Required + Name string `json:"name"` + + // The name of the Secret that will hold the role credentials. 
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Type=string + SecretName string `json:"secretName"` +} + +// CrunchyBridgeClusterStatus defines the observed state of CrunchyBridgeCluster +type CrunchyBridgeClusterStatus struct { + // The name of the cluster in Bridge. + // +optional + ClusterName string `json:"name,omitempty"` + + // conditions represent the observations of postgres cluster's current state. + // +optional + // +listType=map + // +listMapKey=type + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // The Hostname of the postgres cluster in Bridge, provided by Bridge API and null until then. + // +optional + Host string `json:"host,omitempty"` + + // The ID of the postgres cluster in Bridge, provided by Bridge API and null until then. + // +optional + ID string `json:"id,omitempty"` + + // Whether the cluster is high availability, meaning that it has a secondary it can fail + // over to quickly in case the primary becomes unavailable. + // +optional + IsHA *bool `json:"isHa"` + + // Whether the cluster is protected. Protected clusters can't be destroyed until + // their protected flag is removed + // +optional + IsProtected *bool `json:"isProtected"` + + // The cluster's major Postgres version. + // +optional + MajorVersion int `json:"majorVersion"` + + // observedGeneration represents the .metadata.generation on which the status was based. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // The cluster upgrade as represented by Bridge + // +optional + OngoingUpgrade []*UpgradeOperation `json:"ongoingUpgrade,omitempty"` + + // The ID of the cluster's plan. 
Determines instance, CPU, and memory. + // +optional + Plan string `json:"plan"` + + // Most recent, raw responses from Bridge API + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type=object + Responses APIResponses `json:"responses"` + + // State of cluster in Bridge. + // +optional + State string `json:"state,omitempty"` + + // The amount of storage available to the cluster. + // +optional + Storage *resource.Quantity `json:"storage"` +} + +type APIResponses struct { + Cluster SchemalessObject `json:"cluster,omitempty"` + Status SchemalessObject `json:"status,omitempty"` + Upgrade SchemalessObject `json:"upgrade,omitempty"` +} + +type ClusterUpgrade struct { + Operations []*UpgradeOperation `json:"operations,omitempty"` +} + +type UpgradeOperation struct { + Flavor string `json:"flavor"` + StartingFrom string `json:"starting_from"` + State string `json:"state"` +} + +// TODO(crunchybridgecluster) Think through conditions +// CrunchyBridgeClusterStatus condition types. +const ( + ConditionUnknown = "" + ConditionUpgrading = "Upgrading" + ConditionReady = "Ready" + ConditionDeleting = "Deleting" +) + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:storageversion +//+versionName=v1beta1 +// +operator-sdk:csv:customresourcedefinitions:resources={{ConfigMap,v1},{Secret,v1},{Service,v1},{CronJob,v1beta1},{Deployment,v1},{Job,v1},{StatefulSet,v1},{PersistentVolumeClaim,v1}} + +// CrunchyBridgeCluster is the Schema for the crunchybridgeclusters API +type CrunchyBridgeCluster struct { + // ObjectMeta.Name is a DNS subdomain. 
+ // - https://docs.k8s.io/concepts/overview/working-with-objects/names/#dns-subdomain-names + // - https://releases.k8s.io/v1.21.0/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go#L60 + + // In Bridge json, meta.name is "name" + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitzero"` + + // NOTE(cbandy): Every CrunchyBridgeCluster needs a Spec, but it is optional here + // so ObjectMeta can be managed independently. + + // +optional + Spec CrunchyBridgeClusterSpec `json:"spec,omitzero"` + // +optional + Status CrunchyBridgeClusterStatus `json:"status,omitzero"` +} + +// Default implements "sigs.k8s.io/controller-runtime/pkg/webhook.Defaulter" so +// a webhook can be registered for the type. +// - https://book.kubebuilder.io/reference/webhook-overview.html +func (c *CrunchyBridgeCluster) Default() { + if len(c.APIVersion) == 0 { + c.APIVersion = GroupVersion.String() + } + if len(c.Kind) == 0 { + c.Kind = "CrunchyBridgeCluster" + } +} + +// +kubebuilder:object:root=true + +// CrunchyBridgeClusterList contains a list of CrunchyBridgeCluster +type CrunchyBridgeClusterList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitzero"` + Items []CrunchyBridgeCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CrunchyBridgeCluster{}, &CrunchyBridgeClusterList{}) +} + +func NewCrunchyBridgeCluster() *CrunchyBridgeCluster { + cluster := &CrunchyBridgeCluster{} + cluster.SetGroupVersionKind(GroupVersion.WithKind("CrunchyBridgeCluster")) + return cluster +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 5f266850a3..8d674daf3f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -9,12 +9,30 @@ package 
v1beta1 import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResponses) DeepCopyInto(out *APIResponses) { + *out = *in + out.Cluster = in.Cluster.DeepCopy() + out.Status = in.Status.DeepCopy() + out.Upgrade = in.Upgrade.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResponses. +func (in *APIResponses) DeepCopy() *APIResponses { + if in == nil { + return nil + } + out := new(APIResponses) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { *out = *in @@ -25,7 +43,7 @@ func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { } if in.Image != nil { in, out := &in.Image, &out.Image - *out = new(v1.ImageVolumeSource) + *out = new(corev1.ImageVolumeSource) **out = **in } } @@ -66,12 +84,12 @@ func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -119,6 +137,32 @@ func (in *Backups) DeepCopy() *Backups { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterUpgrade) DeepCopyInto(out *ClusterUpgrade) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]*UpgradeOperation, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(UpgradeOperation) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUpgrade. +func (in *ClusterUpgrade) DeepCopy() *ClusterUpgrade { + if in == nil { + return nil + } + out := new(ClusterUpgrade) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigMapKeyRef) DeepCopyInto(out *ConfigMapKeyRef) { *out = *in @@ -134,6 +178,161 @@ func (in *ConfigMapKeyRef) DeepCopy() *ConfigMapKeyRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeCluster) DeepCopyInto(out *CrunchyBridgeCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeCluster. +func (in *CrunchyBridgeCluster) DeepCopy() *CrunchyBridgeCluster { + if in == nil { + return nil + } + out := new(CrunchyBridgeCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CrunchyBridgeCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrunchyBridgeClusterList) DeepCopyInto(out *CrunchyBridgeClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CrunchyBridgeCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterList. +func (in *CrunchyBridgeClusterList) DeepCopy() *CrunchyBridgeClusterList { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CrunchyBridgeClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterRoleSpec) DeepCopyInto(out *CrunchyBridgeClusterRoleSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterRoleSpec. +func (in *CrunchyBridgeClusterRoleSpec) DeepCopy() *CrunchyBridgeClusterRoleSpec { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterRoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrunchyBridgeClusterSpec) DeepCopyInto(out *CrunchyBridgeClusterSpec) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(Metadata) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*CrunchyBridgeClusterRoleSpec, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CrunchyBridgeClusterRoleSpec) + **out = **in + } + } + } + out.Storage = in.Storage.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterSpec. +func (in *CrunchyBridgeClusterSpec) DeepCopy() *CrunchyBridgeClusterSpec { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrunchyBridgeClusterStatus) DeepCopyInto(out *CrunchyBridgeClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsHA != nil { + in, out := &in.IsHA, &out.IsHA + *out = new(bool) + **out = **in + } + if in.IsProtected != nil { + in, out := &in.IsProtected, &out.IsProtected + *out = new(bool) + **out = **in + } + if in.OngoingUpgrade != nil { + in, out := &in.OngoingUpgrade, &out.OngoingUpgrade + *out = make([]*UpgradeOperation, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(UpgradeOperation) + **out = **in + } + } + } + in.Responses.DeepCopyInto(&out.Responses) + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrunchyBridgeClusterStatus. 
+func (in *CrunchyBridgeClusterStatus) DeepCopy() *CrunchyBridgeClusterStatus { + if in == nil { + return nil + } + out := new(CrunchyBridgeClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataSource) DeepCopyInto(out *DataSource) { *out = *in @@ -245,14 +444,14 @@ func (in *ExporterSpec) DeepCopyInto(out *ExporterSpec) { *out = *in if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } in.Resources.DeepCopyInto(&out.Resources) @@ -301,14 +500,14 @@ func (in *InstrumentationConfigSpec) DeepCopyInto(out *InstrumentationConfigSpec out.Exporters = in.Exporters.DeepCopy() if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.EnvironmentVariables != nil { in, out := &in.EnvironmentVariables, &out.EnvironmentVariables - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -674,7 +873,7 @@ func (in *PGAdminConfiguration) DeepCopyInto(out *PGAdminConfiguration) { *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -755,7 +954,7 @@ func (in *PGAdminPodSpec) DeepCopyInto(out *PGAdminPodSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = 
new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } in.Config.DeepCopyInto(&out.Config) @@ -778,14 +977,14 @@ func (in *PGAdminPodSpec) DeepCopyInto(out *PGAdminPodSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -834,7 +1033,7 @@ func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } if in.Instrumentation != nil { @@ -845,7 +1044,7 @@ func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -855,7 +1054,7 @@ func (in *PGAdminSpec) DeepCopyInto(out *PGAdminSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -896,7 +1095,7 @@ func (in *PGAdminStatus) DeepCopyInto(out *PGAdminStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -918,7 +1117,7 @@ func (in *PGAdminUser) DeepCopyInto(out *PGAdminUser) { *out = 
*in if in.PasswordRef != nil { in, out := &in.PasswordRef, &out.PasswordRef - *out = new(v1.SecretKeySelector) + *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } } @@ -965,7 +1164,7 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { } if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1061,7 +1260,7 @@ func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { *out = *in if in.Configuration != nil { in, out := &in.Configuration, &out.Configuration - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1082,7 +1281,7 @@ func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -1092,7 +1291,7 @@ func (in *PGBackRestDataSource) DeepCopyInto(out *PGBackRestDataSource) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1202,7 +1401,7 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { *out = *in if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.Log != nil { @@ -1218,26 +1417,26 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { in.Resources.DeepCopyInto(&out.Resources) if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = 
make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.SSHConfiguration != nil { in, out := &in.SSHConfiguration, &out.SSHConfiguration - *out = new(v1.ConfigMapProjection) + *out = new(corev1.ConfigMapProjection) (*in).DeepCopyInto(*out) } if in.SSHSecret != nil { in, out := &in.SSHSecret, &out.SSHSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.Volumes != nil { @@ -1399,7 +1598,7 @@ func (in *PGBouncerConfiguration) DeepCopyInto(out *PGBouncerConfiguration) { *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1447,20 +1646,20 @@ func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } in.Config.DeepCopyInto(&out.Config) if in.Containers != nil { in, out := &in.Containers, &out.Containers - *out = make([]v1.Container, len(*in)) + *out = make([]corev1.Container, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.Port != nil { @@ -1496,14 +1695,14 @@ func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = 
make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1691,13 +1890,13 @@ func (in *PGUpgradeSpec) DeepCopyInto(out *PGUpgradeSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -1707,7 +1906,7 @@ func (in *PGUpgradeSpec) DeepCopyInto(out *PGUpgradeSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1730,7 +1929,7 @@ func (in *PGUpgradeStatus) DeepCopyInto(out *PGUpgradeStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1918,7 +2117,7 @@ func (in *PostgresClusterDataSource) DeepCopyInto(out *PostgresClusterDataSource in.Resources.DeepCopyInto(&out.Resources) if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.PriorityClassName != nil { @@ -1928,7 +2127,7 @@ func (in *PostgresClusterDataSource) DeepCopyInto(out *PostgresClusterDataSource } if in.Tolerations != nil { in, out := &in.Tolerations, 
&out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2008,12 +2207,12 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.CustomReplicationClientTLSSecret != nil { in, out := &in.CustomReplicationClientTLSSecret, &out.CustomReplicationClientTLSSecret - *out = new(v1.SecretProjection) + *out = new(corev1.SecretProjection) (*in).DeepCopyInto(*out) } if in.DatabaseInitSQL != nil { @@ -2028,7 +2227,7 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } if in.InstanceSets != nil { @@ -2152,7 +2351,7 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2174,7 +2373,7 @@ func (in *PostgresConfigSpec) DeepCopyInto(out *PostgresConfigSpec) { *out = *in if in.Files != nil { in, out := &in.Files, &out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2256,12 +2455,12 @@ func (in *PostgresInstanceSetSpec) DeepCopyInto(out *PostgresInstanceSetSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) + *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } if in.Containers != nil { in, out := &in.Containers, &out.Containers - *out = 
make([]v1.Container, len(*in)) + *out = make([]corev1.Container, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2290,14 +2489,14 @@ func (in *PostgresInstanceSetSpec) DeepCopyInto(out *PostgresInstanceSetSpec) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2643,22 +2842,22 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.IPFamilyPolicy != nil { in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy - *out = new(v1.IPFamilyPolicy) + *out = new(corev1.IPFamilyPolicy) **out = **in } if in.IPFamilies != nil { in, out := &in.IPFamilies, &out.IPFamilies - *out = make([]v1.IPFamily, len(*in)) + *out = make([]corev1.IPFamily, len(*in)) copy(*out, *in) } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(v1.ServiceInternalTrafficPolicy) + *out = new(corev1.ServiceInternalTrafficPolicy) **out = **in } if in.ExternalTrafficPolicy != nil { in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy - *out = new(v1.ServiceExternalTrafficPolicy) + *out = new(corev1.ServiceExternalTrafficPolicy) **out = **in } } @@ -2678,7 +2877,7 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { *out = *in if in.Resources != nil { in, out := &in.Resources, &out.Resources - *out = new(v1.ResourceRequirements) + *out = new(corev1.ResourceRequirements) (*in).DeepCopyInto(*out) } } @@ -2698,7 +2897,7 @@ func (in *StandalonePGAdminConfiguration) DeepCopyInto(out *StandalonePGAdminCon *out = *in if in.Files != nil { in, out := &in.Files, 
&out.Files - *out = make([]v1.VolumeProjection, len(*in)) + *out = make([]corev1.VolumeProjection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2750,6 +2949,21 @@ func (in *TablespaceVolume) DeepCopy() *TablespaceVolume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeOperation) DeepCopyInto(out *UpgradeOperation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeOperation. +func (in *UpgradeOperation) DeepCopy() *UpgradeOperation { + if in == nil { + return nil + } + out := new(UpgradeOperation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UserInterfaceSpec) DeepCopyInto(out *UserInterfaceSpec) { *out = *in diff --git a/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml b/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml index a79dc6a193..18a19553bb 100644 --- a/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml +++ b/testing/chainsaw/e2e/pgbackrest-restore/chainsaw-test.yaml @@ -19,10 +19,6 @@ spec: - name: volume value: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - - name: postgrescluster - value: - apiVersion: (join('', ['postgres-operator.crunchydata.com/', $values.versions.postgrescluster])) - steps: - name: 'Create Cluster with replica, tablespace' use: @@ -89,7 +85,7 @@ spec: deletionPropagationPolicy: Background expect: [{ check: { (`true`): true } }] ref: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: clone-one @@ -166,7 +162,7 @@ spec: deletionPropagationPolicy: Background expect: [{ check: { (`true`): true } }] ref: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: 
postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: clone-two diff --git a/testing/chainsaw/e2e/pgbackrest-restore/templates/change-parameters.yaml b/testing/chainsaw/e2e/pgbackrest-restore/templates/change-parameters.yaml index 04b5a85e86..b8650de471 100644 --- a/testing/chainsaw/e2e/pgbackrest-restore/templates/change-parameters.yaml +++ b/testing/chainsaw/e2e/pgbackrest-restore/templates/change-parameters.yaml @@ -52,7 +52,7 @@ spec: Update the cluster with parameters that require attention during recovery patch: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: original diff --git a/testing/chainsaw/e2e/pgbackrest-restore/templates/clone-cluster.yaml b/testing/chainsaw/e2e/pgbackrest-restore/templates/clone-cluster.yaml index 193e7495c4..a768fa2abd 100644 --- a/testing/chainsaw/e2e/pgbackrest-restore/templates/clone-cluster.yaml +++ b/testing/chainsaw/e2e/pgbackrest-restore/templates/clone-cluster.yaml @@ -13,7 +13,7 @@ spec: Clone the cluster using a pgBackRest restore apply: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: ($name) @@ -39,7 +39,7 @@ spec: Wait for the cluster to come online assert: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: ($name) diff --git a/testing/chainsaw/e2e/pgbackrest-restore/templates/create-backup.yaml b/testing/chainsaw/e2e/pgbackrest-restore/templates/create-backup.yaml index 6279f29248..8f71a915f7 100644 --- a/testing/chainsaw/e2e/pgbackrest-restore/templates/create-backup.yaml +++ b/testing/chainsaw/e2e/pgbackrest-restore/templates/create-backup.yaml @@ -12,7 +12,7 @@ spec: Annotate the cluster to trigger a backup patch: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: 
postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: original diff --git a/testing/chainsaw/e2e/pgbackrest-restore/templates/create-cluster.yaml b/testing/chainsaw/e2e/pgbackrest-restore/templates/create-cluster.yaml index 89a72282ef..295ba9590d 100644 --- a/testing/chainsaw/e2e/pgbackrest-restore/templates/create-cluster.yaml +++ b/testing/chainsaw/e2e/pgbackrest-restore/templates/create-cluster.yaml @@ -10,7 +10,7 @@ spec: and some parameters that require attention during PostgreSQL recovery apply: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: original @@ -40,7 +40,7 @@ spec: Wait for the replica backup to complete assert: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: original diff --git a/testing/chainsaw/e2e/pgbackrest-restore/templates/point-in-time-restore.yaml b/testing/chainsaw/e2e/pgbackrest-restore/templates/point-in-time-restore.yaml index b832cec5ae..3b842f5ebc 100644 --- a/testing/chainsaw/e2e/pgbackrest-restore/templates/point-in-time-restore.yaml +++ b/testing/chainsaw/e2e/pgbackrest-restore/templates/point-in-time-restore.yaml @@ -25,7 +25,7 @@ spec: Trigger an in-place point-in-time restore (PITR) patch: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: original @@ -46,7 +46,7 @@ spec: Wait for the restore to complete and the cluster to come online assert: resource: - apiVersion: ($postgrescluster.apiVersion) + apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: original diff --git a/testing/chainsaw/e2e/values.yaml b/testing/chainsaw/e2e/values.yaml index 02fce2dc14..9dc2157b85 100644 --- a/testing/chainsaw/e2e/values.yaml +++ b/testing/chainsaw/e2e/values.yaml @@ -1,6 +1,5 @@ 
versions: postgres: '17' - postgrescluster: 'v1' images: psql: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2542' diff --git a/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml index d5f1e7db9d..a5fe982b1a 100644 --- a/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml index 980304d60f..9f687a1dfa 100644 --- a/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml index ec96f76a43..6776fc542b 100644 --- a/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml index 9b689502d6..6a21b00b22 100644 --- a/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/01-pause-cluster.yaml @@ -1,4 +1,4 @@ 
-apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml index e411b0d959..82062fb908 100644 --- a/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-pause diff --git a/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml index 1e2b7e2b09..2f5665e146 100644 --- a/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/02-resume-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-pause diff --git a/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml index 900afb1d3a..4eebece89e 100644 --- a/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml +++ b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-start diff --git a/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml index 5687e5482a..713cd14eb3 100644 --- a/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml +++ 
b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: cluster-start diff --git a/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml index f775eb9680..fe6392d75a 100644 --- a/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml +++ b/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/files/00-created.yaml b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml index 82699adaa0..3d2c7ec936 100644 --- a/testing/kuttl/e2e/delete-namespace/files/00-created.yaml +++ b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml b/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml index 9d3e0f9277..ee6f31178c 100644 --- a/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml +++ b/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: namespace: ${KUTTL_TEST_DELETE_NAMESPACE} diff --git a/testing/kuttl/e2e/delete/files/00-cluster-created.yaml b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml index f4ccb037b3..6130475c07 100644 --- 
a/testing/kuttl/e2e/delete/files/00-cluster-created.yaml +++ b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete diff --git a/testing/kuttl/e2e/delete/files/00-create-cluster.yaml b/testing/kuttl/e2e/delete/files/00-create-cluster.yaml index 0a8a2a1224..0dbcb08204 100644 --- a/testing/kuttl/e2e/delete/files/00-create-cluster.yaml +++ b/testing/kuttl/e2e/delete/files/00-create-cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete diff --git a/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml b/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml index db203233eb..091bc96b7b 100644 --- a/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml +++ b/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete diff --git a/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml index e9f4114c38..1940fc680a 100644 --- a/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml +++ b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-with-replica diff --git a/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml b/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml index 
05a2a419e6..53c4fc434d 100644 --- a/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml +++ b/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-with-replica diff --git a/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml b/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml index 27f1d5bc32..cc14b60d3d 100644 --- a/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml +++ b/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-with-replica diff --git a/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml b/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml index 9bab1f05a5..f910fa9811 100644 --- a/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml +++ b/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-not-running diff --git a/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml b/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml index ebf3381731..2b7d34f3f6 100644 --- a/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml +++ b/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-not-running diff --git 
a/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml b/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml index 1ef98e1c99..4527a3659d 100644 --- a/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml +++ b/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: delete-not-running diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml index 0f50ffced9..ed6fd22b7c 100644 --- a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster-checks.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-custom-queries diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml index 482b9c86b9..5356b83be9 100644 --- a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-custom-queries diff --git a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml index 12eec3074c..eab02c6888 100644 --- 
a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml +++ b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster-checks.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-no-tls diff --git a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml index cb9a23db69..690d5b505d 100644 --- a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml +++ b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-no-tls diff --git a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml index e1820e7ead..19887a0e10 100644 --- a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml +++ b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster-checks.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-password-change diff --git a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml index a62577de92..d16c898ac2 100644 --- a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml +++ b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-password-change diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml index 41821919d4..e192191fcd 100644 --- a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster-checks.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-tls diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml index be550c5433..4fa420664a 100644 --- a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: exporter-tls diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml index 8435bad7f7..8a0e57bab6 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml @@ -1,7 +1,7 @@ --- # Create the cluster we will do an actual upgrade on, but set the postgres version # to '10' to force a missing image scenario -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml index 
76c3b02ee1..316f3a5472 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml @@ -1,6 +1,6 @@ --- # Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml index f4cf125329..fcdf4f62e3 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml @@ -1,6 +1,6 @@ --- # Update the postgres version and restart the cluster. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml index 1f2b717cf2..14c33cccfe 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/12-assert.yaml @@ -1,7 +1,7 @@ --- # Wait for the instances to be ready and the replica backup to complete # by waiting for the status to signal pods ready and pgbackrest stanza created -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml index 76c3b02ee1..316f3a5472 100644 --- 
a/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml @@ -1,6 +1,6 @@ --- # Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml index f9fe8a20a8..2fa2c949a9 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml @@ -1,6 +1,6 @@ --- # Annotate the cluster for an upgrade. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml index 97350a6a04..bd828180f4 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/14-assert.yaml @@ -14,7 +14,7 @@ status: - type: "Succeeded" status: "True" --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml index b53caab52b..e5f270fb2f 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml @@ -1,7 +1,7 @@ --- # Once the pgupgrade is finished, update 
the version and set shutdown to false # in the postgres cluster -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml index 99ac652193..dfcbd4c819 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/15-assert.yaml @@ -1,6 +1,6 @@ --- # Wait for the instances to be ready with the target Postgres version. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml b/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml index 0e493b51ed..0591645221 100644 --- a/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml @@ -1,6 +1,6 @@ --- # Create a cluster that is already at the correct version -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/30--cluster.yaml b/testing/kuttl/e2e/major-upgrade/30--cluster.yaml index 8b96d7cdfd..07546c384e 100644 --- a/testing/kuttl/e2e/major-upgrade/30--cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade/30--cluster.yaml @@ -1,6 +1,6 @@ --- # Create the cluster we will do an actual upgrade on -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git 
a/testing/kuttl/e2e/major-upgrade/30-assert.yaml b/testing/kuttl/e2e/major-upgrade/30-assert.yaml index 3ebbe65b62..1db8ec257d 100644 --- a/testing/kuttl/e2e/major-upgrade/30-assert.yaml +++ b/testing/kuttl/e2e/major-upgrade/30-assert.yaml @@ -1,7 +1,7 @@ --- # Wait for the instances to be ready and the replica backup to complete # by waiting for the status to signal pods ready and pgbackrest stanza created -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml index 06bee8a99c..9e4a575a3a 100644 --- a/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml @@ -1,6 +1,6 @@ --- # Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml index a8ace3049c..35cd269035 100644 --- a/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml @@ -1,6 +1,6 @@ --- # Annotate the cluster for an upgrade. 
-apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/33-assert.yaml b/testing/kuttl/e2e/major-upgrade/33-assert.yaml index ebb1896d7e..aadb5e3bb1 100644 --- a/testing/kuttl/e2e/major-upgrade/33-assert.yaml +++ b/testing/kuttl/e2e/major-upgrade/33-assert.yaml @@ -14,7 +14,7 @@ status: - type: "Succeeded" status: "True" --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml b/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml index af6573706d..ee674151ca 100644 --- a/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml @@ -1,7 +1,7 @@ --- # Once the pgupgrade is finished, update the version and set shutdown to false # in the postgres cluster -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/major-upgrade/34-assert.yaml b/testing/kuttl/e2e/major-upgrade/34-assert.yaml index 46aa2fa022..aba583f74c 100644 --- a/testing/kuttl/e2e/major-upgrade/34-assert.yaml +++ b/testing/kuttl/e2e/major-upgrade/34-assert.yaml @@ -1,6 +1,6 @@ --- # Wait for the instances to be ready with the target Postgres version. 
-apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: major-upgrade diff --git a/testing/kuttl/e2e/optional-backups/00--cluster.yaml b/testing/kuttl/e2e/optional-backups/00--cluster.yaml index 3140e8c7a5..7b927831e0 100644 --- a/testing/kuttl/e2e/optional-backups/00--cluster.yaml +++ b/testing/kuttl/e2e/optional-backups/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/optional-backups/00-assert.yaml b/testing/kuttl/e2e/optional-backups/00-assert.yaml index 253688c8db..86392d0308 100644 --- a/testing/kuttl/e2e/optional-backups/00-assert.yaml +++ b/testing/kuttl/e2e/optional-backups/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/optional-backups/04--cluster.yaml b/testing/kuttl/e2e/optional-backups/04--cluster.yaml index 458ecef772..fc39ff6ebe 100644 --- a/testing/kuttl/e2e/optional-backups/04--cluster.yaml +++ b/testing/kuttl/e2e/optional-backups/04--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/optional-backups/10--cluster.yaml b/testing/kuttl/e2e/optional-backups/10--cluster.yaml index 1e13b403bb..6da85c93f9 100644 --- a/testing/kuttl/e2e/optional-backups/10--cluster.yaml +++ b/testing/kuttl/e2e/optional-backups/10--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/optional-backups/10-assert.yaml b/testing/kuttl/e2e/optional-backups/10-assert.yaml index d9c626a92f..7b740b310d 100644 --- a/testing/kuttl/e2e/optional-backups/10-assert.yaml +++ b/testing/kuttl/e2e/optional-backups/10-assert.yaml @@ -1,5 +1,5 @@ # It should be possible to turn backups back on. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/optional-backups/20-assert.yaml b/testing/kuttl/e2e/optional-backups/20-assert.yaml index 1dd366f4e9..b469e277f8 100644 --- a/testing/kuttl/e2e/optional-backups/20-assert.yaml +++ b/testing/kuttl/e2e/optional-backups/20-assert.yaml @@ -1,5 +1,5 @@ # It should be possible to turn backups back on. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/optional-backups/23-assert.yaml b/testing/kuttl/e2e/optional-backups/23-assert.yaml index 814d9174ad..8748ea015c 100644 --- a/testing/kuttl/e2e/optional-backups/23-assert.yaml +++ b/testing/kuttl/e2e/optional-backups/23-assert.yaml @@ -1,5 +1,5 @@ # It should be possible to turn backups back on. 
-apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: created-without-backups diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/00--create-cluster.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/00--create-cluster.yaml index eabb86847f..3345bef5f9 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/00--create-cluster.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/00--create-cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/00-cluster-created.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/00-cluster-created.yaml index 5b11b4bda0..97bd3e2b97 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/00-cluster-created.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/00-cluster-created.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/01--add-instrumentation.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/01--add-instrumentation.yaml index 651f5fa14e..ebde9f7caa 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/01--add-instrumentation.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/01--add-instrumentation.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/01-instrumentation-added.yaml 
b/testing/kuttl/e2e/otel-logging-and-metrics/files/01-instrumentation-added.yaml index 1dc292fad5..672bdd2d1d 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/01-instrumentation-added.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/01-instrumentation-added.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/03--annotate-cluster.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/03--annotate-cluster.yaml index 437b328eae..1133b7fe15 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/03--annotate-cluster.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/03--annotate-cluster.yaml @@ -1,6 +1,6 @@ --- # Annotate the cluster to trigger a backup. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/07-instrumentation-added.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/07-instrumentation-added.yaml index ce5c6a938b..858b78ff83 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/07-instrumentation-added.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/07-instrumentation-added.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/09--add-custom-queries.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/09--add-custom-queries.yaml index d5a2089e3a..ed133fc26a 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/09--add-custom-queries.yaml +++ 
b/testing/kuttl/e2e/otel-logging-and-metrics/files/09--add-custom-queries.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/09-custom-queries-added.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/09-custom-queries-added.yaml index ba93e37f8c..1a756b7a73 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/09-custom-queries-added.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/09-custom-queries-added.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/11--add-per-db-metrics.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/11--add-per-db-metrics.yaml index 47c3d17edc..1cf4c28a83 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/11--add-per-db-metrics.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/11--add-per-db-metrics.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/13--add-per-db-metrics.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/13--add-per-db-metrics.yaml index d231188202..c383238be9 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/13--add-per-db-metrics.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/13--add-per-db-metrics.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: 
otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/15--remove-per-db-metrics.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/15--remove-per-db-metrics.yaml index aaef4a4dbe..4421de8482 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/15--remove-per-db-metrics.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/15--remove-per-db-metrics.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/17--add-custom-queries-per-db.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/17--add-custom-queries-per-db.yaml index c71c52c337..92360a4a9a 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/17--add-custom-queries-per-db.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/17--add-custom-queries-per-db.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/17-custom-queries-per-db-added.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/17-custom-queries-per-db-added.yaml index 7cdadefe6c..5bd9cec286 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/17-custom-queries-per-db-added.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/17-custom-queries-per-db-added.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/19--add-logs-metrics-exporter.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/19--add-logs-metrics-exporter.yaml index 
a0ed2e3263..67926505c0 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/19--add-logs-metrics-exporter.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/19--add-logs-metrics-exporter.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/19-logs-metrics-exporter-added.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/19-logs-metrics-exporter-added.yaml index 2fae632304..f730898692 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/19-logs-metrics-exporter-added.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/19-logs-metrics-exporter-added.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/21--create-cluster.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/21--create-cluster.yaml index b66b0cee99..3983405b34 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/21--create-cluster.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/21--create-cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster-no-backups diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/21-cluster-created.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/21-cluster-created.yaml index 1cad988aff..c9aad7ec25 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/21-cluster-created.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/21-cluster-created.yaml @@ -1,4 +1,4 @@ -apiVersion: 
postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster-no-backups diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/23--add-backups.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/23--add-backups.yaml index fdc193ebca..bb7c70ea37 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/23--add-backups.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/23--add-backups.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster-no-backups diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/23-backups-added.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/23-backups-added.yaml index 34f3b2b161..52221d2349 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/23-backups-added.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/23-backups-added.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster-no-backups diff --git a/testing/kuttl/e2e/otel-logging-and-metrics/files/25-backups-removed.yaml b/testing/kuttl/e2e/otel-logging-and-metrics/files/25-backups-removed.yaml index 1cad988aff..c9aad7ec25 100644 --- a/testing/kuttl/e2e/otel-logging-and-metrics/files/25-backups-removed.yaml +++ b/testing/kuttl/e2e/otel-logging-and-metrics/files/25-backups-removed.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: otel-cluster-no-backups diff --git a/testing/kuttl/e2e/password-change/00--cluster.yaml b/testing/kuttl/e2e/password-change/00--cluster.yaml index 
df6806647f..d7b7019b62 100644 --- a/testing/kuttl/e2e/password-change/00--cluster.yaml +++ b/testing/kuttl/e2e/password-change/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: password-change diff --git a/testing/kuttl/e2e/password-change/00-assert.yaml b/testing/kuttl/e2e/password-change/00-assert.yaml index af183b5aaf..bfedc0b25e 100644 --- a/testing/kuttl/e2e/password-change/00-assert.yaml +++ b/testing/kuttl/e2e/password-change/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: password-change diff --git a/testing/kuttl/e2e/password-change/04--secret.yaml b/testing/kuttl/e2e/password-change/04--secret.yaml index 162e0e640c..5f312e9bf1 100644 --- a/testing/kuttl/e2e/password-change/04--secret.yaml +++ b/testing/kuttl/e2e/password-change/04--secret.yaml @@ -10,7 +10,7 @@ stringData: uri: "" --- # Enable authenticating with MD5 passwords -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: password-change diff --git a/testing/kuttl/e2e/password-change/06--cluster.yaml b/testing/kuttl/e2e/password-change/06--cluster.yaml index 73357ea1cc..4cb70defdd 100644 --- a/testing/kuttl/e2e/password-change/06--cluster.yaml +++ b/testing/kuttl/e2e/password-change/06--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: password-change diff --git a/testing/kuttl/e2e/password-change/06-assert.yaml b/testing/kuttl/e2e/password-change/06-assert.yaml index af183b5aaf..bfedc0b25e 100644 --- 
a/testing/kuttl/e2e/password-change/06-assert.yaml +++ b/testing/kuttl/e2e/password-change/06-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: password-change diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml index 30e9edb9ba..9665fac665 100644 --- a/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: pgbackrest-backup-standby diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml index 805943d763..d69a3c68b5 100644 --- a/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: pgbackrest-backup-standby diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml index 7d8fc1682c..c986f4a9de 100644 --- a/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: pgbackrest-backup-standby diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml index 
820cc1b0d5..92f7b12f5a 100644 --- a/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: pgbackrest-backup-standby diff --git a/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml index e21f90e63d..03391359a1 100644 --- a/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml +++ b/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: init-pgbackrest diff --git a/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml b/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml index 9078eb9783..5181c95993 100644 --- a/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml +++ b/testing/kuttl/e2e/pgbackrest-init/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: init-pgbackrest diff --git a/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml index 6669c0d262..e732f1fd9a 100644 --- a/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml +++ b/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: init-pgbackrest diff --git a/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml b/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml index 89eba23413..04a38ac9f4 100644 --- a/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml +++ 
b/testing/kuttl/e2e/pgbackrest-init/04-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: init-pgbackrest diff --git a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml index 9a61dccef9..4699d90171 100644 --- a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml +++ b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: proxied diff --git a/testing/kuttl/e2e/pgbouncer/00-assert.yaml b/testing/kuttl/e2e/pgbouncer/00-assert.yaml index c914e8991e..6c3a33079f 100644 --- a/testing/kuttl/e2e/pgbouncer/00-assert.yaml +++ b/testing/kuttl/e2e/pgbouncer/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: proxied diff --git a/testing/kuttl/e2e/replica-read/00--cluster.yaml b/testing/kuttl/e2e/replica-read/00--cluster.yaml index 9c79960116..c62f5418cd 100644 --- a/testing/kuttl/e2e/replica-read/00--cluster.yaml +++ b/testing/kuttl/e2e/replica-read/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: replica-read diff --git a/testing/kuttl/e2e/replica-read/00-assert.yaml b/testing/kuttl/e2e/replica-read/00-assert.yaml index f17d7dc0c8..17c2942eb0 100644 --- a/testing/kuttl/e2e/replica-read/00-assert.yaml +++ b/testing/kuttl/e2e/replica-read/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster 
metadata: name: replica-read diff --git a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml index 23f4fb9212..2d23e1e3d3 100644 --- a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner1 @@ -10,7 +10,7 @@ spec: replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner2 diff --git a/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml b/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml index a1fef98859..406465b691 100644 --- a/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner1 @@ -9,7 +9,7 @@ status: replicas: 1 updatedReplicas: 1 --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner2 diff --git a/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml b/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml index 84dfaf2de8..839f6a9b29 100644 --- a/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/02-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner2 
diff --git a/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml b/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml index 9cb7c13433..d8f159d59c 100644 --- a/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/02-errors.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner1 diff --git a/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml b/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml index 2939054a21..b117c4561b 100644 --- a/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/04-errors.yaml @@ -1,9 +1,9 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner1 --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: owner2 diff --git a/testing/kuttl/e2e/scaledown/00--create-cluster.yaml b/testing/kuttl/e2e/scaledown/00--create-cluster.yaml index 1e4586e172..50377c2fb6 100644 --- a/testing/kuttl/e2e/scaledown/00--create-cluster.yaml +++ b/testing/kuttl/e2e/scaledown/00--create-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown diff --git a/testing/kuttl/e2e/scaledown/00-assert.yaml b/testing/kuttl/e2e/scaledown/00-assert.yaml index fb08e124f0..b5fa5a9051 100644 --- a/testing/kuttl/e2e/scaledown/00-assert.yaml +++ b/testing/kuttl/e2e/scaledown/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster 
metadata: name: scaledown diff --git a/testing/kuttl/e2e/scaledown/01--update-cluster.yaml b/testing/kuttl/e2e/scaledown/01--update-cluster.yaml index 0fce072dc8..d6409a8fd1 100644 --- a/testing/kuttl/e2e/scaledown/01--update-cluster.yaml +++ b/testing/kuttl/e2e/scaledown/01--update-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown diff --git a/testing/kuttl/e2e/scaledown/01-assert.yaml b/testing/kuttl/e2e/scaledown/01-assert.yaml index b88ea56ca3..45bb0b6d04 100644 --- a/testing/kuttl/e2e/scaledown/01-assert.yaml +++ b/testing/kuttl/e2e/scaledown/01-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown diff --git a/testing/kuttl/e2e/scaledown/10--create-cluster.yaml b/testing/kuttl/e2e/scaledown/10--create-cluster.yaml index 9af2fd0103..3847e588c0 100644 --- a/testing/kuttl/e2e/scaledown/10--create-cluster.yaml +++ b/testing/kuttl/e2e/scaledown/10--create-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown1 diff --git a/testing/kuttl/e2e/scaledown/10-assert.yaml b/testing/kuttl/e2e/scaledown/10-assert.yaml index e1862b9e0d..cf8bcb461a 100644 --- a/testing/kuttl/e2e/scaledown/10-assert.yaml +++ b/testing/kuttl/e2e/scaledown/10-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown1 diff --git a/testing/kuttl/e2e/scaledown/12--update-cluster.yaml b/testing/kuttl/e2e/scaledown/12--update-cluster.yaml index a5cbdddb33..3b4f62094a 100644 --- 
a/testing/kuttl/e2e/scaledown/12--update-cluster.yaml +++ b/testing/kuttl/e2e/scaledown/12--update-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown1 diff --git a/testing/kuttl/e2e/scaledown/12-assert.yaml b/testing/kuttl/e2e/scaledown/12-assert.yaml index a409e60697..079435b67d 100644 --- a/testing/kuttl/e2e/scaledown/12-assert.yaml +++ b/testing/kuttl/e2e/scaledown/12-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown1 diff --git a/testing/kuttl/e2e/scaledown/20--create-cluster.yaml b/testing/kuttl/e2e/scaledown/20--create-cluster.yaml index 2454897b70..796f88db3c 100644 --- a/testing/kuttl/e2e/scaledown/20--create-cluster.yaml +++ b/testing/kuttl/e2e/scaledown/20--create-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown2 diff --git a/testing/kuttl/e2e/scaledown/20-assert.yaml b/testing/kuttl/e2e/scaledown/20-assert.yaml index d0392fce99..f65cef60b8 100644 --- a/testing/kuttl/e2e/scaledown/20-assert.yaml +++ b/testing/kuttl/e2e/scaledown/20-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown2 diff --git a/testing/kuttl/e2e/scaledown/21--update-cluster.yaml b/testing/kuttl/e2e/scaledown/21--update-cluster.yaml index f675d74a4d..02d8936d0b 100644 --- a/testing/kuttl/e2e/scaledown/21--update-cluster.yaml +++ b/testing/kuttl/e2e/scaledown/21--update-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: 
postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown2 diff --git a/testing/kuttl/e2e/scaledown/21-assert.yaml b/testing/kuttl/e2e/scaledown/21-assert.yaml index 80ac7ef0df..f137a616b8 100644 --- a/testing/kuttl/e2e/scaledown/21-assert.yaml +++ b/testing/kuttl/e2e/scaledown/21-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: scaledown2 diff --git a/testing/kuttl/e2e/security-context/00--cluster.yaml b/testing/kuttl/e2e/security-context/00--cluster.yaml index 0fa6a0881f..d754eedec6 100644 --- a/testing/kuttl/e2e/security-context/00--cluster.yaml +++ b/testing/kuttl/e2e/security-context/00--cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: security-context diff --git a/testing/kuttl/e2e/security-context/00-assert.yaml b/testing/kuttl/e2e/security-context/00-assert.yaml index 032bf5b2ad..6df19c6608 100644 --- a/testing/kuttl/e2e/security-context/00-assert.yaml +++ b/testing/kuttl/e2e/security-context/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: security-context diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml index b47b7f5e22..8ae250152f 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} 
+apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: elephant diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml index 05510cd1b3..5f8678e5e9 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: elephant diff --git a/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml b/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml index 7bcc1de3a0..44d1386b59 100644 --- a/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml +++ b/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: primary-cluster diff --git a/testing/kuttl/e2e/streaming-standby/01-assert.yaml b/testing/kuttl/e2e/streaming-standby/01-assert.yaml index 09e1ed27ab..55c820a116 100644 --- a/testing/kuttl/e2e/streaming-standby/01-assert.yaml +++ b/testing/kuttl/e2e/streaming-standby/01-assert.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: primary-cluster diff --git a/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml b/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml index b8a6a90fe2..ebe382041a 100644 --- a/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml +++ b/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: 
postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: standby-cluster diff --git a/testing/kuttl/e2e/streaming-standby/03-assert.yaml b/testing/kuttl/e2e/streaming-standby/03-assert.yaml index 571a3f5921..9c3a95c1d3 100644 --- a/testing/kuttl/e2e/streaming-standby/03-assert.yaml +++ b/testing/kuttl/e2e/streaming-standby/03-assert.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: standby-cluster diff --git a/testing/kuttl/e2e/switchover/01--cluster.yaml b/testing/kuttl/e2e/switchover/01--cluster.yaml index a4eb33c756..4c91dd85ec 100644 --- a/testing/kuttl/e2e/switchover/01--cluster.yaml +++ b/testing/kuttl/e2e/switchover/01--cluster.yaml @@ -1,6 +1,6 @@ --- # Create a cluster with multiple instances and manual switchover enabled. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: switchover diff --git a/testing/kuttl/e2e/switchover/01-assert.yaml b/testing/kuttl/e2e/switchover/01-assert.yaml index a724fb451e..b6b35e8126 100644 --- a/testing/kuttl/e2e/switchover/01-assert.yaml +++ b/testing/kuttl/e2e/switchover/01-assert.yaml @@ -1,5 +1,5 @@ --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: switchover diff --git a/testing/kuttl/e2e/switchover/03-assert.yaml b/testing/kuttl/e2e/switchover/03-assert.yaml index 2bbc350932..cad813362f 100644 --- a/testing/kuttl/e2e/switchover/03-assert.yaml +++ b/testing/kuttl/e2e/switchover/03-assert.yaml @@ -24,7 +24,7 @@ metadata: --- # All instances should be healthy. 
-apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: switchover diff --git a/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml b/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml index 0304ead6e0..ea69a7264f 100644 --- a/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml +++ b/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml @@ -7,7 +7,7 @@ data: CREATE TABLESPACE trial OWNER "tablespace-enabled" LOCATION '/tablespaces/library/data'; CREATE TABLESPACE castle OWNER "tablespace-enabled" LOCATION '/tablespaces/user/data'; --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: tablespace-enabled diff --git a/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml b/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml index 8bd0f2f7c5..ad436fc892 100644 --- a/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml +++ b/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml @@ -1,4 +1,4 @@ -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: tablespace-enabled diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/00--create-resources.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/00--create-resources.yaml index 55062f2f74..4ec3e7c22b 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/00--create-resources.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/00--create-resources.yaml @@ -1,6 +1,6 @@ --- # Create the cluster we will do an actual upgrade on -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/00-assert.yaml 
b/testing/kuttl/e2e/wal-pvc-pgupgrade/00-assert.yaml index 0d6b3a58e3..b3267d072b 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/00-assert.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/00-assert.yaml @@ -1,7 +1,7 @@ --- # Wait for the instances to be ready and the replica backup to complete # by waiting for the status to signal pods ready and pgbackrest stanza created -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml index 8bd06eb22c..6d44b8b23b 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/02--shutdown-cluster.yaml @@ -1,6 +1,6 @@ --- # Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml index 3efce5dbdb..fd9739c9e1 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/03--annotate-cluster.yaml @@ -1,6 +1,6 @@ --- # Annotate the cluster for an upgrade. 
-apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/03-assert.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/03-assert.yaml index be8ae1966c..0e5d8e7c20 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/03-assert.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/03-assert.yaml @@ -14,7 +14,7 @@ status: - type: "Succeeded" status: "True" --- -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml index 22bbb7e09a..95b122eed3 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/04--restart-cluster.yaml @@ -1,7 +1,7 @@ --- # Once the pgupgrade is finished, update the version and set shutdown to false # in the postgres cluster -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/04-assert.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/04-assert.yaml index 0296fd03c6..089d448cbd 100644 --- a/testing/kuttl/e2e/wal-pvc-pgupgrade/04-assert.yaml +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/04-assert.yaml @@ -1,6 +1,6 @@ --- # Wait for the instances to be ready with the target Postgres version. -apiVersion: postgres-operator.crunchydata.com/${KUTTL_PGCLUSTER_API_VERSION} +apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: name: wal-pvc-pgupgrade