Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions .github/workflows/publish_ghcr_image.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ name: Publish multiarch postgres-operator images on ghcr.io
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
IMAGE_NAME_POOLER: ${{ github.repository }}-pooler
IMAGE_NAME_UI: ${{ github.repository }}-ui

on:
Expand Down Expand Up @@ -34,6 +35,12 @@ jobs:
OPERATOR_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF/refs\/tags\//}"
echo "OPERATOR_IMAGE=$OPERATOR_IMAGE" >> $GITHUB_OUTPUT

- name: Define pooler image name
id: image_pooler
run: |
POOLER_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME_POOLER }}:${GITHUB_REF/refs\/tags\//}"
echo "POOLER_IMAGE=$POOLER_IMAGE" >> $GITHUB_OUTPUT

- name: Define UI image name
id: image_ui
run: |
Expand Down Expand Up @@ -69,6 +76,15 @@ jobs:
tags: "${{ steps.image.outputs.OPERATOR_IMAGE }}"
platforms: linux/amd64,linux/arm64

- name: Build and push multiarch pooler image to ghcr
uses: docker/build-push-action@v3
with:
context: pooler
push: true
build-args: BASE_IMAGE=alpine:3.22
tags: "${{ steps.image_pooler.outputs.POOLER_IMAGE }}"
platforms: linux/amd64,linux/arm64

- name: Build and push multiarch ui image to ghcr
uses: docker/build-push-action@v3
with:
Expand Down
27 changes: 27 additions & 0 deletions delivery.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,33 @@ pipeline:
-f docker/Dockerfile \
--push .

- id: build-pooler
env:
<<: *BUILD_ENV
type: script
vm_config:
type: linux

commands:
- desc: Build image
cmd: |
cd pooler
if [ -z ${CDP_SOURCE_BRANCH} ]; then
IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-pooler
else
IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-pooler-test
fi

docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use
docker buildx build --platform "linux/amd64,linux/arm64" \
--build-arg BASE_IMAGE="${ALPINE_BASE_IMAGE}" \
-t "${IMAGE}:${CDP_BUILD_VERSION}" \
--push .

if [ -z ${CDP_SOURCE_BRANCH} ]; then
cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION}
fi

- id: build-operator-ui
env:
<<: *BUILD_ENV
Expand Down
10 changes: 5 additions & 5 deletions e2e/tests/test_e2e.py
Original file line number Diff line number Diff line change
Expand Up @@ -698,7 +698,7 @@ def test_enable_disable_connection_pooler(self):
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label), 2, "No pooler pods found")
self.eventuallyEqual(lambda: k8s.count_running_pods(replica_pooler_label), 2, "No pooler replica pods found")
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label), 2, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 1, "Pooler secret not created")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 3, "Not all pooler secrets found")

# TLS still enabled so check existing env variables and volume mounts
self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("CONNECTION_POOLER_CLIENT_TLS_CRT", pooler_label), 4, "TLS env variable CONNECTION_POOLER_CLIENT_TLS_CRT missing in pooler pods")
Expand Down Expand Up @@ -758,7 +758,7 @@ def test_enable_disable_connection_pooler(self):
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
1, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
1, "Secret not created")
2, "Not all pooler secrets created")

# Turn off only replica connection pooler
k8s.api.custom_objects_api.patch_namespaced_custom_object(
Expand Down Expand Up @@ -786,7 +786,7 @@ def test_enable_disable_connection_pooler(self):
'ClusterIP',
"Expected LoadBalancer service type for master, found {}")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
1, "Secret not created")
2, "Not all pooler secrets created")

# scale up connection pooler deployment
k8s.api.custom_objects_api.patch_namespaced_custom_object(
Expand Down Expand Up @@ -821,8 +821,8 @@ def test_enable_disable_connection_pooler(self):
0, "Pooler pods not scaled down")
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
0, "Pooler service not removed")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'),
4, "Secrets not deleted")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
0, "Not all pooler secrets deleted")

# Verify that all the databases have pooler schema installed.
# Do this via psql, since otherwise we need to deal with
Expand Down
6 changes: 0 additions & 6 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs=
github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
Expand Down Expand Up @@ -113,8 +111,6 @@ github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
Expand All @@ -126,7 +122,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
Expand Down Expand Up @@ -170,7 +165,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
Expand Down
119 changes: 116 additions & 3 deletions pkg/cluster/connection_pooler.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ var poolerRunAsGroup = int64(101)

// ConnectionPoolerObjects K8s objects that are belong to connection pooler
type ConnectionPoolerObjects struct {
AuthSecret *v1.Secret
Deployment *appsv1.Deployment
Service *v1.Service
Name string
Expand Down Expand Up @@ -167,6 +168,38 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncRe
return reason, nil
}

// generateUserlist renders the content of pgBouncer's userlist.txt:
// one `"username" "password"` pair per line. The pooler admin user
// (taken from c.systemUsers) always comes first, followed by every
// infrastructure role that has a non-empty password.
//
// Role names are emitted in sorted order because Go randomizes map
// iteration; without sorting, the generated secret content would differ
// between syncs and trigger spurious secret updates.
func (c *Cluster) generateUserlist() string {
	var sb strings.Builder

	poolerAdminUser := c.systemUsers[constants.ConnectionPoolerUserKeyName]
	fmt.Fprintf(&sb, "\"%s\" \"%s\"\n", poolerAdminUser.Name, poolerAdminUser.Password)

	roleNames := make([]string, 0, len(c.InfrastructureRoles))
	for roleName := range c.InfrastructureRoles {
		roleNames = append(roleNames, roleName)
	}
	// Insertion sort keeps the output deterministic without relying on
	// additional imports; the number of infrastructure roles is small.
	for i := 1; i < len(roleNames); i++ {
		for j := i; j > 0 && roleNames[j] < roleNames[j-1]; j-- {
			roleNames[j], roleNames[j-1] = roleNames[j-1], roleNames[j]
		}
	}

	for _, roleName := range roleNames {
		if infraRole := c.InfrastructureRoles[roleName]; infraRole.Password != "" {
			fmt.Fprintf(&sb, "\"%s\" \"%s\"\n", roleName, infraRole.Password)
		}
	}

	return sb.String()
}

// generateConnectionPoolerAuthSecret builds the Opaque secret that holds
// userlist.txt for a connection pooler, so pgBouncer can authenticate
// database users. The secret is named "<pooler-name>-userlist" and
// carries the pooler's labels, the operator-managed annotations, and the
// cluster owner references.
func (c *Cluster) generateConnectionPoolerAuthSecret(connectionPooler *ConnectionPoolerObjects) *v1.Secret {
	meta := metav1.ObjectMeta{
		Name:            fmt.Sprintf("%s-userlist", connectionPooler.Name),
		Namespace:       connectionPooler.Namespace,
		Labels:          c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels,
		Annotations:     c.annotationsSet(nil),
		OwnerReferences: c.ownerReferences(),
	}

	// StringData accepts plain text; the API server converts it into the
	// base64-encoded Data field on write.
	return &v1.Secret{
		ObjectMeta: meta,
		Type:       v1.SecretTypeOpaque,
		StringData: map[string]string{
			"userlist.txt": c.generateUserlist(),
		},
	}
}

// Generate pool size related environment variables.
//
// MAX_DB_CONN would specify the global maximum for connections to a target
Expand Down Expand Up @@ -320,6 +353,18 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
}
envVars = append(envVars, c.getConnectionPoolerEnvVars()...)

infraRolesList := make([]string, 0)
for infraRoleName := range c.InfrastructureRoles {
infraRolesList = append(infraRolesList, infraRoleName)
}

if len(infraRolesList) > 0 {
envVars = append(envVars, v1.EnvVar{
Name: "INFRASTRUCTURE_ROLES",
Value: strings.Join(infraRolesList, ","),
})
}

poolerContainer := v1.Container{
Name: connectionPoolerContainer,
Image: effectiveDockerImage,
Expand All @@ -343,12 +388,29 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
},
}

var poolerVolumes []v1.Volume
var volumeMounts []v1.VolumeMount

// mount secret volume with userlist.txt for pgBouncer to authenticate users
poolerVolumes = append(poolerVolumes, v1.Volume{
Name: fmt.Sprintf("%s-userlist-volume", c.connectionPoolerName(role)),
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: fmt.Sprintf("%s-userlist", c.connectionPoolerName(role)),
},
},
})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: fmt.Sprintf("%s-userlist-volume", c.connectionPoolerName(role)),
MountPath: "/etc/pgbouncer/userlist.txt",
SubPath: "userlist.txt",
ReadOnly: true,
})

// If the cluster has custom TLS certificates configured, we do the following:
// 1. Add environment variables to tell pgBouncer where to find the TLS certificates
// 2. Reference the secret in a volume
// 3. Mount the volume to the container at /tls
var poolerVolumes []v1.Volume
var volumeMounts []v1.VolumeMount
if spec.TLS != nil && spec.TLS.SecretName != "" {
getPoolerTLSEnv := func(k string) string {
keyName := ""
Expand Down Expand Up @@ -639,12 +701,31 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
c.logger.Infof("connection pooler service %s has been deleted for role %s", service.Name, role)
}

// Repeat the same for the auth secret
authSecret := c.ConnectionPooler[role].AuthSecret
if authSecret == nil {
c.logger.Debug("no connection pooler auth secret to delete")
} else {
err := c.KubeClient.
Secrets(c.Namespace).
Delete(context.TODO(), authSecret.Name, metav1.DeleteOptions{})

if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("connection pooler auth secret %s for role %s has already been deleted", authSecret.Name, role)
} else if err != nil {
return fmt.Errorf("could not delete connection pooler auth secret: %v", err)
}

c.logger.Infof("connection pooler auth secret %s has been deleted for role %s", authSecret.Name, role)
}

c.ConnectionPooler[role].AuthSecret = nil
c.ConnectionPooler[role].Deployment = nil
c.ConnectionPooler[role].Service = nil
return nil
}

// delete connection pooler
// delete connection pooler secret
func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
// Repeat the same for the secret object
secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User)
Expand All @@ -660,6 +741,7 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
return fmt.Errorf("could not delete pooler secret: %v", err)
}
}

return nil
}

Expand Down Expand Up @@ -975,11 +1057,42 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
pods []v1.Pod
service *v1.Service
newService *v1.Service
authSecret *v1.Secret
newAuthSecret *v1.Secret
err error
)

updatedPodAnnotations := map[string]*string{}
syncReason := make([]string, 0)

// create extra secret for connection pooler authentication
newAuthSecret = c.generateConnectionPoolerAuthSecret(c.ConnectionPooler[role])
if authSecret, err = c.KubeClient.Secrets(c.Namespace).Get(context.TODO(), fmt.Sprintf("%s-userlist", c.connectionPoolerName(role)), metav1.GetOptions{}); err == nil {
c.ConnectionPooler[role].AuthSecret = authSecret
// make sure existing annotations are preserved
newAuthSecret.Annotations = c.annotationsSet(authSecret.Annotations)
authSecret, err = c.KubeClient.Secrets(authSecret.Namespace).Update(context.TODO(), newAuthSecret, metav1.UpdateOptions{})
if err != nil {
return NoSync, fmt.Errorf("could not update connection pooler auth secret: %v", err)
}
c.ConnectionPooler[role].AuthSecret = authSecret
} else if !k8sutil.ResourceNotFound(err) {
return NoSync, fmt.Errorf("could not get auth secret for connection pooler to sync: %v", err)
}

if k8sutil.ResourceNotFound(err) {
c.logger.Warningf("auth secret %s for connection pooler is not found, create it", fmt.Sprintf("%s-userlist", c.connectionPoolerName(role)))
authSecret, err = c.KubeClient.
Secrets(newAuthSecret.Namespace).
Create(context.TODO(), newAuthSecret, metav1.CreateOptions{})

if err != nil {
return NoSync, err
}
c.ConnectionPooler[role].AuthSecret = authSecret
}

// next the pooler deployment
deployment, err = c.KubeClient.
Deployments(c.Namespace).
Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{})
Expand Down
5 changes: 5 additions & 0 deletions pkg/cluster/connection_pooler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ func newFakeK8sPoolerTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
StatefulSetsGetter: clientSet.AppsV1(),
DeploymentsGetter: clientSet.AppsV1(),
ServicesGetter: clientSet.CoreV1(),
SecretsGetter: clientSet.CoreV1(),
}, clientSet
}

Expand Down Expand Up @@ -803,6 +804,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
AuthSecret: nil,
Deployment: nil,
Service: nil,
LookupFunction: true,
Expand Down Expand Up @@ -1019,6 +1021,7 @@ func TestPoolerTLS(t *testing.T) {
// create pooler resources
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{
AuthSecret: nil,
Deployment: nil,
Service: nil,
Name: cluster.connectionPoolerName(Master),
Expand Down Expand Up @@ -1089,12 +1092,14 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
AuthSecret: nil,
Deployment: nil,
Service: nil,
LookupFunction: false,
Role: Master,
},
Replica: {
AuthSecret: nil,
Deployment: nil,
Service: nil,
LookupFunction: false,
Expand Down
1 change: 1 addition & 0 deletions pkg/cluster/k8sres_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2967,6 +2967,7 @@ func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) {
DeploymentsGetter: clientSet.AppsV1(),
PodsGetter: clientSet.CoreV1(),
ServicesGetter: clientSet.CoreV1(),
SecretsGetter: clientSet.CoreV1(),
}, clientSet
}

Expand Down
Loading
Loading