Skip to content

Commit 97115d6

Browse files
authored
add annotation to ignore resources thresholds (#3030)
* add annotation to ignore resources thresholds
* add test case when annotation key is set but value is not true
1 parent a585b17 commit 97115d6

12 files changed

Lines changed: 105 additions & 9 deletions

File tree

charts/postgres-operator/crds/operatorconfigurations.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,8 @@ spec:
9696
default: ""
9797
ignore_instance_limits_annotation_key:
9898
type: string
99+
ignore_resources_limits_annotation_key:
100+
type: string
99101
kubernetes_use_configmaps:
100102
type: boolean
101103
default: false

charts/postgres-operator/values.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,9 @@ configGeneral:
4343
# key name for annotation to ignore globally configured instance limits
4444
# ignore_instance_limits_annotation_key: ""
4545

46+
# key name for annotation to ignore globally configured resources thresholds
47+
# ignore_resources_limits_annotation_key: ""
48+
4649
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
4750
# kubernetes_use_configmaps: false
4851

docs/reference/operator_parameters.md

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,15 @@ Those are top-level keys, containing both leaf keys and groups.
163163
for some clusters it might be required to scale beyond the limits that can be
164164
configured with `min_instances` and `max_instances` options. You can define
165165
an annotation key that can be used as a toggle in cluster manifests to ignore
166-
globally configured instance limits. The default is empty.
166+
globally configured instance limits. The value must be `"true"` to be
167+
effective. The default is empty which means the feature is disabled.
168+
169+
* **ignore_resources_limits_annotation_key**
170+
for some clusters it might be required to request resources beyond the globally
171+
configured thresholds for maximum requests and minimum limits. You can define
172+
an annotation key that can be used as a toggle in cluster manifests to ignore
173+
the thresholds. The value must be `"true"` to be effective. The default is empty
174+
which means the feature is disabled.
167175

168176
* **resync_period**
169177
period between consecutive sync requests. The default is `30m`.

manifests/configmap.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,7 @@ data:
7575
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
7676
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
7777
# ignore_instance_limits_annotation_key: ""
78+
# ignore_resources_limits_annotation_key: ""
7879
# inherited_annotations: owned-by
7980
# inherited_labels: application,environment
8081
# kube_iam_role: ""

manifests/operatorconfiguration.crd.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,8 @@ spec:
9494
default: ""
9595
ignore_instance_limits_annotation_key:
9696
type: string
97+
ignore_resources_limits_annotation_key:
98+
type: string
9799
kubernetes_use_configmaps:
98100
type: boolean
99101
default: false

manifests/postgresql-operator-default-configuration.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ configuration:
1414
enable_team_id_clustername_prefix: false
1515
etcd_host: ""
1616
# ignore_instance_limits_annotation_key: ""
17+
# ignore_resources_limits_annotation_key: ""
1718
# kubernetes_use_configmaps: false
1819
max_instances: -1
1920
min_instances: -1

pkg/apis/acid.zalan.do/v1/crds.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
122122
"ignore_instance_limits_annotation_key": {
123123
Type: "string",
124124
},
125+
"ignore_resources_limits_annotation_key": {
126+
Type: "string",
127+
},
125128
"kubernetes_use_configmaps": {
126129
Type: "boolean",
127130
},

pkg/apis/acid.zalan.do/v1/operator_configuration_type.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,8 @@ type OperatorConfigurationData struct {
288288
MinInstances int32 `json:"min_instances,omitempty"`
289289
MaxInstances int32 `json:"max_instances,omitempty"`
290290
IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
291+
292+
IgnoreResourcesLimitsAnnotationKey string `json:"ignore_resources_limits_annotation_key,omitempty"`
291293
}
292294

293295
// Duration shortens this frequently used name

pkg/cluster/k8sres.go

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -313,6 +313,14 @@ func (c *Cluster) generateResourceRequirements(
313313
specLimits := acidv1.ResourceDescription{}
314314
result := v1.ResourceRequirements{}
315315

316+
enforceThresholds := true
317+
resourcesLimitAnnotationKey := c.OpConfig.IgnoreResourcesLimitsAnnotationKey
318+
if resourcesLimitAnnotationKey != "" {
319+
if value, exists := c.ObjectMeta.Annotations[resourcesLimitAnnotationKey]; exists && value == "true" {
320+
enforceThresholds = false
321+
}
322+
}
323+
316324
if resources != nil {
317325
specRequests = resources.ResourceRequests
318326
specLimits = resources.ResourceLimits
@@ -329,7 +337,7 @@ func (c *Cluster) generateResourceRequirements(
329337
}
330338

331339
// enforce minimum cpu and memory limits for Postgres containers only
332-
if containerName == constants.PostgresContainerName {
340+
if containerName == constants.PostgresContainerName && enforceThresholds {
333341
if err = c.enforceMinResourceLimits(&result); err != nil {
334342
return nil, fmt.Errorf("could not enforce minimum resource limits: %v", err)
335343
}
@@ -344,7 +352,7 @@ func (c *Cluster) generateResourceRequirements(
344352
}
345353

346354
// enforce maximum cpu and memory requests for Postgres containers only
347-
if containerName == constants.PostgresContainerName {
355+
if containerName == constants.PostgresContainerName && enforceThresholds {
348356
if err = c.enforceMaxResourceRequests(&result); err != nil {
349357
return nil, fmt.Errorf("could not enforce maximum resource requests: %v", err)
350358
}

pkg/cluster/k8sres_test.go

Lines changed: 69 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3130,6 +3130,9 @@ func TestGenerateResourceRequirements(t *testing.T) {
31303130
PodRoleLabel: "spilo-role",
31313131
}
31323132

3133+
configWithEnabledIgnoreResourcesLimits := configResources
3134+
configWithEnabledIgnoreResourcesLimits.IgnoreResourcesLimitsAnnotationKey = "zalando.org/ignore-resources-limits"
3135+
31333136
tests := []struct {
31343137
subTest string
31353138
config config.Config
@@ -3465,14 +3468,15 @@ func TestGenerateResourceRequirements(t *testing.T) {
34653468
{
34663469
subTest: "test enforcing min cpu and memory limit",
34673470
config: config.Config{
3468-
Resources: configResources,
3471+
Resources: configWithEnabledIgnoreResourcesLimits,
34693472
PodManagementPolicy: "ordered_ready",
34703473
SetMemoryRequestToLimit: false,
34713474
},
34723475
pgSpec: acidv1.Postgresql{
34733476
ObjectMeta: metav1.ObjectMeta{
3474-
Name: clusterName,
3475-
Namespace: namespace,
3477+
Name: clusterName,
3478+
Namespace: namespace,
3479+
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "false"},
34763480
},
34773481
Spec: acidv1.PostgresSpec{
34783482
Resources: &acidv1.Resources{
@@ -3490,6 +3494,35 @@ func TestGenerateResourceRequirements(t *testing.T) {
34903494
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("250Mi")},
34913495
},
34923496
},
3497+
{
3498+
subTest: "ignore min cpu and memory limit threshold",
3499+
config: config.Config{
3500+
Resources: configWithEnabledIgnoreResourcesLimits,
3501+
PodManagementPolicy: "ordered_ready",
3502+
SetMemoryRequestToLimit: false,
3503+
},
3504+
pgSpec: acidv1.Postgresql{
3505+
ObjectMeta: metav1.ObjectMeta{
3506+
Name: clusterName,
3507+
Namespace: namespace,
3508+
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "true"},
3509+
},
3510+
Spec: acidv1.PostgresSpec{
3511+
Resources: &acidv1.Resources{
3512+
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
3513+
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
3514+
},
3515+
TeamID: "acid",
3516+
Volume: acidv1.Volume{
3517+
Size: "1G",
3518+
},
3519+
},
3520+
},
3521+
expectedResources: acidv1.Resources{
3522+
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
3523+
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
3524+
},
3525+
},
34933526
{
34943527
subTest: "test min cpu and memory limit are not enforced on sidecar",
34953528
config: config.Config{
@@ -3527,14 +3560,15 @@ func TestGenerateResourceRequirements(t *testing.T) {
35273560
{
35283561
subTest: "test enforcing max cpu and memory requests",
35293562
config: config.Config{
3530-
Resources: configResources,
3563+
Resources: configWithEnabledIgnoreResourcesLimits,
35313564
PodManagementPolicy: "ordered_ready",
35323565
SetMemoryRequestToLimit: false,
35333566
},
35343567
pgSpec: acidv1.Postgresql{
35353568
ObjectMeta: metav1.ObjectMeta{
3536-
Name: clusterName,
3537-
Namespace: namespace,
3569+
Name: clusterName,
3570+
Namespace: namespace,
3571+
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "yes"},
35383572
},
35393573
Spec: acidv1.PostgresSpec{
35403574
Resources: &acidv1.Resources{
@@ -3552,6 +3586,35 @@ func TestGenerateResourceRequirements(t *testing.T) {
35523586
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
35533587
},
35543588
},
3589+
{
3590+
subTest: "ignore max cpu and memory requests limit",
3591+
config: config.Config{
3592+
Resources: configWithEnabledIgnoreResourcesLimits,
3593+
PodManagementPolicy: "ordered_ready",
3594+
SetMemoryRequestToLimit: false,
3595+
},
3596+
pgSpec: acidv1.Postgresql{
3597+
ObjectMeta: metav1.ObjectMeta{
3598+
Name: clusterName,
3599+
Namespace: namespace,
3600+
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "true"},
3601+
},
3602+
Spec: acidv1.PostgresSpec{
3603+
Resources: &acidv1.Resources{
3604+
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
3605+
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
3606+
},
3607+
TeamID: "acid",
3608+
Volume: acidv1.Volume{
3609+
Size: "1G",
3610+
},
3611+
},
3612+
},
3613+
expectedResources: acidv1.Resources{
3614+
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
3615+
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
3616+
},
3617+
},
35553618
{
35563619
subTest: "test SetMemoryRequestToLimit flag but raise only until max memory request",
35573620
config: config.Config{

0 commit comments

Comments
 (0)