-
Notifications
You must be signed in to change notification settings - Fork 56
Expand file tree
/
Copy pathresource.go
More file actions
631 lines (556 loc) · 23.1 KB
/
resource.go
File metadata and controls
631 lines (556 loc) · 23.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
package schedule
import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/hashicorp/terraform-plugin-framework-validators/int32validator"
	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
	serverbackup "github.com/stackitcloud/stackit-sdk-go/services/serverbackup/v2api"
	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
	serverbackupUtils "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/serverbackup/utils"
	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
)
// Ensure the implementation satisfies the expected interfaces.
// These compile-time assertions fail the build if scheduleResource stops
// implementing any of the framework interfaces it must support.
var (
	_ resource.Resource                = &scheduleResource{}
	_ resource.ResourceWithConfigure   = &scheduleResource{}
	_ resource.ResourceWithImportState = &scheduleResource{}
	_ resource.ResourceWithModifyPlan  = &scheduleResource{}
)
// Model maps the server backup schedule resource schema data.
type Model struct {
	ID               types.String                   `tfsdk:"id"`                 // internal ID: "project_id,region,server_id,backup_schedule_id"
	ProjectId        types.String                   `tfsdk:"project_id"`         // STACKIT project the server belongs to
	ServerId         types.String                   `tfsdk:"server_id"`          // server the schedule is attached to
	BackupScheduleId types.Int32                    `tfsdk:"backup_schedule_id"` // API-assigned schedule ID (computed)
	Name             types.String                   `tfsdk:"name"`               // schedule display name
	Rrule            types.String                   `tfsdk:"rrule"`              // RFC 5545 recurrence rule
	Enabled          types.Bool                     `tfsdk:"enabled"`            // whether the schedule is active
	BackupProperties *scheduleBackupPropertiesModel `tfsdk:"backup_properties"`  // per-backup settings; nil if absent in API response
	Region           types.String                   `tfsdk:"region"`             // effective region (config value or provider default)
}
// scheduleBackupPropertiesModel maps schedule backup_properties data
type scheduleBackupPropertiesModel struct {
	BackupName      types.String `tfsdk:"name"`             // name given to each created backup
	RetentionPeriod types.Int32  `tfsdk:"retention_period"` // retention period for the backups (minimum 1, per schema validator)
	VolumeIds       types.List   `tfsdk:"volume_ids"`       // optional list of volume IDs to back up; null means all/default (API-side behavior)
}
// NewScheduleResource is a helper function to simplify the provider implementation.
// It returns a fresh, unconfigured schedule resource; the framework calls
// Configure on it before any CRUD operation.
func NewScheduleResource() resource.Resource {
	var r scheduleResource
	return &r
}
// scheduleResource is the resource implementation.
type scheduleResource struct {
	// client talks to the server backup API; set in Configure.
	client *serverbackup.APIClient
	// providerData carries provider-level settings (e.g. default region); set in Configure.
	providerData core.ProviderData
}
// ModifyPlan implements resource.ResourceWithModifyPlan.
// Use the modifier to set the effective region in the current plan: the
// configured region (if any) is merged with the provider default and the
// result is written back into the plan.
func (r *scheduleResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { // nolint:gocritic // function signature required by Terraform
	// Skip the initial empty configuration to avoid follow-up errors.
	if req.Config.Raw.IsNull() {
		return
	}
	var configModel Model
	if resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...); resp.Diagnostics.HasError() {
		return
	}
	var planModel Model
	if resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...); resp.Diagnostics.HasError() {
		return
	}
	utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
	if resp.Diagnostics.HasError() {
		return
	}
	resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
}
// Metadata returns the resource type name.
// The full type name is the provider type name with the
// "_server_backup_schedule" suffix appended.
func (r *scheduleResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_server_backup_schedule"
}
// Configure adds the provider configured client to the resource.
func (r *scheduleResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	var ok bool
	// NOTE(review): ok presumably is false while the provider is not yet
	// configured (nil req.ProviderData) — see conversion.ParseProviderData.
	r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
	if !ok {
		return
	}
	apiClient := serverbackupUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
	if resp.Diagnostics.HasError() {
		return
	}
	r.client = apiClient
	tflog.Info(ctx, "Server backup client configured.")
}
// Schema defines the schema for the resource.
func (r *scheduleResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "Server backup schedule resource schema. Must have a `region` specified in the provider configuration.",
		Attributes: map[string]schema.Attribute{
			"id": schema.StringAttribute{
				Description: "Terraform's internal resource identifier. It is structured as \"`project_id`,`region`,`server_id`,`backup_schedule_id`\".",
				Computed:    true,
				PlanModifiers: []planmodifier.String{
					// Carry the known ID over from state instead of "(known after apply)".
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"name": schema.StringAttribute{
				Description: "The schedule name.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					// Renaming a schedule forces recreation of the resource.
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					stringvalidator.LengthBetween(1, 255),
				},
			},
			"backup_schedule_id": schema.Int32Attribute{
				Description: "Backup schedule ID.",
				Computed:    true,
				PlanModifiers: []planmodifier.Int32{
					int32planmodifier.UseStateForUnknown(),
				},
				Validators: []validator.Int32{
					int32validator.AtLeast(1),
				},
			},
			"project_id": schema.StringAttribute{
				Description: "STACKIT Project ID to which the server is associated.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					// The value becomes part of the internal ID, so it must not contain the separator.
					validate.NoSeparator(),
				},
			},
			"server_id": schema.StringAttribute{
				Description: "Server ID for the backup schedule.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"rrule": schema.StringAttribute{
				Description: "An `rrule` (Recurrence Rule) is a standardized string format used in iCalendar (RFC 5545) to define repeating events, and you can generate one by using a dedicated library or by using online generator tools to specify parameters like frequency, interval, and end dates.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.Rrule(),
					// NOTE(review): rrule is not part of the internal ID, yet NoSeparator()
					// is applied; if the separator is ",", valid rrules with list values
					// (e.g. BYDAY=MO,TU) would be rejected — confirm this is intended.
					validate.NoSeparator(),
				},
			},
			"enabled": schema.BoolAttribute{
				Description: "Is the backup schedule enabled or disabled.",
				Required:    true,
			},
			"backup_properties": schema.SingleNestedAttribute{
				Description: "Backup schedule details for the backups.",
				Required:    true,
				Attributes: map[string]schema.Attribute{
					"volume_ids": schema.ListAttribute{
						ElementType: types.StringType,
						Optional:    true,
						Validators: []validator.List{
							// If set at all, the list must not be empty; omit it instead.
							listvalidator.SizeAtLeast(1),
						},
					},
					"name": schema.StringAttribute{
						Required: true,
					},
					"retention_period": schema.Int32Attribute{
						Required: true,
						Validators: []validator.Int32{
							int32validator.AtLeast(1),
						},
					},
				},
			},
			"region": schema.StringAttribute{
				Optional: true,
				// must be computed to allow for storing the override value from the provider
				Computed:    true,
				Description: "The resource region. If not defined, the provider region is used.",
				PlanModifiers: []planmodifier.String{
					// Changing the region forces recreation of the resource.
					stringplanmodifier.RequiresReplace(),
				},
			},
		},
	}
}
// Create creates the resource and sets the initial Terraform state.
// It first (deprecated behavior) ensures the backup service is enabled for
// the server, then creates the schedule via the API and maps the response
// back into state.
func (r *scheduleResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
	// Retrieve the planned values.
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	ctx = core.InitProviderContext(ctx)
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	// Effective region: the model value overrides the provider default.
	region := r.providerData.GetRegionWithOverride(model.Region)
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "region", region)
	// Deprecation warning for enableBackupService.
	resp.Diagnostics.AddWarning("Deprecation warning",
		"This resource is using a built in function to enable the backup service which will be removed on 26.09.2026. "+
			"Use the new `server_backup_enable` resource instead to prevent unexpected behavior.")
	// Enable backups if not already enabled
	// Deprecated: This function will be removed on 26.09.2026. Use `server_backup_enable` resource instead.
	err := r.enableBackupsService(ctx, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Enabling server backup project before creation: %v", err))
		return
	}
	// Create new schedule
	payload, err := toCreatePayload(&model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Creating API payload: %v", err))
		return
	}
	scheduleResp, err := r.client.DefaultAPI.CreateBackupSchedule(ctx, projectId, serverId, region).CreateBackupSchedulePayload(*payload).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	ctx = core.LogResponse(ctx)
	ctx = tflog.SetField(ctx, "backup_schedule_id", scheduleResp.Id)
	// Map response body to schema
	err = mapFields(ctx, scheduleResp, &model, region)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule created.")
}
// Read refreshes the Terraform state with the latest data.
// A 404 from the API means the schedule is gone and removes the resource
// from state (so Terraform plans a re-create instead of erroring).
func (r *scheduleResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	ctx = core.InitProviderContext(ctx)
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt32()
	region := r.providerData.GetRegionWithOverride(model.Region)
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
	ctx = tflog.SetField(ctx, "region", region)
	// The API expects the schedule ID as a string path segment.
	scheduleResp, err := r.client.DefaultAPI.GetBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(int64(backupScheduleId), 10)).Execute()
	if err != nil {
		oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
		if ok && oapiErr.StatusCode == http.StatusNotFound {
			// Schedule no longer exists remotely; drop it from state.
			resp.State.RemoveResource(ctx)
			return
		}
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	ctx = core.LogResponse(ctx)
	// Map response body to schema
	err = mapFields(ctx, scheduleResp, &model, region)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	// Set refreshed state
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule read.")
}
// Update updates the resource and sets the updated Terraform state on success.
// Only `enabled` and backup_properties changes reach this path; name, rrule,
// project_id, server_id and region are RequiresReplace in the schema.
func (r *scheduleResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	ctx = core.InitProviderContext(ctx)
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt32()
	region := r.providerData.GetRegionWithOverride(model.Region)
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
	ctx = tflog.SetField(ctx, "region", region)
	// Update schedule
	payload, err := toUpdatePayload(&model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Creating API payload: %v", err))
		return
	}
	scheduleResp, err := r.client.DefaultAPI.UpdateBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(int64(backupScheduleId), 10)).UpdateBackupSchedulePayload(*payload).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	ctx = core.LogResponse(ctx)
	// Map response body to schema
	err = mapFields(ctx, scheduleResp, &model, region)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule updated.")
}
// Delete deletes the resource and removes the Terraform state on success.
// After deleting the schedule it additionally attempts to disable the backup
// service for the server (deprecated behavior, see disableBackupsService).
func (r *scheduleResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	ctx = core.InitProviderContext(ctx)
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt32()
	region := r.providerData.GetRegionWithOverride(model.Region)
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
	ctx = tflog.SetField(ctx, "region", region)
	err := r.client.DefaultAPI.DeleteBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(int64(backupScheduleId), 10)).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	ctx = core.LogResponse(ctx)
	tflog.Info(ctx, "Server backup schedule deleted.")
	// Disable backups service in case there are no backups and no backup schedules.
	// The schedule itself was already deleted, so an error here still leaves
	// the state removal to the framework after this diagnostic.
	err = r.disableBackupsService(ctx, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting server backup schedule", fmt.Sprintf("Disabling server backup service after deleting schedule: %v", err))
		return
	}
}
// ImportState imports a resource into the Terraform state on success.
// The expected format of the resource import identifier is: project_id,region,server_id,backup_schedule_id
func (r *scheduleResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	idParts := strings.Split(req.ID, core.Separator)
	if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
		core.LogAndAddError(ctx, &resp.Diagnostics,
			"Error importing server backup schedule",
			fmt.Sprintf("Expected import identifier with format [project_id],[region],[server_id],[backup_schedule_id], got %q", req.ID),
		)
		return
	}
	// The backup schedule ID is the fourth part of the import identifier.
	intId, err := strconv.ParseInt(idParts[3], 10, 64)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics,
			"Error importing server backup schedule",
			// Report the part that actually failed to parse (index 3, not 2).
			fmt.Sprintf("Expected backup_schedule_id to be int64, got %q", idParts[3]),
		)
		return
	}
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("server_id"), idParts[2])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("backup_schedule_id"), intId)...)
	tflog.Info(ctx, "Server backup schedule state imported.")
}
// mapFields copies the API response into the Terraform model and derives the
// internal resource ID. The caller-supplied region (not the response) is
// stored in the model, since the response does not carry it.
func mapFields(ctx context.Context, schedule *serverbackup.BackupSchedule, model *Model, region string) error {
	if schedule == nil {
		return fmt.Errorf("response input is nil")
	}
	if model == nil {
		return fmt.Errorf("model input is nil")
	}
	// Id is an int32 in the SDK; 0 means the response did not set it.
	// (The message says "nil" but the value is actually the zero value.)
	if schedule.Id == 0 {
		return fmt.Errorf("response id is nil")
	}
	model.BackupScheduleId = types.Int32Value(schedule.Id)
	// Internal ID format: "project_id,region,server_id,backup_schedule_id".
	model.ID = utils.BuildInternalTerraformId(
		model.ProjectId.ValueString(), region, model.ServerId.ValueString(),
		strconv.FormatInt(int64(model.BackupScheduleId.ValueInt32()), 10),
	)
	model.Name = types.StringValue(schedule.Name)
	model.Rrule = types.StringValue(schedule.Rrule)
	model.Enabled = types.BoolValue(schedule.Enabled)
	if schedule.BackupProperties == nil {
		// No backup properties in the response: clear them in the model too.
		model.BackupProperties = nil
		return nil
	}
	volIds := types.ListNull(types.StringType)
	if schedule.BackupProperties.VolumeIds != nil {
		var modelVolIds []string
		if model.BackupProperties != nil {
			var err error
			modelVolIds, err = utils.ListValuetoStringSlice(model.BackupProperties.VolumeIds)
			if err != nil {
				return err
			}
		}
		// Reconcile the config's volume ids with the response's — presumably
		// to preserve the config ordering and avoid spurious diffs; see
		// utils.ReconcileStringSlices for the exact semantics.
		respVolIds := schedule.BackupProperties.VolumeIds
		reconciledVolIds := utils.ReconcileStringSlices(modelVolIds, respVolIds)
		var diags diag.Diagnostics
		volIds, diags = types.ListValueFrom(ctx, types.StringType, reconciledVolIds)
		if diags.HasError() {
			return fmt.Errorf("failed to map volumeIds: %w", core.DiagsToError(diags))
		}
	}
	model.BackupProperties = &scheduleBackupPropertiesModel{
		BackupName:      types.StringValue(schedule.BackupProperties.Name),
		RetentionPeriod: types.Int32Value(schedule.BackupProperties.RetentionPeriod),
		VolumeIds:       volIds,
	}
	model.Region = types.StringValue(region)
	return nil
}
// enableBackupsService makes sure the backup service is active for the
// server referenced by the model. If already enabled, just continues.
// Deprecated: This function will be removed on 26.09.2026. Use `server_backup_enable` resource instead.
func (r *scheduleResource) enableBackupsService(ctx context.Context, model *Model) error {
	tflog.Debug(ctx, "Enabling server backup service")
	execErr := r.client.DefaultAPI.
		EnableServiceResource(ctx, model.ProjectId.ValueString(), model.ServerId.ValueString(), r.providerData.GetRegionWithOverride(model.Region)).
		EnableServiceResourcePayload(serverbackup.EnableServiceResourcePayload{}).
		Execute()
	if execErr == nil {
		tflog.Info(ctx, "Enabled server backup service")
		return nil
	}
	// The API reports "already active" only via the error message, so match on it.
	if strings.Contains(execErr.Error(), "Tried to activate already active service") {
		tflog.Debug(ctx, "Service for server backup already enabled")
		return nil
	}
	return fmt.Errorf("enable server backup service: %w", execErr)
}
// Disables only if no backup schedules are present and no backups are present
// NOTE(review): the code below only checks for remaining *backups*, not for
// remaining backup *schedules* as the comment claims — confirm whether
// deleting one schedule while others exist can wrongly disable the service.
// Deprecated: This function will be removed on 26.09.2026. Use `server_backup_enable` resource instead.
func (r *scheduleResource) disableBackupsService(ctx context.Context, model *Model) error {
	tflog.Debug(ctx, "Disabling server backup service (in case there are no backups and no backup schedules)")
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	region := r.providerData.GetRegionWithOverride(model.Region)
	tflog.Debug(ctx, "Checking for existing backups")
	backups, err := r.client.DefaultAPI.ListBackups(ctx, projectId, serverId, region).Execute()
	if err != nil {
		return fmt.Errorf("list backups: %w", err)
	}
	if len(backups.Items) > 0 {
		// Existing backups depend on the service; leave it enabled.
		tflog.Debug(ctx, "Backups found - will not disable server backup service")
		return nil
	}
	err = r.client.DefaultAPI.DisableServiceResource(ctx, projectId, serverId, region).Execute()
	if err != nil {
		return fmt.Errorf("disable server backup service: %w", err)
	}
	tflog.Info(ctx, "Disabled server backup service")
	return nil
}
// toCreatePayload converts the Terraform model into the API payload for
// creating a backup schedule. Returns an error on a nil model or when the
// volume id list cannot be converted.
func toCreatePayload(model *Model) (*serverbackup.CreateBackupSchedulePayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}
	var backupProperties serverbackup.BackupProperties
	if props := model.BackupProperties; props != nil {
		var volumeIds []string
		if !props.VolumeIds.IsNull() && !props.VolumeIds.IsUnknown() {
			converted, err := utils.ListValuetoStringSlice(props.VolumeIds)
			if err != nil {
				return nil, fmt.Errorf("convert volume id: %w", err)
			}
			volumeIds = converted
		}
		// we should provide null to the API in case no volumeIds were chosen, else it errors
		if len(volumeIds) == 0 {
			volumeIds = nil
		}
		backupProperties.Name = props.BackupName.ValueString()
		backupProperties.RetentionPeriod = props.RetentionPeriod.ValueInt32()
		backupProperties.VolumeIds = volumeIds
	}
	return &serverbackup.CreateBackupSchedulePayload{
		Enabled:          model.Enabled.ValueBool(),
		Name:             model.Name.ValueString(),
		Rrule:            model.Rrule.ValueString(),
		BackupProperties: &backupProperties,
	}, nil
}
// toUpdatePayload converts the Terraform model into the API payload for
// updating a backup schedule. Returns an error on a nil model or when the
// volume id list cannot be converted.
func toUpdatePayload(model *Model) (*serverbackup.UpdateBackupSchedulePayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}
	var backupProperties serverbackup.BackupProperties
	if props := model.BackupProperties; props != nil {
		var volumeIds []string
		if !props.VolumeIds.IsNull() && !props.VolumeIds.IsUnknown() {
			converted, err := utils.ListValuetoStringSlice(props.VolumeIds)
			if err != nil {
				return nil, fmt.Errorf("convert volume id: %w", err)
			}
			volumeIds = converted
		}
		// we should provide null to the API in case no volumeIds were chosen, else it errors
		if len(volumeIds) == 0 {
			volumeIds = nil
		}
		backupProperties.Name = props.BackupName.ValueString()
		backupProperties.RetentionPeriod = props.RetentionPeriod.ValueInt32()
		backupProperties.VolumeIds = volumeIds
	}
	return &serverbackup.UpdateBackupSchedulePayload{
		Enabled:          model.Enabled.ValueBool(),
		Name:             model.Name.ValueString(),
		Rrule:            model.Rrule.ValueString(),
		BackupProperties: &backupProperties,
	}, nil
}