Skip to content

Commit 513808a

Browse files
authored
chore: rename model serving to AI model serving (#751)
1 parent 862db91 commit 513808a

File tree

5 files changed

+49
-49
lines changed

5 files changed

+49
-49
lines changed

docs/index.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de
162162
- `loadbalancer_custom_endpoint` (String) Custom endpoint for the Load Balancer service
163163
- `logme_custom_endpoint` (String) Custom endpoint for the LogMe service
164164
- `mariadb_custom_endpoint` (String) Custom endpoint for the MariaDB service
165-
- `modelserving_custom_endpoint` (String) Custom endpoint for the Model Serving service
165+
- `modelserving_custom_endpoint` (String) Custom endpoint for the AI Model Serving service
166166
- `mongodbflex_custom_endpoint` (String) Custom endpoint for the MongoDB Flex service
167167
- `objectstorage_custom_endpoint` (String) Custom endpoint for the Object Storage service
168168
- `observability_custom_endpoint` (String) Custom endpoint for the Observability service

docs/resources/modelserving_token.md

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33
page_title: "stackit_modelserving_token Resource - stackit"
44
subcategory: ""
55
description: |-
6-
Model Serving Auth Token Resource schema.
6+
AI Model Serving Auth Token Resource schema.
77
Example Usage
8-
Automatically rotate model serving token
8+
Automatically rotate AI model serving token
99
1010
resource "time_rotating" "rotate" {
1111
rotation_days = 80
@@ -24,11 +24,11 @@ description: |-
2424

2525
# stackit_modelserving_token (Resource)
2626

27-
Model Serving Auth Token Resource schema.
27+
AI Model Serving Auth Token Resource schema.
2828

2929
## Example Usage
3030

31-
### Automatically rotate model serving token
31+
### Automatically rotate AI model serving token
3232
```terraform
3333
resource "time_rotating" "rotate" {
3434
rotation_days = 80
@@ -52,20 +52,20 @@ resource "stackit_modelserving_token" "example" {
5252

5353
### Required
5454

55-
- `name` (String) Name of the model serving auth token.
56-
- `project_id` (String) STACKIT project ID to which the model serving auth token is associated.
55+
- `name` (String) Name of the AI model serving auth token.
56+
- `project_id` (String) STACKIT project ID to which the AI model serving auth token is associated.
5757

5858
### Optional
5959

60-
- `description` (String) The description of the model serving auth token.
61-
- `region` (String) Region to which the model serving auth token is associated. If not defined, the provider region is used
60+
- `description` (String) The description of the AI model serving auth token.
61+
- `region` (String) Region to which the AI model serving auth token is associated. If not defined, the provider region is used
6262
- `rotate_when_changed` (Map of String) A map of arbitrary key/value pairs that will force recreation of the token when they change, enabling token rotation based on external conditions such as a rotating timestamp. Changing this forces a new resource to be created.
63-
- `ttl_duration` (String) The TTL duration of the model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s
63+
- `ttl_duration` (String) The TTL duration of the AI model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s
6464

6565
### Read-Only
6666

6767
- `id` (String) Terraform's internal data source. ID. It is structured as "`project_id`,`region`,`token_id`".
68-
- `state` (String) State of the model serving auth token.
69-
- `token` (String, Sensitive) Content of the model serving auth token.
70-
- `token_id` (String) The model serving auth token ID.
71-
- `valid_until` (String) The time until the model serving auth token is valid.
68+
- `state` (String) State of the AI model serving auth token.
69+
- `token` (String, Sensitive) Content of the AI model serving auth token.
70+
- `token_id` (String) The AI model serving auth token ID.
71+
- `valid_until` (String) The time until the AI model serving auth token is valid.

stackit/internal/services/modelserving/token/description.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
Model Serving Auth Token Resource schema.
1+
AI Model Serving Auth Token Resource schema.
22

33
## Example Usage
44

5-
### Automatically rotate model serving token
5+
### Automatically rotate AI model serving token
66
```terraform
77
resource "time_rotating" "rotate" {
88
rotation_days = 80

stackit/internal/services/modelserving/token/resource.go

Lines changed: 32 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
199199
Computed: true,
200200
},
201201
"project_id": schema.StringAttribute{
202-
Description: "STACKIT project ID to which the model serving auth token is associated.",
202+
Description: "STACKIT project ID to which the AI model serving auth token is associated.",
203203
Required: true,
204204
Validators: []validator.String{
205205
validate.UUID(),
@@ -210,21 +210,21 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
210210
Optional: true,
211211
// must be computed to allow for storing the override value from the provider
212212
Computed: true,
213-
Description: "Region to which the model serving auth token is associated. If not defined, the provider region is used",
213+
Description: "Region to which the AI model serving auth token is associated. If not defined, the provider region is used",
214214
PlanModifiers: []planmodifier.String{
215215
stringplanmodifier.RequiresReplace(),
216216
},
217217
},
218218
"token_id": schema.StringAttribute{
219-
Description: "The model serving auth token ID.",
219+
Description: "The AI model serving auth token ID.",
220220
Computed: true,
221221
Validators: []validator.String{
222222
validate.UUID(),
223223
validate.NoSeparator(),
224224
},
225225
},
226226
"ttl_duration": schema.StringAttribute{
227-
Description: "The TTL duration of the model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s",
227+
Description: "The TTL duration of the AI model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s",
228228
Required: false,
229229
Optional: true,
230230
PlanModifiers: []planmodifier.String{
@@ -247,31 +247,31 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
247247
},
248248
},
249249
"description": schema.StringAttribute{
250-
Description: "The description of the model serving auth token.",
250+
Description: "The description of the AI model serving auth token.",
251251
Required: false,
252252
Optional: true,
253253
Validators: []validator.String{
254254
stringvalidator.LengthBetween(1, 2000),
255255
},
256256
},
257257
"name": schema.StringAttribute{
258-
Description: "Name of the model serving auth token.",
258+
Description: "Name of the AI model serving auth token.",
259259
Required: true,
260260
Validators: []validator.String{
261261
stringvalidator.LengthBetween(1, 200),
262262
},
263263
},
264264
"state": schema.StringAttribute{
265-
Description: "State of the model serving auth token.",
265+
Description: "State of the AI model serving auth token.",
266266
Computed: true,
267267
},
268268
"token": schema.StringAttribute{
269-
Description: "Content of the model serving auth token.",
269+
Description: "Content of the AI model serving auth token.",
270270
Computed: true,
271271
Sensitive: true,
272272
},
273273
"valid_until": schema.StringAttribute{
274-
Description: "The time until the model serving auth token is valid.",
274+
Description: "The time until the AI model serving auth token is valid.",
275275
Computed: true,
276276
},
277277
},
@@ -300,14 +300,14 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
300300
ctx = tflog.SetField(ctx, "project_id", projectId)
301301
ctx = tflog.SetField(ctx, "region", region)
302302

303-
// If model serving is not enabled, enable it
303+
// If AI model serving is not enabled, enable it
304304
err := r.serviceEnablementClient.EnableServiceRegional(ctx, region, projectId, utils.ModelServingServiceId).
305305
Execute()
306306
if err != nil {
307307
var oapiErr *oapierror.GenericOpenAPIError
308308
if errors.As(err, &oapiErr) {
309309
if oapiErr.StatusCode == http.StatusNotFound {
310-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error enabling model serving",
310+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error enabling AI model serving",
311311
fmt.Sprintf("Service not available in region %s \n%v", region, err),
312312
)
313313
return
@@ -316,8 +316,8 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
316316
core.LogAndAddError(
317317
ctx,
318318
&resp.Diagnostics,
319-
"Error enabling model serving",
320-
fmt.Sprintf("Error enabling model serving: %v", err),
319+
"Error enabling AI model serving",
320+
fmt.Sprintf("Error enabling AI model serving: %v", err),
321321
)
322322
return
323323
}
@@ -328,43 +328,43 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
328328
core.LogAndAddError(
329329
ctx,
330330
&resp.Diagnostics,
331-
"Error enabling model serving",
332-
fmt.Sprintf("Error enabling model serving: %v", err),
331+
"Error enabling AI model serving",
332+
fmt.Sprintf("Error enabling AI model serving: %v", err),
333333
)
334334
return
335335
}
336336

337337
// Generate API request body from model
338338
payload, err := toCreatePayload(&model)
339339
if err != nil {
340-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
340+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
341341
return
342342
}
343343

344-
// Create new model serving auth token
344+
// Create new AI model serving auth token
345345
createTokenResp, err := r.client.CreateToken(ctx, region, projectId).
346346
CreateTokenPayload(*payload).
347347
Execute()
348348
if err != nil {
349349
core.LogAndAddError(
350350
ctx,
351351
&resp.Diagnostics,
352-
"Error creating model serving auth token",
352+
"Error creating AI model serving auth token",
353353
fmt.Sprintf("Calling API: %v", err),
354354
)
355355
return
356356
}
357357

358358
waitResp, err := wait.CreateModelServingWaitHandler(ctx, r.client, region, projectId, *createTokenResp.Token.Id).WaitWithContext(ctx)
359359
if err != nil {
360-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Waiting for token to be active: %v", err))
360+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Waiting for token to be active: %v", err))
361361
return
362362
}
363363

364364
// Map response body to schema
365365
err = mapCreateResponse(createTokenResp, waitResp, &model, region)
366366
if err != nil {
367-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
367+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
368368
return
369369
}
370370

@@ -413,21 +413,21 @@ func (r *tokenResource) Read(ctx context.Context, req resource.ReadRequest, resp
413413
}
414414
}
415415

416-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading model serving auth token", fmt.Sprintf("Calling API: %v", err))
416+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", fmt.Sprintf("Calling API: %v", err))
417417
return
418418
}
419419

420420
if getTokenResp != nil && getTokenResp.Token.State != nil &&
421421
*getTokenResp.Token.State == inactiveState {
422422
resp.State.RemoveResource(ctx)
423-
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error reading model serving auth token", "Model serving auth token has expired")
423+
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", "AI model serving auth token has expired")
424424
return
425425
}
426426

427427
// Map response body to schema
428428
err = mapGetResponse(getTokenResp, &model)
429429
if err != nil {
430-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
430+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
431431
return
432432
}
433433

@@ -476,11 +476,11 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
476476
// Generate API request body from model
477477
payload, err := toUpdatePayload(&model)
478478
if err != nil {
479-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
479+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
480480
return
481481
}
482482

483-
// Update model serving auth token
483+
// Update AI model serving auth token
484484
updateTokenResp, err := r.client.PartialUpdateToken(ctx, region, projectId, tokenId).PartialUpdateTokenPayload(*payload).Execute()
485485
if err != nil {
486486
var oapiErr *oapierror.GenericOpenAPIError
@@ -495,7 +495,7 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
495495
core.LogAndAddError(
496496
ctx,
497497
&resp.Diagnostics,
498-
"Error updating model serving auth token",
498+
"Error updating AI model serving auth token",
499499
fmt.Sprintf(
500500
"Calling API: %v, tokenId: %s, region: %s, projectId: %s",
501501
err,
@@ -510,21 +510,21 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
510510
if updateTokenResp != nil && updateTokenResp.Token.State != nil &&
511511
*updateTokenResp.Token.State == inactiveState {
512512
resp.State.RemoveResource(ctx)
513-
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error updating model serving auth token", "Model serving auth token has expired")
513+
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", "AI model serving auth token has expired")
514514
return
515515
}
516516

517517
waitResp, err := wait.UpdateModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).WaitWithContext(ctx)
518518
if err != nil {
519-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Waiting for token to be updated: %v", err))
519+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Waiting for token to be updated: %v", err))
520520
return
521521
}
522522

523523
// Since STACKIT is not saving the content of the token. We have to use it from the state.
524524
model.Token = state.Token
525525
err = mapGetResponse(waitResp, &model)
526526
if err != nil {
527-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
527+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
528528
return
529529
}
530530

@@ -561,7 +561,7 @@ func (r *tokenResource) Delete(ctx context.Context, req resource.DeleteRequest,
561561
ctx = tflog.SetField(ctx, "token_id", tokenId)
562562
ctx = tflog.SetField(ctx, "region", region)
563563

564-
// Delete existing model serving auth token. We will ignore the state 'deleting' for now.
564+
// Delete existing AI model serving auth token. We will ignore the state 'deleting' for now.
565565
_, err := r.client.DeleteToken(ctx, region, projectId, tokenId).Execute()
566566
if err != nil {
567567
var oapiErr *oapierror.GenericOpenAPIError
@@ -572,14 +572,14 @@ func (r *tokenResource) Delete(ctx context.Context, req resource.DeleteRequest,
572572
}
573573
}
574574

575-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting model serving auth token", fmt.Sprintf("Calling API: %v", err))
575+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting AI model serving auth token", fmt.Sprintf("Calling API: %v", err))
576576
return
577577
}
578578

579579
_, err = wait.DeleteModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).
580580
WaitWithContext(ctx)
581581
if err != nil {
582-
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting model serving auth token", fmt.Sprintf("Waiting for token to be deleted: %v", err))
582+
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting AI model serving auth token", fmt.Sprintf("Waiting for token to be deleted: %v", err))
583583
return
584584
}
585585

stackit/provider.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
158158
"dns_custom_endpoint": "Custom endpoint for the DNS service",
159159
"iaas_custom_endpoint": "Custom endpoint for the IaaS service",
160160
"mongodbflex_custom_endpoint": "Custom endpoint for the MongoDB Flex service",
161-
"modelserving_custom_endpoint": "Custom endpoint for the Model Serving service",
161+
"modelserving_custom_endpoint": "Custom endpoint for the AI Model Serving service",
162162
"loadbalancer_custom_endpoint": "Custom endpoint for the Load Balancer service",
163163
"logme_custom_endpoint": "Custom endpoint for the LogMe service",
164164
"rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service",

0 commit comments

Comments (0)