Skip to content

Commit 6e495e8

Browse files
committed
Merge branch 'develop' of ssh.gitlab.aws.dev:genaiic-reusable-assets/engagement-artifacts/genaiic-idp-accelerator into develop
2 parents dc45ffe + 998b067 commit 6e495e8

6 files changed

Lines changed: 53 additions & 55 deletions

File tree

config_library/finetuning_models.yaml

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -30,19 +30,10 @@ supported_models:
3030
- id: "us.amazon.nova-2-lite-v1:0"
3131
name: "Nova 2 Lite"
3232
provider: "Amazon"
33-
33+
3434
- id: "us.amazon.nova-2-pro-v1:0"
3535
name: "Nova 2 Pro"
3636
provider: "Amazon"
37-
38-
# Nova 1.x models (legacy)
39-
- id: "us.amazon.nova-lite-v1:0"
40-
name: "Nova Lite (v1)"
41-
provider: "Amazon"
42-
43-
- id: "us.amazon.nova-pro-v1:0"
44-
name: "Nova Pro (v1)"
45-
provider: "Amazon"
4637

4738
# ---------------------------------------------------------------------------
4839
# Model ID Mappings for Fine-tuning API
@@ -55,14 +46,14 @@ supported_models:
5546
# Key: Base model ID (without cross-region prefix)
5647
# Value: Fine-tuning-capable model ID
5748
model_mappings:
58-
# Nova 1.x mappings (300k context window)
59-
"amazon.nova-lite-v1:0": "amazon.nova-lite-v1:0:300k"
60-
"amazon.nova-pro-v1:0": "amazon.nova-pro-v1:0:300k"
61-
6249
# Nova 2.x mappings (256k context window)
6350
"amazon.nova-2-lite-v1:0": "amazon.nova-2-lite-v1:0:256k"
6451
"amazon.nova-2-pro-v1:0": "amazon.nova-2-pro-v1:0:256k"
6552

53+
# Nova 1.x mappings (legacy, 300k context window)
54+
"amazon.nova-lite-v1:0": "amazon.nova-lite-v1:0:300k"
55+
"amazon.nova-pro-v1:0": "amazon.nova-pro-v1:0:300k"
56+
6657
# ---------------------------------------------------------------------------
6758
# Default Hyperparameters
6859
# ---------------------------------------------------------------------------
@@ -71,4 +62,4 @@ model_mappings:
7162
default_hyperparameters:
7263
epochCount: "2"
7364
learningRate: "0.00001"
74-
batchSize: "1"
65+
batchSize: "1"

lib/idp_common_pkg/idp_common/model_finetuning/service.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,9 +28,8 @@
2828
logger = logging.getLogger(__name__)
2929

3030
# Default supported Nova models for fine-tuning (fallback if config not loaded)
31+
# Only Nova 2.x models are included by default; legacy v1 models were removed.
3132
DEFAULT_SUPPORTED_MODELS = [
32-
{"id": "us.amazon.nova-lite-v1:0", "name": "Nova Lite", "provider": "Amazon"},
33-
{"id": "us.amazon.nova-pro-v1:0", "name": "Nova Pro", "provider": "Amazon"},
3433
{"id": "us.amazon.nova-2-lite-v1:0", "name": "Nova 2 Lite", "provider": "Amazon"},
3534
{"id": "us.amazon.nova-2-pro-v1:0", "name": "Nova 2 Pro", "provider": "Amazon"},
3635
]

lib/idp_common_pkg/tests/unit/test_model_finetuning.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -529,12 +529,10 @@ def test_list_available_models(self, service, mock_bedrock_client):
529529

530530
result = service.list_available_models()
531531

532-
# Should have 4 base models (Nova Lite, Nova Pro, Nova 2 Lite, Nova 2 Pro)
533-
assert len(result.base_models) == 4
534-
assert result.base_models[0].name == "Nova Lite"
535-
assert result.base_models[1].name == "Nova Pro"
536-
assert result.base_models[2].name == "Nova 2 Lite"
537-
assert result.base_models[3].name == "Nova 2 Pro"
532+
# Should have 2 base models (Nova 2 Lite, Nova 2 Pro)
533+
assert len(result.base_models) == 2
534+
assert result.base_models[0].name == "Nova 2 Lite"
535+
assert result.base_models[1].name == "Nova 2 Pro"
538536

539537
# Should have 1 custom model
540538
assert len(result.custom_models) == 1

nested/appsync/src/lambda/finetuning_jobs_resolver/index.py

Lines changed: 34 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
from typing import Any, Dict, List, Optional
2121

2222
import boto3
23-
from boto3.dynamodb.conditions import Key
2423

2524
logger = logging.getLogger()
2625
logger.setLevel(logging.INFO)
@@ -39,10 +38,10 @@
3938
FINETUNING_JOB_PREFIX = "finetuning#"
4039
FINETUNING_JOBS_GSI_PK = "finetuning#jobs"
4140

42-
# Supported base models for fine-tuning
41+
# Supported base models for fine-tuning (Nova 2.x recommended)
4342
SUPPORTED_BASE_MODELS = [
44-
{"id": "us.amazon.nova-lite-v1:0", "name": "Nova Lite", "provider": "Amazon"},
45-
{"id": "us.amazon.nova-pro-v1:0", "name": "Nova Pro", "provider": "Amazon"},
43+
{"id": "us.amazon.nova-2-lite-v1:0", "name": "Nova 2 Lite", "provider": "Amazon"},
44+
{"id": "us.amazon.nova-2-pro-v1:0", "name": "Nova 2 Pro", "provider": "Amazon"},
4645
]
4746

4847

@@ -95,47 +94,56 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> Any:
9594

9695
def list_finetuning_jobs(arguments: Dict[str, Any]) -> Dict[str, Any]:
9796
"""List all fine-tuning jobs with pagination.
98-
97+
9998
Uses scan with filter since fine-tuning jobs are relatively few
10099
and the GSI1 index may not exist on all deployments.
101100
Jobs are stored with GSI1PK/GSI1SK for future GSI support.
101+
102+
The scan paginates through the table until enough matching items
103+
are collected or the entire table has been scanned, because
104+
DynamoDB's ``Limit`` caps items *evaluated* (before filtering),
105+
not items *returned*.
102106
"""
103-
limit = arguments.get("limit", 20)
107+
limit = arguments.get("limit", 50)
104108
next_token = arguments.get("nextToken")
105109

106110
table = dynamodb.Table(TRACKING_TABLE_NAME)
107111

108-
# Use scan with filter - fine-tuning jobs have PK starting with 'finetuning#'
109112
from boto3.dynamodb.conditions import Attr
110-
111-
scan_params = {
112-
"FilterExpression": Attr("PK").begins_with(FINETUNING_JOB_PREFIX) & Attr("SK").eq("metadata"),
113-
"Limit": limit * 5, # Scan more items since filter reduces results
114-
}
113+
114+
filter_expr = Attr("PK").begins_with(FINETUNING_JOB_PREFIX) & Attr("SK").eq(
115+
"metadata"
116+
)
117+
118+
# Collect all matching items by paginating through the scan.
119+
# Fine-tuning jobs are few relative to the rest of the table,
120+
# so a single scan page may not contain any matches.
121+
items: List[Dict[str, Any]] = []
122+
scan_kwargs: Dict[str, Any] = {"FilterExpression": filter_expr}
115123

116124
if next_token:
117-
scan_params["ExclusiveStartKey"] = json.loads(next_token)
125+
scan_kwargs["ExclusiveStartKey"] = json.loads(next_token)
118126

119-
response = table.scan(**scan_params)
127+
while True:
128+
response = table.scan(**scan_kwargs)
120129

121-
# Filter and format items
122-
items = []
123-
for item in response.get("Items", []):
124-
if item.get("PK", "").startswith(FINETUNING_JOB_PREFIX) and item.get("SK") == "metadata":
130+
for item in response.get("Items", []):
125131
items.append(_format_job_for_graphql(item))
126-
132+
133+
# Stop if we've scanned the whole table
134+
if "LastEvaluatedKey" not in response:
135+
break
136+
137+
# Continue scanning from where we left off
138+
scan_kwargs["ExclusiveStartKey"] = response["LastEvaluatedKey"]
139+
127140
# Sort by createdAt descending (most recent first)
128141
items.sort(key=lambda x: x.get("createdAt", ""), reverse=True)
129-
142+
130143
# Apply limit after sorting
131144
items = items[:limit]
132145

133-
result = {"items": items}
134-
135-
if "LastEvaluatedKey" in response and len(items) >= limit:
136-
result["nextToken"] = json.dumps(response["LastEvaluatedKey"], cls=DecimalEncoder)
137-
138-
return result
146+
return {"items": items}
139147

140148

141149
def get_finetuning_job(job_id: str) -> Optional[Dict[str, Any]]:

src/ui/src/components/custom-models/CustomModelsLayout.tsx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,10 @@ interface SelectOption {
5959
description?: string;
6060
}
6161

62-
// Base models that support fine-tuning
62+
// Base models that support fine-tuning (Nova 2.x recommended)
6363
const SUPPORTED_BASE_MODELS: SelectOption[] = [
64-
{ label: 'Amazon Nova Pro', value: 'us.amazon.nova-pro-v1:0', description: 'High-performance model for complex tasks' },
65-
{ label: 'Amazon Nova Lite', value: 'us.amazon.nova-lite-v1:0', description: 'Balanced performance and cost' },
64+
{ label: 'Amazon Nova 2 Pro', value: 'us.amazon.nova-2-pro-v1:0', description: 'High-performance model for complex tasks' },
65+
{ label: 'Amazon Nova 2 Lite', value: 'us.amazon.nova-2-lite-v1:0', description: 'Balanced performance and cost' },
6666
];
6767

6868
// Status badge colors

src/ui/src/components/custom-models/FinetuningJobDetail.tsx

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,10 +58,12 @@ interface Notification {
5858
onDismiss: () => void;
5959
}
6060

61-
// Base models that support fine-tuning
61+
// Base models that support fine-tuning (Nova 2.x + legacy v1 for display)
6262
const SUPPORTED_BASE_MODELS: Record<string, string> = {
63-
'us.amazon.nova-pro-v1:0': 'Amazon Nova Pro',
64-
'us.amazon.nova-lite-v1:0': 'Amazon Nova Lite',
63+
'us.amazon.nova-2-pro-v1:0': 'Amazon Nova 2 Pro',
64+
'us.amazon.nova-2-lite-v1:0': 'Amazon Nova 2 Lite',
65+
'us.amazon.nova-pro-v1:0': 'Amazon Nova Pro (v1)',
66+
'us.amazon.nova-lite-v1:0': 'Amazon Nova Lite (v1)',
6567
};
6668

6769
// Status badge colors

0 commit comments

Comments
 (0)