-
Notifications
You must be signed in to change notification settings - Fork 5.8k
Expand file tree
/
Copy path converse.js
More file actions
68 lines (61 loc) · 2.5 KB
/
converse.js
File metadata and controls
68 lines (61 loc) · 2.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// snippet-start:[javascript.v3.bedrock-runtime.Converse_AmazonNovaText]
// This example demonstrates how to use the Amazon Nova foundation models to generate text.
// It shows how to:
// - Set up the Amazon Bedrock runtime client
// - Create a message
// - Configure and send a request
// - Process the response
import {
BedrockRuntimeClient,
ConversationRole,
ConverseCommand,
} from "@aws-sdk/client-bedrock-runtime";
// Initialize the Bedrock Runtime client.
// The SDK resolves credentials automatically from the environment
// (environment variables, shared config, or an attached IAM role).
const bedrockClient = new BedrockRuntimeClient({ region: "us-east-1" });

// Choose the model to invoke. Amazon Nova family options:
//   - Nova Micro: text-only, optimized for lowest latency and cost
//   - Nova Lite:  fast, low-cost multimodal (image, video, and text)
//   - Nova Pro:   advanced multimodal, balancing accuracy, speed, and cost
// For the most current model IDs, see:
// https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
const modelId = "amazon.nova-lite-v1:0";

// The user prompt sent to the model.
const prompt = "Describe the purpose of a 'hello world' program in one line.";

// Build the Converse API request. The message embeds the prompt and is
// attributed to the user role. Inference tuning parameters:
//   - maxTokens:   maximum number of tokens to generate
//   - temperature: randomness (max: 1.0, default: 0.7)
//   - topP:        diversity of word choice (max: 1.0, default: 0.9)
// Note: use either temperature OR topP, but not both.
const converseRequest = {
  modelId,
  messages: [
    {
      role: ConversationRole.USER,
      content: [{ text: prompt }],
    },
  ],
  inferenceConfig: {
    maxTokens: 500, // The maximum response length
    temperature: 0.5, // Using temperature for randomness control
    //topP: 0.9, // Alternative: use topP instead of temperature
  },
};

// Send the request and print the first text block of the generated reply.
// On failure, log a diagnostic message and rethrow so callers see the error.
try {
  const apiResponse = await bedrockClient.send(
    new ConverseCommand(converseRequest),
  );
  console.log(apiResponse.output.message.content[0].text);
} catch (error) {
  console.error(`ERROR: Can't invoke '${modelId}'. Reason: ${error.message}`);
  throw error;
}
// snippet-end:[javascript.v3.bedrock-runtime.Converse_AmazonNovaText]