From e7b51e51d59af0155043abfa5a5a7d4e64d48faa Mon Sep 17 00:00:00 2001 From: scmacdon Date: Mon, 3 Mar 2025 12:51:44 -0500 Subject: [PATCH 01/10] added S3 Express Scenario --- .../s3/express/CloudFormationHelper.java | 162 +++++ .../s3/express/CreateDirectoryBucket.java | 61 ++ .../s3/express/S3DirectoriesActions.java | 298 ++++++++ .../s3/express/S3DirectoriesScenario.java | 652 ++++++++++++++++++ .../com/example/s3/express/UserNames.java | 17 + 5 files changed, 1190 insertions(+) create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/express/CloudFormationHelper.java create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java create mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/CloudFormationHelper.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/CloudFormationHelper.java new file mode 100644 index 00000000000..77d31b9e710 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/CloudFormationHelper.java @@ -0,0 +1,162 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.express; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.cloudformation.CloudFormationAsyncClient; +import software.amazon.awssdk.services.cloudformation.model.Capability; +import software.amazon.awssdk.services.cloudformation.model.CloudFormationException; +import software.amazon.awssdk.services.cloudformation.model.DescribeStacksRequest; +import software.amazon.awssdk.services.cloudformation.model.DescribeStacksResponse; +import software.amazon.awssdk.services.cloudformation.model.Output; +import software.amazon.awssdk.services.cloudformation.model.Stack; +import software.amazon.awssdk.services.cloudformation.waiters.CloudFormationAsyncWaiter; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; + +public class CloudFormationHelper { + private static final String CFN_TEMPLATE = "s3_express_template.yaml"; + private static final Logger logger = LoggerFactory.getLogger(CloudFormationHelper.class); + + private static CloudFormationAsyncClient cloudFormationClient; + + private static CloudFormationAsyncClient getCloudFormationClient() { + if (cloudFormationClient == null) { + SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(100) + .connectionTimeout(Duration.ofSeconds(60)) + .readTimeout(Duration.ofSeconds(60)) + .writeTimeout(Duration.ofSeconds(60)) + .build(); + + ClientOverrideConfiguration overrideConfig = 
ClientOverrideConfiguration.builder() + .apiCallTimeout(Duration.ofMinutes(2)) + .apiCallAttemptTimeout(Duration.ofSeconds(90)) + .retryStrategy(RetryMode.STANDARD) + .build(); + + cloudFormationClient = CloudFormationAsyncClient.builder() + .httpClient(httpClient) + .overrideConfiguration(overrideConfig) + .build(); + } + return cloudFormationClient; + } + + public static void deployCloudFormationStack(String stackName) { + String templateBody; + boolean doesExist = describeStack(stackName); + if (!doesExist) { + try { + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + Path filePath = Paths.get(classLoader.getResource(CFN_TEMPLATE).toURI()); + templateBody = Files.readString(filePath); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); + } + + getCloudFormationClient().createStack(b -> b.stackName(stackName) + .templateBody(templateBody) + .capabilities(Capability.CAPABILITY_IAM)) + .whenComplete((csr, t) -> { + if (csr != null) { + System.out.println("Stack creation requested, ARN is " + csr.stackId()); + try (CloudFormationAsyncWaiter waiter = getCloudFormationClient().waiter()) { + waiter.waitUntilStackCreateComplete(request -> request.stackName(stackName)) + .whenComplete((dsr, th) -> { + if (th != null) { + System.out.println("Error waiting for stack creation: " + th.getMessage()); + } else { + dsr.matched().response().orElseThrow(() -> new RuntimeException("Failed to deploy")); + System.out.println("Stack created successfully"); + } + }).join(); + } + } else { + System.out.format("Error creating stack: " + t.getMessage(), t); + throw new RuntimeException(t.getCause().getMessage(), t); + } + }).join(); + } else { + logger.info("{} stack already exists", CFN_TEMPLATE); + } + } + + // Check to see if the Stack exists before deploying it + public static Boolean describeStack(String stackName) { + try { + CompletableFuture future = getCloudFormationClient().describeStacks(); + DescribeStacksResponse 
stacksResponse = (DescribeStacksResponse) future.join(); + List stacks = stacksResponse.stacks(); + for (Stack myStack : stacks) { + if (myStack.stackName().compareTo(stackName) == 0) { + return true; + } + } + } catch (CloudFormationException e) { + System.err.println(e.getMessage()); + } + return false; + } + + public static void destroyCloudFormationStack(String stackName) { + getCloudFormationClient().deleteStack(b -> b.stackName(stackName)) + .whenComplete((dsr, t) -> { + if (dsr != null) { + System.out.println("Delete stack requested ...."); + try (CloudFormationAsyncWaiter waiter = getCloudFormationClient().waiter()) { + waiter.waitUntilStackDeleteComplete(request -> request.stackName(stackName)) + .whenComplete((waiterResponse, throwable) -> + System.out.println("Stack deleted successfully.")) + .join(); + } + } else { + System.out.format("Error deleting stack: " + t.getMessage(), t); + throw new RuntimeException(t.getCause().getMessage(), t); + } + }).join(); + } + + public static CompletableFuture> getStackOutputsAsync(String stackName) { + CloudFormationAsyncClient cloudFormationAsyncClient = getCloudFormationClient(); + + DescribeStacksRequest describeStacksRequest = DescribeStacksRequest.builder() + .stackName(stackName) + .build(); + + return cloudFormationAsyncClient.describeStacks(describeStacksRequest) + .handle((describeStacksResponse, throwable) -> { + if (throwable != null) { + throw new RuntimeException("Failed to get stack outputs for: " + stackName, throwable); + } + + // Process the result + if (describeStacksResponse.stacks().isEmpty()) { + throw new RuntimeException("Stack not found: " + stackName); + } + + Stack stack = describeStacksResponse.stacks().get(0); + Map outputs = new HashMap<>(); + for (Output output : stack.outputs()) { + outputs.put(output.outputKey(), output.outputValue()); + } + + return outputs; + }); + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java 
b/javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java new file mode 100644 index 00000000000..d8307a88476 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java @@ -0,0 +1,61 @@ +package com.example.s3.express; + +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.BucketInfo; +import software.amazon.awssdk.services.s3.model.BucketType; +import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketResponse; +import software.amazon.awssdk.services.s3.model.DataRedundancy; +import software.amazon.awssdk.services.s3.model.LocationInfo; +import software.amazon.awssdk.services.s3.model.LocationType; +import software.amazon.awssdk.services.s3.model.S3Exception; + +public class CreateDirectoryBucket { + + public static void main(String[] args){ + String bucketName = "test-bucket-" + System.currentTimeMillis() + "--usw2-az1--x-s3"; + Region region = Region.US_WEST_2; + String zone = "usw2-az1"; + S3Client s3Client = S3Client.builder() + .region(region) + .build(); + + createDirectoryBucket(s3Client, bucketName, zone); + + } + + /** + * Creates a new S3 directory bucket in a specified Zone (For example, a + * specified Availability Zone in this code example). 
+ * + * @param s3Client The S3 client used to create the bucket + * @param bucketName The name of the bucket to be created + * @param zone The region where the bucket will be created + + */ + public static void createDirectoryBucket(S3Client s3Client, String bucketName, String zone) throws S3Exception { + System.out.println("Creating bucket: " +bucketName); + + CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder() + .location(LocationInfo.builder() + .type(LocationType.AVAILABILITY_ZONE) + .name(zone).build()) + .bucket(BucketInfo.builder() + .type(BucketType.DIRECTORY) + .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE) + .build()) + .build(); + try { + CreateBucketRequest bucketRequest = CreateBucketRequest.builder() + .bucket(bucketName) + .createBucketConfiguration(bucketConfiguration).build(); + CreateBucketResponse response = s3Client.createBucket(bucketRequest); + System.out.println("Bucket created successfully with location: " +response.location()); + } catch (S3Exception e) { + System.out.println("Error creating bucket: - Error code: {}" +e.awsErrorDetails().errorMessage()); + throw e; + } + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java new file mode 100644 index 00000000000..c8539b5ce09 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java @@ -0,0 +1,298 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.express; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.waiters.WaiterResponse; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.BucketInfo; +import software.amazon.awssdk.services.s3.model.BucketType; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.CreateSessionRequest; +import software.amazon.awssdk.services.s3.model.DataRedundancy; +import software.amazon.awssdk.services.s3.model.Delete; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.LocationInfo; +import software.amazon.awssdk.services.s3.model.LocationType; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Object; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.waiters.S3AsyncWaiter; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; + +import java.util.List; +import 
java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.stream.Collectors; + +public class S3DirectoriesActions { + private static final Logger logger = LoggerFactory.getLogger(S3DirectoriesActions.class); + + + /** + * Deletes the specified S3 bucket and all the objects within it in an asynchronous manner. + * + * @param s3AsyncClient the S3 asynchronous client to use for the operations + * @param bucketName the name of the S3 bucket to be deleted + * @return a {@link CompletableFuture} that completes with a {@link WaiterResponse} containing the + * {@link HeadBucketResponse} when the bucket has been successfully deleted + * @throws CompletionException if there was an error deleting the bucket or its objects + */ + public CompletableFuture> deleteBucketAndObjectsAsync(S3AsyncClient s3AsyncClient, String bucketName) { + ListObjectsV2Request listRequest = ListObjectsV2Request.builder() + .bucket(bucketName) + .build(); + + return s3AsyncClient.listObjectsV2(listRequest) + .thenCompose(listResponse -> { + if (!listResponse.contents().isEmpty()) { + List objectIdentifiers = listResponse.contents().stream() + .map(s3Object -> ObjectIdentifier.builder().key(s3Object.key()).build()) + .collect(Collectors.toList()); + + DeleteObjectsRequest deleteRequest = DeleteObjectsRequest.builder() + .bucket(bucketName) + .delete(Delete.builder().objects(objectIdentifiers).build()) + .build(); + + return s3AsyncClient.deleteObjects(deleteRequest) + .thenAccept(deleteResponse -> { + if (!deleteResponse.errors().isEmpty()) { + deleteResponse.errors().forEach(error -> + logger.error("Couldn't delete object " + error.key() + ". 
Reason: " + error.message())); + } + }); + } + return CompletableFuture.completedFuture(null); + }) + .thenCompose(ignored -> { + DeleteBucketRequest deleteBucketRequest = DeleteBucketRequest.builder() + .bucket(bucketName) + .build(); + return s3AsyncClient.deleteBucket(deleteBucketRequest); + }) + .thenCompose(ignored -> { + S3AsyncWaiter waiter = s3AsyncClient.waiter(); + HeadBucketRequest headBucketRequest = HeadBucketRequest.builder().bucket(bucketName).build(); + return waiter.waitUntilBucketNotExists(headBucketRequest); + }) + .whenComplete((ignored, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof S3Exception) { + throw new CompletionException("Error deleting bucket: " + bucketName, cause); + } + throw new CompletionException("Failed to delete bucket and objects: " + bucketName, exception); + } + logger.info("Bucket deleted successfully: " + bucketName); + }); + } + + + /** + * Lists the objects in an S3 bucket asynchronously using the AWS SDK. 
+ * + * @param s3Client the S3 async client to use for the operation + * @param bucketName the name of the S3 bucket to list objects from + * @return a {@link CompletableFuture} that contains the list of object keys in the specified bucket + */ + public CompletableFuture> listObjectsAsync(S3AsyncClient s3Client, String bucketName) { + ListObjectsV2Request request = ListObjectsV2Request.builder() + .bucket(bucketName) + .build(); + + return s3Client.listObjectsV2(request) + .thenApply(response -> response.contents().stream() + .map(S3Object::key) + .collect(Collectors.toList())) + .whenComplete((result, exception) -> { + if (exception != null) { + throw new CompletionException("Couldn't list objects in bucket: " + bucketName, exception); + } + }); + } + + public CompletableFuture> getObjectAsync(S3AsyncClient s3Client, String bucketName, String keyName) { + // Create the GetObjectRequest for the asynchronous client + GetObjectRequest objectRequest = GetObjectRequest.builder() + .key(keyName) + .bucket(bucketName) + .build(); + + // Get the object asynchronously and transform it into a byte array + return s3Client.getObject(objectRequest, AsyncResponseTransformer.toBytes()) + .exceptionally(exception -> { + // Handle the exception by checking the cause + Throwable cause = exception.getCause(); + if (cause instanceof S3Exception) { + throw new CompletionException("Failed to get the object. Reason: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); + } + throw new CompletionException("Failed to get the object", exception); + }) + .thenApply(response -> { + logger.info("Successfully obtained bytes from an S3 object"); + return response; + }); + } + /** + * Asynchronously copies an object from one S3 bucket to another. 
+ * + * @param s3Client the S3 async client to use for the copy operation + * @param sourceBucket the name of the source bucket + * @param sourceKey the key of the object to be copied in the source bucket + * @param destinationBucket the name of the destination bucket + * @param destinationKey the key of the copied object in the destination bucket + * @return a {@link CompletableFuture} that completes when the copy operation is finished + */ + public CompletableFuture copyObjectAsync(S3AsyncClient s3Client, String sourceBucket, String sourceKey, String destinationBucket, String destinationKey) { + CopyObjectRequest copyRequest = CopyObjectRequest.builder() + .sourceBucket(sourceBucket) + .sourceKey(sourceKey) + .destinationBucket(destinationBucket) + .destinationKey(destinationKey) + .build(); + + return s3Client.copyObject(copyRequest) + .thenRun(() -> logger.info("Copied object '" + sourceKey + "' from bucket '" + sourceBucket + "' to bucket '" + destinationBucket + "'")) + .whenComplete((ignored, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof S3Exception) { + throw new CompletionException("Couldn't copy object '" + sourceKey + "' from bucket '" + sourceBucket + "' to bucket '" + destinationBucket + "'. Reason: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); + } + throw new CompletionException("Failed to copy object", exception); + } + }); + } + + /** + * Creates an asynchronous session for the specified S3 bucket. 
+ * + * @param s3Client the S3 asynchronous client to use for creating the session + * @param bucketName the name of the S3 bucket for which to create the session + * @return a {@link CompletableFuture} that completes when the session is created, or throws a {@link CompletionException} if an error occurs + */ + public CompletableFuture createSessionAsync(S3AsyncClient s3Client, String bucketName) { + CreateSessionRequest request = CreateSessionRequest.builder() + .bucket(bucketName) + .build(); + + return s3Client.createSession(request) + .thenRun(() -> logger.info("Created session for bucket: " + bucketName)) + .whenComplete((ignored, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof S3Exception) { + throw new CompletionException("Couldn't create the session. Reason: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); + } + throw new CompletionException("Unexpected error occurred while creating session", exception); + } + }); + } + + /** + * Creates a new S3 directory bucket in a specified Zone (For example, a + * specified Availability Zone in this code example). 
+ * + * @param s3Client The S3 client used to create the bucket + * @param bucketName The name of the bucket to be created + * @param zone The region where the bucket will be created + * @throws S3Exception if there's an error creating the bucket + */ + public static CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client, String bucketName, String zone) { + logger.info("Creating bucket: " + bucketName); + + CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder() + .location(LocationInfo.builder() + .type(LocationType.AVAILABILITY_ZONE) + .name(zone) + .build()) + .bucket(BucketInfo.builder() + .type(BucketType.DIRECTORY) + .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE) + .build()) + .build(); + + CreateBucketRequest bucketRequest = CreateBucketRequest.builder() + .bucket(bucketName) + .createBucketConfiguration(bucketConfiguration) + .build(); + + return s3Client.createBucket(bucketRequest) + .thenAccept(response -> logger.info("Bucket created successfully with location: " + response.location())) + .whenComplete((ignored, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof S3Exception) { + throw new CompletionException("Error creating bucket: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); + } + throw new CompletionException("Unexpected error occurred while creating bucket", exception); + } + }); + } + /** + * Creates an S3 bucket asynchronously. 
+ * + * @param s3Client the S3 async client to use for the bucket creation + * @param bucketName the name of the S3 bucket to create + * @return a {@link CompletableFuture} that completes with the {@link WaiterResponse} containing the {@link HeadBucketResponse} + * when the bucket is successfully created + * @throws CompletionException if there's an error creating the bucket + */ + public CompletableFuture> createBucketAsync(S3AsyncClient s3Client, String bucketName) { + CreateBucketRequest bucketRequest = CreateBucketRequest.builder() + .bucket(bucketName) + .build(); + + return s3Client.createBucket(bucketRequest) + .thenCompose(response -> { + S3AsyncWaiter s3Waiter = s3Client.waiter(); + HeadBucketRequest bucketRequestWait = HeadBucketRequest.builder() + .bucket(bucketName) + .build(); + return s3Waiter.waitUntilBucketExists(bucketRequestWait); + }) + .whenComplete((response, exception) -> { + if (exception != null) { + throw new CompletionException("Error creating bucket: " + bucketName, exception); + } + logger.info(bucketName + " is ready"); + }); + } + + /** + * Uploads an object to an Amazon S3 bucket asynchronously. 
+ * + * @param s3Client the S3 async client to use for the upload + * @param bucketName the name of the S3 bucket to upload the object to + * @param bucketObject the name of the object to be uploaded + * @param text the content to be uploaded as the object + */ + public CompletableFuture putObjectAsync(S3AsyncClient s3Client, String bucketName, String bucketObject, String text) { + PutObjectRequest objectRequest = PutObjectRequest.builder() + .bucket(bucketName) + .key(bucketObject) + .build(); + + return s3Client.putObject(objectRequest, AsyncRequestBody.fromString(text)) + .whenComplete((response, exception) -> { + if (exception != null) { + throw new CompletionException("Failed to upload file", exception); + } + }); + } +} \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java new file mode 100644 index 00000000000..4621ed5f1a8 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -0,0 +1,652 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.s3.express; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.AvailabilityZone; +import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointRequest; +import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointResponse; +import software.amazon.awssdk.services.ec2.model.CreateVpcRequest; +import software.amazon.awssdk.services.ec2.model.CreateVpcResponse; +import software.amazon.awssdk.services.ec2.model.DescribeAvailabilityZonesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeAvailabilityZonesResponse; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesResponse; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest; +import software.amazon.awssdk.services.ec2.model.Ec2Exception; +import software.amazon.awssdk.services.ec2.model.Filter; +import software.amazon.awssdk.services.ec2.waiters.Ec2Waiter; +import software.amazon.awssdk.services.iam.IamClient; +import software.amazon.awssdk.services.iam.model.CreateAccessKeyRequest; +import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; +import software.amazon.awssdk.services.iam.model.IamException; +import software.amazon.awssdk.services.s3.S3AsyncClient; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Scanner; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.stream.IntStream; + +public class S3DirectoriesScenario { + + public static final String DASHES = new 
String(new char[80]).replace("\0", "-"); + + private static final Logger logger = LoggerFactory.getLogger(S3DirectoriesScenario.class); + static Scanner scanner = new Scanner(System.in); + + private static S3AsyncClient mS3RegularClient; + private static S3AsyncClient mS3ExpressClient; + + private IamClient iam; + + private static String mdirectoryBucketName; + private static String mregularBucketName; + + private static String stackName = "cfn-stack-s3-express-basics--" + UUID.randomUUID(); + + private static String regularUser = ""; + private static String vpcId = ""; + private static String expressUser = ""; + + private static String vpcEndpointId = ""; + + private static S3DirectoriesActions s3DirectoriesActions = new S3DirectoriesActions(); + + public static void main(String[] args) { + s3ExpressScenario(); + } + + // Runs the scenario. + private static void s3ExpressScenario() { + logger.info(DASHES); + logger.info("Welcome to the Amazon S3 Express Basics demo using AWS SDK for Java V2"); + logger.info(""" + Let's get started! First, please note that S3 Express One Zone works best when working within the AWS infrastructure, + specifically when working in the same Availability Zone. To see the best results in this example and when you implement + Directory buckets into your infrastructure, it is best to put your compute resources in the same AZ as your Directory + bucket. + """); + waitForInputToContinue(scanner); + logger.info(DASHES); + + // Create an optional VPC and create 2 IAM users. + UserNames userNames = createVpcUsers(); + String expressUserName = userNames.getExpressUserName(); + String regularUserName = userNames.getRegularUserName(); + + // Set up two S3 clients, one regular and one express, + // and two buckets, one regular and one express. + setupClientsAndBuckets(expressUserName, regularUserName); + + // Create an S3 session for the express S3 client and add objects to the buckets. 
+ logger.info("Create an S3 session for the express S3 client and add objects to the buckets"); + waitForInputToContinue(scanner); + String bucketObject = createSessionAddObjects(); + + // Demonstrate performance differences between regular and express buckets. + demonstratePerformance(bucketObject); + + // Populate the buckets to show the lexicographical difference between + // regular and express buckets. + showLexicographicalDifferences(bucketObject); + + logger.info(""); + logger.info("That's it for our tour of the basic operations for S3 Express One Zone."); + logger.info("Would you like to cleanUp the AWS resources? (y/n): "); + String response = scanner.next().trim().toLowerCase(); + if (response.equals("y")) { + cleanUp(); + } + } + + /* + Delete resources created by this scenario. + */ + private static void cleanUp() { + if (mdirectoryBucketName != null) { + s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3ExpressClient, mdirectoryBucketName).join(); + } + logger.info("Deleted directory bucket " + mdirectoryBucketName); + mdirectoryBucketName = null; + + if (mregularBucketName != null) { + s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3RegularClient, mregularBucketName).join(); + } + + logger.info("Deleted regular bucket " + mregularBucketName); + mregularBucketName = null; + CloudFormationHelper.destroyCloudFormationStack(stackName); + } + + private static void showLexicographicalDifferences(String bucketObject) { + logger.info(""" + 7. Populate the buckets to show the lexicographical difference. + Now let's explore how Directory buckets store objects in a different + manner to regular buckets. The key is in the name + "Directory". Where regular buckets store their key/value pairs in a + flat manner, Directory buckets use actual directories/folders. + This allows for more rapid indexing, traversing, and therefore + retrieval times! 
+ + The more segmented your bucket is, with lots of + directories, sub-directories, and objects, the more efficient it becomes. + This structural difference also causes ListObjects to behave differently, + which can cause unexpected results. Let's add a few more + objects with layered directories to see how the output of + ListObjects changes. + """); + + waitForInputToContinue(scanner); + + // Populate a few more files in each bucket so that we can use + // ListObjects and show the difference. + String otherObject = "other/" + bucketObject; + String altObject = "alt/" + bucketObject; + String otherAltObject = "other/alt/" + bucketObject; + + try { + s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, otherObject, "").join(); + s3DirectoriesActions.putObjectAsync(mS3ExpressClient, mdirectoryBucketName, otherObject, "").join(); + s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, altObject, "").join(); + s3DirectoriesActions.putObjectAsync(mS3ExpressClient, mdirectoryBucketName, altObject, "").join(); + s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, otherAltObject, "").join(); + s3DirectoriesActions.putObjectAsync(mS3ExpressClient, mdirectoryBucketName, otherAltObject, "").join(); + + } catch (CompletionException e) { + logger.error("Async operation failed: {} ", e.getCause().getMessage()); + return; + } + + try { + // List objects in both S3 buckets. 
+ List dirBucketObjects = s3DirectoriesActions.listObjectsAsync(mS3ExpressClient, mdirectoryBucketName).join(); + List regBucketObjects = s3DirectoriesActions.listObjectsAsync(mS3RegularClient, mregularBucketName).join(); + + logger.info("Directory bucket content"); + for (String obj : dirBucketObjects) { + logger.info(obj); + } + + logger.info("Regular bucket content"); + for (String obj : regBucketObjects) { + logger.info(obj); + } + } catch (CompletionException e) { + logger.error("Async operation failed: {} ", e.getCause().getMessage()); + return; + } + + logger.info(""" + Notice how the normal bucket lists objects in lexicographical order, while the directory bucket does not. This is + because the normal bucket considers the whole "key" to be the object identifier, while the directory bucket actually + creates directories and uses the object "key" as a path to the object. + """); + waitForInputToContinue(scanner); + } + + /** + * Demonstrates the performance difference between downloading an object from a directory bucket and a regular bucket. + * + *

This method:
+     * <ul>
+     *   <li>Prompts the user to choose the number of downloads (default is 1,000).</li>
+     *   <li>Downloads the specified object from the directory bucket and measures the total time.</li>
+     *   <li>Downloads the same object from the regular bucket and measures the total time.</li>
+     *   <li>Compares the time differences and prints the results.</li>
+     * </ul>

Note: The performance difference will be more pronounced if this example is run on an EC2 instance + * in the same Availability Zone as the buckets. + * + * @param bucketObject the name of the object to download + */ + private static void demonstratePerformance(String bucketObject) { + logger.info("6. Demonstrate performance difference."); + logger.info(""" + Now, let's do a performance test. We'll download the same object from each + bucket 'downloads' times and compare the total time needed. Note: + the performance difference will be much more pronounced if this + example is run in an EC2 instance in the same Availability Zone as + the bucket. + """); + waitForInputToContinue(scanner); + + int downloads = 1000; // Default value + logger.info("The number of downloads of the same object for this example is set at " + downloads + "."); + + // Ask if the user wants to download a different number. + logger.info("Would you like to download a different number? (y/n): "); + String response = scanner.next().trim().toLowerCase(); + if (response.equals("y")) { + int maxDownloads = 1_000_000; + + // Ask for a valid number of downloads. + while (true) { + logger.info("Enter a number between 1 and " + maxDownloads + " for the number of downloads: "); + if (scanner.hasNextInt()) { + downloads = scanner.nextInt(); + if (downloads >= 1 && downloads <= maxDownloads) { + break; + } else { + logger.info("Please enter a number between 1 and " + maxDownloads + "."); + } + } else { + logger.info("Invalid input. Please enter a valid integer."); + scanner.next(); + } + } + + logger.info("You have chosen to download {} items.", downloads); + } else { + logger.info("No changes made. Using default downloads: {}", downloads); + } + // Simulating the download process for the Directory bucket. 
+ logger.info("Downloading from the Directory bucket."); + long directoryTimeStart = System.nanoTime(); + for (int index = 0; index < downloads; index++) { + if (index % 10 == 0) { + logger.info("Download " + index + " of " + downloads); + } + + try { + // Get the object from the Directory bucket. + s3DirectoriesActions.getObjectAsync(mS3ExpressClient, mdirectoryBucketName, bucketObject).join(); + } catch (CompletionException e) { + logger.error("Async operation failed: {} ", e.getCause().getMessage()); + return; + } + } + + long directoryTimeDifference = System.nanoTime() - directoryTimeStart; + + // Simulating the download process for the normal bucket. + logger.info("Downloading from the regular bucket."); + long normalTimeStart = System.nanoTime(); + for (int index = 0; index < downloads; index++) { + if (index % 10 == 0) { + logger.info("Download " + index + " of " + downloads); + } + + // Get the object from the normal bucket. + s3DirectoriesActions.getObjectAsync(mS3RegularClient, mregularBucketName, bucketObject).join(); + } + + long normalTimeDifference = System.nanoTime() - normalTimeStart; + logger.info("The directory bucket took " + directoryTimeDifference + " nanoseconds, while the normal bucket took " + normalTimeDifference + " nanoseconds."); + long difference = normalTimeDifference - directoryTimeDifference; + logger.info("That's a difference of " + difference + " nanoseconds, or"); + logger.info(difference / 1_000_000_000.0 + " seconds."); + + if (difference < 0) { + logger.info("The directory buckets were slower. This can happen if you are not running on the cloud within a VPC."); + } + waitForInputToContinue(scanner); + } + + private static String createSessionAddObjects() { + logger.info(""" + 5. Create an object and copy it over. + We'll create a basic object consisting of some text and upload it to the + normal bucket. + Next we'll copy the object into the Directory bucket using the regular client. 
+ This works fine because copy operations are not restricted for Directory buckets. + """); + waitForInputToContinue(scanner); + + String bucketObject = "basic-text-object.txt"; + s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, bucketObject, "Look Ma, I'm a bucket!").join(); + s3DirectoriesActions.createSessionAsync(mS3ExpressClient, mdirectoryBucketName).join(); + + // Copy the object to the destination S3 bucket. + s3DirectoriesActions.copyObjectAsync(mS3ExpressClient, mregularBucketName, bucketObject, mdirectoryBucketName, bucketObject).join(); + + logger.info(""" + It worked! It's important to remember the user permissions when interacting with + Directory buckets. Instead of validating permissions on every call as + normal buckets do, Directory buckets utilize the user credentials and session + token to validate. This allows for much faster connection speeds on every call. + For single calls, this is low, but for many concurrent calls + this adds up to a lot of time saved. + """); + waitForInputToContinue(scanner); + return bucketObject; + } + + /** + * Creates VPC users for the S3 Express One Zone scenario. + *

+     * This method performs the following steps:
+     * <ol>
+     *     <li>Optionally creates a new VPC and VPC Endpoint if the application is running in an EC2 instance in the same Availability Zone as the Directory buckets.</li>
+     *     <li>Creates two IAM users: one with S3 Express One Zone permissions and one without.</li>
+     * </ol>
+     *
+ * + * @return a {@link UserNames} object containing the names of the created IAM users + */ + public static UserNames createVpcUsers() { + /* + Optionally create a VPC. + Create two IAM users, one with S3 Express One Zone permissions and one without. + */ + logger.info(""" + 1. First, we'll set up a new VPC and VPC Endpoint if this program is running in an EC2 instance in the same AZ as your\s + Directory buckets will be. Are you running this in an EC2 instance located in the same AZ as your intended Directory buckets? + """); + + logger.info("Do you want to setup a VPC Endpoint? (y/n)"); + String endpointAns = scanner.nextLine().trim(); + if (endpointAns.equalsIgnoreCase("y")) { + logger.info(""" + "Great! Let's set up a VPC, retrieve the Route Table from it, and create a VPC Endpoint to connect the S3 Client to." + """); + + setupVPC(); + waitForInputToContinue(scanner); + } else { + logger.info("Skipping the VPC setup. Don't forget to use this in production!"); + } + logger.info(""" + 2. Policies, users, and roles with CDK. + Now, we'll set up some policies, roles, and a user. This user will + only have permissions to do S3 Express One Zone actions. + """); + + waitForInputToContinue(scanner); + logger.info("Use AWS CloudFormation to create IAM roles that is required for this scenario."); + CloudFormationHelper.deployCloudFormationStack(stackName); + Map stackOutputs = CloudFormationHelper.getStackOutputsAsync(stackName).join(); + + // Create two users required for this scenario. + regularUser = stackOutputs.get("RegularUser"); + expressUser = stackOutputs.get("ExpressUser"); + + UserNames names = new UserNames(); + names.setRegularUserName(regularUser); + names.setExpressUserName(expressUser); + return names; + } + + /** + * Sets up a Virtual Private Cloud (VPC) in AWS. + *

+     * This method performs the following steps:
+     * <ol>
+     *     <li>Creates a VPC with a CIDR block of "10.0.0.0/16".</li>
+     *     <li>Waits for the VPC to become available.</li>
+     *     <li>Retrieves the route table associated with the VPC.</li>
+     *     <li>Creates a VPC endpoint for the S3 service in the VPC.</li>
+     * </ol>
+     *

+ * The method uses the AWS SDK for Java to interact with the EC2 service. + * + * @throws RuntimeException if the VPC wait fails + * @throws Ec2Exception if there is an error creating the VPC endpoint + */ + private static void setupVPC() { + /* + CIDR (Classless Inter-Domain Routing) is a notation used to + define IP address ranges in AWS VPC and EC2 networking. + It determines the network size and available IP addresses in a + given range. + */ + String cidr = "10.0.0.0/16"; + CreateVpcRequest vpcRequest = CreateVpcRequest.builder() + .cidrBlock(cidr) + .build(); + + CreateVpcResponse vpcResponse = getEC2Client().createVpc(vpcRequest); + vpcId = vpcResponse.vpc().vpcId(); + try (Ec2Waiter waiter = getEC2Client().waiter()) { + DescribeVpcsRequest request = DescribeVpcsRequest.builder() + .vpcIds(vpcId) + .build(); + + waiter.waitUntilVpcAvailable(request); + logger.info("Created VPC {}",vpcId); + } catch (Ec2Exception ex) { + throw new RuntimeException("VPC wait failed: " + ex.getMessage(), ex); + } + + try { + Filter filter = Filter.builder() + .name("vpc-id") + .values(vpcId) + .build(); + + DescribeRouteTablesRequest describeRouteTablesRequest = DescribeRouteTablesRequest.builder() + .filters(filter) + .build(); + + DescribeRouteTablesResponse routeTablesResponse = getEC2Client().describeRouteTables(describeRouteTablesRequest); + String routeTableId = routeTablesResponse.routeTables().get(0).routeTableId(); + Region region = getEC2Client().serviceClientConfiguration().region(); + String serviceName = String.format("com.amazonaws.%s.s3express", region.id()); + + CreateVpcEndpointRequest endpointRequest = CreateVpcEndpointRequest.builder() + .vpcId(vpcId) + .routeTableIds(routeTableId) + .serviceName(serviceName) + .build(); + + CreateVpcEndpointResponse vpcEndpointResponse = getEC2Client().createVpcEndpoint(endpointRequest); + vpcEndpointId = vpcEndpointResponse.vpcEndpoint().vpcEndpointId(); + + } catch (Ec2Exception ex) { + logger.error( + "Couldn't create 
the vpc endpoint. Here's why: %s", + ex.getCause() + ); + } + } + + /** + * Sets up the necessary clients and buckets for the S3 Express service. + * + * @param expressUserName the username for the user with S3 Express permissions + * @param regularUserName the username for the user with regular S3 permissions + */ + public static void setupClientsAndBuckets(String expressUserName, String regularUserName) { + Scanner locscanner = new Scanner(System.in); // Open the scanner here + CreateAccessKeyResponse keyResponse = createAccessKey(regularUserName); + String accessKeyIdforRegUser = keyResponse.accessKey().accessKeyId(); + String secretAccessforRegUser = keyResponse.accessKey().secretAccessKey(); + + CreateAccessKeyResponse keyResponseExpress = createAccessKey(expressUserName); + String accessKeyIdforExpressUser = keyResponseExpress.accessKey().accessKeyId(); + String secretAccessforExpressUser = keyResponseExpress.accessKey().secretAccessKey(); + + // Create an additional client using the credentials + // with S3 Express permissions. + logger.info(""" + 3. Create an additional client using the credentials with S3 Express permissions. This client is created with the + credentials associated with the user account with the S3 Express policy attached, so it can perform S3 Express operations. + """); + waitForInputToContinue(locscanner); + + // Populate the two S3 data member clients. + mS3RegularClient = createS3ClientWithAccessKeyAsync(accessKeyIdforRegUser, secretAccessforRegUser).join(); + mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdforExpressUser, secretAccessforExpressUser).join(); + logger.info(""" + All the roles and policies were created and attached to the user. Then a new S3 Client were created using + that user's credentials. We can now use this client to make calls to S3 Express operations. Keeping permissions in mind + (and adhering to least-privilege) is crucial to S3 Express. 
+ """); + waitForInputToContinue(locscanner); + + logger.info(""" + 4. Create two buckets. + Now we will create a Directory bucket which is the linchpin of the S3 Express One Zone service. Directory buckets + behave in different ways from regular S3 buckets which we will explore here. We'll also create a normal bucket, put + an object into the normal bucket, and copy it over to the Directory bucket. + """); + + logger.info(""" + Now, let's choose an availability zone for the Directory bucket. We'll choose one + that is supported. + """); + selectAvailabilityZoneId(String.valueOf(Region.US_EAST_1)); + String regularBucketName = "reg-bucket-" + System.currentTimeMillis(); + + logger.info(""" + Now, let's create the actual Directory bucket, as well as a regular + bucket." + """); + String directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--usw2-az1--x-s3"; + String zone = "usw2-az1"; + s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zone).join(); + logger.info("Created directory bucket, " + directoryBucketName); + + // Assign to the data member. + mdirectoryBucketName = directoryBucketName; + + s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName).join(); + logger.info("Created regular bucket, " + regularBucketName); + mregularBucketName = regularBucketName; + logger.info("Great! Both buckets were created."); + waitForInputToContinue(locscanner); + } + + /** + * Selects an availability zone ID based on the specified AWS region. + * + * @param region The AWS region to retrieve the availability zones from. + * @return A map containing the selected availability zone details, including the zone name, zone ID, region name, and state. 
+ */ + public static Map selectAvailabilityZoneId(String region) { + Ec2Client ec2Client = Ec2Client.create(); + + // Define filter for region + Filter myFilter = Filter.builder() + .name("region-name") + .values(region) + .build(); + + // Request available zones + DescribeAvailabilityZonesRequest zonesRequest = DescribeAvailabilityZonesRequest.builder() + .filters(myFilter) + .build(); + DescribeAvailabilityZonesResponse response = ec2Client.describeAvailabilityZones(zonesRequest); + List zonesList = response.availabilityZones(); + + if (zonesList.isEmpty()) { + logger.info("No availability zones found."); + return null; + } + + // Extract zone names + List zoneNames = zonesList.stream() + .map(AvailabilityZone::zoneName) + .toList(); + + // Prompt user to select an availability zone + Scanner scanner = new Scanner(System.in); + int index = -1; + + while (index < 0 || index >= zoneNames.size()) { + logger.info("Select an availability zone:"); + IntStream.range(0, zoneNames.size()).forEach(i -> + System.out.println(i + ": " + zoneNames.get(i)) + ); + + logger.info("Enter the number corresponding to your choice: "); + if (scanner.hasNextInt()) { + index = scanner.nextInt(); + } else { + scanner.next(); // Consume invalid input + } + } + + AvailabilityZone selectedZone = zonesList.get(index); + logger.info("You selected: " + selectedZone.zoneName()); + + // Convert selected AvailabilityZone to a Map + Map selectedZoneMap = new HashMap<>(); + selectedZoneMap.put("ZoneName", selectedZone.zoneName()); + selectedZoneMap.put("ZoneId", selectedZone.zoneId()); + selectedZoneMap.put("RegionName", selectedZone.regionName()); + selectedZoneMap.put("State", selectedZone.stateAsString()); + return selectedZoneMap; + } + + /* + Creates an S3 client with access key credentials. + :param access_key: The access key for the user. + :return: The S3 Express One Zone client. 
+ + */ + private static CompletableFuture createS3ClientWithAccessKeyAsync(String accessKeyId, String secretAccessKey) { + return CompletableFuture.supplyAsync(() -> { + AwsBasicCredentials awsCredentials = AwsBasicCredentials.create(accessKeyId, secretAccessKey); + return S3AsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(awsCredentials)) + .region(Region.US_WEST_2) + .build(); + }); + } + + + private static CreateAccessKeyResponse createAccessKey(String userName) { + CreateAccessKeyRequest request = CreateAccessKeyRequest.builder() + .userName(userName) + .build(); + + try { + return getIAMClient().createAccessKey(request); + + } catch (IamException e) { + System.err.println(e.awsErrorDetails().errorMessage()); + System.exit(1); + } + return null; + } + + + private static void waitForInputToContinue(Scanner scanner) { + while (true) { + logger.info(""); + logger.info("Enter 'c' followed by to continue:"); + String input = scanner.nextLine(); + + if (input.trim().equalsIgnoreCase("c")) { + logger.info("Continuing with the program..."); + logger.info(""); + break; + } else { + logger.info("Invalid input. 
Please try again."); + } + } + } + + private static IamClient getIAMClient() { + return IamClient.builder() + .region(Region.US_EAST_1) + .build(); + } + + private static Ec2Client getEC2Client() { + return Ec2Client.builder() + .region(Region.US_EAST_1) + .build(); + } +} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java new file mode 100644 index 00000000000..dfe5c70af74 --- /dev/null +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java @@ -0,0 +1,17 @@ +package com.example.s3.express; + +public class UserNames { + private String expressUserName; + private String regularUserName; + + public String getExpressUserName() { return expressUserName; } + + public void setExpressUserName(String expressUserName) { + this.expressUserName = expressUserName; + } + + public void setRegularUserName(String regularUserName) { + this.regularUserName = regularUserName; + } + public String getRegularUserName() { return regularUserName; } +} From 56c3215470066975a33f686a3f5311907d01dcc1 Mon Sep 17 00:00:00 2001 From: scmacdon Date: Tue, 4 Mar 2025 12:52:01 -0500 Subject: [PATCH 02/10] added S3 Express Scenario --- javav2/example_code/s3/pom.xml | 4 + .../DeleteDirectoryBucketObjects.java | 5 +- .../s3/express/S3DirectoriesActions.java | 261 +++++++++++++- .../s3/express/S3DirectoriesScenario.java | 334 ++++++++---------- .../s3/src/main/resources/log4j2.xml | 2 +- .../main/resources/s3_express_template.yaml | 51 +++ 6 files changed, 460 insertions(+), 197 deletions(-) create mode 100644 javav2/example_code/s3/src/main/resources/s3_express_template.yaml diff --git a/javav2/example_code/s3/pom.xml b/javav2/example_code/s3/pom.xml index 957e73c9091..7c41c1230bb 100644 --- a/javav2/example_code/s3/pom.xml +++ b/javav2/example_code/s3/pom.xml @@ -157,6 +157,10 @@ software.amazon.awssdk iam + + software.amazon.awssdk + ec2 + 
org.apache.logging.log4j log4j-core diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java index ab5bf43b54b..917c6ea264f 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/directorybucket/DeleteDirectoryBucketObjects.java @@ -69,23 +69,20 @@ public static void deleteDirectoryBucketObjects(S3Client s3Client, String bucket logger.info("Deleting objects from bucket: {}", bucketName); try { - // Create a list of ObjectIdentifier + // Create a list of ObjectIdentifier. List identifiers = objectKeys.stream() .map(key -> ObjectIdentifier.builder().key(key).build()) .toList(); - // Create a Delete object Delete delete = Delete.builder() .objects(identifiers) .build(); - // Create a DeleteObjectsRequest DeleteObjectsRequest deleteObjectsRequest = DeleteObjectsRequest.builder() .bucket(bucketName) .delete(delete) .build(); - // Delete the objects DeleteObjectsResponse deleteObjectsResponse = s3Client.deleteObjects(deleteObjectsRequest); deleteObjectsResponse.deleted().forEach(deleted -> logger.info("Deleted object: {}", deleted.key())); diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java index c8539b5ce09..333f642151b 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java @@ -7,7 +7,26 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import 
software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.waiters.WaiterResponse; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.ec2.Ec2AsyncClient; +import software.amazon.awssdk.services.ec2.model.AvailabilityZone; +import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointRequest; +import software.amazon.awssdk.services.ec2.model.CreateVpcRequest; +import software.amazon.awssdk.services.ec2.model.DescribeAvailabilityZonesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest; +import software.amazon.awssdk.services.ec2.model.Ec2Exception; +import software.amazon.awssdk.services.ec2.model.Filter; +import software.amazon.awssdk.services.ec2.waiters.Ec2AsyncWaiter; +import software.amazon.awssdk.services.iam.IamAsyncClient; +import software.amazon.awssdk.services.iam.model.CreateAccessKeyRequest; +import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; +import software.amazon.awssdk.services.iam.model.IamException; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.BucketInfo; import software.amazon.awssdk.services.s3.model.BucketType; @@ -33,15 +52,68 @@ import software.amazon.awssdk.services.s3.model.S3Exception; import software.amazon.awssdk.services.s3.waiters.S3AsyncWaiter; import software.amazon.awssdk.core.async.AsyncResponseTransformer; - +import java.time.Duration; +import java.util.AbstractMap; import java.util.List; +import java.util.Scanner; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.stream.Collectors; +import java.util.stream.IntStream; public class S3DirectoriesActions { + + private static IamAsyncClient 
iamAsyncClient; + + private static Ec2AsyncClient ec2AsyncClient; private static final Logger logger = LoggerFactory.getLogger(S3DirectoriesActions.class); + private static IamAsyncClient getIAMAsyncClient() { + if (iamAsyncClient == null) { + SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(100) + .connectionTimeout(Duration.ofSeconds(60)) + .readTimeout(Duration.ofSeconds(60)) + .writeTimeout(Duration.ofSeconds(60)) + .build(); + + ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder() + .apiCallTimeout(Duration.ofMinutes(2)) + .apiCallAttemptTimeout(Duration.ofSeconds(90)) + .retryStrategy(RetryMode.STANDARD) + .build(); + + iamAsyncClient = IamAsyncClient.builder() + .httpClient(httpClient) + .overrideConfiguration(overrideConfig) + .build(); + } + return iamAsyncClient; + } + + private static Ec2AsyncClient getEc2AsyncClient() { + if (ec2AsyncClient == null) { + SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(100) + .connectionTimeout(Duration.ofSeconds(60)) + .readTimeout(Duration.ofSeconds(60)) + .writeTimeout(Duration.ofSeconds(60)) + .build(); + + ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder() + .apiCallTimeout(Duration.ofMinutes(2)) + .apiCallAttemptTimeout(Duration.ofSeconds(90)) + .retryStrategy(RetryMode.STANDARD) + .build(); + + ec2AsyncClient = Ec2AsyncClient.builder() + .httpClient(httpClient) + .region(Region.US_WEST_2) + .overrideConfiguration(overrideConfig) + .build(); + } + return ec2AsyncClient; + } /** * Deletes the specified S3 bucket and all the objects within it in an asynchronous manner. @@ -102,7 +174,6 @@ public CompletableFuture> deleteBucketAndObje }); } - /** * Lists the objects in an S3 bucket asynchronously using the AWS SDK. 
* @@ -127,7 +198,6 @@ public CompletableFuture> listObjectsAsync(S3AsyncClient s3Client, } public CompletableFuture> getObjectAsync(S3AsyncClient s3Client, String bucketName, String keyName) { - // Create the GetObjectRequest for the asynchronous client GetObjectRequest objectRequest = GetObjectRequest.builder() .key(keyName) .bucket(bucketName) @@ -213,7 +283,7 @@ public CompletableFuture createSessionAsync(S3AsyncClient s3Client, String * @param zone The region where the bucket will be created * @throws S3Exception if there's an error creating the bucket */ - public static CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client, String bucketName, String zone) { + public CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client, String bucketName, String zone) { logger.info("Creating bucket: " + bucketName); CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder() @@ -295,4 +365,187 @@ public CompletableFuture putObjectAsync(S3AsyncClient s3Clien } }); } + + /** + * Creates an AWS IAM access key asynchronously for the specified user name. 
+ * + * @param userName the name of the IAM user for whom to create the access key + * @return a {@link CompletableFuture} that completes with the {@link CreateAccessKeyResponse} containing the created access key + */ + public CompletableFuture createAccessKeyAsync(String userName) { + CreateAccessKeyRequest request = CreateAccessKeyRequest.builder() + .userName(userName) + .build(); + + return getIAMAsyncClient().createAccessKey(request) + .whenComplete((response, exception) -> { + if (response != null) { + logger.info("Access Key Created."); + } else { + if (exception == null) { + throw new CompletionException("An unknown error occurred while creating access key.", null); + } + + Throwable cause = exception.getCause(); + if (cause instanceof IamException) { + throw new CompletionException("IAM error while creating access key: " + cause.getMessage(), cause); + } + + throw new CompletionException("Failed to create access key: " + exception.getMessage(), exception); + } + }); + } + + /** + * Selects an availability zone ID based on the specified AWS region. + * + * @return A map containing the selected availability zone details, including the zone name, zone ID, region name, and state. 
+ */ + public CompletableFuture selectAvailabilityZoneIdAsync() { + // Request available zones + DescribeAvailabilityZonesRequest zonesRequest = DescribeAvailabilityZonesRequest.builder() + .build(); + + return getEc2AsyncClient().describeAvailabilityZones(zonesRequest) + .thenCompose(response -> { + List zonesList = response.availabilityZones(); + + if (zonesList.isEmpty()) { + logger.info("No availability zones found."); + return CompletableFuture.completedFuture(null); // Return null if no zones are found + } + + // Extract zone IDs + List zoneIds = zonesList.stream() + .map(AvailabilityZone::zoneId) // Get the zoneId (e.g., "usw2-az1") + .toList(); + + // **Prompt user synchronously** and return CompletableFuture + return CompletableFuture.supplyAsync(() -> promptUserForZoneSelection(zonesList, zoneIds)) + .thenApply(selectedZone -> { + // Return only the selected Zone ID (e.g., "usw2-az1") + return selectedZone.zoneId(); + }); + }) + .whenComplete((result, exception) -> { + if (exception == null) { + if (result != null) { + logger.info("Selected Availability Zone ID: " + result); + } else { + logger.info("No availability zone selected."); + } + } else { + Throwable cause = exception.getCause(); + if (cause instanceof Ec2Exception) { + throw new CompletionException("EC2 error while selecting availability zone: " + cause.getMessage(), cause); + } + throw new CompletionException("Failed to select availability zone: " + exception.getMessage(), exception); + } + }); + } + + /** + * Prompts the user to select an availability zone from the given list. 
+ * + * @param zonesList the list of availability zones + * @param zoneIds the list of zone IDs + * @return the selected AvailabilityZone + */ + private static AvailabilityZone promptUserForZoneSelection(List zonesList, List zoneIds) { + Scanner scanner = new Scanner(System.in); + int index = -1; + + while (index < 0 || index >= zoneIds.size()) { + logger.info("Select an availability zone:"); + IntStream.range(0, zoneIds.size()).forEach(i -> + System.out.println(i + ": " + zoneIds.get(i)) // Display Zone IDs + ); + + logger.info("Enter the number corresponding to your choice: "); + if (scanner.hasNextInt()) { + index = scanner.nextInt(); + } else { + scanner.next(); // Consume invalid input + } + } + + AvailabilityZone selectedZone = zonesList.get(index); + logger.info("You selected: " + selectedZone.zoneId()); // Log Zone ID + return selectedZone; + } + public CompletableFuture setupVPCAsync() { + String cidr = "10.0.0.0/16"; + CreateVpcRequest vpcRequest = CreateVpcRequest.builder() + .cidrBlock(cidr) + .build(); + + return getEc2AsyncClient().createVpc(vpcRequest) + .thenCompose(vpcResponse -> { + String vpcId = vpcResponse.vpc().vpcId(); + + // Wait for VPC to be available + Ec2AsyncWaiter waiter = ec2AsyncClient.waiter(); + DescribeVpcsRequest request = DescribeVpcsRequest.builder() + .vpcIds(vpcId) + .build(); + + return waiter.waitUntilVpcAvailable(request) + .thenApply(waiterResponse -> vpcId); + }) + .thenCompose(vpcId -> { + // Fetch route table for VPC + Filter filter = Filter.builder() + .name("vpc-id") + .values(vpcId) + .build(); + + DescribeRouteTablesRequest describeRouteTablesRequest = DescribeRouteTablesRequest.builder() + .filters(filter) + .build(); + + return ec2AsyncClient.describeRouteTables(describeRouteTablesRequest) + .thenApply(routeTablesResponse -> { + if (routeTablesResponse.routeTables().isEmpty()) { + throw new CompletionException("No route tables found for VPC.", null); + } + return new AbstractMap.SimpleEntry<>(vpcId, 
routeTablesResponse.routeTables().get(0).routeTableId()); + }); + }) + .thenCompose(vpcAndRouteTable -> { + String vpcId = vpcAndRouteTable.getKey(); + String routeTableId = vpcAndRouteTable.getValue(); + Region region = ec2AsyncClient.serviceClientConfiguration().region(); + String serviceName = String.format("com.amazonaws.%s.s3express", region.id()); + + CreateVpcEndpointRequest endpointRequest = CreateVpcEndpointRequest.builder() + .vpcId(vpcId) + .routeTableIds(routeTableId) + .serviceName(serviceName) + .build(); + + return ec2AsyncClient.createVpcEndpoint(endpointRequest) + .thenApply(vpcEndpointResponse -> { + String vpcEndpointId = vpcEndpointResponse.vpcEndpoint().vpcEndpointId(); + return new AbstractMap.SimpleEntry<>(vpcId, vpcEndpointId); + }); + }) + .whenComplete((result, exception) -> { + if (result != null) { + logger.info("Created VPC: {}", result.getKey()); + logger.info("Created VPC Endpoint: {}", result.getValue()); + } else { + if (exception == null) { + throw new CompletionException("An unknown error occurred during VPC setup.", null); + } + + Throwable cause = exception.getCause(); + if (cause instanceof Ec2Exception) { + throw new CompletionException("EC2 error during VPC setup: " + cause.getMessage(), cause); + } + + throw new CompletionException("VPC setup failed: " + exception.getMessage(), exception); + } + }) + .thenAccept(v -> {}); // Ensure CompletableFuture return type + } } \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index 4621ed5f1a8..727adfa6048 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -22,11 +22,13 @@ import software.amazon.awssdk.services.ec2.model.Ec2Exception; import 
software.amazon.awssdk.services.ec2.model.Filter; import software.amazon.awssdk.services.ec2.waiters.Ec2Waiter; +import software.amazon.awssdk.services.iam.IamAsyncClient; import software.amazon.awssdk.services.iam.IamClient; import software.amazon.awssdk.services.iam.model.CreateAccessKeyRequest; import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; import software.amazon.awssdk.services.iam.model.IamException; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.util.HashMap; import java.util.List; @@ -47,8 +49,6 @@ public class S3DirectoriesScenario { private static S3AsyncClient mS3RegularClient; private static S3AsyncClient mS3ExpressClient; - private IamClient iam; - private static String mdirectoryBucketName; private static String mregularBucketName; @@ -60,10 +60,14 @@ public class S3DirectoriesScenario { private static String vpcEndpointId = ""; - private static S3DirectoriesActions s3DirectoriesActions = new S3DirectoriesActions(); + private static final S3DirectoriesActions s3DirectoriesActions = new S3DirectoriesActions(); public static void main(String[] args) { - s3ExpressScenario(); + try { + s3ExpressScenario(); + } catch (RuntimeException e) { + logger.info(e.getMessage()); + } } // Runs the scenario. @@ -113,14 +117,23 @@ private static void s3ExpressScenario() { Delete resources created by this scenario. 
*/ private static void cleanUp() { - if (mdirectoryBucketName != null) { - s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3ExpressClient, mdirectoryBucketName).join(); - } - logger.info("Deleted directory bucket " + mdirectoryBucketName); - mdirectoryBucketName = null; + try { + if (mdirectoryBucketName != null) { + s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3ExpressClient, mdirectoryBucketName).join(); + } + logger.info("Deleted directory bucket " + mdirectoryBucketName); + mdirectoryBucketName = null; - if (mregularBucketName != null) { - s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3RegularClient, mregularBucketName).join(); + if (mregularBucketName != null) { + s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3RegularClient, mregularBucketName).join(); + } + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof S3Exception) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } } logger.info("Deleted regular bucket " + mregularBucketName); @@ -277,8 +290,16 @@ private static void demonstratePerformance(String bucketObject) { logger.info("Download " + index + " of " + downloads); } - // Get the object from the normal bucket. 
- s3DirectoriesActions.getObjectAsync(mS3RegularClient, mregularBucketName, bucketObject).join(); + try { + s3DirectoriesActions.getObjectAsync(mS3RegularClient, mregularBucketName, bucketObject).join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof S3Exception) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + } } long normalTimeDifference = System.nanoTime() - normalTimeStart; @@ -304,12 +325,20 @@ private static String createSessionAddObjects() { waitForInputToContinue(scanner); String bucketObject = "basic-text-object.txt"; - s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, bucketObject, "Look Ma, I'm a bucket!").join(); - s3DirectoriesActions.createSessionAsync(mS3ExpressClient, mdirectoryBucketName).join(); - - // Copy the object to the destination S3 bucket. - s3DirectoriesActions.copyObjectAsync(mS3ExpressClient, mregularBucketName, bucketObject, mdirectoryBucketName, bucketObject).join(); - + try { + s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, bucketObject, "Look Ma, I'm a bucket!").join(); + s3DirectoriesActions.createSessionAsync(mS3ExpressClient, mdirectoryBucketName).join(); + + // Copy the object to the destination S3 bucket. + s3DirectoriesActions.copyObjectAsync(mS3ExpressClient, mregularBucketName, bucketObject, mdirectoryBucketName, bucketObject).join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof S3Exception) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + } logger.info(""" It worked! It's important to remember the user permissions when interacting with Directory buckets. 
Instead of validating permissions on every call as @@ -358,10 +387,18 @@ public static UserNames createVpcUsers() { String endpointAns = scanner.nextLine().trim(); if (endpointAns.equalsIgnoreCase("y")) { logger.info(""" - "Great! Let's set up a VPC, retrieve the Route Table from it, and create a VPC Endpoint to connect the S3 Client to." + Great! Let's set up a VPC, retrieve the Route Table from it, and create a VPC Endpoint to connect the S3 Client to. """); - - setupVPC(); + try { + s3DirectoriesActions.setupVPCAsync().join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof Ec2Exception) { + logger.error("Ec2Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + } waitForInputToContinue(scanner); } else { logger.info("Skipping the VPC setup. Don't forget to use this in production!"); @@ -403,62 +440,7 @@ public static UserNames createVpcUsers() { * @throws RuntimeException if the VPC wait fails * @throws Ec2Exception if there is an error creating the VPC endpoint */ - private static void setupVPC() { - /* - CIDR (Classless Inter-Domain Routing) is a notation used to - define IP address ranges in AWS VPC and EC2 networking. - It determines the network size and available IP addresses in a - given range. 
- */ - String cidr = "10.0.0.0/16"; - CreateVpcRequest vpcRequest = CreateVpcRequest.builder() - .cidrBlock(cidr) - .build(); - - CreateVpcResponse vpcResponse = getEC2Client().createVpc(vpcRequest); - vpcId = vpcResponse.vpc().vpcId(); - try (Ec2Waiter waiter = getEC2Client().waiter()) { - DescribeVpcsRequest request = DescribeVpcsRequest.builder() - .vpcIds(vpcId) - .build(); - - waiter.waitUntilVpcAvailable(request); - logger.info("Created VPC {}",vpcId); - } catch (Ec2Exception ex) { - throw new RuntimeException("VPC wait failed: " + ex.getMessage(), ex); - } - - try { - Filter filter = Filter.builder() - .name("vpc-id") - .values(vpcId) - .build(); - - DescribeRouteTablesRequest describeRouteTablesRequest = DescribeRouteTablesRequest.builder() - .filters(filter) - .build(); - - DescribeRouteTablesResponse routeTablesResponse = getEC2Client().describeRouteTables(describeRouteTablesRequest); - String routeTableId = routeTablesResponse.routeTables().get(0).routeTableId(); - Region region = getEC2Client().serviceClientConfiguration().region(); - String serviceName = String.format("com.amazonaws.%s.s3express", region.id()); - - CreateVpcEndpointRequest endpointRequest = CreateVpcEndpointRequest.builder() - .vpcId(vpcId) - .routeTableIds(routeTableId) - .serviceName(serviceName) - .build(); - CreateVpcEndpointResponse vpcEndpointResponse = getEC2Client().createVpcEndpoint(endpointRequest); - vpcEndpointId = vpcEndpointResponse.vpcEndpoint().vpcEndpointId(); - - } catch (Ec2Exception ex) { - logger.error( - "Couldn't create the vpc endpoint. Here's why: %s", - ex.getCause() - ); - } - } /** * Sets up the necessary clients and buckets for the S3 Express service. 
@@ -467,14 +449,39 @@ private static void setupVPC() { * @param regularUserName the username for the user with regular S3 permissions */ public static void setupClientsAndBuckets(String expressUserName, String regularUserName) { - Scanner locscanner = new Scanner(System.in); // Open the scanner here - CreateAccessKeyResponse keyResponse = createAccessKey(regularUserName); - String accessKeyIdforRegUser = keyResponse.accessKey().accessKeyId(); - String secretAccessforRegUser = keyResponse.accessKey().secretAccessKey(); + Scanner locscanner = new Scanner(System.in); + String accessKeyIdforRegUser; + String secretAccessforRegUser; + try { + CreateAccessKeyResponse keyResponse = s3DirectoriesActions.createAccessKeyAsync(regularUserName).join(); + accessKeyIdforRegUser = keyResponse.accessKey().accessKeyId(); + secretAccessforRegUser = keyResponse.accessKey().secretAccessKey(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof IamException) { + logger.error("IamException occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + return; + } + + String accessKeyIdforExpressUser; + String secretAccessforExpressUser; + try { + CreateAccessKeyResponse keyResponseExpress = s3DirectoriesActions.createAccessKeyAsync(expressUserName).join(); + accessKeyIdforExpressUser = keyResponseExpress.accessKey().accessKeyId(); + secretAccessforExpressUser = keyResponseExpress.accessKey().secretAccessKey(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof IamException) { + logger.error("IamException occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + return; + } - CreateAccessKeyResponse keyResponseExpress = createAccessKey(expressUserName); - String accessKeyIdforExpressUser = keyResponseExpress.accessKey().accessKeyId(); - String 
secretAccessforExpressUser = keyResponseExpress.accessKey().secretAccessKey(); // Create an additional client using the credentials // with S3 Express permissions. @@ -485,13 +492,24 @@ public static void setupClientsAndBuckets(String expressUserName, String regular waitForInputToContinue(locscanner); // Populate the two S3 data member clients. - mS3RegularClient = createS3ClientWithAccessKeyAsync(accessKeyIdforRegUser, secretAccessforRegUser).join(); - mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdforExpressUser, secretAccessforExpressUser).join(); + try { + mS3RegularClient = createS3ClientWithAccessKeyAsync(accessKeyIdforRegUser, secretAccessforRegUser).join(); + mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdforExpressUser, secretAccessforExpressUser).join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof S3Exception) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + return; + } + logger.info(""" All the roles and policies were created and attached to the user. Then a new S3 Client were created using that user's credentials. We can now use this client to make calls to S3 Express operations. Keeping permissions in mind (and adhering to least-privilege) is crucial to S3 Express. - """); + """); waitForInputToContinue(locscanner); logger.info(""" @@ -505,88 +523,57 @@ public static void setupClientsAndBuckets(String expressUserName, String regular Now, let's choose an availability zone for the Directory bucket. We'll choose one that is supported. 
"""); - selectAvailabilityZoneId(String.valueOf(Region.US_EAST_1)); - String regularBucketName = "reg-bucket-" + System.currentTimeMillis(); - + String zoneId; + String regularBucketName; + try { + zoneId = s3DirectoriesActions.selectAvailabilityZoneIdAsync().join(); + regularBucketName = "reg-bucket-" + System.currentTimeMillis(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof Ec2Exception) { + logger.error("Ec2Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + return; + } logger.info(""" Now, let's create the actual Directory bucket, as well as a regular bucket. """); - String directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--usw2-az1--x-s3"; - String zone = "usw2-az1"; - s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zone).join(); - logger.info("Created directory bucket, " + directoryBucketName); - - // Assign to the data member. - mdirectoryBucketName = directoryBucketName; - - s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName).join(); - logger.info("Created regular bucket, " + regularBucketName); - mregularBucketName = regularBucketName; - logger.info("Great! Both buckets were created."); - waitForInputToContinue(locscanner); - } - /** - * Selects an availability zone ID based on the specified AWS region. - * - * @param region The AWS region to retrieve the availability zones from. - * @return A map containing the selected availability zone details, including the zone name, zone ID, region name, and state. 
- */ - public static Map selectAvailabilityZoneId(String region) { - Ec2Client ec2Client = Ec2Client.create(); - - // Define filter for region - Filter myFilter = Filter.builder() - .name("region-name") - .values(region) - .build(); - - // Request available zones - DescribeAvailabilityZonesRequest zonesRequest = DescribeAvailabilityZonesRequest.builder() - .filters(myFilter) - .build(); - DescribeAvailabilityZonesResponse response = ec2Client.describeAvailabilityZones(zonesRequest); - List zonesList = response.availabilityZones(); - - if (zonesList.isEmpty()) { - logger.info("No availability zones found."); - return null; + try { + String directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zoneId + "--x-s3"; + s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zoneId).join(); + logger.info("Created directory bucket {}", directoryBucketName); + + // Assign to the data member. + mdirectoryBucketName = directoryBucketName; + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof S3Exception) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } + return; } - // Extract zone names - List zoneNames = zonesList.stream() - .map(AvailabilityZone::zoneName) - .toList(); - - // Prompt user to select an availability zone - Scanner scanner = new Scanner(System.in); - int index = -1; - - while (index < 0 || index >= zoneNames.size()) { - logger.info("Select an availability zone:"); - IntStream.range(0, zoneNames.size()).forEach(i -> - System.out.println(i + ": " + zoneNames.get(i)) - ); - - logger.info("Enter the number corresponding to your choice: "); - if (scanner.hasNextInt()) { - index = scanner.nextInt(); + try { + s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName).join(); + logger.info("Created regular bucket {} ", regularBucketName); + 
mregularBucketName = regularBucketName; + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof S3Exception) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); } else { - scanner.next(); // Consume invalid input + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); } + return; } - - AvailabilityZone selectedZone = zonesList.get(index); - logger.info("You selected: " + selectedZone.zoneName()); - - // Convert selected AvailabilityZone to a Map - Map selectedZoneMap = new HashMap<>(); - selectedZoneMap.put("ZoneName", selectedZone.zoneName()); - selectedZoneMap.put("ZoneId", selectedZone.zoneId()); - selectedZoneMap.put("RegionName", selectedZone.regionName()); - selectedZoneMap.put("State", selectedZone.stateAsString()); - return selectedZoneMap; + logger.info("Great! Both buckets were created."); + waitForInputToContinue(locscanner); } /* @@ -605,23 +592,6 @@ private static CompletableFuture createS3ClientWithAccessKeyAsync }); } - - private static CreateAccessKeyResponse createAccessKey(String userName) { - CreateAccessKeyRequest request = CreateAccessKeyRequest.builder() - .userName(userName) - .build(); - - try { - return getIAMClient().createAccessKey(request); - - } catch (IamException e) { - System.err.println(e.awsErrorDetails().errorMessage()); - System.exit(1); - } - return null; - } - - private static void waitForInputToContinue(Scanner scanner) { while (true) { logger.info(""); @@ -637,16 +607,4 @@ private static void waitForInputToContinue(Scanner scanner) { } } } - - private static IamClient getIAMClient() { - return IamClient.builder() - .region(Region.US_EAST_1) - .build(); - } - - private static Ec2Client getEC2Client() { - return Ec2Client.builder() - .region(Region.US_EAST_1) - .build(); - } } diff --git a/javav2/example_code/s3/src/main/resources/log4j2.xml b/javav2/example_code/s3/src/main/resources/log4j2.xml index 2329c9d3615..32a31484ec9 100644 --- 
a/javav2/example_code/s3/src/main/resources/log4j2.xml +++ b/javav2/example_code/s3/src/main/resources/log4j2.xml @@ -1,7 +1,7 @@ - + diff --git a/javav2/example_code/s3/src/main/resources/s3_express_template.yaml b/javav2/example_code/s3/src/main/resources/s3_express_template.yaml new file mode 100644 index 00000000000..be510ae8212 --- /dev/null +++ b/javav2/example_code/s3/src/main/resources/s3_express_template.yaml @@ -0,0 +1,51 @@ +Resources: + RegularUser: + Type: AWS::IAM::User + ExpressUser: + Type: AWS::IAM::User + ExpressPolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: AllowExpressOperations + PolicyDocument: + Statement: + - Effect: Allow + Action: + - "s3express:CreateBucket" + - "s3express:CreateSession" + - "s3express:CopyObject" + - "s3express:GetObject" + - "s3express:PutObject" + - "s3express:ListObjects" + - "s3express:DeleteObjects" + - "s3express:DeleteObject" + - "s3express:DeleteBucket" + - "s3:GetObject" + - "s3:CopyObject" + Resource: "*" + Users: + - !Ref ExpressUser + RegularPolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: AllowRegularOperations + PolicyDocument: + Statement: + - Effect: Allow + Action: + - "s3:CreateBucket" + - "s3:PutObject" + - "s3:GetObject" + - "S3:ListObjects" + - "S3:DeleteObjects" + - "S3:DeleteObject" + - "s3:ListBucket" + - "s3:DeleteBucket" + Resource: "*" + Users: + - !Ref RegularUser +Outputs: + RegularUser: + Value: !Ref RegularUser + ExpressUser: + Value: !Ref ExpressUser From 2adfc16a9602a65898efdee6543350803e009c28 Mon Sep 17 00:00:00 2001 From: scmacdon Date: Tue, 4 Mar 2025 13:14:50 -0500 Subject: [PATCH 03/10] added S3 Express Scenario --- .../s3/express/S3DirectoriesScenario.java | 32 ------------------- 1 file changed, 32 deletions(-) diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index 727adfa6048..a86057a409f 100644 --- 
a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -123,7 +123,6 @@ private static void cleanUp() { } logger.info("Deleted directory bucket " + mdirectoryBucketName); mdirectoryBucketName = null; - if (mregularBucketName != null) { s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3RegularClient, mregularBucketName).join(); } @@ -351,17 +350,6 @@ private static String createSessionAddObjects() { return bucketObject; } - /** - * Creates VPC users for the S3 Express One Zone scenario. - *

- * This method performs the following steps: - *

    - *
  1. Optionally creates a new VPC and VPC Endpoint if the application is running in an EC2 instance in the same Availability Zone as the Directory buckets.
  2. - *
  3. Creates two IAM users: one with S3 Express One Zone permissions and one without.
  4. - *
- * - * @return a {@link UserNames} object containing the names of the created IAM users - */ /** * Creates VPC users for the S3 Express One Zone scenario. *

@@ -424,24 +412,6 @@ public static UserNames createVpcUsers() { return names; } - /** - * Sets up a Virtual Private Cloud (VPC) in AWS. - *

- * This method performs the following steps: - *

    - *
  1. Creates a VPC with a CIDR block of "10.0.0.0/16".
  2. - *
  3. Waits for the VPC to become available.
  4. - *
  5. Retrieves the route table associated with the VPC.
  6. - *
  7. Creates a VPC endpoint for the S3 service in the VPC.
  8. - *
- *

- * The method uses the AWS SDK for Java to interact with the EC2 service. - * - * @throws RuntimeException if the VPC wait fails - * @throws Ec2Exception if there is an error creating the VPC endpoint - */ - - /** * Sets up the necessary clients and buckets for the S3 Express service. * @@ -482,7 +452,6 @@ public static void setupClientsAndBuckets(String expressUserName, String regular return; } - // Create an additional client using the credentials // with S3 Express permissions. logger.info(""" @@ -511,7 +480,6 @@ public static void setupClientsAndBuckets(String expressUserName, String regular (and adhering to least-privilege) is crucial to S3 Express. """); waitForInputToContinue(locscanner); - logger.info(""" 4. Create two buckets. Now we will create a Directory bucket which is the linchpin of the S3 Express One Zone service. Directory buckets From 195a864d8baae47bc19d1f08823f6ae025f9fea3 Mon Sep 17 00:00:00 2001 From: scmacdon Date: Tue, 18 Mar 2025 19:47:23 -0400 Subject: [PATCH 04/10] rolled in review comments --- .../s3/express/CreateDirectoryBucket.java | 61 --------------- .../s3/express/S3DirectoriesActions.java | 77 +++++++++++-------- .../s3/express/S3DirectoriesScenario.java | 76 +++++++++--------- 3 files changed, 87 insertions(+), 127 deletions(-) delete mode 100644 javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java deleted file mode 100644 index d8307a88476..00000000000 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/CreateDirectoryBucket.java +++ /dev/null @@ -1,61 +0,0 @@ -package com.example.s3.express; - -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.model.BucketInfo; -import 
software.amazon.awssdk.services.s3.model.BucketType; -import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.CreateBucketResponse; -import software.amazon.awssdk.services.s3.model.DataRedundancy; -import software.amazon.awssdk.services.s3.model.LocationInfo; -import software.amazon.awssdk.services.s3.model.LocationType; -import software.amazon.awssdk.services.s3.model.S3Exception; - -public class CreateDirectoryBucket { - - public static void main(String[] args){ - String bucketName = "test-bucket-" + System.currentTimeMillis() + "--usw2-az1--x-s3"; - Region region = Region.US_WEST_2; - String zone = "usw2-az1"; - S3Client s3Client = S3Client.builder() - .region(region) - .build(); - - createDirectoryBucket(s3Client, bucketName, zone); - - } - - /** - * Creates a new S3 directory bucket in a specified Zone (For example, a - * specified Availability Zone in this code example). 
- * - * @param s3Client The S3 client used to create the bucket - * @param bucketName The name of the bucket to be created - * @param zone The region where the bucket will be created - - */ - public static void createDirectoryBucket(S3Client s3Client, String bucketName, String zone) throws S3Exception { - System.out.println("Creating bucket: " +bucketName); - - CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder() - .location(LocationInfo.builder() - .type(LocationType.AVAILABILITY_ZONE) - .name(zone).build()) - .bucket(BucketInfo.builder() - .type(BucketType.DIRECTORY) - .dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE) - .build()) - .build(); - try { - CreateBucketRequest bucketRequest = CreateBucketRequest.builder() - .bucket(bucketName) - .createBucketConfiguration(bucketConfiguration).build(); - CreateBucketResponse response = s3Client.createBucket(bucketRequest); - System.out.println("Bucket created successfully with location: " +response.location()); - } catch (S3Exception e) { - System.out.println("Error creating bucket: - Error code: {}" +e.awsErrorDetails().errorMessage()); - throw e; - } - } -} diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java index 333f642151b..053d5e17f8d 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java @@ -33,7 +33,9 @@ import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketResponse; import software.amazon.awssdk.services.s3.model.CreateSessionRequest; +import 
software.amazon.awssdk.services.s3.model.CreateSessionResponse; import software.amazon.awssdk.services.s3.model.DataRedundancy; import software.amazon.awssdk.services.s3.model.Delete; import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; @@ -116,7 +118,7 @@ private static Ec2AsyncClient getEc2AsyncClient() { } /** - * Deletes the specified S3 bucket and all the objects within it in an asynchronous manner. + * Deletes the specified S3 bucket and all the objects within it asynchronously. * * @param s3AsyncClient the S3 asynchronous client to use for the operations * @param bucketName the name of the S3 bucket to be deleted @@ -175,10 +177,10 @@ public CompletableFuture> deleteBucketAndObje } /** - * Lists the objects in an S3 bucket asynchronously using the AWS SDK. + * Lists the objects in an S3 bucket asynchronously. * - * @param s3Client the S3 async client to use for the operation - * @param bucketName the name of the S3 bucket to list objects from + * @param s3Client the S3 async client to use for the operation + * @param bucketName the name of the S3 bucket containing the objects to list * @return a {@link CompletableFuture} that contains the list of object keys in the specified bucket */ public CompletableFuture> listObjectsAsync(S3AsyncClient s3Client, String bucketName) { @@ -197,6 +199,14 @@ public CompletableFuture> listObjectsAsync(S3AsyncClient s3Client, }); } + /** + * Retrieves an object from an Amazon S3 bucket asynchronously. 
+ * + * @param s3Client the S3 async client to use for the operation + * @param bucketName the name of the S3 bucket containing the object + * @param keyName the unique identifier (key) of the object to retrieve + * @return a {@link CompletableFuture} that, when completed, contains the object's content as a {@link ResponseBytes} of {@link GetObjectResponse} + */ public CompletableFuture> getObjectAsync(S3AsyncClient s3Client, String bucketName, String keyName) { GetObjectRequest objectRequest = GetObjectRequest.builder() .key(keyName) @@ -206,7 +216,6 @@ public CompletableFuture> getObjectAsync(S3Asyn // Get the object asynchronously and transform it into a byte array return s3Client.getObject(objectRequest, AsyncResponseTransformer.toBytes()) .exceptionally(exception -> { - // Handle the exception by checking the cause Throwable cause = exception.getCause(); if (cause instanceof S3Exception) { throw new CompletionException("Failed to get the object. Reason: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); @@ -218,6 +227,7 @@ public CompletableFuture> getObjectAsync(S3Asyn return response; }); } + /** * Asynchronously copies an object from one S3 bucket to another. * @@ -250,20 +260,19 @@ public CompletableFuture copyObjectAsync(S3AsyncClient s3Client, String so } /** - * Creates an asynchronous session for the specified S3 bucket. + * Asynchronously creates a session for the specified S3 bucket. 
* - * @param s3Client the S3 asynchronous client to use for creating the session + * @param s3Client the S3 asynchronous client to use for creating the session * @param bucketName the name of the S3 bucket for which to create the session * @return a {@link CompletableFuture} that completes when the session is created, or throws a {@link CompletionException} if an error occurs */ - public CompletableFuture createSessionAsync(S3AsyncClient s3Client, String bucketName) { + public CompletableFuture createSessionAsync(S3AsyncClient s3Client, String bucketName) { CreateSessionRequest request = CreateSessionRequest.builder() .bucket(bucketName) .build(); return s3Client.createSession(request) - .thenRun(() -> logger.info("Created session for bucket: " + bucketName)) - .whenComplete((ignored, exception) -> { + .whenComplete((response, exception) -> { if (exception != null) { Throwable cause = exception.getCause(); if (cause instanceof S3Exception) { @@ -271,19 +280,21 @@ public CompletableFuture createSessionAsync(S3AsyncClient s3Client, String } throw new CompletionException("Unexpected error occurred while creating session", exception); } + logger.info("Created session for bucket: " + bucketName); }); + } /** * Creates a new S3 directory bucket in a specified Zone (For example, a * specified Availability Zone in this code example). 
* - * @param s3Client The S3 client used to create the bucket + * @param s3Client The asynchronous S3 client used to create the bucket * @param bucketName The name of the bucket to be created - * @param zone The region where the bucket will be created - * @throws S3Exception if there's an error creating the bucket + * @param zone The Availability Zone where the bucket will be created + * @throws CompletionException if there's an error creating the bucket */ - public CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client, String bucketName, String zone) { + public CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client, String bucketName, String zone) { logger.info("Creating bucket: " + bucketName); CreateBucketConfiguration bucketConfiguration = CreateBucketConfiguration.builder() @@ -303,8 +314,7 @@ public CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client .build(); return s3Client.createBucket(bucketRequest) - .thenAccept(response -> logger.info("Bucket created successfully with location: " + response.location())) - .whenComplete((ignored, exception) -> { + .whenComplete((response, exception) -> { if (exception != null) { Throwable cause = exception.getCause(); if (cause instanceof S3Exception) { @@ -312,8 +322,10 @@ public CompletableFuture createDirectoryBucketAsync(S3AsyncClient s3Client } throw new CompletionException("Unexpected error occurred while creating bucket", exception); } + logger.info("Bucket created successfully with location: " + response.location()); }); } + /** * Creates an S3 bucket asynchronously. * @@ -348,7 +360,7 @@ public CompletableFuture> createBucketAsync(S * Uploads an object to an Amazon S3 bucket asynchronously. 
* * @param s3Client the S3 async client to use for the upload - * @param bucketName the name of the S3 bucket to upload the object to + * @param bucketName the destination S3 bucket name * @param bucketObject the name of the object to be uploaded * @param text the content to be uploaded as the object */ @@ -397,12 +409,12 @@ public CompletableFuture createAccessKeyAsync(String us } /** - * Selects an availability zone ID based on the specified AWS region. + * Asynchronously selects an Availability Zone ID from the available EC2 zones. * - * @return A map containing the selected availability zone details, including the zone name, zone ID, region name, and state. + * @return A {@link CompletableFuture} that resolves to the selected Availability Zone ID. + * @throws CompletionException if an error occurs during the request or processing. */ public CompletableFuture selectAvailabilityZoneIdAsync() { - // Request available zones DescribeAvailabilityZonesRequest zonesRequest = DescribeAvailabilityZonesRequest.builder() .build(); @@ -415,15 +427,13 @@ public CompletableFuture selectAvailabilityZoneIdAsync() { return CompletableFuture.completedFuture(null); // Return null if no zones are found } - // Extract zone IDs List zoneIds = zonesList.stream() .map(AvailabilityZone::zoneId) // Get the zoneId (e.g., "usw2-az1") .toList(); - // **Prompt user synchronously** and return CompletableFuture return CompletableFuture.supplyAsync(() -> promptUserForZoneSelection(zonesList, zoneIds)) .thenApply(selectedZone -> { - // Return only the selected Zone ID (e.g., "usw2-az1") + // Return only the selected Zone ID (e.g., "usw2-az1"). return selectedZone.zoneId(); }); }) @@ -445,7 +455,7 @@ public CompletableFuture selectAvailabilityZoneIdAsync() { } /** - * Prompts the user to select an availability zone from the given list. + * Prompts the user to select an Availability Zone from the given list. 
* * @param zonesList the list of availability zones * @param zoneIds the list of zone IDs @@ -458,21 +468,30 @@ private static AvailabilityZone promptUserForZoneSelection(List= zoneIds.size()) { logger.info("Select an availability zone:"); IntStream.range(0, zoneIds.size()).forEach(i -> - System.out.println(i + ": " + zoneIds.get(i)) // Display Zone IDs + logger.info(i + ": " + zoneIds.get(i)) ); logger.info("Enter the number corresponding to your choice: "); if (scanner.hasNextInt()) { index = scanner.nextInt(); } else { - scanner.next(); // Consume invalid input + scanner.next(); } } AvailabilityZone selectedZone = zonesList.get(index); - logger.info("You selected: " + selectedZone.zoneId()); // Log Zone ID + logger.info("You selected: " + selectedZone.zoneId()); return selectedZone; } + + /** + * Asynchronously sets up an AWS VPC, including creating a VPC, waiting for it to be available, + * retrieving its associated route table, and creating a VPC endpoint for S3 Express. + * + * @return A {@link CompletableFuture} that completes when the VPC setup is finished. + * If an error occurs, a {@link CompletionException} is thrown. + * @throws CompletionException if an EC2-related error occurs or if required resources are missing. 
+ */ public CompletableFuture setupVPCAsync() { String cidr = "10.0.0.0/16"; CreateVpcRequest vpcRequest = CreateVpcRequest.builder() @@ -483,7 +502,6 @@ public CompletableFuture setupVPCAsync() { .thenCompose(vpcResponse -> { String vpcId = vpcResponse.vpc().vpcId(); - // Wait for VPC to be available Ec2AsyncWaiter waiter = ec2AsyncClient.waiter(); DescribeVpcsRequest request = DescribeVpcsRequest.builder() .vpcIds(vpcId) @@ -493,7 +511,6 @@ public CompletableFuture setupVPCAsync() { .thenApply(waiterResponse -> vpcId); }) .thenCompose(vpcId -> { - // Fetch route table for VPC Filter filter = Filter.builder() .name("vpc-id") .values(vpcId) @@ -546,6 +563,6 @@ public CompletableFuture setupVPCAsync() { throw new CompletionException("VPC setup failed: " + exception.getMessage(), exception); } }) - .thenAccept(v -> {}); // Ensure CompletableFuture return type + .thenAccept(v -> {}); } } \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index a86057a409f..3c7ecdd398e 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -76,8 +76,8 @@ private static void s3ExpressScenario() { logger.info("Welcome to the Amazon S3 Express Basics demo using AWS SDK for Java V2"); logger.info(""" Let's get started! First, please note that S3 Express One Zone works best when working within the AWS infrastructure, - specifically when working in the same Availability Zone. To see the best results in this example and when you implement - Directory buckets into your infrastructure, it is best to put your compute resources in the same AZ as your Directory + specifically when working in the same Availability Zone (AZ). 
To see the best results in this example and when you implement + directory buckets into your infrastructure, it is best to put your compute resources in the same AZ as your directory bucket. """); waitForInputToContinue(scanner); @@ -143,10 +143,10 @@ private static void cleanUp() { private static void showLexicographicalDifferences(String bucketObject) { logger.info(""" 7. Populate the buckets to show the lexicographical difference. - Now let's explore how Directory buckets store objects in a different + Now let's explore how directory buckets store objects in a different manner to regular buckets. The key is in the name "Directory". Where regular buckets store their key/value pairs in a - flat manner, Directory buckets use actual directories/folders. + flat manner, directory buckets use actual directories/folders. This allows for more rapid indexing, traversing, and therefore retrieval times! @@ -223,10 +223,10 @@ private static void showLexicographicalDifferences(String bucketObject) { * @param bucketObject the name of the object to download */ private static void demonstratePerformance(String bucketObject) { - logger.info("6. Demonstrate performance difference."); + logger.info("6. Demonstrate the performance difference."); logger.info(""" Now, let's do a performance test. We'll download the same object from each - bucket 'downloads' times and compare the total time needed. Note: + bucket repeatedly and compare the total time needed. Note: the performance difference will be much more pronounced if this example is run in an EC2 instance in the same Availability Zone as the bucket. @@ -262,8 +262,8 @@ private static void demonstratePerformance(String bucketObject) { } else { logger.info("No changes made. Using default downloads: {}", downloads); } - // Simulating the download process for the Directory bucket. - logger.info("Downloading from the Directory bucket."); + // Simulating the download process for the directory bucket. 
+ logger.info("Downloading from the directory bucket."); long directoryTimeStart = System.nanoTime(); for (int index = 0; index < downloads; index++) { if (index % 10 == 0) { @@ -271,7 +271,7 @@ private static void demonstratePerformance(String bucketObject) { } try { - // Get the object from the Directory bucket. + // Get the object from the directory bucket. s3DirectoriesActions.getObjectAsync(mS3ExpressClient, mdirectoryBucketName, bucketObject).join(); } catch (CompletionException e) { logger.error("Async operation failed: {} ", e.getCause().getMessage()); @@ -315,11 +315,11 @@ private static void demonstratePerformance(String bucketObject) { private static String createSessionAddObjects() { logger.info(""" - 5. Create an object and copy it over. + 5. Create an object and copy it. We'll create a basic object consisting of some text and upload it to the normal bucket. - Next we'll copy the object into the Directory bucket using the regular client. - This works fine because copy operations are not restricted for Directory buckets. + Next we'll copy the object into the directory bucket using the regular client. + This works fine because copy operations are not restricted for directory buckets. """); waitForInputToContinue(scanner); @@ -340,8 +340,8 @@ private static String createSessionAddObjects() { } logger.info(""" It worked! It's important to remember the user permissions when interacting with - Directory buckets. Instead of validating permissions on every call as - normal buckets do, Directory buckets utilize the user credentials and session + directory buckets. Instead of validating permissions on every call as + normal buckets do, directory buckets utilize the user credentials and session token to validate. This allows for much faster connection speeds on every call. For single calls, this is low, but for many concurrent calls this adds up to a lot of time saved. @@ -355,7 +355,7 @@ private static String createSessionAddObjects() { *

* This method performs the following steps: *

    - *
  1. Optionally creates a new VPC and VPC Endpoint if the application is running in an EC2 instance in the same Availability Zone as the Directory buckets.
  2. + *
  3. Optionally creates a new VPC and VPC Endpoint if the application is running in an EC2 instance in the same Availability Zone as the directory buckets.
  4. *
  5. Creates two IAM users: one with S3 Express One Zone permissions and one without.
  6. *
* @@ -368,7 +368,7 @@ public static UserNames createVpcUsers() { */ logger.info(""" 1. First, we'll set up a new VPC and VPC Endpoint if this program is running in an EC2 instance in the same AZ as your\s - Directory buckets will be. Are you running this in an EC2 instance located in the same AZ as your intended Directory buckets? + directory buckets will be. Are you running this in an EC2 instance located in the same AZ as your intended directory buckets? """); logger.info("Do you want to setup a VPC Endpoint? (y/n)"); @@ -392,17 +392,15 @@ public static UserNames createVpcUsers() { logger.info("Skipping the VPC setup. Don't forget to use this in production!"); } logger.info(""" - 2. Policies, users, and roles with CDK. - Now, we'll set up some policies, roles, and a user. This user will - only have permissions to do S3 Express One Zone actions. + 2. Create a RegularUser and ExpressUser by using the AWS CDK. + One IAM User, named RegularUser, will have permissions to work only + with regular buckets and one IAM user, named ExpressUser, will have + permissions to work only with directory buckets. """); - waitForInputToContinue(scanner); - logger.info("Use AWS CloudFormation to create IAM roles that is required for this scenario."); - CloudFormationHelper.deployCloudFormationStack(stackName); - Map stackOutputs = CloudFormationHelper.getStackOutputsAsync(stackName).join(); // Create two users required for this scenario. + Map stackOutputs = createUsersUsingCDK(); regularUser = stackOutputs.get("RegularUser"); expressUser = stackOutputs.get("ExpressUser"); @@ -412,6 +410,18 @@ public static UserNames createVpcUsers() { return names; } + /** + * Creates users using AWS CloudFormation. + * + * @return a {@link Map} of String keys and String values representing the stack outputs, + * which may include user-related information such as user names and IDs. 
+ */ + public static Map createUsersUsingCDK() { + logger.info("We'll use an AWS CloudFormation template to create the IAM users and policies."); + CloudFormationHelper.deployCloudFormationStack(stackName); + return CloudFormationHelper.getStackOutputsAsync(stackName).join(); + } + /** * Sets up the necessary clients and buckets for the S3 Express service. * @@ -452,15 +462,11 @@ public static void setupClientsAndBuckets(String expressUserName, String regular return; } - // Create an additional client using the credentials - // with S3 Express permissions. logger.info(""" - 3. Create an additional client using the credentials with S3 Express permissions. This client is created with the - credentials associated with the user account with the S3 Express policy attached, so it can perform S3 Express operations. + 3. Create 2 S3Clients; one uses the ExpressUser's credentials and one uses the RegularUser's credentials. + The 2 S3Clients will use different credentials. """); waitForInputToContinue(locscanner); - - // Populate the two S3 data member clients. try { mS3RegularClient = createS3ClientWithAccessKeyAsync(accessKeyIdforRegUser, secretAccessforRegUser).join(); mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdforExpressUser, secretAccessforExpressUser).join(); @@ -475,20 +481,18 @@ public static void setupClientsAndBuckets(String expressUserName, String regular } logger.info(""" - All the roles and policies were created and attached to the user. Then a new S3 Client were created using - that user's credentials. We can now use this client to make calls to S3 Express operations. Keeping permissions in mind - (and adhering to least-privilege) is crucial to S3 Express. + We can now use the ExpressUser client to make calls to S3 Express operations. """); waitForInputToContinue(locscanner); logger.info(""" 4. Create two buckets. - Now we will create a Directory bucket which is the linchpin of the S3 Express One Zone service. 
Directory buckets + Now we will create a directory bucket which is the linchpin of the S3 Express One Zone service. Directory buckets behave in different ways from regular S3 buckets which we will explore here. We'll also create a normal bucket, put - an object into the normal bucket, and copy it over to the Directory bucket. + an object into the normal bucket, and copy it over to the directory bucket. """); logger.info(""" - Now, let's choose an availability zone for the Directory bucket. We'll choose one + Now, let's choose an availability zone for the directory bucket. We'll choose one that is supported. """); String zoneId; @@ -506,7 +510,7 @@ public static void setupClientsAndBuckets(String expressUserName, String regular return; } logger.info(""" - Now, let's create the actual Directory bucket, as well as a regular + Now, let's create the actual directory bucket, as well as a regular bucket." """); From 1f8cc939b20ceb7008935792849aebc7e4a3327e Mon Sep 17 00:00:00 2001 From: scmacdon Date: Wed, 19 Mar 2025 11:24:01 -0400 Subject: [PATCH 05/10] rolled in review comments --- .../s3/express/S3DirectoriesActions.java | 2 +- .../s3/express/S3DirectoriesScenario.java | 57 +++++++++++-------- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java index 053d5e17f8d..8d26dfec46c 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java @@ -191,7 +191,7 @@ public CompletableFuture> listObjectsAsync(S3AsyncClient s3Client, return s3Client.listObjectsV2(request) .thenApply(response -> response.contents().stream() .map(S3Object::key) - .collect(Collectors.toList())) + .toList()) .whenComplete((result, exception) -> { if (exception != null) { throw new 
CompletionException("Couldn't list objects in bucket: " + bucketName, exception); diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index 3c7ecdd398e..5277cb9876b 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -73,7 +73,7 @@ public static void main(String[] args) { // Runs the scenario. private static void s3ExpressScenario() { logger.info(DASHES); - logger.info("Welcome to the Amazon S3 Express Basics demo using AWS SDK for Java V2"); + logger.info("Welcome to the Amazon S3 Express Basics demo using AWS SDK for Java V2."); logger.info(""" Let's get started! First, please note that S3 Express One Zone works best when working within the AWS infrastructure, specifically when working in the same Availability Zone (AZ). To see the best results in this example and when you implement @@ -93,7 +93,7 @@ specifically when working in the same Availability Zone (AZ). To see the best re setupClientsAndBuckets(expressUserName, regularUserName); // Create an S3 session for the express S3 client and add objects to the buckets. - logger.info("Create an S3 session for the express S3 client and add objects to the buckets"); + logger.info("Now let's add some objects to our buckets and demonstrate how to work with S3 Sessions."); waitForInputToContinue(scanner); String bucketObject = createSessionAddObjects(); @@ -104,7 +104,7 @@ specifically when working in the same Availability Zone (AZ). To see the best re // regular and express buckets. showLexicographicalDifferences(bucketObject); - logger.info(""); + logger.info(DASHES); logger.info("That's it for our tour of the basic operations for S3 Express One Zone."); logger.info("Would you like to cleanUp the AWS resources? 
(y/n): "); String response = scanner.next().trim().toLowerCase(); @@ -141,10 +141,11 @@ private static void cleanUp() { } private static void showLexicographicalDifferences(String bucketObject) { + logger.info(DASHES); logger.info(""" - 7. Populate the buckets to show the lexicographical difference. - Now let's explore how directory buckets store objects in a different - manner to regular buckets. The key is in the name + 7. Populate the buckets to show the lexicographical (alphabetical) difference + when object names are listed. Now let's explore how directory buckets store + objects in a different manner to regular buckets. The key is in the name "Directory". Where regular buckets store their key/value pairs in a flat manner, directory buckets use actual directories/folders. This allows for more rapid indexing, traversing, and therefore @@ -152,9 +153,9 @@ private static void showLexicographicalDifferences(String bucketObject) { The more segmented your bucket is, with lots of directories, sub-directories, and objects, the more efficient it becomes. - This structural difference also causes ListObjects to behave differently, - which can cause unexpected results. Let's add a few more - objects with layered directories to see how the output of + This structural difference also causes `ListObject` operations to behave + differently, which can cause unexpected results. Let's add a few more + objects in subdirectories directories to see how the output of ListObjects changes. """); @@ -199,8 +200,8 @@ private static void showLexicographicalDifferences(String bucketObject) { } logger.info(""" - Notice how the normal bucket lists objects in lexicographical order, while the directory bucket does not. This is - because the normal bucket considers the whole "key" to be the object identifier, while the directory bucket actually + Notice how the regular bucket lists objects in lexicographical order, while the directory bucket does not. 
This is + because the regular bucket considers the whole "key" to be the object identifier, while the directory bucket actually creates directories and uses the object "key" as a path to the object. """); waitForInputToContinue(scanner); @@ -223,6 +224,7 @@ private static void showLexicographicalDifferences(String bucketObject) { * @param bucketObject the name of the object to download */ private static void demonstratePerformance(String bucketObject) { + logger.info(DASHES); logger.info("6. Demonstrate the performance difference."); logger.info(""" Now, let's do a performance test. We'll download the same object from each @@ -233,11 +235,11 @@ private static void demonstratePerformance(String bucketObject) { """); waitForInputToContinue(scanner); - int downloads = 1000; // Default value - logger.info("The number of downloads of the same object for this example is set at " + downloads + "."); + int downloads = 1000; // Default value. + logger.info("The default number of downloads of the same object for this example is set at " + downloads + "."); // Ask if the user wants to download a different number. - logger.info("Would you like to download a different number? (y/n): "); + logger.info("Would you like to download the file a different number of times? (y/n): "); String response = scanner.next().trim().toLowerCase(); if (response.equals("y")) { int maxDownloads = 1_000_000; @@ -266,7 +268,7 @@ private static void demonstratePerformance(String bucketObject) { logger.info("Downloading from the directory bucket."); long directoryTimeStart = System.nanoTime(); for (int index = 0; index < downloads; index++) { - if (index % 10 == 0) { + if (index % 50 == 0) { logger.info("Download " + index + " of " + downloads); } @@ -281,11 +283,11 @@ private static void demonstratePerformance(String bucketObject) { long directoryTimeDifference = System.nanoTime() - directoryTimeStart; - // Simulating the download process for the normal bucket. + // Download from the regular bucket. 
logger.info("Downloading from the regular bucket."); long normalTimeStart = System.nanoTime(); for (int index = 0; index < downloads; index++) { - if (index % 10 == 0) { + if (index % 50 == 0) { logger.info("Download " + index + " of " + downloads); } @@ -302,7 +304,7 @@ private static void demonstratePerformance(String bucketObject) { } long normalTimeDifference = System.nanoTime() - normalTimeStart; - logger.info("The directory bucket took " + directoryTimeDifference + " nanoseconds, while the normal bucket took " + normalTimeDifference + " nanoseconds."); + logger.info("The directory bucket took " + directoryTimeDifference + " nanoseconds, while the regular bucket took " + normalTimeDifference + " nanoseconds."); long difference = normalTimeDifference - directoryTimeDifference; logger.info("That's a difference of " + difference + " nanoseconds, or"); logger.info(difference / 1_000_000_000.0 + " seconds."); @@ -314,10 +316,11 @@ private static void demonstratePerformance(String bucketObject) { } private static String createSessionAddObjects() { + logger.info(DASHES); logger.info(""" 5. Create an object and copy it. We'll create a basic object consisting of some text and upload it to the - normal bucket. + regular bucket. Next we'll copy the object into the directory bucket using the regular client. This works fine because copy operations are not restricted for directory buckets. """); @@ -341,7 +344,7 @@ private static String createSessionAddObjects() { logger.info(""" It worked! It's important to remember the user permissions when interacting with directory buckets. Instead of validating permissions on every call as - normal buckets do, directory buckets utilize the user credentials and session + regular buckets do, directory buckets utilize the user credentials and session token to validate. This allows for much faster connection speeds on every call. For single calls, this is low, but for many concurrent calls this adds up to a lot of time saved. 
@@ -366,6 +369,7 @@ public static UserNames createVpcUsers() { Optionally create a VPC. Create two IAM users, one with S3 Express One Zone permissions and one without. */ + logger.info(DASHES); logger.info(""" 1. First, we'll set up a new VPC and VPC Endpoint if this program is running in an EC2 instance in the same AZ as your\s directory buckets will be. Are you running this in an EC2 instance located in the same AZ as your intended directory buckets? @@ -391,6 +395,7 @@ public static UserNames createVpcUsers() { } else { logger.info("Skipping the VPC setup. Don't forget to use this in production!"); } + logger.info(DASHES); logger.info(""" 2. Create a RegularUser and ExpressUser by using the AWS CDK. One IAM User, named RegularUser, will have permissions to work only @@ -416,7 +421,7 @@ public static UserNames createVpcUsers() { * @return a {@link Map} of String keys and String values representing the stack outputs, * which may include user-related information such as user names and IDs. */ - public static Map createUsersUsingCDK() { + public static Map createUsersUsingCDK() { logger.info("We'll use an AWS CloudFormation template to create the IAM users and policies."); CloudFormationHelper.deployCloudFormationStack(stackName); return CloudFormationHelper.getStackOutputsAsync(stackName).join(); @@ -462,6 +467,7 @@ public static void setupClientsAndBuckets(String expressUserName, String regular return; } + logger.info(DASHES); logger.info(""" 3. Create 2 S3Clients; one uses the ExpressUser's credentials and one uses the RegularUser's credentials. The 2 S3Clients will use different credentials. @@ -484,16 +490,17 @@ public static void setupClientsAndBuckets(String expressUserName, String regular We can now use the ExpressUser client to make calls to S3 Express operations. """); waitForInputToContinue(locscanner); + logger.info(DASHES); logger.info(""" 4. Create two buckets. 
Now we will create a directory bucket which is the linchpin of the S3 Express One Zone service. Directory buckets - behave in different ways from regular S3 buckets which we will explore here. We'll also create a normal bucket, put - an object into the normal bucket, and copy it over to the directory bucket. + behave differently from regular S3 buckets which we will explore here. We'll also create a regular bucket, put + an object into the regular bucket, and copy it to the directory bucket. """); logger.info(""" - Now, let's choose an availability zone for the directory bucket. We'll choose one - that is supported. + Now, let's choose an availability zone (AZ) for the directory bucket. + We'll choose one that is supported. """); String zoneId; String regularBucketName; From 8b2a188d2320e71567113796b30c8c3f27326ed7 Mon Sep 17 00:00:00 2001 From: scmacdon Date: Wed, 19 Mar 2025 11:38:24 -0400 Subject: [PATCH 06/10] hooked into SOS --- .../s3-directory-buckets_metadata.yaml | 12 +++++++++++ .../s3/express/S3DirectoriesActions.java | 4 +++- .../s3/express/S3DirectoriesScenario.java | 21 ++----------------- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/.doc_gen/metadata/s3-directory-buckets_metadata.yaml b/.doc_gen/metadata/s3-directory-buckets_metadata.yaml index 11acfe6708b..a0809b38e6b 100644 --- a/.doc_gen/metadata/s3-directory-buckets_metadata.yaml +++ b/.doc_gen/metadata/s3-directory-buckets_metadata.yaml @@ -418,6 +418,18 @@ s3-directory-buckets_Scenario_ExpressBasics: - Prompt the user to see if they want to clean up the resources. category: Basics languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/s3 + sdkguide: + excerpts: + - description: Run an interactive scenario demonstrating &S3; features. + snippet_tags: + - s3.java2.directories.scenario.main + - description: A wrapper class for &S3; SDK methods. 
+ snippet_tags: + - s3.java2.directories.actions.main PHP: versions: - sdk_version: 3 diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java index 8d26dfec46c..39aec1eca8a 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java @@ -63,6 +63,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +// snippet-start:[s3.java2.directories.actions.main] public class S3DirectoriesActions { private static IamAsyncClient iamAsyncClient; @@ -565,4 +566,5 @@ public CompletableFuture setupVPCAsync() { }) .thenAccept(v -> {}); } -} \ No newline at end of file +} +// snippet-end:[s3.java2.directories.actions.main] \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index 5277cb9876b..d68f9c2e449 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -8,37 +8,19 @@ import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.ec2.Ec2Client; -import software.amazon.awssdk.services.ec2.model.AvailabilityZone; -import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointRequest; -import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointResponse; -import software.amazon.awssdk.services.ec2.model.CreateVpcRequest; -import software.amazon.awssdk.services.ec2.model.CreateVpcResponse; -import 
software.amazon.awssdk.services.ec2.model.DescribeAvailabilityZonesRequest; -import software.amazon.awssdk.services.ec2.model.DescribeAvailabilityZonesResponse; -import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesRequest; -import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesResponse; -import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest; import software.amazon.awssdk.services.ec2.model.Ec2Exception; -import software.amazon.awssdk.services.ec2.model.Filter; -import software.amazon.awssdk.services.ec2.waiters.Ec2Waiter; -import software.amazon.awssdk.services.iam.IamAsyncClient; -import software.amazon.awssdk.services.iam.IamClient; -import software.amazon.awssdk.services.iam.model.CreateAccessKeyRequest; import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; import software.amazon.awssdk.services.iam.model.IamException; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.S3Exception; - -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Scanner; import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; -import java.util.stream.IntStream; +// snippet-start:[s3.java2.directories.scenario.main] public class S3DirectoriesScenario { public static final String DASHES = new String(new char[80]).replace("\0", "-"); @@ -587,3 +569,4 @@ private static void waitForInputToContinue(Scanner scanner) { } } } +// snippet-end:[s3.java2.directories.scenario.main] \ No newline at end of file From 42c963b159cfff4f62ccd5b51c37d2c085d78527 Mon Sep 17 00:00:00 2001 From: scmacdon Date: Wed, 19 Mar 2025 12:49:04 -0400 Subject: [PATCH 07/10] addressed a github issue --- .../s3/src/main/java/com/example/s3/express/UserNames.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java 
b/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java index dfe5c70af74..98ca4860e3a 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java @@ -1,3 +1,6 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + package com.example.s3.express; public class UserNames { From 20415214d1f7221991c534d86e7402e5ae3573ca Mon Sep 17 00:00:00 2001 From: scmacdon Date: Wed, 19 Mar 2025 18:39:36 -0400 Subject: [PATCH 08/10] addressed additional comments --- .../s3/express/S3DirectoriesActions.java | 102 ++++----- .../s3/express/S3DirectoriesScenario.java | 100 +++++---- .../s3/src/test/java/S3ExpressTests.java | 193 ++++++++++++++++++ 3 files changed, 306 insertions(+), 89 deletions(-) create mode 100644 javav2/example_code/s3/src/test/java/S3ExpressTests.java diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java index 39aec1eca8a..71633eec7bd 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesActions.java @@ -28,6 +28,7 @@ import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; import software.amazon.awssdk.services.iam.model.IamException; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException; import software.amazon.awssdk.services.s3.model.BucketInfo; import software.amazon.awssdk.services.s3.model.BucketType; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; @@ -47,6 +48,8 @@ import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.LocationInfo; import 
software.amazon.awssdk.services.s3.model.LocationType; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.S3Object; @@ -181,7 +184,7 @@ public CompletableFuture> deleteBucketAndObje * Lists the objects in an S3 bucket asynchronously. * * @param s3Client the S3 async client to use for the operation - * @param bucketName the name of the the S3 bucket containing the objects to list + * @param bucketName the name of the S3 bucket containing the objects to list * @return a {@link CompletableFuture} that contains the list of object keys in the specified bucket */ public CompletableFuture> listObjectsAsync(S3AsyncClient s3Client, String bucketName) { @@ -218,14 +221,10 @@ public CompletableFuture> getObjectAsync(S3Asyn return s3Client.getObject(objectRequest, AsyncResponseTransformer.toBytes()) .exceptionally(exception -> { Throwable cause = exception.getCause(); - if (cause instanceof S3Exception) { + if (cause instanceof NoSuchKeyException) { throw new CompletionException("Failed to get the object. 
Reason: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); } throw new CompletionException("Failed to get the object", exception); - }) - .thenApply(response -> { - logger.info("Successfully obtained bytes from an S3 object"); - return response; }); } @@ -318,8 +317,8 @@ public CompletableFuture createDirectoryBucketAsync(S3Asyn .whenComplete((response, exception) -> { if (exception != null) { Throwable cause = exception.getCause(); - if (cause instanceof S3Exception) { - throw new CompletionException("Error creating bucket: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); + if (cause instanceof BucketAlreadyExistsException) { + throw new CompletionException("The bucket already exists: " + ((S3Exception) cause).awsErrorDetails().errorMessage(), cause); } throw new CompletionException("Unexpected error occurred while creating bucket", exception); } @@ -351,7 +350,12 @@ public CompletableFuture> createBucketAsync(S }) .whenComplete((response, exception) -> { if (exception != null) { - throw new CompletionException("Error creating bucket: " + bucketName, exception); + Throwable cause = exception.getCause(); + if (cause instanceof BucketAlreadyExistsException) { + throw new CompletionException("The S3 bucket exists: " + cause.getMessage(), cause); + } else { + throw new CompletionException("Failed to create bucket: " + exception.getMessage(), exception); + } } logger.info(bucketName + " is ready"); }); @@ -374,7 +378,12 @@ public CompletableFuture putObjectAsync(S3AsyncClient s3Clien return s3Client.putObject(objectRequest, AsyncRequestBody.fromString(text)) .whenComplete((response, exception) -> { if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof NoSuchBucketException) { + throw new CompletionException("The S3 bucket does not exist: " + cause.getMessage(), cause); + } else { + throw new CompletionException("Failed to
upload the object: " + exception.getMessage(), exception); + } } }); } @@ -396,15 +405,13 @@ public CompletableFuture createAccessKeyAsync(String us logger.info("Access Key Created."); } else { - if (exception == null) { + if (exception != null) { - throw new CompletionException("An unknown error occurred while creating access key.", null); - } - - Throwable cause = exception.getCause(); - if (cause instanceof IamException) { - throw new CompletionException("IAM error while creating access key: " + cause.getMessage(), cause); + Throwable cause = exception.getCause(); + if (cause instanceof IamException) { + throw new CompletionException("IAM error while creating access key: " + cause.getMessage(), cause); + } else { + throw new CompletionException("Failed to create access key: " + exception.getMessage(), exception); + } } - - throw new CompletionException("Failed to create access key: " + exception.getMessage(), exception); } }); } @@ -422,7 +429,6 @@ public CompletableFuture selectAvailabilityZoneIdAsync() { return getEc2AsyncClient().describeAvailabilityZones(zonesRequest) .thenCompose(response -> { List<AvailabilityZone> zonesList = response.availabilityZones(); - if (zonesList.isEmpty()) { logger.info("No availability zones found."); return CompletableFuture.completedFuture(null); // Return null if no zones are found @@ -458,9 +464,9 @@ public CompletableFuture selectAvailabilityZoneIdAsync() { /** * Prompts the user to select an Availability Zone from the given list.
 * - * @param zonesList the list of availability zones + * @param zonesList the list of Availability Zones * @param zoneIds the list of zone IDs - * @return the selected AvailabilityZone + * @return the selected Availability Zone */ private static AvailabilityZone promptUserForZoneSelection(List<AvailabilityZone> zonesList, List<String> zoneIds) { Scanner scanner = new Scanner(System.in); @@ -486,14 +492,13 @@ private static AvailabilityZone promptUserForZoneSelection(List setupVPCAsync() { + public CompletableFuture<AbstractMap.SimpleEntry<String, String>> setupVPCAsync() { String cidr = "10.0.0.0/16"; CreateVpcRequest vpcRequest = CreateVpcRequest.builder() .cidrBlock(cidr) @@ -502,8 +507,9 @@ public CompletableFuture setupVPCAsync() { return getEc2AsyncClient().createVpc(vpcRequest) .thenCompose(vpcResponse -> { String vpcId = vpcResponse.vpc().vpcId(); + logger.info("VPC Created: {}", vpcId); - Ec2AsyncWaiter waiter = ec2AsyncClient.waiter(); + Ec2AsyncWaiter waiter = getEc2AsyncClient().waiter(); DescribeVpcsRequest request = DescribeVpcsRequest.builder() .vpcIds(vpcId) .build(); @@ -521,18 +527,20 @@ public CompletableFuture setupVPCAsync() { .filters(filter) .build(); - return ec2AsyncClient.describeRouteTables(describeRouteTablesRequest) + return getEc2AsyncClient().describeRouteTables(describeRouteTablesRequest) .thenApply(routeTablesResponse -> { if (routeTablesResponse.routeTables().isEmpty()) { - throw new CompletionException("No route tables found for VPC.", null); + throw new CompletionException("No route tables found for VPC: " + vpcId, null); } - return new AbstractMap.SimpleEntry<>(vpcId, routeTablesResponse.routeTables().get(0).routeTableId()); + String routeTableId = routeTablesResponse.routeTables().get(0).routeTableId(); + logger.info("Route table found: {}", routeTableId); + return new AbstractMap.SimpleEntry<>(vpcId, routeTableId); }); }) .thenCompose(vpcAndRouteTable -> { String vpcId = vpcAndRouteTable.getKey(); String routeTableId = vpcAndRouteTable.getValue(); - Region region =
ec2AsyncClient.serviceClientConfiguration().region(); + Region region = getEc2AsyncClient().serviceClientConfiguration().region(); String serviceName = String.format("com.amazonaws.%s.s3express", region.id()); CreateVpcEndpointRequest endpointRequest = CreateVpcEndpointRequest.builder() @@ -541,30 +549,24 @@ public CompletableFuture setupVPCAsync() { .serviceName(serviceName) .build(); - return ec2AsyncClient.createVpcEndpoint(endpointRequest) + return getEc2AsyncClient().createVpcEndpoint(endpointRequest) .thenApply(vpcEndpointResponse -> { String vpcEndpointId = vpcEndpointResponse.vpcEndpoint().vpcEndpointId(); + logger.info("VPC Endpoint created: {}", vpcEndpointId); return new AbstractMap.SimpleEntry<>(vpcId, vpcEndpointId); }); }) - .whenComplete((result, exception) -> { - if (result != null) { - logger.info("Created VPC: {}", result.getKey()); - logger.info("Created VPC Endpoint: {}", result.getValue()); - } else { - if (exception == null) { - throw new CompletionException("An unknown error occurred during VPC setup.", null); - } - - Throwable cause = exception.getCause(); - if (cause instanceof Ec2Exception) { - throw new CompletionException("EC2 error during VPC setup: " + cause.getMessage(), cause); - } - - throw new CompletionException("VPC setup failed: " + exception.getMessage(), exception); + .exceptionally(exception -> { + Throwable cause = exception.getCause() != null ? 
exception.getCause() : exception; + if (cause instanceof Ec2Exception) { + logger.error("EC2 error during VPC setup: {}", cause.getMessage(), cause); + throw new CompletionException("EC2 error during VPC setup: " + cause.getMessage(), cause); } - }) - .thenAccept(v -> {}); + + logger.error("VPC setup failed: {}", cause.getMessage(), cause); + throw new CompletionException("VPC setup failed: " + cause.getMessage(), cause); + }); } + } // snippet-end:[s3.java2.directories.actions.main] \ No newline at end of file diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index d68f9c2e449..b92d2be40dc 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -12,7 +12,11 @@ import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; import software.amazon.awssdk.services.iam.model.IamException; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.s3.model.S3Exception; + import java.util.List; import java.util.Map; import java.util.Scanner; @@ -71,7 +75,7 @@ specifically when working in the same Availability Zone (AZ). To see the best re String regularUserName = userNames.getRegularUserName(); // Set up two S3 clients, one regular and one express, - // and two buckets, one regular and one express. + // and two buckets, one regular and one directory. setupClientsAndBuckets(expressUserName, regularUserName); // Create an S3 session for the express S3 client and add objects to the buckets. 
@@ -79,7 +83,7 @@ specifically when working in the same Availability Zone (AZ). To see the best re waitForInputToContinue(scanner); String bucketObject = createSessionAddObjects(); - // Demonstrate performance differences between regular and express buckets. + // Demonstrate performance differences between regular and directory buckets. demonstratePerformance(bucketObject); // Populate the buckets to show the lexicographical difference between @@ -91,14 +95,14 @@ specifically when working in the same Availability Zone (AZ). To see the best re logger.info("Would you like to cleanUp the AWS resources? (y/n): "); String response = scanner.next().trim().toLowerCase(); if (response.equals("y")) { - cleanUp(); + cleanUp(stackName); } } /* Delete resources created by this scenario. */ - private static void cleanUp() { + public static void cleanUp(String stackName) { try { if (mdirectoryBucketName != null) { s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3ExpressClient, mdirectoryBucketName).join(); @@ -137,7 +141,7 @@ private static void showLexicographicalDifferences(String bucketObject) { directories, sub-directories, and objects, the more efficient it becomes. This structural difference also causes `ListObject` operations to behave differently, which can cause unexpected results. Let's add a few more - objects in subdirectories directories to see how the output of + objects in sub-directories to see how the output of ListObjects changes. 
"""); @@ -157,8 +161,13 @@ private static void showLexicographicalDifferences(String bucketObject) { s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, otherAltObject, "").join(); s3DirectoriesActions.putObjectAsync(mS3ExpressClient, mdirectoryBucketName, otherAltObject, "").join(); - } catch (CompletionException e) { - logger.error("Async operation failed: {} ", e.getCause().getMessage()); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof NoSuchBucketException) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } return; } @@ -210,8 +219,9 @@ private static void demonstratePerformance(String bucketObject) { logger.info("6. Demonstrate the performance difference."); logger.info(""" Now, let's do a performance test. We'll download the same object from each - bucket repeatedly and compare the total time needed. Note: - the performance difference will be much more pronounced if this + bucket repeatedly and compare the total time needed. + + Note: the performance difference will be much more pronounced if this example is run in an EC2 instance in the same Availability Zone as the bucket. """); @@ -257,12 +267,16 @@ private static void demonstratePerformance(String bucketObject) { try { // Get the object from the directory bucket. 
s3DirectoriesActions.getObjectAsync(mS3ExpressClient, mdirectoryBucketName, bucketObject).join(); - } catch (CompletionException e) { - logger.error("Async operation failed: {} ", e.getCause().getMessage()); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof NoSuchKeyException) { + logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + } else { + logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + } return; } } - long directoryTimeDifference = System.nanoTime() - directoryTimeStart; // Download from the regular bucket. @@ -277,11 +291,12 @@ private static void demonstratePerformance(String bucketObject) { s3DirectoriesActions.getObjectAsync(mS3RegularClient, mregularBucketName, bucketObject).join(); } catch (CompletionException ce) { Throwable cause = ce.getCause(); - if (cause instanceof S3Exception) { + if (cause instanceof NoSuchKeyException) { logger.error("S3Exception occurred: {}", cause.getMessage(), ce); } else { logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); } + return; } } @@ -301,10 +316,8 @@ private static String createSessionAddObjects() { logger.info(DASHES); logger.info(""" 5. Create an object and copy it. - We'll create a basic object consisting of some text and upload it to the + We'll create an object consisting of some text and upload it to the regular bucket. - Next we'll copy the object into the directory bucket using the regular client. - This works fine because copy operations are not restricted for directory buckets. """); waitForInputToContinue(scanner); @@ -324,7 +337,11 @@ private static String createSessionAddObjects() { } } logger.info(""" - It worked! It's important to remember the user permissions when interacting with + It worked! This is because the S3Client that performed the copy operation + is the expressClient using the credentials for the user with permission to + work with directory buckets. 
+ + It's important to remember the user permissions when interacting with directory buckets. Instead of validating permissions on every call as regular buckets do, directory buckets utilize the user credentials and session token to validate. This allows for much faster connection speeds on every call. @@ -387,7 +404,7 @@ public static UserNames createVpcUsers() { waitForInputToContinue(scanner); // Create two users required for this scenario. - Map stackOutputs = createUsersUsingCDK(); + Map stackOutputs = createUsersUsingCDK(stackName); regularUser = stackOutputs.get("RegularUser"); expressUser = stackOutputs.get("ExpressUser"); @@ -403,7 +420,7 @@ public static UserNames createVpcUsers() { * @return a {@link Map} of String keys and String values representing the stack outputs, * which may include user-related information such as user names and IDs. */ - public static Map createUsersUsingCDK() { + public static Map createUsersUsingCDK(String stackName) { logger.info("We'll use an AWS CloudFormation template to create the IAM users and policies."); CloudFormationHelper.deployCloudFormationStack(stackName); return CloudFormationHelper.getStackOutputsAsync(stackName).join(); @@ -460,8 +477,8 @@ public static void setupClientsAndBuckets(String expressUserName, String regular mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdforExpressUser, secretAccessforExpressUser).join(); } catch (CompletionException ce) { Throwable cause = ce.getCause(); - if (cause instanceof S3Exception) { - logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + if (cause instanceof IllegalArgumentException) { + logger.error("An illegal argument exception occurred: {}", cause.getMessage(), ce); } else { logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); } @@ -499,52 +516,57 @@ public static void setupClientsAndBuckets(String expressUserName, String regular return; } logger.info(""" - Now, let's create the actual directory bucket, as well as a 
regular - bucket." + Now, let's create the actual directory bucket, as well as a regular bucket." """); + String directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zoneId + "--x-s3"; try { - String directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zoneId + "--x-s3"; s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zoneId).join(); logger.info("Created directory bucket {}", directoryBucketName); - - // Assign to the data member. - mdirectoryBucketName = directoryBucketName; } catch (CompletionException ce) { Throwable cause = ce.getCause(); - if (cause instanceof S3Exception) { - logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + if (cause instanceof BucketAlreadyExistsException) { + logger.error("The bucket already exists. Moving on: {}", cause.getMessage(), ce); } else { logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + return; } - return; } + // Assign to the data member. + mdirectoryBucketName = directoryBucketName; try { s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName).join(); logger.info("Created regular bucket {} ", regularBucketName); mregularBucketName = regularBucketName; } catch (CompletionException ce) { Throwable cause = ce.getCause(); - if (cause instanceof S3Exception) { - logger.error("S3Exception occurred: {}", cause.getMessage(), ce); + if (cause instanceof BucketAlreadyExistsException) { + logger.error("The bucket already exists. Moving on: {}", cause.getMessage(), ce); } else { logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); + return; } - return; } logger.info("Great! Both buckets were created."); waitForInputToContinue(locscanner); } - /* - Creates an S3 client with access key credentials. - :param access_key: The access key for the user. - :return: The S3 Express One Zone client. 
- + /** + * Creates an asynchronous S3 client with the specified access key and secret access key. + * + * @param accessKeyId the AWS access key ID + * @param secretAccessKey the AWS secret access key + * @return a {@link CompletableFuture} that asynchronously creates the S3 client + * @throws IllegalArgumentException if the access key ID or secret access key is null */ - private static CompletableFuture createS3ClientWithAccessKeyAsync(String accessKeyId, String secretAccessKey) { + public static CompletableFuture createS3ClientWithAccessKeyAsync(String accessKeyId, String secretAccessKey) { return CompletableFuture.supplyAsync(() -> { + // Validate input parameters + if (accessKeyId == null || accessKeyId.isBlank() || secretAccessKey == null || secretAccessKey.isBlank()) { + throw new IllegalArgumentException("Access Key ID and Secret Access Key must not be null or empty"); + } + AwsBasicCredentials awsCredentials = AwsBasicCredentials.create(accessKeyId, secretAccessKey); return S3AsyncClient.builder() .credentialsProvider(StaticCredentialsProvider.create(awsCredentials)) diff --git a/javav2/example_code/s3/src/test/java/S3ExpressTests.java b/javav2/example_code/s3/src/test/java/S3ExpressTests.java new file mode 100644 index 00000000000..18f11c316a8 --- /dev/null +++ b/javav2/example_code/s3/src/test/java/S3ExpressTests.java @@ -0,0 +1,193 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import com.example.s3.express.S3DirectoriesActions; +import com.example.s3.express.S3DirectoriesScenario; +import com.example.s3.express.UserNames; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.waiters.WaiterResponse; +import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CreateBucketResponse; +import software.amazon.awssdk.services.s3.model.HeadBucketResponse; + +import java.io.IOException; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; + +import static com.example.s3.express.S3DirectoriesScenario.createS3ClientWithAccessKeyAsync; +import static com.example.s3.express.S3DirectoriesScenario.createUsersUsingCDK; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class S3ExpressTests { + + private static S3AsyncClient mS3RegularClient; + private static S3AsyncClient mS3ExpressClient; + + private static String regularUser = ""; + + private static String expressUser = ""; + + private static String regularBucketName = ""; + private static String directoryBucketName = ""; + + private static String bucketObject = "basic-text-object.txt"; + private static final S3DirectoriesActions s3DirectoriesActions = new S3DirectoriesActions(); + + private static String stackName = "cfn-stack-s3-express-basics--" + UUID.randomUUID(); + + private static final Logger logger = 
LoggerFactory.getLogger(S3ExpressTests.class); + + @Test + @Tag("IntegrationTest") + @Order(1) + public void testSetUp() throws IOException { + assertDoesNotThrow(() -> { + // Retrieve user names from CDK stack outputs + Map stackOutputs = createUsersUsingCDK(stackName); + regularUser = stackOutputs.get("RegularUser"); + expressUser = stackOutputs.get("ExpressUser"); + + assertNotNull(regularUser, "Regular user should not be null"); + assertNotNull(expressUser, "Express user should not be null"); + + // Store the user names in a UserNames object + UserNames names = new UserNames(); + names.setRegularUserName(regularUser); + names.setExpressUserName(expressUser); + + // Create access keys for both users asynchronously + CreateAccessKeyResponse keyResponseRegular = s3DirectoriesActions.createAccessKeyAsync(regularUser).join(); + CreateAccessKeyResponse keyResponseExpress = s3DirectoriesActions.createAccessKeyAsync(expressUser).join(); + + assertNotNull(keyResponseRegular.accessKey(), "Access key for Regular User should not be null"); + assertNotNull(keyResponseExpress.accessKey(), "Access key for Express User should not be null"); + + // Extract access keys + String accessKeyIdForRegUser = keyResponseRegular.accessKey().accessKeyId(); + String secretAccessForRegUser = keyResponseRegular.accessKey().secretAccessKey(); + + String accessKeyIdForExpressUser = keyResponseExpress.accessKey().accessKeyId(); + String secretAccessForExpressUser = keyResponseExpress.accessKey().secretAccessKey(); + + // Ensure keys are valid + assertNotNull(accessKeyIdForRegUser, "Access Key ID for Regular User should not be null"); + assertNotNull(secretAccessForRegUser, "Secret Access Key for Regular User should not be null"); + assertNotNull(accessKeyIdForExpressUser, "Access Key ID for Express User should not be null"); + assertNotNull(secretAccessForExpressUser, "Secret Access Key for Express User should not be null"); + + // Create S3 clients asynchronously + mS3RegularClient = 
createS3ClientWithAccessKeyAsync(accessKeyIdForRegUser, secretAccessForRegUser).join(); + mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdForExpressUser, secretAccessForExpressUser).join(); + + assertNotNull(mS3RegularClient, "S3 client for Regular User should not be null"); + assertNotNull(mS3ExpressClient, "S3 client for Express User should not be null"); + }); + } + + @Test + @Tag("IntegrationTest") + @Order(2) + public void createBuckets() throws InterruptedException { + Thread.sleep(30000); + assertDoesNotThrow(() -> { + String zoneId = "usw2-az1"; + + // Generate bucket names + regularBucketName = "reg-bucket-" + System.currentTimeMillis(); + directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zoneId + "--x-s3"; + + // Validate bucket names + assertNotNull(regularBucketName, "Regular bucket name should not be null"); + assertNotNull(directoryBucketName, "Directory bucket name should not be null"); + + // Create the regular bucket asynchronously + CompletableFuture> regularBucketFuture = s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName); + + // Create the directory bucket asynchronously + CompletableFuture directoryBucketFuture = s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zoneId); + + // Wait for both operations to complete + CompletableFuture.allOf(regularBucketFuture, directoryBucketFuture).join(); + + }); + } + + + @Test + @Tag("IntegrationTest") + @Order(3) + public void createSessionAddObjectTest() { + assertDoesNotThrow(() -> { + s3DirectoriesActions.putObjectAsync(mS3RegularClient, regularBucketName, bucketObject, "Look Ma, I'm a bucket!").join(); + s3DirectoriesActions.createSessionAsync(mS3ExpressClient, directoryBucketName).join(); + s3DirectoriesActions.copyObjectAsync(mS3ExpressClient, regularBucketName, bucketObject, directoryBucketName, bucketObject).join(); + }); + } + + @Test + @Tag("IntegrationTest") + @Order(4) + public void 
demonstratePerformance() { + assertDoesNotThrow(() -> { + int downloads = 300; + long directoryTimeStart = System.nanoTime(); + for (int index = 0; index < downloads; index++) { + if (index % 50 == 0) { + System.out.println("Download " + index + " of " + downloads); + } + + + // Get the object from the directory bucket. + s3DirectoriesActions.getObjectAsync(mS3ExpressClient, directoryBucketName, bucketObject).join(); + } + + long directoryTimeDifference = System.nanoTime() - directoryTimeStart; + + // Download from the regular bucket. + System.out.println("Downloading from the regular bucket."); + long normalTimeStart = System.nanoTime(); + for (int index = 0; index < downloads; index++) { + if (index % 50 == 0) { + System.out.println("Download " + index + " of " + downloads); + } + + s3DirectoriesActions.getObjectAsync(mS3RegularClient, regularBucketName, bucketObject).join(); + + } + + long normalTimeDifference = System.nanoTime() - normalTimeStart; + System.out.println("The directory bucket took " + directoryTimeDifference + " nanoseconds, while the regular bucket took " + normalTimeDifference + " nanoseconds."); + long difference = normalTimeDifference - directoryTimeDifference; + System.out.println("That's a difference of " + difference + " nanoseconds, or"); + System.out.println(difference / 1_000_000_000.0 + " seconds."); + + if (difference < 0) { + System.out.println("The directory buckets were slower. 
This can happen if you are not running on the cloud within a VPC."); + } + }); + } + + @Test + @Tag("IntegrationTest") + @Order(5) + public void testCleanup() { + assertDoesNotThrow(() -> { + s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3ExpressClient, directoryBucketName).join(); + s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3RegularClient, regularBucketName).join(); + S3DirectoriesScenario.cleanUp(stackName); + }); + } +} + From f464264e2e970ded7fe77aa415ddfb7acf7c9f0a Mon Sep 17 00:00:00 2001 From: scmacdon Date: Mon, 24 Mar 2025 14:50:01 -0400 Subject: [PATCH 09/10] updated Scenario --- .../main/java/com/example/s3/express/S3DirectoriesScenario.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index b92d2be40dc..f818e941faf 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -478,7 +478,7 @@ public static void setupClientsAndBuckets(String expressUserName, String regular } catch (CompletionException ce) { Throwable cause = ce.getCause(); if (cause instanceof IllegalArgumentException) { - logger.error("An illegal argument exception occurred: {}", cause.getMessage(), ce); + logger.error("An invalid argument exception occurred: {}", cause.getMessage(), ce); } else { logger.error("An unexpected error occurred: {}", cause.getMessage(), ce); } From 5e5532770be785f8387fe839d8976368ba0076e2 Mon Sep 17 00:00:00 2001 From: scmacdon Date: Mon, 24 Mar 2025 14:56:42 -0400 Subject: [PATCH 10/10] updated Scenario --- .../main/java/com/example/s3/express/S3DirectoriesScenario.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java index f818e941faf..621a30db1a0 100644 --- a/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java +++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/S3DirectoriesScenario.java @@ -468,7 +468,7 @@ public static void setupClientsAndBuckets(String expressUserName, String regular logger.info(DASHES); logger.info(""" - 3. Create 2 S3Clients; one uses the ExpressUser's credentials and one uses the RegularUser's credentials. + 3. Create two S3Clients; one uses the ExpressUser's credentials and one uses the RegularUser's credentials. The 2 S3Clients will use different credentials. """); waitForInputToContinue(locscanner);