regBucketObjects = s3DirectoriesActions.listObjectsAsync(mS3RegularClient, mregularBucketName).join();
+
+ logger.info("Directory bucket content");
+ for (String obj : dirBucketObjects) {
+ logger.info(obj);
+ }
+
+ logger.info("Regular bucket content");
+ for (String obj : regBucketObjects) {
+ logger.info(obj);
+ }
+ } catch (CompletionException e) {
+ logger.error("Async operation failed: {} ", e.getCause().getMessage());
+ return;
+ }
+
+ logger.info("""
+ Notice how the regular bucket lists objects in lexicographical order, while the directory bucket does not. This is
+ because the regular bucket considers the whole "key" to be the object identifier, while the directory bucket actually
+ creates directories and uses the object "key" as a path to the object.
+ """);
+ waitForInputToContinue(scanner);
+ }
+
+    /**
+     * Demonstrates the performance difference between downloading an object from a
+     * directory bucket and from a regular bucket.
+     *
+     * This method:
+     * - Prompts the user to choose the number of downloads (default is 1,000).
+     * - Downloads the specified object from the directory bucket and measures the total time.
+     * - Downloads the same object from the regular bucket and measures the total time.
+     * - Compares the time differences and prints the results.
+     *
+     * Note: The performance difference will be more pronounced if this example is run on an
+     * EC2 instance in the same Availability Zone as the buckets.
+     *
+     * @param bucketObject the key of the object to download from both buckets
+     */
+    private static void demonstratePerformance(String bucketObject) {
+        logger.info(DASHES);
+        logger.info("6. Demonstrate the performance difference.");
+        logger.info("""
+            Now, let's do a performance test. We'll download the same object from each
+            bucket repeatedly and compare the total time needed.
+
+            Note: the performance difference will be much more pronounced if this
+            example is run in an EC2 instance in the same Availability Zone as
+            the bucket.
+            """);
+        waitForInputToContinue(scanner);
+
+        int downloads = 1000; // Default value.
+        logger.info("The default number of downloads of the same object for this example is set at " + downloads + ".");
+
+        // Ask if the user wants to download a different number.
+        logger.info("Would you like to download the file a different number of times? (y/n): ");
+        String response = scanner.next().trim().toLowerCase();
+        // Consume the rest of the line; otherwise the nextLine() inside
+        // waitForInputToContinue() reads the leftover newline as an empty token.
+        scanner.nextLine();
+        if (response.equals("y")) {
+            int maxDownloads = 1_000_000;
+
+            // Keep prompting until a valid download count is entered.
+            while (true) {
+                logger.info("Enter a number between 1 and " + maxDownloads + " for the number of downloads: ");
+                if (scanner.hasNextInt()) {
+                    downloads = scanner.nextInt();
+                    scanner.nextLine(); // Consume the trailing newline left behind by nextInt().
+                    if (downloads >= 1 && downloads <= maxDownloads) {
+                        break;
+                    } else {
+                        logger.info("Please enter a number between 1 and " + maxDownloads + ".");
+                    }
+                } else {
+                    logger.info("Invalid input. Please enter a valid integer.");
+                    scanner.next(); // Discard the invalid token before re-prompting.
+                }
+            }
+
+            logger.info("You have chosen to download {} items.", downloads);
+        } else {
+            logger.info("No changes made. Using default downloads: {}", downloads);
+        }
+        // Time the repeated downloads from the directory bucket.
+        logger.info("Downloading from the directory bucket.");
+        long directoryTimeStart = System.nanoTime();
+        for (int index = 0; index < downloads; index++) {
+            if (index % 50 == 0) {
+                logger.info("Download " + index + " of " + downloads);
+            }
+
+            try {
+                // Get the object from the directory bucket.
+                s3DirectoriesActions.getObjectAsync(mS3ExpressClient, mdirectoryBucketName, bucketObject).join();
+            } catch (CompletionException ce) {
+                Throwable cause = ce.getCause();
+                if (cause instanceof NoSuchKeyException) {
+                    logger.error("S3Exception occurred: {}", cause.getMessage(), ce);
+                } else {
+                    logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+                }
+                return;
+            }
+        }
+        long directoryTimeDifference = System.nanoTime() - directoryTimeStart;
+
+        // Time the repeated downloads from the regular bucket.
+        logger.info("Downloading from the regular bucket.");
+        long normalTimeStart = System.nanoTime();
+        for (int index = 0; index < downloads; index++) {
+            if (index % 50 == 0) {
+                logger.info("Download " + index + " of " + downloads);
+            }
+
+            try {
+                s3DirectoriesActions.getObjectAsync(mS3RegularClient, mregularBucketName, bucketObject).join();
+            } catch (CompletionException ce) {
+                Throwable cause = ce.getCause();
+                if (cause instanceof NoSuchKeyException) {
+                    logger.error("S3Exception occurred: {}", cause.getMessage(), ce);
+                } else {
+                    logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+                }
+                return;
+            }
+        }
+
+        long normalTimeDifference = System.nanoTime() - normalTimeStart;
+        logger.info("The directory bucket took " + directoryTimeDifference + " nanoseconds, while the regular bucket took " + normalTimeDifference + " nanoseconds.");
+        // Positive difference => the regular bucket was slower than the directory bucket.
+        long difference = normalTimeDifference - directoryTimeDifference;
+        logger.info("That's a difference of " + difference + " nanoseconds, or");
+        logger.info(difference / 1_000_000_000.0 + " seconds.");
+
+        if (difference < 0) {
+            logger.info("The directory buckets were slower. This can happen if you are not running on the cloud within a VPC.");
+        }
+        waitForInputToContinue(scanner);
+    }
+
+    /**
+     * Creates a text object in the regular bucket, creates a session on the directory
+     * bucket, and copies the object into the directory bucket using the express client.
+     *
+     * @return the key of the object that was created and copied
+     */
+    private static String createSessionAddObjects() {
+        logger.info(DASHES);
+        logger.info("""
+            5. Create an object and copy it.
+            We'll create an object consisting of some text and upload it to the
+            regular bucket.
+            """);
+        waitForInputToContinue(scanner);
+
+        String bucketObject = "basic-text-object.txt";
+        try {
+            s3DirectoriesActions.putObjectAsync(mS3RegularClient, mregularBucketName, bucketObject, "Look Ma, I'm a bucket!").join();
+            s3DirectoriesActions.createSessionAsync(mS3ExpressClient, mdirectoryBucketName).join();
+
+            // Copy the object to the destination S3 bucket.
+            s3DirectoriesActions.copyObjectAsync(mS3ExpressClient, mregularBucketName, bucketObject, mdirectoryBucketName, bucketObject).join();
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof S3Exception) {
+                logger.error("S3Exception occurred: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+            }
+            // Don't fall through to the success message when the copy failed.
+            return bucketObject;
+        }
+        logger.info("""
+            It worked! This is because the S3Client that performed the copy operation
+            is the expressClient using the credentials for the user with permission to
+            work with directory buckets.
+
+            It's important to remember the user permissions when interacting with
+            directory buckets. Instead of validating permissions on every call as
+            regular buckets do, directory buckets utilize the user credentials and session
+            token to validate. This allows for much faster connection speeds on every call.
+            For single calls, this is low, but for many concurrent calls
+            this adds up to a lot of time saved.
+            """);
+        waitForInputToContinue(scanner);
+        return bucketObject;
+    }
+
+    /**
+     * Creates VPC users for the S3 Express One Zone scenario.
+     *
+     * This method performs the following steps:
+     * - Optionally creates a new VPC and VPC Endpoint if the application is running in an
+     *   EC2 instance in the same Availability Zone as the directory buckets.
+     * - Creates two IAM users via a CloudFormation stack: one with S3 Express One Zone
+     *   permissions and one without.
+     *
+     * @return a {@link UserNames} object containing the names of the created IAM users
+     */
+    public static UserNames createVpcUsers() {
+        logger.info(DASHES);
+        logger.info("""
+            1. First, we'll set up a new VPC and VPC Endpoint if this program is running in an EC2 instance in the same AZ as your\s
+            directory buckets will be. Are you running this in an EC2 instance located in the same AZ as your intended directory buckets?
+            """);
+
+        logger.info("Do you want to setup a VPC Endpoint? (y/n)");
+        String endpointAns = scanner.nextLine().trim();
+        if (endpointAns.equalsIgnoreCase("y")) {
+            logger.info("""
+                Great! Let's set up a VPC, retrieve the Route Table from it, and create a VPC Endpoint to connect the S3 Client to.
+                """);
+            try {
+                s3DirectoriesActions.setupVPCAsync().join();
+            } catch (CompletionException ce) {
+                Throwable cause = ce.getCause();
+                if (cause instanceof Ec2Exception) {
+                    // Report the actual exception type (previously mislabeled "IamException").
+                    logger.error("Ec2Exception occurred: {}", cause.getMessage(), ce);
+                } else {
+                    logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+                }
+            }
+            waitForInputToContinue(scanner);
+        } else {
+            logger.info("Skipping the VPC setup. Don't forget to use this in production!");
+        }
+        logger.info(DASHES);
+        logger.info("""
+            2. Create a RegularUser and ExpressUser by using the AWS CDK.
+            One IAM User, named RegularUser, will have permissions to work only
+            with regular buckets and one IAM user, named ExpressUser, will have
+            permissions to work only with directory buckets.
+            """);
+        waitForInputToContinue(scanner);
+
+        // Create the two users required for this scenario.
+        Map<String, String> stackOutputs = createUsersUsingCDK(stackName);
+        regularUser = stackOutputs.get("RegularUser");
+        expressUser = stackOutputs.get("ExpressUser");
+
+        UserNames names = new UserNames();
+        names.setRegularUserName(regularUser);
+        names.setExpressUserName(expressUser);
+        return names;
+    }
+
+    /**
+     * Creates the scenario's IAM users and policies by deploying an AWS CloudFormation stack.
+     *
+     * @param stackName the name of the CloudFormation stack to deploy
+     * @return a {@link Map} of stack-output keys to values, which includes the
+     *         "RegularUser" and "ExpressUser" user names.
+     */
+    public static Map<String, String> createUsersUsingCDK(String stackName) {
+        logger.info("We'll use an AWS CloudFormation template to create the IAM users and policies.");
+        CloudFormationHelper.deployCloudFormationStack(stackName);
+        // Block until the stack outputs are available.
+        return CloudFormationHelper.getStackOutputsAsync(stackName).join();
+    }
+
+    /**
+     * Sets up the S3 clients and creates the directory and regular buckets used by the scenario.
+     *
+     * Creates access keys for both users, builds one S3 client per set of credentials,
+     * selects an Availability Zone, then creates a directory bucket (with the express
+     * client) and a regular bucket (with the regular client). On success the bucket
+     * names and clients are stored in the class-level fields.
+     *
+     * @param expressUserName the username for the user with S3 Express permissions
+     * @param regularUserName the username for the user with regular S3 permissions
+     */
+    public static void setupClientsAndBuckets(String expressUserName, String regularUserName) {
+        // Use the shared class-level scanner. Creating a second Scanner on System.in
+        // here would buffer the same stream independently and can swallow user input.
+        String accessKeyIdforRegUser;
+        String secretAccessforRegUser;
+        try {
+            CreateAccessKeyResponse keyResponse = s3DirectoriesActions.createAccessKeyAsync(regularUserName).join();
+            accessKeyIdforRegUser = keyResponse.accessKey().accessKeyId();
+            secretAccessforRegUser = keyResponse.accessKey().secretAccessKey();
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof IamException) {
+                logger.error("IamException occurred: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+            }
+            return;
+        }
+
+        String accessKeyIdforExpressUser;
+        String secretAccessforExpressUser;
+        try {
+            CreateAccessKeyResponse keyResponseExpress = s3DirectoriesActions.createAccessKeyAsync(expressUserName).join();
+            accessKeyIdforExpressUser = keyResponseExpress.accessKey().accessKeyId();
+            secretAccessforExpressUser = keyResponseExpress.accessKey().secretAccessKey();
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof IamException) {
+                logger.error("IamException occurred: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+            }
+            return;
+        }
+
+        logger.info(DASHES);
+        logger.info("""
+            3. Create two S3Clients; one uses the ExpressUser's credentials and one uses the RegularUser's credentials.
+            The 2 S3Clients will use different credentials.
+            """);
+        waitForInputToContinue(scanner);
+        try {
+            mS3RegularClient = createS3ClientWithAccessKeyAsync(accessKeyIdforRegUser, secretAccessforRegUser).join();
+            mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdforExpressUser, secretAccessforExpressUser).join();
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof IllegalArgumentException) {
+                logger.error("An invalid argument exception occurred: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+            }
+            return;
+        }
+
+        logger.info("""
+            We can now use the ExpressUser client to make calls to S3 Express operations.
+            """);
+        waitForInputToContinue(scanner);
+        logger.info(DASHES);
+        logger.info("""
+            4. Create two buckets.
+            Now we will create a directory bucket which is the linchpin of the S3 Express One Zone service. Directory buckets
+            behave differently from regular S3 buckets which we will explore here. We'll also create a regular bucket, put
+            an object into the regular bucket, and copy it to the directory bucket.
+            """);
+
+        logger.info("""
+            Now, let's choose an availability zone (AZ) for the directory bucket.
+            We'll choose one that is supported.
+            """);
+        String zoneId;
+        String regularBucketName;
+        try {
+            zoneId = s3DirectoriesActions.selectAvailabilityZoneIdAsync().join();
+            regularBucketName = "reg-bucket-" + System.currentTimeMillis();
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof Ec2Exception) {
+                logger.error("EC2Exception occurred: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+            }
+            return;
+        }
+        logger.info("""
+            Now, let's create the actual directory bucket, as well as a regular bucket."
+            """);
+
+        // Directory bucket names must carry the AZ suffix in the form "--<zone-id>--x-s3".
+        String directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zoneId + "--x-s3";
+        try {
+            s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zoneId).join();
+            logger.info("Created directory bucket {}", directoryBucketName);
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof BucketAlreadyExistsException) {
+                // An existing bucket is acceptable for this scenario; continue.
+                logger.error("The bucket already exists. Moving on: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+                return;
+            }
+        }
+
+        // Assign to the data member.
+        mdirectoryBucketName = directoryBucketName;
+        try {
+            s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName).join();
+            logger.info("Created regular bucket {} ", regularBucketName);
+            mregularBucketName = regularBucketName;
+        } catch (CompletionException ce) {
+            Throwable cause = ce.getCause();
+            if (cause instanceof BucketAlreadyExistsException) {
+                logger.error("The bucket already exists. Moving on: {}", cause.getMessage(), ce);
+            } else {
+                logger.error("An unexpected error occurred: {}", cause.getMessage(), ce);
+                return;
+            }
+        }
+        logger.info("Great! Both buckets were created.");
+        waitForInputToContinue(scanner);
+    }
+
+    /**
+     * Creates an asynchronous S3 client with the specified access key and secret access key.
+     *
+     * @param accessKeyId the AWS access key ID
+     * @param secretAccessKey the AWS secret access key
+     * @return a {@link CompletableFuture} that completes with the configured {@link S3AsyncClient}
+     * @throws IllegalArgumentException (wrapped in a CompletionException) if either
+     *         credential value is null or blank
+     */
+    public static CompletableFuture<S3AsyncClient> createS3ClientWithAccessKeyAsync(String accessKeyId, String secretAccessKey) {
+        return CompletableFuture.supplyAsync(() -> {
+            // Validate input parameters before building the client.
+            if (accessKeyId == null || accessKeyId.isBlank() || secretAccessKey == null || secretAccessKey.isBlank()) {
+                throw new IllegalArgumentException("Access Key ID and Secret Access Key must not be null or empty");
+            }
+
+            AwsBasicCredentials awsCredentials = AwsBasicCredentials.create(accessKeyId, secretAccessKey);
+            return S3AsyncClient.builder()
+                    .credentialsProvider(StaticCredentialsProvider.create(awsCredentials))
+                    .region(Region.US_WEST_2)
+                    .build();
+        });
+    }
+
+    /**
+     * Blocks until the user types 'c' (case-insensitive) followed by ENTER.
+     * Any other input re-prompts.
+     *
+     * @param scanner the scanner to read console input from
+     */
+    private static void waitForInputToContinue(Scanner scanner) {
+        while (true) {
+            logger.info("");
+            logger.info("Enter 'c' followed by <ENTER> to continue:");
+            String input = scanner.nextLine();
+
+            if (input.trim().equalsIgnoreCase("c")) {
+                logger.info("Continuing with the program...");
+                logger.info("");
+                break;
+            } else {
+                logger.info("Invalid input. Please try again.");
+            }
+        }
+    }
+}
+// snippet-end:[s3.java2.directories.scenario.main]
\ No newline at end of file
diff --git a/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java b/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java
new file mode 100644
index 00000000000..98ca4860e3a
--- /dev/null
+++ b/javav2/example_code/s3/src/main/java/com/example/s3/express/UserNames.java
@@ -0,0 +1,20 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.example.s3.express;
+
+/**
+ * Simple value holder for the two IAM user names used by the S3 Express scenario:
+ * the "express" user (directory-bucket permissions) and the "regular" user
+ * (standard-bucket permissions). Both fields default to null until set.
+ */
+public class UserNames {
+    private String expressUserName;
+    private String regularUserName;
+
+    /** Sets the name of the IAM user with S3 Express permissions. */
+    public void setExpressUserName(String expressUserName) {
+        this.expressUserName = expressUserName;
+    }
+
+    /** Returns the name of the IAM user with S3 Express permissions, or null if unset. */
+    public String getExpressUserName() {
+        return expressUserName;
+    }
+
+    /** Sets the name of the IAM user with regular S3 permissions. */
+    public void setRegularUserName(String regularUserName) {
+        this.regularUserName = regularUserName;
+    }
+
+    /** Returns the name of the IAM user with regular S3 permissions, or null if unset. */
+    public String getRegularUserName() {
+        return regularUserName;
+    }
+}
diff --git a/javav2/example_code/s3/src/main/resources/log4j2.xml b/javav2/example_code/s3/src/main/resources/log4j2.xml
index 2329c9d3615..32a31484ec9 100644
--- a/javav2/example_code/s3/src/main/resources/log4j2.xml
+++ b/javav2/example_code/s3/src/main/resources/log4j2.xml
@@ -1,7 +1,7 @@
-
+
diff --git a/javav2/example_code/s3/src/main/resources/s3_express_template.yaml b/javav2/example_code/s3/src/main/resources/s3_express_template.yaml
new file mode 100644
index 00000000000..be510ae8212
--- /dev/null
+++ b/javav2/example_code/s3/src/main/resources/s3_express_template.yaml
@@ -0,0 +1,51 @@
+Resources:
+  RegularUser:
+    Type: AWS::IAM::User
+  ExpressUser:
+    Type: AWS::IAM::User
+  # Grants the ExpressUser the directory-bucket (S3 Express One Zone) operations
+  # used by the scenario, plus the s3 read actions needed to copy from the
+  # regular bucket.
+  ExpressPolicy:
+    Type: AWS::IAM::Policy
+    Properties:
+      PolicyName: AllowExpressOperations
+      PolicyDocument:
+        Statement:
+          - Effect: Allow
+            Action:
+              - "s3express:CreateBucket"
+              - "s3express:CreateSession"
+              - "s3express:CopyObject"
+              - "s3express:GetObject"
+              - "s3express:PutObject"
+              - "s3express:ListObjects"
+              - "s3express:DeleteObjects"
+              - "s3express:DeleteObject"
+              - "s3express:DeleteBucket"
+              - "s3:GetObject"
+              - "s3:CopyObject"
+            Resource: "*"
+            Users:
+              - !Ref ExpressUser
+  # Grants the RegularUser only standard S3 bucket/object operations.
+  # Service prefixes normalized to lowercase "s3:" for consistency.
+  RegularPolicy:
+    Type: AWS::IAM::Policy
+    Properties:
+      PolicyName: AllowRegularOperations
+      PolicyDocument:
+        Statement:
+          - Effect: Allow
+            Action:
+              - "s3:CreateBucket"
+              - "s3:PutObject"
+              - "s3:GetObject"
+              - "s3:ListObjects"
+              - "s3:DeleteObjects"
+              - "s3:DeleteObject"
+              - "s3:ListBucket"
+              - "s3:DeleteBucket"
+            Resource: "*"
+            Users:
+              - !Ref RegularUser
+Outputs:
+  RegularUser:
+    Value: !Ref RegularUser
+  ExpressUser:
+    Value: !Ref ExpressUser
diff --git a/javav2/example_code/s3/src/test/java/S3ExpressTests.java b/javav2/example_code/s3/src/test/java/S3ExpressTests.java
new file mode 100644
index 00000000000..18f11c316a8
--- /dev/null
+++ b/javav2/example_code/s3/src/test/java/S3ExpressTests.java
@@ -0,0 +1,193 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+import com.example.s3.express.S3DirectoriesActions;
+import com.example.s3.express.S3DirectoriesScenario;
+import com.example.s3.express.UserNames;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.waiters.WaiterResponse;
+import software.amazon.awssdk.services.iam.model.CreateAccessKeyResponse;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.CreateBucketResponse;
+import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+
+import static com.example.s3.express.S3DirectoriesScenario.createS3ClientWithAccessKeyAsync;
+import static com.example.s3.express.S3DirectoriesScenario.createUsersUsingCDK;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+public class S3ExpressTests {
+
+ private static S3AsyncClient mS3RegularClient;
+ private static S3AsyncClient mS3ExpressClient;
+
+ private static String regularUser = "";
+
+ private static String expressUser = "";
+
+ private static String regularBucketName = "";
+ private static String directoryBucketName = "";
+
+ private static String bucketObject = "basic-text-object.txt";
+ private static final S3DirectoriesActions s3DirectoriesActions = new S3DirectoriesActions();
+
+ private static String stackName = "cfn-stack-s3-express-basics--" + UUID.randomUUID();
+
+ private static final Logger logger = LoggerFactory.getLogger(S3ExpressTests.class);
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(1)
+ public void testSetUp() throws IOException {
+ assertDoesNotThrow(() -> {
+ // Retrieve user names from CDK stack outputs
+ Map stackOutputs = createUsersUsingCDK(stackName);
+ regularUser = stackOutputs.get("RegularUser");
+ expressUser = stackOutputs.get("ExpressUser");
+
+ assertNotNull(regularUser, "Regular user should not be null");
+ assertNotNull(expressUser, "Express user should not be null");
+
+ // Store the user names in a UserNames object
+ UserNames names = new UserNames();
+ names.setRegularUserName(regularUser);
+ names.setExpressUserName(expressUser);
+
+ // Create access keys for both users asynchronously
+ CreateAccessKeyResponse keyResponseRegular = s3DirectoriesActions.createAccessKeyAsync(regularUser).join();
+ CreateAccessKeyResponse keyResponseExpress = s3DirectoriesActions.createAccessKeyAsync(expressUser).join();
+
+ assertNotNull(keyResponseRegular.accessKey(), "Access key for Regular User should not be null");
+ assertNotNull(keyResponseExpress.accessKey(), "Access key for Express User should not be null");
+
+ // Extract access keys
+ String accessKeyIdForRegUser = keyResponseRegular.accessKey().accessKeyId();
+ String secretAccessForRegUser = keyResponseRegular.accessKey().secretAccessKey();
+
+ String accessKeyIdForExpressUser = keyResponseExpress.accessKey().accessKeyId();
+ String secretAccessForExpressUser = keyResponseExpress.accessKey().secretAccessKey();
+
+ // Ensure keys are valid
+ assertNotNull(accessKeyIdForRegUser, "Access Key ID for Regular User should not be null");
+ assertNotNull(secretAccessForRegUser, "Secret Access Key for Regular User should not be null");
+ assertNotNull(accessKeyIdForExpressUser, "Access Key ID for Express User should not be null");
+ assertNotNull(secretAccessForExpressUser, "Secret Access Key for Express User should not be null");
+
+ // Create S3 clients asynchronously
+ mS3RegularClient = createS3ClientWithAccessKeyAsync(accessKeyIdForRegUser, secretAccessForRegUser).join();
+ mS3ExpressClient = createS3ClientWithAccessKeyAsync(accessKeyIdForExpressUser, secretAccessForExpressUser).join();
+
+ assertNotNull(mS3RegularClient, "S3 client for Regular User should not be null");
+ assertNotNull(mS3ExpressClient, "S3 client for Express User should not be null");
+ });
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(2)
+ public void createBuckets() throws InterruptedException {
+ Thread.sleep(30000);
+ assertDoesNotThrow(() -> {
+ String zoneId = "usw2-az1";
+
+ // Generate bucket names
+ regularBucketName = "reg-bucket-" + System.currentTimeMillis();
+ directoryBucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zoneId + "--x-s3";
+
+ // Validate bucket names
+ assertNotNull(regularBucketName, "Regular bucket name should not be null");
+ assertNotNull(directoryBucketName, "Directory bucket name should not be null");
+
+ // Create the regular bucket asynchronously
+ CompletableFuture> regularBucketFuture = s3DirectoriesActions.createBucketAsync(mS3RegularClient, regularBucketName);
+
+ // Create the directory bucket asynchronously
+ CompletableFuture directoryBucketFuture = s3DirectoriesActions.createDirectoryBucketAsync(mS3ExpressClient, directoryBucketName, zoneId);
+
+ // Wait for both operations to complete
+ CompletableFuture.allOf(regularBucketFuture, directoryBucketFuture).join();
+
+ });
+ }
+
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(3)
+ public void createSessionAddObjectTest() {
+ assertDoesNotThrow(() -> {
+ s3DirectoriesActions.putObjectAsync(mS3RegularClient, regularBucketName, bucketObject, "Look Ma, I'm a bucket!").join();
+ s3DirectoriesActions.createSessionAsync(mS3ExpressClient, directoryBucketName).join();
+ s3DirectoriesActions.copyObjectAsync(mS3ExpressClient, regularBucketName, bucketObject, directoryBucketName, bucketObject).join();
+ });
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(4)
+ public void demonstratePerformance() {
+ assertDoesNotThrow(() -> {
+ int downloads = 300;
+ long directoryTimeStart = System.nanoTime();
+ for (int index = 0; index < downloads; index++) {
+ if (index % 50 == 0) {
+ System.out.println("Download " + index + " of " + downloads);
+ }
+
+
+ // Get the object from the directory bucket.
+ s3DirectoriesActions.getObjectAsync(mS3ExpressClient, directoryBucketName, bucketObject).join();
+ }
+
+ long directoryTimeDifference = System.nanoTime() - directoryTimeStart;
+
+ // Download from the regular bucket.
+ System.out.println("Downloading from the regular bucket.");
+ long normalTimeStart = System.nanoTime();
+ for (int index = 0; index < downloads; index++) {
+ if (index % 50 == 0) {
+ System.out.println("Download " + index + " of " + downloads);
+ }
+
+ s3DirectoriesActions.getObjectAsync(mS3RegularClient, regularBucketName, bucketObject).join();
+
+ }
+
+ long normalTimeDifference = System.nanoTime() - normalTimeStart;
+ System.out.println("The directory bucket took " + directoryTimeDifference + " nanoseconds, while the regular bucket took " + normalTimeDifference + " nanoseconds.");
+ long difference = normalTimeDifference - directoryTimeDifference;
+ System.out.println("That's a difference of " + difference + " nanoseconds, or");
+ System.out.println(difference / 1_000_000_000.0 + " seconds.");
+
+ if (difference < 0) {
+ System.out.println("The directory buckets were slower. This can happen if you are not running on the cloud within a VPC.");
+ }
+ });
+ }
+
+ @Test
+ @Tag("IntegrationTest")
+ @Order(5)
+ public void testCleanup() {
+ assertDoesNotThrow(() -> {
+ s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3ExpressClient, directoryBucketName).join();
+ s3DirectoriesActions.deleteBucketAndObjectsAsync(mS3RegularClient, regularBucketName).join();
+ S3DirectoriesScenario.cleanUp(stackName);
+ });
+ }
+}
+