|
| 1 | +const assert = require('assert'); |
| 2 | +const async = require('async'); |
| 3 | +const util = require('util'); |
| 4 | +const arsenal = require('arsenal'); |
| 5 | +const { |
| 6 | + HeadBucketCommand, |
| 7 | + ListObjectsCommand, |
| 8 | + CreateBucketCommand, |
| 9 | + DeleteBucketCommand, |
| 10 | + PutObjectCommand, |
| 11 | + DeleteObjectCommand, |
| 12 | +} = require('@aws-sdk/client-s3'); |
| 13 | +const { GCP } = arsenal.storage.data.external.GCP; |
| 14 | +const { genUniqID, genBucketName, gcpRetry } = require('../../../utils/gcpUtils'); |
| 15 | +const { getRealAwsConfig } = |
| 16 | + require('../../../../aws-node-sdk/test/support/awsConfig'); |
| 17 | +const { listingHardLimit } = require('../../../../../../constants'); |
| 18 | + |
// Name of the credential profile used for the GCP backend tests.
const credentialOne = 'gcpbackend';
// Resolves an AWS-style config (endpoint/credentials) for that profile —
// presumably from local test support config; see aws-node-sdk/test/support.
const config = getRealAwsConfig(credentialOne);
// S3-compatible client for Google Cloud Storage (arsenal's external GCP data backend).
const gcpClient = new GCP(config);
| 22 | + |
describe('GCP: Bucket', function testSuite() {
    // Generous timeout: every test hits the real GCP endpoint over the network.
    this.timeout(180000);

    const bucketName = genBucketName('bucket');

    // Create one dedicated bucket for the whole suite; gcpRetry wraps the
    // send to ride out transient backend failures.
    before(async () => {
        process.stdout.write(`Creating test bucket ${bucketName}\n`);
        await gcpRetry(gcpClient, new CreateBucketCommand({ Bucket: bucketName }));
    });

    after(async () => {
        await gcpRetry(gcpClient, new DeleteBucketCommand({ Bucket: bucketName }));
    });

    describe('HEAD Bucket', () => {
        it('should return 404 for non-existing bucket', async () => {
            const badBucketName = `cldsrvci-bucket-${genUniqID()}`;
            // assert.rejects both requires the call to fail and validates the
            // error. The previous try/catch put assert.fail inside the try, so
            // an unexpected success surfaced as a confusing secondary failure
            // on err.$metadata instead of the real "expected 404" message.
            await assert.rejects(
                gcpClient.send(new HeadBucketCommand({ Bucket: badBucketName })),
                err => {
                    assert.strictEqual(err.$metadata?.httpStatusCode, 404);
                    // The SDK reports HEAD-bucket 404s as 'NotFound'; normalize
                    // it to the S3 error name asserted here.
                    const errorName = err.name === 'NotFound' ? 'NoSuchBucket' : err.name;
                    assert.strictEqual(errorName, 'NoSuchBucket');
                    return true;
                });
        });

        it('should return 200 and bucket metadata', async () => {
            // Need to use the helper headBucket function for middleware with MetaVersionId
            const res = await util.promisify(gcpClient.headBucket.bind(gcpClient))({ Bucket: bucketName });
            const { $metadata, ...data } = res;
            assert.strictEqual($metadata?.httpStatusCode, 200);
            // Ensure MetaVersionId is present and non-empty
            assert.ok(
                typeof data.MetaVersionId === 'string'
                && data.MetaVersionId.length > 0
            );
        });
    });

    describe('GET Bucket (List Objects)', () => {
        const smallSize = 20;
        const bigSize = listingHardLimit + 1;

        /**
         * Sends one SDK command per key against the test bucket, 10 at a time.
         * async.mapLimit returns a promise when no callback is given (async v3).
         * @param {string[]} keys - object keys to operate on
         * @param {function(string): object} makeCommand - builds the command for one key
         * @returns {Promise<void>} resolves once every command has succeeded
         */
        async function forEachKey(keys, makeCommand) {
            await async.mapLimit(keys, 10, async key => gcpClient.send(makeCommand(key)));
        }

        // Shared before() helper: puts every key into the test bucket.
        // Logs the failure (as before) and rethrows so Mocha still fails the hook.
        async function populateBucket(createdObjects) {
            process.stdout.write(
                `Putting ${createdObjects.length} objects into bucket\n`);
            try {
                await forEachKey(createdObjects, key => new PutObjectCommand({
                    Bucket: bucketName,
                    Key: key,
                }));
            } catch (err) {
                process.stdout.write(`err putting objects ${err}\n`);
                throw err;
            }
        }

        // Shared after() helper: deletes every key from the test bucket.
        async function removeObjects(createdObjects) {
            process.stdout.write(
                `Deleting ${createdObjects.length} objects from bucket\n`);
            try {
                await forEachKey(createdObjects, key => new DeleteObjectCommand({
                    Bucket: bucketName,
                    Key: key,
                }));
            } catch (err) {
                process.stdout.write(`err deleting objects ${err}\n`);
                throw err;
            }
        }

        it('should return 200', async () => {
            const res = await gcpClient.send(
                new ListObjectsCommand({ Bucket: bucketName }));
            assert.strictEqual(res.$metadata?.httpStatusCode, 200);
        });

        describe('with less than listingHardLimit number of objects', () => {
            const createdObjects = Array.from(
                Array(smallSize).keys()).map(i => `someObject-${i}`);

            // Mocha awaits the returned promise in lieu of a done callback.
            before(() => populateBucket(createdObjects));
            after(() => removeObjects(createdObjects));

            it(`should list all ${smallSize} created objects`, async () => {
                const res = await gcpClient.send(
                    new ListObjectsCommand({ Bucket: bucketName }));
                assert.strictEqual(res.Contents.length, smallSize);
            });

            it('should list MaxKeys number of objects with MaxKeys at 10', async () => {
                const res = await gcpClient.send(new ListObjectsCommand({
                    Bucket: bucketName,
                    MaxKeys: 10,
                }));
                assert.strictEqual(res.Contents.length, 10);
            });
        });

        describe('with more than listingHardLimit number of objects', () => {
            const createdObjects = Array.from(
                Array(bigSize).keys()).map(i => `someObject-${i}`);

            before(() => populateBucket(createdObjects));
            after(() => removeObjects(createdObjects));

            it('should list at max 1000 of objects created', async () => {
                const res = await gcpClient.send(
                    new ListObjectsCommand({ Bucket: bucketName }));
                assert.strictEqual(res.Contents.length, listingHardLimit);
            });

            describe('with MaxKeys at 1001', () => {
                // TODO: S3C-5445
                // Note: this test is testing GCP behaviour, not the Cloudserver one.
                // It tests that GET https://<GCP_BUCKET_NAME>.storage.googleapis.com/?max-keys=1001
                // returns only the first 1000 objects.
                //
                // Expected behavior: the GCP XML API should not return a list longer
                // than 1000 objects, even if max-keys is greater than 1000:
                // https://cloud.google.com/storage/docs/xml-api/reference-headers#maxkeys
                //
                // Actual behavior: it returns a list longer than 1000 objects when
                // max-keys is greater than 1000
                it.skip('should list at max 1000, ignoring MaxKeys', async () => {
                    const res = await gcpClient.send(new ListObjectsCommand({
                        Bucket: bucketName,
                        MaxKeys: 1001,
                    }));
                    assert.strictEqual(res.Contents.length, listingHardLimit);
                });
            });
        });
    });
});