diff --git a/constants.js b/constants.js index 46bda9c4de..f00f349a7f 100644 --- a/constants.js +++ b/constants.js @@ -222,6 +222,7 @@ const constants = { 'isNull', 'isDeleteMarker', 'x-amz-meta-scal-version-id', + 'bucketOwnerId', ], unsupportedSignatureChecksums: new Set([ 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER', diff --git a/lib/api/apiUtils/object/createAndStoreObject.js b/lib/api/apiUtils/object/createAndStoreObject.js index a8f2137cd2..7b6367896d 100644 --- a/lib/api/apiUtils/object/createAndStoreObject.js +++ b/lib/api/apiUtils/object/createAndStoreObject.js @@ -194,6 +194,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, metadataStoreParams.oldReplayId = objMD.uploadId; } + if (authInfo.getCanonicalID() !== bucketMD.getOwner()) { + metadataStoreParams.bucketOwnerId = bucketMD.getOwner(); + } + if (isPutVersion && location === bucketMD.getLocationConstraint() && bucketMD.isIngestionBucket()) { // When restoring to OOB bucket, we cannot force the versionId of the object written to the // backend, and it is thus not match the versionId of the ingested object. Thus we add extra diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index daaafeb5f0..6f3904b84f 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -379,6 +379,11 @@ function completeMultipartUpload(authInfo, request, log, callback) { masterKeyId: destBucket.getSseMasterKeyId(), }; } + + if (authInfo.getCanonicalID() !== destBucket.getOwner()) { + metaStoreParams.bucketOwnerId = destBucket.getOwner(); + } + // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. 
if (isPutVersion) { const options = overwritingVersioning(objMD, metaStoreParams); diff --git a/lib/api/objectCopy.js b/lib/api/objectCopy.js index 4c07b64f83..14ff119f55 100644 --- a/lib/api/objectCopy.js +++ b/lib/api/objectCopy.js @@ -189,6 +189,11 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, if (!storeMetadataParams.contentType) { storeMetadataParams.contentType = sourceObjMD['content-type']; } + + if (authInfo.getCanonicalID() !== destBucketMD.getOwner()) { + storeMetadataParams.bucketOwnerId = destBucketMD.getOwner(); + } + return { storeMetadataParams, sourceLocationConstraintName, backendInfoDest: backendInfoObjDest.backendInfo }; } diff --git a/lib/api/objectPutCopyPart.js b/lib/api/objectPutCopyPart.js index b2a0eb269c..6a82ff6a59 100644 --- a/lib/api/objectPutCopyPart.js +++ b/lib/api/objectPutCopyPart.js @@ -342,6 +342,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket, splitter: constants.splitter, lastModified, overheadField: constants.overheadField, + ownerId: destBucketMD.getOwner(), }; return services.metadataStorePart(mpuBucketName, locations, metaStoreParams, log, err => { diff --git a/lib/services.js b/lib/services.js index b93f5de70f..205a33dbf5 100644 --- a/lib/services.js +++ b/lib/services.js @@ -109,7 +109,7 @@ const services = { dataStoreName, creationTime, retentionMode, retentionDate, legalHold, originOp, updateMicroVersionId, archive, oldReplayId, deleteNullKey, amzStorageClass, overheadField, needOplogUpdate, - restoredEtag } = params; + restoredEtag, bucketOwnerId } = params; log.trace('storing object in metadata'); assert.strictEqual(typeof bucketName, 'string'); const md = new ObjectMD(); @@ -274,6 +274,10 @@ const services = { md.setAcl(params.acl); } + if (bucketOwnerId) { + md.setBucketOwnerId(bucketOwnerId); + } + log.trace('object metadata', { omVal: md.getValue() }); // If this is not the completion of a multipart upload or // the creation of a delete marker, parse the headers to 
@@ -763,7 +767,7 @@ const services = { metadataStorePart(mpuBucketName, partLocations, metaStoreParams, log, cb) { assert.strictEqual(typeof mpuBucketName, 'string'); - const { partNumber, contentMD5, size, uploadId, lastModified, splitter, overheadField } + const { partNumber, contentMD5, size, uploadId, lastModified, splitter, overheadField, ownerId } = metaStoreParams; const dateModified = typeof lastModified === 'string' ? lastModified : new Date().toJSON(); @@ -778,6 +782,7 @@ const services = { 'last-modified': dateModified, 'content-md5': contentMD5, 'content-length': size, + 'owner-id': ownerId, }; const params = {}; diff --git a/tests/unit/DummyRequest.js b/tests/unit/DummyRequest.js index 28b21337eb..6189fb8eb0 100644 --- a/tests/unit/DummyRequest.js +++ b/tests/unit/DummyRequest.js @@ -26,6 +26,11 @@ class DummyRequest extends http.IncomingMessage { } this.push(null); } + + _destroy(err, cb) { + // this is a no-op + cb(); + } } module.exports = DummyRequest; diff --git a/tests/unit/api/multipartUpload.js b/tests/unit/api/multipartUpload.js index af46c3fbca..574b3edf36 100644 --- a/tests/unit/api/multipartUpload.js +++ b/tests/unit/api/multipartUpload.js @@ -38,6 +38,7 @@ const log = new DummyRequestLogger(); const splitter = constants.splitter; const canonicalID = 'accessKey1'; const authInfo = makeAuthInfo(canonicalID); +const authInfoOtherAcc = makeAuthInfo('accessKey2'); const namespace = 'default'; const bucketName = 'bucketname'; const lockedBucket = 'objectlockenabledbucket'; @@ -2661,9 +2662,14 @@ describe('complete mpu with bucket policy', () => { beforeEach(done => { cleanup(); + sinon.spy(metadataswitch, 'putObjectMD'); bucketPut(authInfo, bucketPutRequest, log, done); }); + afterEach(() => { + sinon.restore(); + }); + it('should complete with a deny on unrelated object as non root', done => { const bucketPutPolicyRequest = getPolicyRequest({ Version: '2012-10-17', @@ -2730,6 +2736,96 @@ describe('complete mpu with bucket policy', () => { 
done(); }); }); + + it('should set bucketOwnerId if requester is not destination bucket owner', done => { + const partBody = Buffer.from('I am a part\n', 'utf8'); + const bucketPutPolicyRequest = getPolicyRequest({ + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Principal: { AWS: `arn:aws:iam::${authInfoOtherAcc.shortid}:root` }, + Action: ['s3:*'], + Resource: `arn:aws:s3:::${bucketName}/*`, + }, + ], + }); + async.waterfall([ + next => bucketPutPolicy(authInfo, bucketPutPolicyRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfoOtherAcc, + initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = + json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest({ + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + // Note that the body of the post set in the request here does + // not really matter in this test. + // The put is not going through the route so the md5 is being + // calculated above and manually being set in the request below. + // What is being tested is that the calculatedHash being sent + // to the API for the part is stored and then used to + // calculate the final ETag upon completion + // of the multipart upload. 
+ calculatedHash, + socket: { + remoteAddress: '1.1.1.1', + }, + }, partBody); + objectPutPart(authInfoOtherAcc, partRequest, undefined, log, err => { + assert.ifError(err); + const completeBody = '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + socket: { + remoteAddress: '1.1.1.1', + }, + }; + completeMultipartUpload(authInfoOtherAcc, + completeRequest, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadataswitch.putObjectMD.lastCall, + bucketName, + objectKey, + sinon.match({ bucketOwnerId: authInfo.canonicalID }), + sinon.match.any, + sinon.match.any, + sinon.match.any + ); + done(); + }); + }); + }); + }); }); describe('multipart upload in ingestion bucket', () => { diff --git a/tests/unit/api/objectCopy.js b/tests/unit/api/objectCopy.js index 93376cc8f6..a0ab4a636c 100644 --- a/tests/unit/api/objectCopy.js +++ b/tests/unit/api/objectCopy.js @@ -5,6 +5,7 @@ const sinon = require('sinon'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutVersioning = require('../../../lib/api/bucketPutVersioning'); +const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const objectPut = require('../../../lib/api/objectPut'); const objectCopy = require('../../../lib/api/objectCopy'); const DummyRequest = require('../DummyRequest'); @@ -45,6 +46,7 @@ function _createObjectCopyRequest(destBucketName) { objectKey, headers: {}, url: `/${destBucketName}/${objectKey}`, + socket: {}, }; return new DummyRequest(params); } @@ -68,6 +70,7 @@ describe('objectCopy with versioning', () => { before(done => { cleanup(); + sinon.spy(metadata, 'putObjectMD'); async.series([ callback => bucketPut(authInfo, putDestBucketRequest, log, 
callback), @@ -96,7 +99,10 @@ describe('objectCopy with versioning', () => { }); }); - after(() => cleanup()); + after(() => { + metadata.putObjectMD.restore(); + cleanup(); + }); it('should delete null version when creating new null version, ' + 'even when null version is not the latest version', done => { @@ -125,6 +131,94 @@ describe('objectCopy with versioning', () => { }); }); }); + + it('should not set bucketOwnerId if requesting account owns dest bucket', done => { + const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, + undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + sinon.match({ _data: { bucketOwnerId: sinon.match.typeOf('undefined') }}), + sinon.match.any, + sinon.match.any, + sinon.match.any + ); + done(); + }); + }); + + // TODO: S3C-9965 + // Skipped because the policy is not checked correctly + // When source bucket policy is checked destination arn is used + it.skip('should set bucketOwnerId if requesting account differs from dest bucket owner', done => { + const authInfo2 = makeAuthInfo('accessKey2'); + const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); + const testPutSrcPolicyRequest = new DummyRequest({ + bucketName: sourceBucketName, + namespace, + headers: { host: `${sourceBucketName}.s3.amazonaws.com` }, + url: '/', + socket: {}, + post: JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Sid: 'AllowCrossAccountRead', + Effect: 'Allow', + Principal: { AWS: `arn:aws:iam::${authInfo2.shortid}:root` }, + Action: ['s3:GetObject'], + Resource: [ + `arn:aws:s3:::${sourceBucketName}/*` + ], + }, + ], + }), + }); + const testPutDestPolicyRequest = new DummyRequest({ + bucketName: destBucketName, + namespace, + headers: { host: `${destBucketName}.s3.amazonaws.com` }, + url: '/', + socket: {}, + post: JSON.stringify({ + Version: 
'2012-10-17', + Statement: [ + { + Sid: 'AllowCrossAccountWrite', + Effect: 'Allow', + Principal: { AWS: `arn:aws:iam::${authInfo2.shortid}:root` }, + Action: ['s3:PutObject'], + Resource: [ + `arn:aws:s3:::${destBucketName}/*` + ], + }, + ], + }), + }); + bucketPutPolicy(authInfo, testPutSrcPolicyRequest, log, err => { + assert.ifError(err); + bucketPutPolicy(authInfo, testPutDestPolicyRequest, log, err => { + assert.ifError(err); + objectCopy(authInfo2, testObjectCopyRequest, sourceBucketName, objectKey, + undefined, log, err => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + sinon.match({_data: { bucketOwnerId: authInfo.canonicalID }}), + sinon.match.any, + sinon.match.any, + sinon.match.any + ); + assert.ifError(err); + done(); + }); + }); + }); + }); }); describe('non-versioned objectCopy', () => { diff --git a/tests/unit/api/objectCopyPart.js b/tests/unit/api/objectCopyPart.js index 88df569abc..46c95a0452 100644 --- a/tests/unit/api/objectCopyPart.js +++ b/tests/unit/api/objectCopyPart.js @@ -137,4 +137,21 @@ describe('objectCopyPart', () => { done(); }); }); + + it('should set owner-id to the canonicalId of the dest bucket owner', done => { + const testObjectCopyRequest = _createObjectCopyPartRequest(destBucketName, uploadId); + objectPutCopyPart(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadataswitch.putObjectMD.lastCall, + sinon.match.string, // MPU shadow bucket + `${uploadId}..|..00001`, + sinon.match({ 'owner-id': authInfo.canonicalID }), + sinon.match.any, + sinon.match.any, + sinon.match.any + ); + done(); + }); + }); }); diff --git a/tests/unit/api/objectPut.js b/tests/unit/api/objectPut.js index 590c283b8f..88783d0acc 100644 --- a/tests/unit/api/objectPut.js +++ b/tests/unit/api/objectPut.js @@ -8,6 +8,7 @@ const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutObjectLock = 
require('../../../lib/api/bucketPutObjectLock'); const bucketPutACL = require('../../../lib/api/bucketPutACL'); const bucketPutVersioning = require('../../../lib/api/bucketPutVersioning'); +const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const { parseTagFromQuery } = s3middleware.tagging; const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } = require('../helpers'); @@ -582,7 +583,7 @@ describe('objectPut API', () => { data.implName = 'multipleBackends'; const originalPut = data.client.put; - data.client.put = (hashedStream, valueSize, keyContext, backendInfo, log, cb) => + data.client.put = (hashedStream, valueSize, keyContext, backendInfo, log, cb) => cb({ httpCode: 408 }); bucketPut(authInfo, testPutBucketRequest, log, () => { objectPut(authInfo, testPutObjectRequest, undefined, log, @@ -762,6 +763,86 @@ describe('objectPut API', () => { }, ], done); }); + + it('should not set bucketOwnerId if requester owns the bucket', done => { + const testPutObjectRequest = new DummyRequest({ + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', + }, postBody); + + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, + err => { + assert.ifError(err); + sinon.assert.calledWith(metadata.putObjectMD.lastCall, + bucketName, + objectName, + sinon.match({ bucketOwnerId: sinon.match.typeOf('undefined') }), + any, + any, + any + ); + done(); + } + ); + }); + }); + + it('should set bucketOwnerId if requester does not own the bucket', done => { + const authInfo2 = makeAuthInfo('accessKey2'); + + const testPutObjectRequest = new DummyRequest({ + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', + }, postBody); + + const testPutPolicyRequest = new DummyRequest({ + bucketName, + namespace, + 
headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: '/', + post: JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Sid: 'AllowCrossAccountReadWrite', + Effect: 'Allow', + Principal: { AWS: `arn:aws:iam::${authInfo2.shortid}:root` }, + Action: ['s3:PutObject'], + Resource: [`arn:aws:s3:::${bucketName}/*`], + }, + ], + }), + }); + + bucketPut(authInfo, testPutBucketRequest, log, () => { + bucketPutPolicy(authInfo, testPutPolicyRequest, log, err => { + assert.ifError(err); + objectPut(authInfo2, testPutObjectRequest, undefined, log, + err => { + assert.ifError(err); + sinon.assert.calledWith(metadata.putObjectMD.lastCall, + bucketName, + objectName, + sinon.match({ bucketOwnerId: authInfo.canonicalID }), + any, + any, + any + ); + done(); + } + ); + }); + }); + }); }); describe('objectPut API with versioning', () => {