diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 208a51f93a..619afeff39 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -14,7 +14,7 @@ env:
 
 jobs:
   build-federation-image:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 7e505a70c0..62289c6e1d 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -149,7 +149,7 @@ jobs:
     if: always()
 
   build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       contents: read
       packages: write
@@ -189,7 +189,21 @@ jobs:
             git.commit-sha=${{ github.sha }}
           cache-from: type=gha,scope=pykmip
           cache-to: type=gha,mode=max,scope=pykmip
-      - name: Build and push MongoDB
+
+  build-federation-image:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to GitHub Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ github.token }}
+      - name: Build and push image for federation
         uses: docker/build-push-action@v5
         with:
           push: true
diff --git a/lib/api/apiUtils/object/abortMultipartUpload.js b/lib/api/apiUtils/object/abortMultipartUpload.js
index 3ccbbc3e4f..40e1f00c5f 100644
--- a/lib/api/apiUtils/object/abortMultipartUpload.js
+++ b/lib/api/apiUtils/object/abortMultipartUpload.js
@@ -126,7 +126,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                 return next(null, mpuBucket, storedParts, destBucket);
             }
 
-            if (objectMD?.location) {
+            if (objectMD && objectMD.location && objectMD.uploadId === metadataValMPUparams.uploadId) {
                 const existingLocations = new Set(locations.map(loc => loc.key));
                 const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key));
                 locations.push(...remainingObjectLocations);
diff --git a/package.json b/package.json
index 7d686b53ee..99c0c3bf93 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@zenko/cloudserver",
-  "version": "8.8.45",
+  "version": "8.8.45.1",
   "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
   "main": "index.js",
   "engines": {
diff --git a/tests/functional/aws-node-sdk/test/object/abortMPU.js b/tests/functional/aws-node-sdk/test/object/abortMPU.js
index bec3ba4f40..787d9d1304 100644
--- a/tests/functional/aws-node-sdk/test/object/abortMPU.js
+++ b/tests/functional/aws-node-sdk/test/object/abortMPU.js
@@ -2,6 +2,7 @@ const assert = require('assert');
 const { v4: uuidv4 } = require('uuid');
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
+const async = require('async');
 
 const date = Date.now();
 const bucket = `abortmpu${date}`;
@@ -64,6 +65,200 @@ describe('Abort MPU', () => {
     });
 });
 
+describe('Abort MPU with existing object', function AbortMPUExistingObject() {
+    this.timeout(60000);
+
+    withV4(sigCfg => {
+        let bucketUtil;
+        let s3;
+        const bucketName = `abortmpu-test-bucket-${Date.now()}`;
+        const objectKey = 'my-object';
+
+        beforeEach(done => {
+            bucketUtil = new BucketUtility('default', sigCfg);
+            s3 = bucketUtil.s3;
+            s3.createBucket({ Bucket: bucketName }, err => {
+                assert.ifError(err, `Error creating bucket: ${err}`);
+                done();
+            });
+        });
+
+        afterEach(async () => {
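+            // Abort any multipart uploads left open by the test before emptying and deleting the bucket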
+            const data = await s3.listMultipartUploads({ Bucket: bucketName }).promise();
+            const uploads = data.Uploads;
+            await Promise.all(uploads.map(async upload => {
+                try {
+                    await s3.abortMultipartUpload({
+                        Bucket: bucketName,
+                        Key: upload.Key,
+                        UploadId: upload.UploadId,
+                    }).promise();
+                } catch (err) {
+                    if (err.code !== 'NoSuchUpload') {
+                        throw err;
+                    }
+                    // If NoSuchUpload, swallow error
+                }
+            }));
+            await bucketUtil.empty(bucketName);
+            await bucketUtil.deleteOne(bucketName);
+        });
+
+        it('should not delete existing object data when aborting another MPU for same key', done => {
+            const part1 = Buffer.from('I am part 1 of MPU 1');
+            const part2 = Buffer.from('I am part 1 of MPU 2');
+            let uploadId1;
+            let uploadId2;
+            let etag1;
+            async.waterfall([
+                next => {
+                    s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 1: ${err}`);
+                        uploadId1 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId1,
+                            Body: part1,
+                        }, (err, data) => {
+                            assert.ifError(err, `error uploading part for MPU 1: ${err}`);
+                            etag1 = data.ETag;
+                            s3.completeMultipartUpload({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                                UploadId: uploadId1,
+                                MultipartUpload: { Parts: [{ ETag: etag1, PartNumber: 1 }] },
+                            }, err => {
+                                assert.ifError(err, `error completing MPU 1: ${err}`);
+                                next();
+                            });
+                        });
+                    });
+                },
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error getting object after MPU 1: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part1.toString());
+                        next();
+                    });
+                },
+                next => {
+                    s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 2: ${err}`);
+                        uploadId2 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId2,
+                            Body: part2,
+                        }, err => {
+                            assert.ifError(err, `error uploading part for MPU 2: ${err}`);
+                            next();
+                        });
+                    });
+                },
+                next => {
+                    s3.abortMultipartUpload({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2 }, err => {
+                        assert.ifError(err, `error aborting MPU 2: ${err}`);
+                        next();
+                    });
+                },
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error getting object after aborting MPU 2: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part1.toString());
+                        next();
+                    });
+                },
+            ], done);
+        });
+
+        it('should not delete existing object data when aborting an old MPU for same key', done => {
+            const part1 = Buffer.from('I am part 1 of MPU 1');
+            const part2 = Buffer.from('I am part 1 of MPU 2');
+            let uploadId1;
+            let uploadId2;
+            let etag2;
+            async.waterfall([
+                next => {
+                    s3.createMultipartUpload({
+                        Bucket: bucketName, Key: objectKey,
+                    }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 1: ${err}`);
+                        uploadId1 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId1,
+                            Body: part1,
+                        }, err => {
+                            assert.ifError(err, `error uploading part for MPU 1: ${err}`);
+                            next();
+                        });
+                    });
+                },
+                next => {
+                    s3.createMultipartUpload({
+                        Bucket: bucketName, Key: objectKey,
+                    }, (err, data) => {
+                        assert.ifError(err, `error creating MPU 2: ${err}`);
+                        uploadId2 = data.UploadId;
+                        s3.uploadPart({
+                            Bucket: bucketName,
+                            Key: objectKey,
+                            PartNumber: 1,
+                            UploadId: uploadId2,
+                            Body: part2,
+                        }, (err, data) => {
+                            assert.ifError(err, `error uploading part for MPU 2: ${err}`);
+                            etag2 = data.ETag;
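+                            // Complete MPU 2 so its data becomes the object's current content before MPU 1 is aborted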
+                            s3.completeMultipartUpload({
+                                Bucket: bucketName,
+                                Key: objectKey,
+                                UploadId: uploadId2,
+                                MultipartUpload: { Parts: [{ ETag: etag2, PartNumber: 1 }] },
+                            }, err => {
+                                assert.ifError(err, `error completing MPU 2: ${err}`);
+                                next();
+                            });
+                        });
+                    });
+                },
+                next => {
+                    s3.getObject({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                    }, (err, data) => {
+                        assert.ifError(err, `error getting object after MPU 2: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part2.toString());
+                        next();
+                    });
+                },
+                next => {
+                    s3.abortMultipartUpload({
+                        Bucket: bucketName,
+                        Key: objectKey,
+                        UploadId: uploadId1,
+                    }, err => {
+                        assert.ifError(err, `error aborting MPU 1: ${err}`);
+                        next();
+                    });
+                },
+                next => {
+                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
+                        assert.ifError(err, `error getting object after aborting MPU 1: ${err}`);
+                        assert.strictEqual(data.Body.toString(), part2.toString());
+                        next();
+                    });
+                },
+            ], done);
+        });
+    });
+});
+
 describe('Abort MPU - No Such Upload', () => {
     withV4(sigCfg => {
         let bucketUtil;