Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/release.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ env:

jobs:
build-federation-image:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
Expand Down
18 changes: 16 additions & 2 deletions .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ jobs:
if: always()

build:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
Expand Down Expand Up @@ -189,7 +189,21 @@ jobs:
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB

build-federation-image:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with:
push: true
Expand Down
2 changes: 1 addition & 1 deletion lib/api/apiUtils/object/abortMultipartUpload.js
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
return next(null, mpuBucket, storedParts, destBucket);
}

if (objectMD?.location) {
if (objectMD && objectMD.location && objectMD.uploadId === metadataValMPUparams.uploadId) {
const existingLocations = new Set(locations.map(loc => loc.key));
const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key));
locations.push(...remainingObjectLocations);
Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@zenko/cloudserver",
"version": "8.8.45",
"version": "8.8.45.1",
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
"main": "index.js",
"engines": {
Expand Down
195 changes: 195 additions & 0 deletions tests/functional/aws-node-sdk/test/object/abortMPU.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ const assert = require('assert');
const { v4: uuidv4 } = require('uuid');
const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util');
const async = require('async');

const date = Date.now();
const bucket = `abortmpu${date}`;
Expand Down Expand Up @@ -64,6 +65,200 @@ describe('Abort MPU', () => {
});
});

describe('Abort MPU with existing object', function AbortMPUExistingObject() {
    // These tests make several round-trips to a live S3 endpoint; allow
    // generous time before mocha times the suite out.
    this.timeout(60000);

    withV4(sigCfg => {
        let bucketUtil;
        let s3;
        const bucketName = `abortmpu-test-bucket-${Date.now()}`;
        const objectKey = 'my-object';

        // NOTE: `assert.ifError(value)` takes a single argument and silently
        // ignores anything else, so the original
        // `assert.ifError(err, \`message\`)` calls never surfaced their
        // messages. This helper keeps the same throw-on-error behavior
        // inside SDK callbacks while actually reporting the context.
        const ifError = (err, msg) => {
            if (err) {
                throw new Error(`${msg}: ${err}`);
            }
        };

        beforeEach(done => {
            bucketUtil = new BucketUtility('default', sigCfg);
            s3 = bucketUtil.s3;
            s3.createBucket({ Bucket: bucketName }, err => {
                ifError(err, 'Error creating bucket');
                done();
            });
        });

        afterEach(async () => {
            // Abort any MPU the test left behind, then empty and delete the
            // bucket. A concurrent completion/abort may have already removed
            // an upload, so NoSuchUpload is tolerated.
            const data = await s3.listMultipartUploads({ Bucket: bucketName }).promise();
            const uploads = data.Uploads;
            await Promise.all(uploads.map(async upload => {
                try {
                    await s3.abortMultipartUpload({
                        Bucket: bucketName,
                        Key: upload.Key,
                        UploadId: upload.UploadId,
                    }).promise();
                } catch (err) {
                    if (err.code !== 'NoSuchUpload') {
                        throw err;
                    }
                    // If NoSuchUpload, swallow error
                }
            }));
            await bucketUtil.empty(bucketName);
            await bucketUtil.deleteOne(bucketName);
        });

        it('should not delete existing object data when aborting another MPU for same key', done => {
            const part1 = Buffer.from('I am part 1 of MPU 1');
            const part2 = Buffer.from('I am part 1 of MPU 2');
            let uploadId1;
            let uploadId2;
            let etag1;
            async.waterfall([
                // Step 1: create and complete MPU 1 so the object holds part1.
                next => {
                    s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
                        ifError(err, 'error creating MPU 1');
                        uploadId1 = data.UploadId;
                        s3.uploadPart({
                            Bucket: bucketName,
                            Key: objectKey,
                            PartNumber: 1,
                            UploadId: uploadId1,
                            Body: part1,
                        }, (err, data) => {
                            ifError(err, 'error uploading part for MPU 1');
                            etag1 = data.ETag;
                            s3.completeMultipartUpload({
                                Bucket: bucketName,
                                Key: objectKey,
                                UploadId: uploadId1,
                                MultipartUpload: { Parts: [{ ETag: etag1, PartNumber: 1 }] },
                            }, err => {
                                ifError(err, 'error completing MPU 1');
                                next();
                            });
                        });
                    });
                },
                // Step 2: sanity-check the object content is part1.
                next => {
                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
                        ifError(err, 'error getting object after MPU 1');
                        assert.strictEqual(data.Body.toString(), part1.toString());
                        next();
                    });
                },
                // Step 3: start a second MPU on the same key (not completed).
                next => {
                    s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
                        ifError(err, 'error creating MPU 2');
                        uploadId2 = data.UploadId;
                        s3.uploadPart({
                            Bucket: bucketName,
                            Key: objectKey,
                            PartNumber: 1,
                            UploadId: uploadId2,
                            Body: part2,
                        }, err => {
                            ifError(err, 'error uploading part for MPU 2');
                            next();
                        });
                    });
                },
                // Step 4: abort the second (in-flight) MPU.
                next => {
                    s3.abortMultipartUpload({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2 }, err => {
                        ifError(err, 'error aborting MPU 2');
                        next();
                    });
                },
                // Step 5: the existing object data must be untouched.
                next => {
                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
                        ifError(err, 'error getting object after aborting MPU 2');
                        assert.strictEqual(data.Body.toString(), part1.toString());
                        next();
                    });
                },
            ], done);
        });

        it('should not delete existing object data when aborting an old MPU for same key', done => {
            const part1 = Buffer.from('I am part 1 of MPU 1');
            const part2 = Buffer.from('I am part 1 of MPU 2');
            let uploadId1;
            let uploadId2;
            let etag2;
            async.waterfall([
                // Step 1: start MPU 1 and upload a part, but do NOT complete it.
                next => {
                    s3.createMultipartUpload({
                        Bucket: bucketName, Key: objectKey,
                    }, (err, data) => {
                        ifError(err, 'error creating MPU 1');
                        uploadId1 = data.UploadId;
                        s3.uploadPart({
                            Bucket: bucketName,
                            Key: objectKey,
                            PartNumber: 1,
                            UploadId: uploadId1,
                            Body: part1,
                        }, err => {
                            ifError(err, 'error uploading part for MPU 1');
                            next();
                        });
                    });
                },
                // Step 2: create and complete MPU 2 -> object now holds part2.
                next => {
                    s3.createMultipartUpload({
                        Bucket: bucketName, Key: objectKey,
                    }, (err, data) => {
                        ifError(err, 'error creating MPU 2');
                        uploadId2 = data.UploadId;
                        s3.uploadPart({
                            Bucket: bucketName,
                            Key: objectKey,
                            PartNumber: 1,
                            UploadId: uploadId2,
                            Body: part2,
                        }, (err, data) => {
                            ifError(err, 'error uploading part for MPU 2');
                            etag2 = data.ETag;
                            s3.completeMultipartUpload({
                                Bucket: bucketName,
                                Key: objectKey,
                                UploadId: uploadId2,
                                MultipartUpload: { Parts: [{ ETag: etag2, PartNumber: 1 }] },
                            }, err => {
                                ifError(err, 'error completing MPU 2');
                                next();
                            });
                        });
                    });
                },
                // Step 3: sanity-check the object content is part2.
                next => {
                    s3.getObject({
                        Bucket: bucketName,
                        Key: objectKey,
                    }, (err, data) => {
                        ifError(err, 'error getting object after MPU 2');
                        assert.strictEqual(data.Body.toString(), part2.toString());
                        next();
                    });
                },
                // Step 4: abort the stale first MPU, whose uploadId does not
                // match the one that produced the current object version.
                next => {
                    s3.abortMultipartUpload({
                        Bucket: bucketName,
                        Key: objectKey,
                        UploadId: uploadId1,
                    }, err => {
                        ifError(err, 'error aborting MPU 1');
                        next();
                    });
                },
                // Step 5: the completed object's data must survive the abort.
                next => {
                    s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => {
                        ifError(err, 'error getting object after aborting MPU 1');
                        assert.strictEqual(data.Body.toString(), part2.toString());
                        next();
                    });
                },
            ], done);
        });
    });
});

describe('Abort MPU - No Such Upload', () => {
withV4(sigCfg => {
let bucketUtil;
Expand Down
Loading