Skip to content

Commit 946b11b

Browse files
committed
Test new error handling in completeMPU
Issue: CLDSRV-669
1 parent 4dfec8d commit 946b11b

1 file changed

Lines changed: 170 additions & 0 deletions

File tree

tests/unit/api/multipartUpload.js

Lines changed: 170 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2070,6 +2070,176 @@ describe('Multipart Upload API', () => {
20702070
});
20712071
});
20722072
});
2073+
2074+
// completeMPU must still surface a failure to the caller when the MPU
// part-metadata cleanup step cannot delete its keys, even though the
// object itself has already been committed.
it('should return a retryable error if deletePartsMetadata fails', done => {
    const partPayload = Buffer.from('I am a part\n', 'utf8');
    let metadataDeleteStub;

    async.waterfall([
        next => bucketPut(authInfo, bucketPutRequest, log, next),
        (corsHeaders, next) => initiateMultipartUpload(authInfo,
            initiateRequest, log, next),
        (result, corsHeaders, next) => parseString(result, next),
    ],
    (err, json) => {
        assert.ifError(err);
        const uploadId = json.InitiateMultipartUploadResult.UploadId[0];
        const partHash = crypto.createHash('md5')
            .update(partPayload)
            .digest('hex');
        const putPartRequest = new DummyRequest({
            bucketName,
            namespace,
            objectKey,
            headers: { host: `${bucketName}.s3.amazonaws.com` },
            url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
            query: {
                partNumber: '1',
                uploadId,
            },
            calculatedHash: partHash,
        }, partPayload);

        objectPutPart(authInfo, putPartRequest, undefined, log, () => {
            // Force the part-metadata batch delete to report a
            // non-retryable error; completeMPU is expected to turn it
            // into an error handed back to the caller (retryable).
            const services = require('../../../lib/services');
            metadataDeleteStub = sinon
                .stub(services, 'batchDeleteObjectMetadata')
                .callsFake((mpuBucketName, keysToDelete, reqLog, cb) =>
                    cb(errors.NoSuchKey));

            const completeBody = [
                '<CompleteMultipartUpload>',
                '<Part>',
                '<PartNumber>1</PartNumber>',
                `<ETag>"${partHash}"</ETag>`,
                '</Part>',
                '</CompleteMultipartUpload>',
            ].join('');
            const completeRequest = {
                bucketName,
                namespace,
                objectKey,
                parsedHost: 's3.amazonaws.com',
                url: `/${objectKey}?uploadId=${uploadId}`,
                headers: { host: `${bucketName}.s3.amazonaws.com` },
                query: { uploadId },
                post: completeBody,
                actionImplicitDenies: false,
            };

            completeMultipartUpload(authInfo, completeRequest, log, err => {
                // Undo the stub before asserting so a failed assertion
                // does not leak it into other tests.
                metadataDeleteStub.restore();

                // The cleanup failure must be reported to the caller.
                assert(err, 'Expected an error when metadata deletion fails');

                // The object itself should still have been committed
                // despite the cleanup failure.
                const objMD = metadata.keyMaps.get(bucketName).get(objectKey);
                assert(objMD, 'S3 object should exist even when metadata cleanup fails');
                assert.strictEqual(objMD.uploadId, uploadId);

                done();
            });
        });
    });
});
2146+
2147+
// A failure to delete the data of "extra" parts (parts that were
// uploaded but not listed in the CompleteMultipartUpload body) must NOT
// fail the request: the MPU still completes, the object exists, and the
// MPU metadata is cleaned up.
it('should not return error if batchDeleteExtraParts fails', done => {
    const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024);
    const partBody = Buffer.from('I am a smaller part\n', 'utf8');
    let batchDeleteStub;

    async.waterfall([
        next => bucketPut(authInfo, bucketPutRequest, log, next),
        (corsHeaders, next) => initiateMultipartUpload(authInfo,
            initiateRequest, log, next),
        (result, corsHeaders, next) => parseString(result, next),
    ],
    (err, json) => {
        assert.ifError(err);
        const testUploadId = json.InitiateMultipartUploadResult.UploadId[0];

        // Upload part 1 (will be included in completion)
        const partRequest1 = new DummyRequest({
            bucketName,
            namespace,
            objectKey,
            headers: { host: `${bucketName}.s3.amazonaws.com` },
            url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`,
            query: {
                partNumber: '1',
                uploadId: testUploadId,
            },
        }, fullSizedPart);

        objectPutPart(authInfo, partRequest1, undefined, log, (err, part1ETag) => {
            assert.ifError(err);

            // Upload part 2 (will be an "extra part" not included in completion)
            const partRequest2 = new DummyRequest({
                bucketName,
                namespace,
                objectKey,
                headers: { host: `${bucketName}.s3.amazonaws.com` },
                url: `/${objectKey}?partNumber=2&uploadId=${testUploadId}`,
                query: {
                    partNumber: '2',
                    uploadId: testUploadId,
                },
            }, partBody);

            objectPutPart(authInfo, partRequest2, undefined, log, err => {
                assert.ifError(err);

                // Mock data.batchDelete to fail when deleting extra parts
                const { data } = require('../../../lib/data/wrapper');
                batchDeleteStub = sinon.stub(data, 'batchDelete')
                    .callsFake((locations, method, dataStoreName, log, cb) =>
                        // Always fail extra part deletion
                        cb(new Error('Simulated extra part deletion failure')));

                // Complete MPU with only part 1 (part 2 becomes "extra part")
                const completeBody = '<CompleteMultipartUpload>' +
                    '<Part>' +
                    '<PartNumber>1</PartNumber>' +
                    `<ETag>"${part1ETag}"</ETag>` +
                    '</Part>' +
                    '</CompleteMultipartUpload>';
                const completeRequest = {
                    bucketName,
                    namespace,
                    objectKey,
                    parsedHost: 's3.amazonaws.com',
                    url: `/${objectKey}?uploadId=${testUploadId}`,
                    headers: { host: `${bucketName}.s3.amazonaws.com` },
                    query: { uploadId: testUploadId },
                    post: completeBody,
                    actionImplicitDenies: false,
                };

                completeMultipartUpload(authInfo, completeRequest, log, err => {
                    // Restore original function before asserting so a
                    // failed assertion does not leak the stub
                    batchDeleteStub.restore();

                    // Should NOT get an error despite extra part deletion
                    // failing. Note: assert.ifError() takes a single
                    // argument and silently ignores a message parameter,
                    // so use assert.ok() to keep the failure message.
                    assert.ok(!err,
                        'Should not return error when extra part deletion fails');

                    // Verify S3 object was created successfully
                    const objMD = metadata.keyMaps.get(bucketName).get(objectKey);
                    assert(objMD, 'S3 object should exist');
                    assert.strictEqual(objMD.uploadId, testUploadId);

                    // Verify MPU metadata was cleaned up
                    assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0,
                        'MPU metadata should be cleaned up');

                    done();
                });
            });
        });
    });
});
20732243
});
20742244

20752245
describe('complete mpu with versioning', () => {

0 commit comments

Comments
 (0)