Skip to content

Commit b9b6b70

Browse files
committed
CLDSRV-892: UploadPart store part checksum
1 parent 3138552 commit b9b6b70

File tree

3 files changed

+541
-1
lines changed

lib/api/objectPutPart.js

Lines changed: 54 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,10 @@ const { BackendInfo } = models;
2121
const writeContinue = require('../utilities/writeContinue');
2222
const { parseObjectEncryptionHeaders } = require('./apiUtils/bucket/bucketEncryption');
2323
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
24+
const {
25+
getChecksumDataFromHeaders,
26+
arsenalErrorFromChecksumError,
27+
} = require('./apiUtils/integrity/validateChecksums');
2428
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
2529
const { setSSEHeaders } = require('./apiUtils/object/sseHeaders');
2630
const { storeServerAccessLogInfo } = require('../metadata/metadataUtils');
@@ -113,6 +117,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
113117
// `requestType` is the general 'objectPut'.
114118
const requestType = request.apiMethods || 'objectPutPart';
115119
let partChecksum;
120+
let mpuChecksumAlgo;
121+
let mpuChecksumIsDefault;
116122

117123
return async.waterfall([
118124
// Get the destination bucket.
@@ -196,6 +202,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
196202
return next(errors.AccessDenied, destinationBucket);
197203
}
198204

205+
mpuChecksumAlgo = res.checksumAlgorithm;
206+
mpuChecksumIsDefault = res.checksumIsDefault;
207+
199208
const objectLocationConstraint =
200209
res.controllingLocationConstraint;
201210
const sseAlgo = res['x-amz-server-side-encryption'];
@@ -316,8 +325,43 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
316325
};
317326
const backendInfo = new BackendInfo(config,
318327
objectLocationConstraint);
328+
329+
const headerChecksum = getChecksumDataFromHeaders(request.headers);
330+
if (headerChecksum && headerChecksum.error) {
331+
return next(arsenalErrorFromChecksumError(headerChecksum), destinationBucket);
332+
}
333+
334+
// If the MPU specifies a non-default checksum algo and the
335+
// client sends a different algo, reject the request.
336+
if (headerChecksum && mpuChecksumAlgo && !mpuChecksumIsDefault
337+
&& headerChecksum.algorithm !== mpuChecksumAlgo) {
338+
return next(errors.InvalidRequest.customizeDescription(
339+
`Checksum algorithm '${headerChecksum.algorithm}' is not the same ` +
340+
`as the checksum algorithm '${mpuChecksumAlgo}' specified during ` +
341+
'CreateMultipartUpload.'
342+
), destinationBucket);
343+
}
344+
345+
const primaryAlgo = mpuChecksumAlgo || 'crc64nvme';
346+
let checksums;
347+
if (headerChecksum && headerChecksum.algorithm === mpuChecksumAlgo) {
348+
checksums = {
349+
primary: headerChecksum, // MPU and Header match only need to calculate one.
350+
secondary: null,
351+
};
352+
} else if (headerChecksum) {
353+
checksums = {
354+
primary: { algorithm: primaryAlgo, isTrailer: false, expected: undefined },
355+
secondary: headerChecksum, // MPU and Header mismatch, need to verify the header checksum.
356+
};
357+
} else {
358+
checksums = {
359+
primary: { algorithm: primaryAlgo, isTrailer: false, expected: undefined },
360+
secondary: null, // No Header checksum, we only calculate the MPU one.
361+
};
362+
}
319363
return dataStore(objectContext, cipherBundle, request,
320-
size, streamingV4Params, backendInfo, log,
364+
size, streamingV4Params, backendInfo, checksums, log,
321365
(err, dataGetInfo, hexDigest, checksum) => {
322366
if (err) {
323367
return next(err, destinationBucket);
@@ -356,6 +400,15 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
356400
'content-length': size,
357401
'owner-id': destinationBucket.getOwner(),
358402
};
403+
if (partChecksum) {
404+
if (partChecksum.storageChecksum) {
405+
omVal.checksumValue = partChecksum.storageChecksum.value;
406+
omVal.checksumAlgorithm = partChecksum.storageChecksum.algorithm;
407+
} else {
408+
omVal.checksumValue = partChecksum.value;
409+
omVal.checksumAlgorithm = partChecksum.algorithm;
410+
}
411+
}
359412
const mdParams = { overheadField: constants.overheadField };
360413
return metadata.putObjectMD(mpuBucketName, partKey, omVal, mdParams, log,
361414
err => {
Lines changed: 186 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,186 @@
1+
const assert = require('assert');
2+
const {
3+
CreateBucketCommand,
4+
CreateMultipartUploadCommand,
5+
AbortMultipartUploadCommand,
6+
UploadPartCommand,
7+
DeleteBucketCommand,
8+
} = require('@aws-sdk/client-s3');
9+
10+
const withV4 = require('../support/withV4');
11+
const BucketUtility = require('../../lib/utility/bucket-util');
12+
const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/validateChecksums');
13+
14+
// Shared fixtures for the UploadPart checksum tests.
const bucket = `mpu-part-checksum-test-${Date.now()}`;
const key = 'test-part-checksum-key';
const partBody = Buffer.from('I am a part body for checksum testing', 'utf8');

const allAlgos = ['CRC32', 'CRC32C', 'SHA1', 'SHA256'];

// Maps algo name to the UploadPartCommand checksum field name
const checksumField = {
    CRC32: 'ChecksumCRC32',
    CRC32C: 'ChecksumCRC32C',
    SHA1: 'ChecksumSHA1',
    SHA256: 'ChecksumSHA256',
};

// Pre-compute correct digests for partBody
const correctDigest = {};
// A valid-length but incorrect digest for each algo
const wrongDigest = {};

before(async () => {
    // Fill both digest tables in a single pass. The wrong digest keeps the
    // correct length but flips the first character, so the server rejects it
    // on value (BadDigest) rather than on any length/format validation.
    for (const algoName of allAlgos) {
        const digest = await algorithms[algoName.toLowerCase()].digest(partBody);
        correctDigest[algoName] = digest;
        const flippedFirst = digest[0] === 'A' ? 'B' : 'A';
        wrongDigest[algoName] = `${flippedFirst}${digest.slice(1)}`;
    }
});
45+
46+
describe('UploadPart checksum validation', () =>
    withV4(sigCfg => {
        let bucketUtil;
        let s3;

        // Send an UploadPartCommand that is expected to fail, and assert the
        // resulting error name. The error is captured OUTSIDE the try body:
        // putting assert.fail() inside the try means the AssertionError it
        // throws is swallowed by the test's own catch and re-asserted as a
        // misleading `'AssertionError' !== expectedName` failure when the
        // server unexpectedly accepts the part.
        const expectUploadPartError = async (params, expectedName, failMessage) => {
            let caught;
            try {
                await s3.send(new UploadPartCommand(params));
            } catch (err) {
                caught = err;
            }
            assert.ok(caught, failMessage);
            assert.strictEqual(caught.name, expectedName);
        };

        before(async () => {
            bucketUtil = new BucketUtility('default', sigCfg);
            s3 = bucketUtil.s3;
            await s3.send(new CreateBucketCommand({ Bucket: bucket }));
        });

        after(async () => {
            await bucketUtil.empty(bucket);
            await s3.send(new DeleteBucketCommand({ Bucket: bucket }));
        });

        // For each non-default MPU algo, test that:
        // - matching algo with correct digest succeeds
        // - matching algo with wrong digest fails with BadDigest
        // - every other algo is rejected with InvalidRequest
        // - no checksum header is accepted
        allAlgos.forEach(mpuAlgo => {
            describe(`MPU created with ${mpuAlgo}`, () => {
                let uploadId;
                let partNum = 0;

                before(async () => {
                    const res = await s3.send(new CreateMultipartUploadCommand({
                        Bucket: bucket, Key: key,
                        ChecksumAlgorithm: mpuAlgo,
                    }));
                    uploadId = res.UploadId;
                });

                after(async () => {
                    await s3.send(new AbortMultipartUploadCommand({
                        Bucket: bucket, Key: key, UploadId: uploadId,
                    }));
                });

                it(`should accept ${mpuAlgo} with correct digest`, async () => {
                    partNum++;
                    const res = await s3.send(new UploadPartCommand({
                        Bucket: bucket, Key: key, UploadId: uploadId,
                        PartNumber: partNum, Body: partBody,
                        [checksumField[mpuAlgo]]: correctDigest[mpuAlgo],
                    }));
                    // The server must echo the validated checksum back.
                    assert.strictEqual(res[checksumField[mpuAlgo]], correctDigest[mpuAlgo]);
                });

                it(`should reject ${mpuAlgo} with wrong digest (BadDigest)`, async () => {
                    partNum++;
                    await expectUploadPartError({
                        Bucket: bucket, Key: key, UploadId: uploadId,
                        PartNumber: partNum, Body: partBody,
                        [checksumField[mpuAlgo]]: wrongDigest[mpuAlgo],
                    }, 'BadDigest', 'Expected BadDigest error');
                });

                // Note: AWS SDK v3 always sends a default crc32 checksum,
                // so "no checksum header" cannot be tested via the SDK for
                // non-default MPUs (it would be rejected as a mismatch).

                allAlgos.filter(a => a !== mpuAlgo).forEach(otherAlgo => {
                    it(`should reject ${otherAlgo} when MPU is ${mpuAlgo} (InvalidRequest)`, async () => {
                        partNum++;
                        await expectUploadPartError({
                            Bucket: bucket, Key: key, UploadId: uploadId,
                            PartNumber: partNum, Body: partBody,
                            [checksumField[otherAlgo]]: correctDigest[otherAlgo],
                        }, 'InvalidRequest', 'Expected InvalidRequest error');
                    });
                });
            });
        });

        // Default MPU (no ChecksumAlgorithm) should accept any algo
        describe('MPU created with no checksum (default)', () => {
            let uploadId;
            let partNum = 0;

            before(async () => {
                const res = await s3.send(new CreateMultipartUploadCommand({
                    Bucket: bucket, Key: key,
                }));
                uploadId = res.UploadId;
            });

            after(async () => {
                await s3.send(new AbortMultipartUploadCommand({
                    Bucket: bucket, Key: key, UploadId: uploadId,
                }));
            });

            allAlgos.forEach(algo => {
                it(`should accept ${algo} with correct digest`, async () => {
                    partNum++;
                    const res = await s3.send(new UploadPartCommand({
                        Bucket: bucket, Key: key, UploadId: uploadId,
                        PartNumber: partNum, Body: partBody,
                        [checksumField[algo]]: correctDigest[algo],
                    }));
                    assert.strictEqual(res[checksumField[algo]], correctDigest[algo]);
                });

                it(`should reject ${algo} with wrong digest (BadDigest)`, async () => {
                    partNum++;
                    await expectUploadPartError({
                        Bucket: bucket, Key: key, UploadId: uploadId,
                        PartNumber: partNum, Body: partBody,
                        [checksumField[algo]]: wrongDigest[algo],
                    }, 'BadDigest', 'Expected BadDigest error');
                });
            });

            it('should accept part with no checksum header', async () => {
                partNum++;
                const res = await s3.send(new UploadPartCommand({
                    Bucket: bucket, Key: key, UploadId: uploadId,
                    PartNumber: partNum, Body: partBody,
                }));
                assert(res.ETag);
            });
        });
    })
);

0 commit comments

Comments
 (0)