From f9f0688c315cc1ce1ccb956c12b51cf7dcf6fafe Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Tue, 5 May 2026 19:21:08 +0200 Subject: [PATCH 01/12] CLDSRV-898: rename getObjectAttributesXMLTag to xmlTag --- lib/api/apiUtils/integrity/validateChecksums.js | 10 +++++----- lib/api/apiUtils/object/objectAttributes.js | 2 +- lib/api/listParts.js | 11 +++++++---- .../aws-node-sdk/test/object/objectGetAttributes.js | 10 +++++----- tests/unit/api/apiUtils/object/objectAttributes.js | 4 ++-- tests/unit/api/objectGetAttributes.js | 10 +++++----- 6 files changed, 25 insertions(+), 22 deletions(-) diff --git a/lib/api/apiUtils/integrity/validateChecksums.js b/lib/api/apiUtils/integrity/validateChecksums.js index 64f945b2ae..ba775aad79 100644 --- a/lib/api/apiUtils/integrity/validateChecksums.js +++ b/lib/api/apiUtils/integrity/validateChecksums.js @@ -85,7 +85,7 @@ function uint32ToBase64(num) { const algorithms = Object.freeze({ crc64nvme: { - getObjectAttributesXMLTag: 'ChecksumCRC64NVME', + xmlTag: 'ChecksumCRC64NVME', digest: async data => { const input = Buffer.isBuffer(data) ? data : Buffer.from(data); const crc = new CrtCrc64Nvme(); @@ -101,7 +101,7 @@ const algorithms = Object.freeze({ createHash: () => new CrtCrc64Nvme() }, crc32: { - getObjectAttributesXMLTag: 'ChecksumCRC32', + xmlTag: 'ChecksumCRC32', digest: data => { const input = Buffer.isBuffer(data) ? data : Buffer.from(data); return uint32ToBase64(new Crc32().update(input).digest() >>> 0); // >>> 0 coerce number to uint32 @@ -114,7 +114,7 @@ const algorithms = Object.freeze({ createHash: () => new Crc32() }, crc32c: { - getObjectAttributesXMLTag: 'ChecksumCRC32C', + xmlTag: 'ChecksumCRC32C', digest: data => { const input = Buffer.isBuffer(data) ? data : Buffer.from(data); return uint32ToBase64(new Crc32c().update(input).digest() >>> 0); // >>> 0 coerce number to uint32 @@ -124,7 +124,7 @@ const algorithms = Object.freeze({ createHash: () => new Crc32c() }, sha1: { - getObjectAttributesXMLTag: 'ChecksumSHA1', + xmlTag: 'ChecksumSHA1', digest: data => { const input = Buffer.isBuffer(data) ? data : Buffer.from(data); return crypto.createHash('sha1').update(input).digest('base64'); @@ -134,7 +134,7 @@ const algorithms = Object.freeze({ createHash: () => crypto.createHash('sha1') }, sha256: { - getObjectAttributesXMLTag: 'ChecksumSHA256', + xmlTag: 'ChecksumSHA256', digest: data => { const input = Buffer.isBuffer(data) ? 
data : Buffer.from(data); return crypto.createHash('sha256').update(input).digest('base64'); diff --git a/lib/api/apiUtils/object/objectAttributes.js b/lib/api/apiUtils/object/objectAttributes.js index ad5bf2e51e..052cd12a59 100644 --- a/lib/api/apiUtils/object/objectAttributes.js +++ b/lib/api/apiUtils/object/objectAttributes.js @@ -93,7 +93,7 @@ function buildAttributesXml(objectMD, userMetadata, requestedAttrs, xml, log) { }); break; } - const tag = algo.getObjectAttributesXMLTag; + const tag = algo.xmlTag; xml.push( '<Checksum>', `<${tag}>${checksum.checksumValue}</${tag}>`, diff --git a/lib/api/listParts.js b/lib/api/listParts.js index ffff63ed0b..22857456f5 100644 --- a/lib/api/listParts.js +++ b/lib/api/listParts.js @@ -91,8 +91,7 @@ function getPartChecksumXML(checksumAlgorithm, checksumValue) { return undefined; } const algorithm = checksumAlgorithm.toLowerCase(); - const xmlTag = algorithms[algorithm] && - algorithms[algorithm].getObjectAttributesXMLTag; + const xmlTag = algorithms[algorithm] && algorithms[algorithm].xmlTag; if (!xmlTag) { return undefined; } @@ -334,8 +333,12 @@ function listParts(authInfo, request, log, callback) { ], (err, destinationBucket, xml) => { const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); - monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts'); + if (err) { + // The 200 metric is emitted on success in the final waterfall + // step; only count failures here to avoid double-counting. + monitoring.promMetrics('GET', bucketName, 400, + 'listMultipartUploadParts'); + } Object.assign(responseHeaders, corsHeaders); return callback(err, xml, responseHeaders); diff --git a/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js b/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js index 4af27c76e7..e80d9a0d31 100644 --- a/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js +++ b/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js @@ -519,10 +519,10 @@ describe('objectGetAttributes with checksum', () => { await s3.send(new DeleteBucketCommand({ Bucket: checksumBucket })); }); - Object.entries(algorithms).forEach(([name, { getObjectAttributesXMLTag }]) => { + Object.entries(algorithms).forEach(([name, { xmlTag }]) => { const sdkAlgorithm = name.toUpperCase(); - it(`should return ${getObjectAttributesXMLTag} when object has ${name} checksum`, async () => { + it(`should return ${xmlTag} when object has ${name} checksum`, async () => { await s3.send(new PutObjectCommand({ Bucket: checksumBucket, Key: checksumKey, @@ -537,11 +537,11 @@ describe('objectGetAttributes with checksum', () => { })); assert(data.Checksum, 'Checksum should be present'); - assert.strictEqual(data.Checksum[getObjectAttributesXMLTag], expectedDigests[name]); + assert.strictEqual(data.Checksum[xmlTag], expectedDigests[name]); assert.strictEqual(data.Checksum.ChecksumType, 'FULL_OBJECT'); }); - it(`should return ${getObjectAttributesXMLTag} along with other attributes`, async () => { + it(`should return ${xmlTag} along with other attributes`, async () => { await s3.send(new PutObjectCommand({ Bucket: checksumBucket, Key: checksumKey, @@ -558,7 +558,7 @@ describe('objectGetAttributes with checksum', () => { assert(data.ETag, 'ETag should be present'); assert(data.ObjectSize, 'ObjectSize should be present'); assert(data.Checksum, 'Checksum should be present'); - assert.strictEqual(data.Checksum[getObjectAttributesXMLTag], expectedDigests[name]); + assert.strictEqual(data.Checksum[xmlTag],
expectedDigests[name]); assert.strictEqual(data.Checksum.ChecksumType, 'FULL_OBJECT'); }); }); diff --git a/tests/unit/api/apiUtils/object/objectAttributes.js b/tests/unit/api/apiUtils/object/objectAttributes.js index 151d56d0aa..2586f71a7f 100644 --- a/tests/unit/api/apiUtils/object/objectAttributes.js +++ b/tests/unit/api/apiUtils/object/objectAttributes.js @@ -190,7 +190,7 @@ describe('buildXmlAttributes', () => { assert.strictEqual(result.length, 0); }); - Object.entries(algorithms).forEach(([algo, { getObjectAttributesXMLTag }]) => { + Object.entries(algorithms).forEach(([algo, { xmlTag }]) => { it(`should generate correct Checksum XML for ${algo}`, () => { const digest = expectedDigests[algo]; const result = []; @@ -205,7 +205,7 @@ describe('buildXmlAttributes', () => { assert.strictEqual(result.length, 4); assert.strictEqual(result[0], '<Checksum>'); - assert.strictEqual(result[1], `<${getObjectAttributesXMLTag}>${digest}</${getObjectAttributesXMLTag}>`); + assert.strictEqual(result[1], `<${xmlTag}>${digest}</${xmlTag}>`); assert.strictEqual(result[2], '<ChecksumType>FULL_OBJECT</ChecksumType>'); assert.strictEqual(result[3], '</Checksum>'); }); diff --git a/tests/unit/api/objectGetAttributes.js b/tests/unit/api/objectGetAttributes.js index 2155731560..d3e38f4568 100644 --- a/tests/unit/api/objectGetAttributes.js +++ b/tests/unit/api/objectGetAttributes.js @@ -664,8 +664,8 @@ describe('objectGetAttributes API with checksum', () => { await bucketPutAsync(authInfo, testPutBucketRequest, log); }); - Object.entries(algorithms).forEach(([name, { getObjectAttributesXMLTag }]) => { - it(`should return ${getObjectAttributesXMLTag} when object has ${name} checksum`, async () => { + Object.entries(algorithms).forEach(([name, { xmlTag }]) => { + it(`should return ${xmlTag} when object has ${name} checksum`, async () => { const testPutObjectRequest = new DummyRequest( { bucketName, @@ -688,11 +688,11 @@ describe('objectGetAttributes API with checksum', () => { const response = result.GetObjectAttributesResponse; assert(response.Checksum, 'Checksum should be present'); - assert.strictEqual(response.Checksum[0][getObjectAttributesXMLTag][0], expectedDigests[name]); + assert.strictEqual(response.Checksum[0][xmlTag][0], expectedDigests[name]); assert.strictEqual(response.Checksum[0].ChecksumType[0], 'FULL_OBJECT'); }); - it(`should return ${getObjectAttributesXMLTag} along with other attributes`, async () => { + it(`should return ${xmlTag} along with other attributes`, async () => { const testPutObjectRequest = new DummyRequest( { bucketName, @@ -717,7 +717,7 @@ describe('objectGetAttributes API with checksum', () => { assert(response.ETag, 'ETag should be present'); assert(response.ObjectSize, 'ObjectSize should be present'); assert(response.Checksum, 'Checksum should be present'); - assert.strictEqual(response.Checksum[0][getObjectAttributesXMLTag][0], expectedDigests[name]); + assert.strictEqual(response.Checksum[0][xmlTag][0], expectedDigests[name]); assert.strictEqual(response.Checksum[0].ChecksumType[0], 'FULL_OBJECT'); }); }); From 9477b58033a964d58e5652deb1157be039589eb2 Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Tue, 5 May 2026 22:06:15 +0200 Subject: [PATCH 02/12] CLDSRV-898: validate per-part checksums and x-amz-checksum-type --- .../apiUtils/integrity/validateChecksums.js | 40 +- lib/api/completeMultipartUpload.js | 109 ++++ tests/unit/api/completeMultipartUpload.js | 482 ++++++++++++++++++ 3 files changed, 630 insertions(+), 1 deletion(-) create mode 100644 tests/unit/api/completeMultipartUpload.js diff --git a/lib/api/apiUtils/integrity/validateChecksums.js
b/lib/api/apiUtils/integrity/validateChecksums.js index ba775aad79..f186a7711a 100644 --- a/lib/api/apiUtils/integrity/validateChecksums.js +++ b/lib/api/apiUtils/integrity/validateChecksums.js @@ -32,7 +32,9 @@ const errMPUTypeWithoutAlgo = errorInstances.InvalidRequest.customizeDescription 'with the x-amz-checksum-algorithm header.'); const checksumedMethods = Object.freeze({ - 'completeMultipartUpload': true, + // CompleteMPU's x-amz-checksum-<algorithm> is the final-object checksum, + // not a body digest. Validated in completeMultipartUpload.js instead. + // 'completeMultipartUpload': true, 'multiObjectDelete': true, 'bucketPutACL': true, 'bucketPutCors': true, @@ -145,6 +147,20 @@ const algorithms = Object.freeze({ } }); +/** + * Validate body integrity for a buffered (non-chunked) request. + * + * The whole request body has already been read into memory. This function + * picks the single x-amz-checksum-<algorithm> header, recomputes the digest of + * `body` with that algorithm, and compares it to the header value. It is the + * authoritative body-checksum check for small APIs (e.g. multiObjectDelete, + * bucket configuration PUTs) where the body is not streamed. + * + * @param {object} headers - HTTP request headers (lowercased keys) + * @param {Buffer} body - the entire buffered request body + * @returns {Promise<object|null>} - + * null on success; otherwise a ChecksumError with details. + */ async function validateXAmzChecksums(headers, body) { const checksumHeaders = Object.keys(headers).filter(header => header.startsWith('x-amz-checksum-')); const xAmzChecksumCnt = checksumHeaders.length; @@ -198,6 +214,28 @@ async function validateXAmzChecksums(headers, body) { return null; } +/** + * Extract checksum intent from request headers for a streaming upload. + * + * Inspects x-amz-checksum-<algorithm>, x-amz-trailer, and x-amz-sdk-checksum-algorithm + * to decide which algorithm the streaming pipeline (e.g. ChecksumTransform, + * TrailingChecksumTransform) should compute over the body, and what digest + * value (if any) the body must match. Does NOT read the body — the actual + * comparison happens later as bytes flow through the stream. + * + * Used by streaming write paths: PutObject, UploadPart, replication writes + * via routeBackbeat. + * + * @param {object} headers - HTTP request headers (lowercased keys) + * @returns {null + * | { algorithm: string, isTrailer: boolean, expected: string|undefined } + * | { error: string, details: object }} - + * - null when no checksum metadata is present (caller decides default). + * - { algorithm, isTrailer, expected } describing what the stream must + * produce. `isTrailer` is true when the digest will arrive in the + * request trailer (`expected` is undefined until the trailer parses). + * - { error, details } on header mismatch.
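 + * + * Illustrative shapes (header values are made up, not from this change): a PUT carrying + * `x-amz-checksum-crc32: dDRz/Q==` resolves to + * { algorithm: 'crc32', isTrailer: false, expected: 'dDRz/Q==' }, + * while a chunked upload declaring `x-amz-trailer: x-amz-checksum-crc32` + * resolves to { algorithm: 'crc32', isTrailer: true, expected: undefined }.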
+ */ function getChecksumDataFromHeaders(headers) { const checkSdk = algo => { if (!('x-amz-sdk-checksum-algorithm' in headers)) { diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index 83df03fbe8..3be6b302aa 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -25,12 +25,85 @@ const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { validatePutVersionId } = require('./apiUtils/object/coldStorage'); const { validateQuotas } = require('./apiUtils/quotas/quotaUtils'); const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); +const { algorithms: checksumAlgorithms } = require('./apiUtils/integrity/validateChecksums'); const versionIdUtils = versioning.VersionID; let splitter = constants.splitter; const REPLICATION_ACTION = 'MPU'; +const allChecksumXmlTags = Object.values(checksumAlgorithms) + .map(algo => algo.xmlTag); + +/** + * Validate per-part checksums in a CompleteMultipartUpload request body + * against the stored MPU configuration and stored part metadata. + * + * Rules (per AWS): + * - If a Part element includes a Checksum field that does not match the + * MPU's configured checksumAlgorithm, return BadDigest. + * - If a Part element includes the matching Checksum field but the value + * does not match the stored part's ChecksumValue, return InvalidPart. + * - If checksumType === 'COMPOSITE' and checksumIsDefault is false, every part + * in the request body MUST include the matching Checksum field; + * missing → InvalidRequest. + * + * @param {object} jsonList - parsed CompleteMultipartUpload XML + * @param {array} storedParts - parts as returned by services.getMPUparts + * @param {string} mpuSplitter - splitter used in part keys + * @param {object} mpuChecksum - { algorithm, type, isDefault } + * @returns {Error|null} + */ +function validatePerPartChecksums(jsonList, storedParts, mpuSplitter, mpuChecksum) { + const mpuAlgo = mpuChecksum.algorithm; + const expectedTag = mpuAlgo && checksumAlgorithms[mpuAlgo] ? checksumAlgorithms[mpuAlgo].xmlTag : null; + // Skip enforcement if the MPU's algorithm is unknown (shouldn't happen). + const requireForEachPart = mpuChecksum.type === 'COMPOSITE' + && !mpuChecksum.isDefault + && expectedTag !== null; + + const storedByPartNumber = new Map(); + storedParts.forEach(item => { + const partNumber = Number.parseInt(item.key.split(mpuSplitter)[1], 10); + storedByPartNumber.set(partNumber, item); + }); + + const parts = jsonList.Part || []; + for (let i = 0; i < parts.length; i++) { + const part = parts[i]; + const partNumber = Number.parseInt(part.PartNumber[0], 10); + + const presentTags = allChecksumXmlTags.filter(tag => part[tag]); + + for (const tag of presentTags) { + if (tag !== expectedTag) { + const algoLabel = tag.replace(/^Checksum/, '').toLowerCase(); + return errorInstances.BadDigest.customizeDescription( + `The ${algoLabel} you specified for part ${partNumber} ` + + 'did not match what we received.'); + } + } + + if (expectedTag && presentTags.includes(expectedTag)) { + const providedValue = part[expectedTag][0]; + const storedPart = storedByPartNumber.get(partNumber); + const storedValue = storedPart && storedPart.value && storedPart.value.ChecksumValue; + if (!storedValue || providedValue !== storedValue) { + return errorInstances.InvalidPart.customizeDescription( + 'One or more of the specified parts could not be found. 
' + 'The part may not have been uploaded, or the specified ' + 'entity tag may not match the part\'s entity tag.'); } } else if (requireForEachPart) { return errorInstances.InvalidRequest.customizeDescription( `The upload was created using a ${mpuAlgo} checksum. ` + 'The complete request must include the checksum for each ' + `part. It was missing for part ${partNumber} in the request.`); } } return null; } /* Format of xml request: <CompleteMultipartUpload> @@ -158,6 +231,28 @@ function completeMultipartUpload(authInfo, request, log, callback) { log.error('error validating request', { error: err }); return next(err, destBucket); } + // Validate x-amz-checksum-type header (if present) matches + // the checksum type the MPU was created with. + // x-amz-checksum-algorithm is not validated: AWS ignores + // a mismatch on this header for CompleteMultipartUpload. + const headerType = request.headers['x-amz-checksum-type']; + if (headerType) { + const headerTypeUpper = headerType.toUpperCase(); + if (headerTypeUpper !== 'COMPOSITE' && headerTypeUpper !== 'FULL_OBJECT') { + const typeErr = errorInstances.InvalidRequest + .customizeDescription( 'Value for x-amz-checksum-type header is invalid.'); + return next(typeErr, destBucket); + } + const mpuType = storedMetadata.checksumType; + if (!mpuType || headerTypeUpper !== mpuType.toUpperCase()) { + const typeErr = errorInstances.InvalidRequest + .customizeDescription( + `The upload was created using the ${mpuType} ` + + 'checksum mode. The complete request must ' + + 'use the same checksum mode.'); + return next(typeErr, destBucket); + } + } return next(null, destBucket, objMD, mpuBucket, storedMetadata); }); @@ -217,6 +312,19 @@ function completeMultipartUpload(authInfo, request, log, callback) { } const storedParts = result.Contents; const totalMPUSize = storedParts.reduce((acc, part) => acc + part.value.Size, 0); + const mpuChecksum = { + algorithm: storedMetadata.checksumAlgorithm, + type: storedMetadata.checksumType, + isDefault: storedMetadata.checksumIsDefault, + }; + const checksumErr = validatePerPartChecksums( + jsonList, storedParts, splitter, mpuChecksum); + if (checksumErr) { + log.debug('per-part checksum validation failed', { + error: checksumErr, + }); + return next(checksumErr, destBucket); + } return next(null, destBucket, objMD, mpuBucket, storedParts, jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize); }); @@ -633,3 +741,4 @@ function completeMultipartUpload(authInfo, request, log, callback) { } module.exports = completeMultipartUpload; +module.exports.validatePerPartChecksums = validatePerPartChecksums; \ No newline at end of file diff --git a/tests/unit/api/completeMultipartUpload.js b/tests/unit/api/completeMultipartUpload.js new file mode 100644 index 0000000000..bc515aa4b7 --- /dev/null +++ b/tests/unit/api/completeMultipartUpload.js @@ -0,0 +1,482 @@ +const assert = require('assert'); +const crypto = require('crypto'); +const async = require('async'); +const { parseString } = require('xml2js'); + +const { bucketPut } = require('../../../lib/api/bucketPut'); +const initiateMultipartUpload = + require('../../../lib/api/initiateMultipartUpload'); +const objectPutPart = require('../../../lib/api/objectPutPart'); +const completeMultipartUpload = + require('../../../lib/api/completeMultipartUpload'); +const { validatePerPartChecksums } = completeMultipartUpload; +const { validateMethodChecksumNoChunking } = + require('../../../lib/api/apiUtils/integrity/validateChecksums'); +const DummyRequest =
require('../DummyRequest'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); + +const SPLITTER = '..|..'; +const UPLOAD_ID = 'upload-id-1'; + +// XML element name AWS uses for each algorithm in CompleteMultipartUpload's +// per-part body. +const TAG_BY_ALGO = { + crc32: 'ChecksumCRC32', + crc32c: 'ChecksumCRC32C', + crc64nvme: 'ChecksumCRC64NVME', + sha1: 'ChecksumSHA1', + sha256: 'ChecksumSHA256', +}; + +// Two distinct base64 placeholder digests per algorithm. Sized to the real +// digest lengths so the test data looks realistic, though the validator +// itself doesn't enforce length. +const SAMPLE_DIGESTS = { + crc32: ['AQIDBA==', 'BQYHCA=='], + crc32c: ['CQoLDA==', 'DQ4PEA=='], + crc64nvme: ['AQIDBAUGBwg=', 'CQoLDA0ODxA='], + sha1: ['YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=', 'YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmI='], + sha256: ['YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=', + 'YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmI='], +}; + +// Every AWS-valid (algorithm, type) combination, plus the implicit default. +// See validateChecksums.getChecksumDataFromMPUHeaders for the source of truth. +const MATRIX = [ + { algorithm: 'crc32', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'crc32', type: 'FULL_OBJECT', isDefault: false }, + { algorithm: 'crc32c', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'crc32c', type: 'FULL_OBJECT', isDefault: false }, + { algorithm: 'crc64nvme', type: 'FULL_OBJECT', isDefault: false }, + { algorithm: 'crc64nvme', type: 'FULL_OBJECT', isDefault: true }, + { algorithm: 'sha1', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'sha256', type: 'COMPOSITE', isDefault: false }, +]; + +function makeStoredPart(partNumber, checksum) { + const value = { + ETag: 'd41d8cd98f00b204e9800998ecf8427e', + Size: 5242880, + partLocations: [{ key: `data-${partNumber}`, dataStoreName: 'us-east-1' }], + }; + if (checksum) { + value.ChecksumAlgorithm = checksum.algorithm; + value.ChecksumValue = checksum.value; + } + return { + key: `${UPLOAD_ID}${SPLITTER}${partNumber}`, + value, + }; +} + +function makeJsonPart(partNumber, eTag, checksums) { + const part = { + PartNumber: [String(partNumber)], + ETag: [`"${eTag}"`], + }; + if (checksums) { + Object.entries(checksums).forEach(([tag, value]) => { + part[tag] = [value]; + }); + } + return part; +} + +function pickWrongAlgo(algo) { + return Object.keys(TAG_BY_ALGO).find(a => a !== algo); +} + +describe('validatePerPartChecksums', () => { + describe('AWS combination matrix', () => { + MATRIX.forEach(({ algorithm, type, isDefault }) => { + const label = `${algorithm}/${type}${isDefault ? 
' (default)' : ''}`; + const tag = TAG_BY_ALGO[algorithm]; + const [d1, d2] = SAMPLE_DIGESTS[algorithm]; + const mpuChecksum = { algorithm, type, isDefault }; + + const stored = [ + makeStoredPart(1, { algorithm, value: d1 }), + makeStoredPart(2, { algorithm, value: d2 }), + ]; + + describe(label, () => { + it('should accept when every part includes the matching checksum', () => { + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1', { [tag]: d1 }), + makeJsonPart(2, 'etag2', { [tag]: d2 }), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + assert.strictEqual(err, null); + }); + + it('should return BadDigest when a part uses the wrong checksum field', () => { + const wrongAlgo = pickWrongAlgo(algorithm); + const wrongTag = TAG_BY_ALGO[wrongAlgo]; + const wrongDigest = SAMPLE_DIGESTS[wrongAlgo][0]; + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1', { [wrongTag]: wrongDigest }), + makeJsonPart(2, 'etag2', { [tag]: d2 }), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + assert(err); + assert.strictEqual(err.is.BadDigest, true); + // AWS-style message: "The {algo} you specified for part {N} did not match what we received." + assert.strictEqual( + err.description, + `The ${wrongAlgo} you specified for part 1 did ` + + 'not match what we received.'); + }); + + it('should return InvalidPart when the matching field has the wrong value', () => { + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1', { [tag]: d1 }), + makeJsonPart(2, 'etag2', { [tag]: d1 }), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + assert(err); + assert.strictEqual(err.is.InvalidPart, true); + // AWS reuses its generic InvalidPart message — no algorithm + // or part number in the wording. + assert.strictEqual( + err.description, + 'One or more of the specified parts could not be ' + + 'found. The part may not have been uploaded, or ' + + 'the specified entity tag may not match the ' + + 'part\'s entity tag.'); + }); + + const requiresPerPart = type === 'COMPOSITE' && !isDefault; + const missingLabel = requiresPerPart + ? 
'should return InvalidRequest when a part is missing its checksum' + : 'should accept a parts list missing per-part checksums'; + it(missingLabel, () => { + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1', { [tag]: d1 }), + makeJsonPart(2, 'etag2'), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + if (requiresPerPart) { + assert(err); + assert.strictEqual(err.is.InvalidRequest, true); + assert(err.description.includes(algorithm)); + assert(err.description.includes('part 2 in the request')); + } else { + assert.strictEqual(err, null); + } + }); + }); + }); + }); + + describe('edge cases', () => { + it('should accept an empty parts list', () => { + const mpuChecksum = { + algorithm: 'sha256', + type: 'COMPOSITE', + isDefault: false, + }; + const err = validatePerPartChecksums( + { Part: [] }, [], SPLITTER, mpuChecksum); + assert.strictEqual(err, null); + }); + + it('should accept a parts list with no Part array (treated as empty)', () => { + const mpuChecksum = { + algorithm: 'crc64nvme', + type: 'FULL_OBJECT', + isDefault: true, + }; + const err = validatePerPartChecksums( + {}, [], SPLITTER, mpuChecksum); + assert.strictEqual(err, null); + }); + + it('should accept a FULL_OBJECT mixed list (one part with checksum, one without)', () => { + const mpuChecksum = { + algorithm: 'crc32', + type: 'FULL_OBJECT', + isDefault: false, + }; + const [d1, d2] = SAMPLE_DIGESTS.crc32; + const stored = [ + makeStoredPart(1, { algorithm: 'crc32', value: d1 }), + makeStoredPart(2, { algorithm: 'crc32', value: d2 }), + ]; + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1', { ChecksumCRC32: d1 }), + makeJsonPart(2, 'etag2'), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + assert.strictEqual(err, null); + }); + + it('should not enforce per-part presence when MPU algorithm is unknown', () => { + // CreateMPU should never let this state through, but guard against + // an "InvalidRequest: using a undefined checksum" error if it did. 
+ const mpuChecksum = { + algorithm: undefined, + type: 'COMPOSITE', + isDefault: false, + }; + const stored = [makeStoredPart(1, null), makeStoredPart(2, null)]; + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1'), + makeJsonPart(2, 'etag2'), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + assert.strictEqual(err, null); + }); + + it('should return InvalidPart when stored part has no checksum but request does', () => { + const mpuChecksum = { + algorithm: 'sha256', + type: 'COMPOSITE', + isDefault: false, + }; + const stored = [makeStoredPart(1, null)]; + const jsonList = { + Part: [ + makeJsonPart(1, 'etag1', { + ChecksumSHA256: SAMPLE_DIGESTS.sha256[0], + }), + ], + }; + const err = validatePerPartChecksums( + jsonList, stored, SPLITTER, mpuChecksum); + assert(err); + assert.strictEqual(err.is.InvalidPart, true); + }); + }); +}); + +describe('CompleteMultipartUpload x-amz-checksum-type header', () => { + const log = new DummyRequestLogger(); + const authInfo = makeAuthInfo('accessKey1'); + const namespace = 'default'; + const bucketName = 'bucketname-checksum-type'; + const objectKey = 'testObject'; + + const bucketPutRequest = { + bucketName, + namespace, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: '/', + post: '<?xml version="1.0" encoding="UTF-8"?><CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + '<LocationConstraint>scality-internal-mem</LocationConstraint>' + '</CreateBucketConfiguration>', + actionImplicitDenies: false, + }; + + function setupMpu(initiateHeaders, cb) { + async.waterfall([ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => { + const initiateRequest = { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + ...initiateHeaders, + }, + url: `/${objectKey}?uploads`, + actionImplicitDenies: false, + }; + initiateMultipartUpload(authInfo, initiateRequest, log, next); + }, + (xml, corsHeaders, next) => parseString(xml, next), + (json, next) => { + const uploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partBody = Buffer.from('I am a part\n', 'utf8'); + const partHash = crypto.createHash('md5').update(partBody).digest('hex'); + const partRequest = new DummyRequest({ + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { partNumber: '1', uploadId }, + partHash, + actionImplicitDenies: false, + }, partBody); + objectPutPart(authInfo, partRequest, undefined, log, + err => next(err, uploadId, partHash)); + }, + ], cb); + } + + function makeCompleteRequest(uploadId, partHash, extraHeaders) { + const completeBody = '<CompleteMultipartUpload>' + '<Part>' + '<PartNumber>1</PartNumber>' + `<ETag>"${partHash}"</ETag>` + '</Part>' + '</CompleteMultipartUpload>'; + return { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${uploadId}`, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + ...extraHeaders, + }, + query: { uploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + } + + beforeEach(() => cleanup()); + + it('should accept CompleteMPU when no x-amz-checksum-type header is sent', done => { + const initiateHeaders = { + 'x-amz-checksum-algorithm': 'CRC32', + 'x-amz-checksum-type': 'FULL_OBJECT', + }; + setupMpu(initiateHeaders, (err, uploadId, partHash) => { + assert.ifError(err); + const req = makeCompleteRequest(uploadId, partHash, {}); + completeMultipartUpload(authInfo, req, log, completeErr => { + assert.ifError(completeErr); + done(); + }); + }); + }); + + it('should accept CompleteMPU when x-amz-checksum-type matches the MPU type', done => { + const initiateHeaders
= { + 'x-amz-checksum-algorithm': 'CRC32', + 'x-amz-checksum-type': 'FULL_OBJECT', + }; + setupMpu(initiateHeaders, (err, uploadId, partHash) => { + assert.ifError(err); + const req = makeCompleteRequest(uploadId, partHash, { + 'x-amz-checksum-type': 'FULL_OBJECT', + }); + completeMultipartUpload(authInfo, req, log, completeErr => { + assert.ifError(completeErr); + done(); + }); + }); + }); + + it('should reject CompleteMPU with InvalidRequest when x-amz-checksum-type does not match the MPU type', done => { + const initiateHeaders = { + 'x-amz-checksum-algorithm': 'CRC32', + 'x-amz-checksum-type': 'FULL_OBJECT', + }; + setupMpu(initiateHeaders, (err, uploadId, partHash) => { + assert.ifError(err); + const req = makeCompleteRequest(uploadId, partHash, { + 'x-amz-checksum-type': 'COMPOSITE', + }); + completeMultipartUpload(authInfo, req, log, completeErr => { + assert(completeErr); + assert.strictEqual(completeErr.is.InvalidRequest, true); + // AWS-style mode-mismatch wording. + assert.strictEqual( + completeErr.description, + 'The upload was created using the FULL_OBJECT checksum ' + + 'mode. The complete request must use the same checksum ' + + 'mode.'); + done(); + }); + }); + }); + + it('should reject CompleteMPU with InvalidRequest when x-amz-checksum-type value is bogus', done => { + const initiateHeaders = { + 'x-amz-checksum-algorithm': 'CRC32', + 'x-amz-checksum-type': 'FULL_OBJECT', + }; + setupMpu(initiateHeaders, (err, uploadId, partHash) => { + assert.ifError(err); + const req = makeCompleteRequest(uploadId, partHash, { + 'x-amz-checksum-type': 'BOGUS', + }); + completeMultipartUpload(authInfo, req, log, completeErr => { + assert(completeErr); + assert.strictEqual(completeErr.is.InvalidRequest, true); + assert.strictEqual( + completeErr.description, + 'Value for x-amz-checksum-type header is invalid.'); + done(); + }); + }); + }); + + it('should compare x-amz-checksum-type case-insensitively', done => { + const initiateHeaders = { + 'x-amz-checksum-algorithm': 'CRC32', + 'x-amz-checksum-type': 'FULL_OBJECT', + }; + setupMpu(initiateHeaders, (err, uploadId, partHash) => { + assert.ifError(err); + const req = makeCompleteRequest(uploadId, partHash, { + 'x-amz-checksum-type': 'full_object', + }); + completeMultipartUpload(authInfo, req, log, completeErr => { + assert.ifError(completeErr); + done(); + }); + }); + }); +}); + +describe('CompleteMultipartUpload body-checksum bypass', () => { + const log = new DummyRequestLogger(); + + it('validateMethodChecksumNoChunking returns null for completeMultipartUpload ' + + 'even when x-amz-checksum-sha256 does not match the body digest', async () => { + const body = Buffer.from( + '<CompleteMultipartUpload><Part><PartNumber>1</PartNumber>' + + '<ETag>"abc"</ETag></Part></CompleteMultipartUpload>'); + // A syntactically valid SHA256 base64 digest that is NOT the digest of `body` + // (it's the digest of the empty string). On CompleteMPU this header carries + // the expected final-object checksum, not a body checksum, so pre-validation + // must skip it.
+ const finalObjectChecksum = + '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='; + const request = { + apiMethod: 'completeMultipartUpload', + headers: { 'x-amz-checksum-sha256': finalObjectChecksum }, + }; + const err = await validateMethodChecksumNoChunking(request, body, log); + assert.strictEqual(err, null); + }); + + it('validateMethodChecksumNoChunking still rejects body mismatch for methods ' + + 'that remain in checksumedMethods (sanity check)', async () => { + const body = Buffer.from('{"Objects":[]}'); + const finalObjectChecksum = + '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='; + const request = { + apiMethod: 'multiObjectDelete', + headers: { 'x-amz-checksum-sha256': finalObjectChecksum }, + }; + const err = await validateMethodChecksumNoChunking(request, body, log); + assert(err, 'expected an error for body checksum mismatch'); + assert.strictEqual(err.is.BadDigest, true); + }); +}); From 20f3e2484125a1313a700f15a78e2da3e8e7787b Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Thu, 7 May 2026 17:12:53 +0200 Subject: [PATCH 03/12] CLDSRV-898: calculate final FULL_OBJECT (crc combine) and COMPOSITE checksum --- lib/api/apiUtils/integrity/crcCombine.js | 185 +++++++++++++++++ .../apiUtils/integrity/validateChecksums.js | 82 ++++++++ .../apiUtils/integrity/computeMpuChecksums.js | 187 ++++++++++++++++++ .../unit/api/apiUtils/integrity/crcCombine.js | 178 +++++++++++++++++ 4 files changed, 632 insertions(+) create mode 100644 lib/api/apiUtils/integrity/crcCombine.js create mode 100644 tests/unit/api/apiUtils/integrity/computeMpuChecksums.js create mode 100644 tests/unit/api/apiUtils/integrity/crcCombine.js diff --git a/lib/api/apiUtils/integrity/crcCombine.js b/lib/api/apiUtils/integrity/crcCombine.js new file mode 100644 index 0000000000..0a616be12b --- /dev/null +++ b/lib/api/apiUtils/integrity/crcCombine.js @@ -0,0 +1,185 @@ +'use strict'; + +// Combine two right-shift CRCs (zlib's gf2_matrix_* trick) without using BigInt +// inside the hot loops. Each GF(2) operator matrix is stored as a Uint32Array +// of `2 * dim` words, where row n is packed as [lo32, hi32]. For 32-bit CRCs +// the high halves stay zero and the per-row loop exits early; for the 64-bit +// CRC (crc64nvme) the pair-of-u32s representation lets every XOR/shift stay on +// 32-bit ints. +// +// References: +// zlib crc32_combine (canonical C implementation): +// https://github.com/madler/zlib/blob/master/crc32.c +// Mark Adler, "How does CRC32 work?" — derivation of the matrix trick: +// https://stackoverflow.com/a/23126768 +// AWS S3 multipart upload full-object checksums: +// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + +function gf2MatrixTimes(mat, vecLo, vecHi) { + let sumLo = 0; + let sumHi = 0; + let lo = vecLo; + let hi = vecHi; + let i = 0; + while ((lo | hi) !== 0) { + if (lo & 1) { + sumLo ^= mat[2 * i]; + sumHi ^= mat[2 * i + 1]; + } + lo = (lo >>> 1) | ((hi & 1) << 31); + hi = hi >>> 1; + i += 1; + } + return [sumLo >>> 0, sumHi >>> 0]; +} + +function gf2MatrixSquare(square, mat, dim) { + for (let n = 0; n < dim; n += 1) { + const r = gf2MatrixTimes(mat, mat[2 * n], mat[2 * n + 1]); + square[2 * n] = r[0]; + square[2 * n + 1] = r[1]; + } +} + +// Per (polyReversed, dim), a lazily-grown chain of zero-byte operators. +// state.byteOps[j] is the GF(2) operator for prepending 2^j zero bytes +// (i.e. M^(8 * 2^j)). Building this chain is the dominant cost of crcCombine +// and depends only on the polynomial, so we cache it across calls. 
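+// +// The identity this module relies on (zlib's construction, restated as a +// sketch): feeding len(b) zero bytes through the CRC register is a GF(2) +// operator application, so +// crc(a ‖ b) === apply(M^(8 * len(b)), crc(a)) XOR crc(b). +// Concretely, with the CRC32 parameters callers pass in: +// crcCombine(crcOf(a), crcOf(b), BigInt(b.length), 0xEDB88320n, 32) +// === crcOf(Buffer.concat([a, b])) +// for any buffers a and b, where crcOf stands for any standard CRC32 +// decoded to a bigint (e.g. the crc32 digest helper in validateChecksums.js).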
+const chainCache = new Map(); + +function getOrInitChain(polyReversed, dim) { + let state = chainCache.get(polyReversed); + if (state !== undefined) { + return state; + } + + // M^1: one-zero-bit operator. Column 0 is the polynomial; column k>0 is + // 1 << (k - 1) — what right-shifting a state with bit k set produces. + const m1 = new Uint32Array(2 * dim); + m1[0] = Number(polyReversed & 0xFFFFFFFFn); + m1[1] = Number((polyReversed >> 32n) & 0xFFFFFFFFn); + for (let k = 1; k < dim; k += 1) { + const bit = k - 1; + if (bit < 32) { + m1[2 * k] = (1 << bit) >>> 0; + } else { + m1[2 * k + 1] = (1 << (bit - 32)) >>> 0; + } + } + + const m2 = new Uint32Array(2 * dim); + gf2MatrixSquare(m2, m1, dim); + const m4 = new Uint32Array(2 * dim); + gf2MatrixSquare(m4, m2, dim); + const m8 = new Uint32Array(2 * dim); // operator for 1 zero byte + gf2MatrixSquare(m8, m4, dim); + + state = { dim, byteOps: [m8] }; + chainCache.set(polyReversed, state); + return state; +} + +function ensureChainLen(state, j) { + while (state.byteOps.length <= j) { + const prev = state.byteOps[state.byteOps.length - 1]; + const next = new Uint32Array(prev.length); + gf2MatrixSquare(next, prev, state.dim); + state.byteOps.push(next); + } +} + +/** + * Combine two CRCs of adjacent byte chunks. + * + * crcCombine(crc(a), crc(b), len(b), polyReversed, dim) === crc(a ‖ b) + * + * Works for any right-shift CRC of width `dim` (32 or 64) given its + * bit-reversed polynomial. The squaring chain for `polyReversed` is cached + * across calls, so the per-call cost is just popcount(len2) cheap operator + * applications plus the BigInt boundary conversions. + * + * @param {bigint} crc1 - CRC of the first chunk + * @param {bigint} crc2 - CRC of the second chunk + * @param {bigint} len2 - byte length of the second chunk + * @param {bigint} polyReversed - bit-reversed polynomial + * @param {number} dim - CRC width in bits (32 or 64) + * @returns {bigint} CRC of the concatenated chunk, masked to `dim` bits + */ +function crcCombine(crc1, crc2, len2, polyReversed, dim) { + const mask = (1n << BigInt(dim)) - 1n; + if (len2 === 0n) { + return crc1 & mask; + } + + const state = getOrInitChain(polyReversed, dim); + + let cLo = Number(crc1 & 0xFFFFFFFFn); + let cHi = Number((crc1 >> 32n) & 0xFFFFFFFFn); + + // Walk the bits of len2 (each bit represents a power-of-two number of + // zero bytes to prepend); apply the cached operator for every set bit. + let n = len2; + let j = 0; + while (n !== 0n) { + if ((n & 1n) === 1n) { + ensureChainLen(state, j); + const r = gf2MatrixTimes(state.byteOps[j], cLo, cHi); + cLo = r[0]; + cHi = r[1]; + } + n >>= 1n; + j += 1; + } + + const c2Lo = Number(crc2 & 0xFFFFFFFFn); + const c2Hi = Number((crc2 >> 32n) & 0xFFFFFFFFn); + cLo = (cLo ^ c2Lo) >>> 0; + cHi = (cHi ^ c2Hi) >>> 0; + + return ((BigInt(cHi) << 32n) | BigInt(cLo)) & mask; +} + +function base64ToBigInt(b64) { + const buf = Buffer.from(b64, 'base64'); + let r = 0n; + for (let i = 0; i < buf.length; i += 1) { + r = (r << 8n) | BigInt(buf[i]); + } + return r; +} + +function bigIntToBase64(value, dim) { + const nBytes = dim / 8; + const buf = Buffer.alloc(nBytes); + let v = value; + for (let i = nBytes - 1; i >= 0; i -= 1) { + buf[i] = Number(v & 0xFFn); + v >>= 8n; + } + return buf.toString('base64'); +} + +/** + * Combine N per-part CRCs into the full-object CRC, base64-encoded. 
+ * + * @param {Array<{value: string, length: number}>} parts - per-part data in + * part order; `value` is the base64-encoded per-part CRC, `length` is the + * byte length of that part + * @param {bigint} polyReversed - bit-reversed polynomial + * @param {number} dim - CRC width in bits (32 or 64) + * @returns {string} base64-encoded combined CRC + */ +function combineCrcs(parts, polyReversed, dim) { + let combined = base64ToBigInt(parts[0].value); + for (let i = 1; i < parts.length; i += 1) { + combined = crcCombine( + combined, + base64ToBigInt(parts[i].value), + BigInt(parts[i].length), + polyReversed, + dim); + } + return bigIntToBase64(combined, dim); +} + +module.exports = { combineCrcs, crcCombine }; diff --git a/lib/api/apiUtils/integrity/validateChecksums.js b/lib/api/apiUtils/integrity/validateChecksums.js index f186a7711a..aff42d1f17 100644 --- a/lib/api/apiUtils/integrity/validateChecksums.js +++ b/lib/api/apiUtils/integrity/validateChecksums.js @@ -4,6 +4,7 @@ const { Crc32c } = require('@aws-crypto/crc32c'); const { CrtCrc64Nvme } = require('@aws-sdk/crc64-nvme-crt'); const { errors: ArsenalErrors, errorInstances } = require('arsenal'); const { config } = require('../../../Config'); +const { combineCrcs } = require('./crcCombine'); const defaultChecksumData = Object.freeze( { algorithm: 'crc64nvme', isTrailer: false, expected: undefined }); @@ -514,6 +515,85 @@ function getChecksumDataFromMPUHeaders(headers) { return { algorithm: algo, type: defaultChecksumType[algo], isDefault: false }; } +// ============================================================================= +// MPU final-object checksum computation +// ============================================================================= +// +// CompleteMultipartUpload composes a final-object checksum from the per-part +// checksums recorded at UploadPart time. AWS defines two modes: +// +// COMPOSITE : finalChecksum = base64(algo(decode(c1) || ... || decode(cN))) +// + "-N" suffix, where N is the number of parts. +// Supported on CRC32, CRC32C, SHA1, SHA256. +// +// FULL_OBJECT : finalChecksum is the CRC of the entire object's bytes, +// reconstructed by combining the per-part CRCs via CRC +// linearization. CRC-only: CRC32, CRC32C, +// CRC64NVME. + +// Bit-reversed polynomials used by the right-shift CRC implementations that +// the @aws-crypto/* and @aws-sdk/crc64-nvme-crt packages produce. +const FULL_OBJECT_POLYS = Object.freeze({ + crc32: { polyReversed: 0xEDB88320n, dim: 32 }, + crc32c: { polyReversed: 0x82F63B78n, dim: 32 }, + crc64nvme: { polyReversed: 0x9A6C9329AC4BC9B5n, dim: 64 }, +}); + +// Algorithms whose digest is synchronous, which is the full set AWS allows +// for COMPOSITE MPUs. crc64nvme is excluded because (a) AWS does not allow +// COMPOSITE for CRC64NVME and (b) its CRT-backed digest is async. +const COMPOSITE_ALGOS = new Set(['crc32', 'crc32c', 'sha1', 'sha256']); + +/** + * Compute the COMPOSITE final-object checksum for a CompleteMultipartUpload. + * + * final = base64(algo(decode(c1) || decode(c2) || ... || decode(cN))) + "-N" + * + * Supported algorithms: crc32, crc32c, sha1, sha256. (crc64nvme is excluded — + * AWS does not allow COMPOSITE for CRC64NVME.) 
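+ * + * Worked shape (digests c1, c2 are illustrative placeholders): for a + * 2-part sha256 MPU, computeCompositeMPUChecksum('sha256', [c1, c2]) + * returns base64(sha256(decode(c1) || decode(c2))) + '-2', the same + * checksum-of-checksums value AWS reports for a COMPOSITE MPU.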
+ * + * @param {string} algorithm - lowercase algorithm name + * @param {string[]} partChecksumsBase64 - per-part checksums in part order, + * each base64-encoded (the format stored on MPU part metadata) + * @returns {{ checksum: string, error: null } + * | { checksum: null, error: { code: string, details: object } }} + */ +function computeCompositeMPUChecksum(algorithm, partChecksumsBase64) { + if (!COMPOSITE_ALGOS.has(algorithm)) { + return { checksum: null, error: { code: ChecksumError.MPUAlgoNotSupported, details: { algorithm } } }; + } + + const concat = Buffer.concat(partChecksumsBase64.map(c => Buffer.from(c, 'base64'))); + const digest = algorithms[algorithm].digest(concat); + return { + checksum: `${digest}-${partChecksumsBase64.length}`, + error: null, + }; +} + +/** + * Compute the FULL_OBJECT final-object checksum for a CompleteMultipartUpload. + * + * Returns the CRC of the assembled object's bytes, derived purely from the + * per-part CRCs and part lengths via CRC linearization. + * + * Supported algorithms: crc32, crc32c, crc64nvme. + * + * @param {string} algorithm - lowercase algorithm name + * @param {Array<{value: string, length: number}>} parts - per-part data in + * part order; `value` is the base64-encoded per-part CRC, `length` is the + * byte length of that part + * @returns {{ checksum: string, error: null } + * | { checksum: null, error: { code: string, details: object } }} + */ +function computeFullObjectMPUChecksum(algorithm, parts) { + const params = FULL_OBJECT_POLYS[algorithm]; + if (!params) { + return { checksum: null, error: { code: ChecksumError.MPUAlgoNotSupported, details: { algorithm } } }; + } + return { checksum: combineCrcs(parts, params.polyReversed, params.dim), error: null }; +} + module.exports = { ChecksumError, defaultChecksumData, @@ -524,4 +604,6 @@ module.exports = { algorithms, checksumedMethods, getChecksumDataFromMPUHeaders, + computeCompositeMPUChecksum, + computeFullObjectMPUChecksum, }; diff --git a/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js b/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js new file mode 100644 index 0000000000..319f5b69af --- /dev/null +++ b/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js @@ -0,0 +1,187 @@ +const assert = require('assert'); +const crypto = require('crypto'); + +const { + algorithms, + computeCompositeMPUChecksum, + computeFullObjectMPUChecksum, +} = require('../../../../../lib/api/apiUtils/integrity/validateChecksums'); + +// Random part bodies. Per-test randomness still satisfies the assertions +// because each test only checks combine(parts) === algo(concat(parts)), +// which holds for any byte sequence. +function makeParts(count, size) { + const parts = []; + for (let i = 0; i < count; i += 1) { + parts.push(crypto.randomBytes(size)); + } + return parts; +} + +// -- COMPOSITE ------------------------------------------------------------ + +describe('computeCompositeMPUChecksum', () => { + const parts = makeParts(3, 1024); + + const COMPOSITE_ALGOS = ['crc32', 'crc32c', 'sha1', 'sha256']; + + COMPOSITE_ALGOS.forEach(algo => { + const label = algo.toUpperCase(); + it(`should match ${label}(decode(c1) || ... 
|| decode(cN)) + "-N"`, () => { + const partChecksums = parts.map(p => algorithms[algo].digest(p)); + const expectedConcat = Buffer.concat( + partChecksums.map(c => Buffer.from(c, 'base64'))); + const expected = `${algorithms[algo].digest(expectedConcat)}-3`; + + const got = computeCompositeMPUChecksum(algo, partChecksums); + assert.strictEqual(got.error, null); + assert.strictEqual(got.checksum, expected); + }); + }); + + it('should return N=1 for a single part', () => { + const partChecksums = [algorithms.sha256.digest(parts[0])]; + const got = computeCompositeMPUChecksum('sha256', partChecksums); + assert.strictEqual(got.error, null); + assert(got.checksum.endsWith('-1')); + }); + + it('should return an error object on unsupported algorithm', () => { + const got = computeCompositeMPUChecksum('md5', ['AAAA']); + assert.strictEqual(got.checksum, null); + assert(got.error); + assert.strictEqual(got.error.code, 'MPUAlgoNotSupported'); + assert.deepStrictEqual(got.error.details, { algorithm: 'md5' }); + }); + + it('should return an error object for crc64nvme (not allowed for COMPOSITE)', () => { + const got = computeCompositeMPUChecksum('crc64nvme', ['AQIDBAUGBwg=']); + assert.strictEqual(got.checksum, null); + assert.strictEqual(got.error.code, 'MPUAlgoNotSupported'); + }); +}); + +// -- FULL_OBJECT ---------------------------------------------------------- + +describe('computeFullObjectMPUChecksum', () => { + // Validation strategy: build N concrete part bodies, run each through the + // canonical CRC implementation to get the per-part CRC, then compare the + // combined result against the CRC of the concatenation of all bodies. + + const FULL_OBJECT_ALGOS = ['crc32', 'crc32c', 'crc64nvme']; + + async function buildPartInputs(parts, algo) { + const partInputs = []; + for (const b of parts) { + // `await` is a no-op for the sync CRC32/CRC32C digests and resolves + // the Promise for the async CRC64NVME digest. + partInputs.push({ + value: await algorithms[algo].digest(b), + length: b.length, + }); + } + return partInputs; + } + + FULL_OBJECT_ALGOS.forEach(algo => { + const label = algo.toUpperCase(); + + it(`should match ${label}(concat(parts)) for varied part sizes`, async () => { + const parts = [ + crypto.randomBytes(5 * 1024 * 1024), + crypto.randomBytes(5 * 1024 * 1024 + 7), + crypto.randomBytes(19), + ]; + const partInputs = await buildPartInputs(parts, algo); + const result = computeFullObjectMPUChecksum(algo, partInputs); + const direct = await algorithms[algo].digest(Buffer.concat(parts)); + assert.strictEqual(result.error, null); + assert.strictEqual(result.checksum, direct); + }); + + it(`should return the part CRC unchanged for a single-part ${label} MPU`, async () => { + const buf = crypto.randomBytes(15); + const partCrc = await algorithms[algo].digest(buf); + const got = computeFullObjectMPUChecksum(algo, [{ + value: partCrc, length: buf.length, + }]); + assert.strictEqual(got.error, null); + assert.strictEqual(got.checksum, partCrc); + }); + + it(`should handle many small ${label} parts (16 × 1 MiB)`, async () => { + // Exercises multiple combine iterations and the matrix-squaring loop. 
+ const parts = makeParts(16, 1 * 1024 * 1024); + const partInputs = await buildPartInputs(parts, algo); + const result = computeFullObjectMPUChecksum(algo, partInputs); + const direct = await algorithms[algo].digest(Buffer.concat(parts)); + assert.strictEqual(result.error, null); + assert.strictEqual(result.checksum, direct); + }); + }); + + it('should return an error object on unsupported algorithm', () => { + const got = computeFullObjectMPUChecksum( + 'sha256', [{ value: 'AAAA', length: 4 }]); + assert.strictEqual(got.checksum, null); + assert(got.error); + assert.strictEqual(got.error.code, 'MPUAlgoNotSupported'); + assert.deepStrictEqual(got.error.details, { algorithm: 'sha256' }); + }); + + it('should handle 10000 CRC64NVME parts of uniform 5 MiB (cache hits)', async function f() { + // 10 000 parts is the AWS MPU max; CRC64NVME has the largest + // (64-bit) combine matrix. Validates correctness against the CRC + // of the equivalent 50 GiB object, computed by streaming the same + // chunk through CrtCrc64Nvme without materializing the object. + this.timeout(120000); + + const partLen = 5 * 1024 * 1024; + const nParts = 10000; + const chunk = crypto.randomBytes(partLen); + const partCrc = await algorithms.crc64nvme.digest(chunk); + + const parts = new Array(nParts); + for (let i = 0; i < nParts; i += 1) { + parts[i] = { value: partCrc, length: partLen }; + } + + const got = computeFullObjectMPUChecksum('crc64nvme', parts); + assert.strictEqual(got.error, null); + + const ref = algorithms.crc64nvme.createHash(); + for (let i = 0; i < nParts; i += 1) { + ref.update(chunk); + } + const expected = await algorithms.crc64nvme.digestFromHash(ref); + assert.strictEqual(got.checksum, expected); + }); + + it('should handle 10000 CRC64NVME parts of distinct lengths (cache misses)', async function f() { + // Every part has a strictly different length, so each combine call + // touches a different mix of `len2` bit positions. Validates + // correctness against a streaming reference over independently + // generated part bodies. + this.timeout(60000); + + const baseLen = 64 * 1024; + const nParts = 10000; + const parts = new Array(nParts); + const ref = algorithms.crc64nvme.createHash(); + for (let i = 0; i < nParts; i += 1) { + const len = baseLen + i; + const buf = crypto.randomBytes(len); + parts[i] = { + value: await algorithms.crc64nvme.digest(buf), + length: len, + }; + ref.update(buf); + } + + const got = computeFullObjectMPUChecksum('crc64nvme', parts); + assert.strictEqual(got.error, null); + + const expected = await algorithms.crc64nvme.digestFromHash(ref); + assert.strictEqual(got.checksum, expected); + }); +}); diff --git a/tests/unit/api/apiUtils/integrity/crcCombine.js b/tests/unit/api/apiUtils/integrity/crcCombine.js new file mode 100644 index 0000000000..fd5c6e367e --- /dev/null +++ b/tests/unit/api/apiUtils/integrity/crcCombine.js @@ -0,0 +1,178 @@ +const assert = require('assert'); +const crypto = require('crypto'); + +const { crcCombine, combineCrcs } = require('../../../../../lib/api/apiUtils/integrity/crcCombine'); +const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/validateChecksums'); + +// Reversed polynomial + bit width for each algorithm we use the combine +// routine with. Same values that validateChecksums.js feeds in. 
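+// For reference, these are the bit-reversals of the familiar MSB-first +// generator polynomials: CRC-32's 0x04C11DB7 reverses to 0xEDB88320, and +// CRC-32C's 0x1EDC6F41 reverses to 0x82F63B78.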
+const SPECS = [ + { algo: 'crc32', polyReversed: 0xEDB88320n, dim: 32 }, + { algo: 'crc32c', polyReversed: 0x82F63B78n, dim: 32 }, + { algo: 'crc64nvme', polyReversed: 0x9A6C9329AC4BC9B5n, dim: 64 }, +]; + +function base64ToBigInt(b64) { + const buf = Buffer.from(b64, 'base64'); + let r = 0n; + for (let i = 0; i < buf.length; i += 1) { + r = (r << 8n) | BigInt(buf[i]); + } + return r; +} + +async function crcOf(algo, buf) { + return base64ToBigInt(await algorithms[algo].digest(buf)); +} + +describe('crcCombine', () => { + SPECS.forEach(({ algo, polyReversed, dim }) => { + const label = algo.toUpperCase(); + const mask = (1n << BigInt(dim)) - 1n; + + describe(`${label} (dim=${dim})`, () => { + it('should combine(crc1, crc2, len2) to crc(chunk1 ‖ chunk2) for random data', async () => { + const a = crypto.randomBytes(1024); + const b = crypto.randomBytes(1024); + const crc1 = await crcOf(algo, a); + const crc2 = await crcOf(algo, b); + const got = crcCombine(crc1, crc2, BigInt(b.length), polyReversed, dim); + const expected = await crcOf(algo, Buffer.concat([a, b])); + assert.strictEqual(got, expected); + }); + + it('should return crc1 unchanged when len2 = 0 (identity)', async () => { + const a = crypto.randomBytes(64); + const crc1 = await crcOf(algo, a); + const got = crcCombine(crc1, 0n, 0n, polyReversed, dim); + assert.strictEqual(got, crc1 & mask); + }); + + it('should equal the original CRC when combined with the CRC of empty', async () => { + // CRC of an empty chunk under the AWS implementations is 0. + const a = crypto.randomBytes(128); + const crc1 = await crcOf(algo, a); + const crcEmpty = await crcOf(algo, Buffer.alloc(0)); + const got = crcCombine(crc1, crcEmpty, 0n, polyReversed, dim); + assert.strictEqual(got, crc1 & mask); + }); + + it('should mask the result to `dim` bits', async () => { + const a = crypto.randomBytes(256); + const b = crypto.randomBytes(256); + const got = crcCombine( + await crcOf(algo, a), + await crcOf(algo, b), + BigInt(b.length), + polyReversed, + dim); + assert.strictEqual(got & mask, got); + assert.strictEqual(got >> BigInt(dim), 0n); + }); + + it('should be associative across three chunks', async () => { + const a = crypto.randomBytes(300); + const b = crypto.randomBytes(400); + const c = crypto.randomBytes(500); + const crcA = await crcOf(algo, a); + const crcB = await crcOf(algo, b); + const crcC = await crcOf(algo, c); + + // Left-fold: combine(combine(A,B), C) + const ab = crcCombine(crcA, crcB, BigInt(b.length), polyReversed, dim); + const left = crcCombine(ab, crcC, BigInt(c.length), polyReversed, dim); + + // Right-fold: combine(A, combine(B, C), len(B)+len(C)) + const bc = crcCombine(crcB, crcC, BigInt(c.length), polyReversed, dim); + const right = crcCombine( + crcA, bc, BigInt(b.length + c.length), polyReversed, dim); + + assert.strictEqual(left, right); + const expected = await crcOf(algo, Buffer.concat([a, b, c])); + assert.strictEqual(left, expected); + }); + + it('should handle single-byte chunks', async () => { + const a = crypto.randomBytes(1); + const b = crypto.randomBytes(1); + const got = crcCombine( + await crcOf(algo, a), + await crcOf(algo, b), + 1n, + polyReversed, + dim); + const expected = await crcOf(algo, Buffer.concat([a, b])); + assert.strictEqual(got, expected); + }); + + it('should handle odd-length chunk2 sizes (not a multiple of 8 bytes)', async () => { + // Sizes chosen to exercise the matrix-squaring loop's + // odd/even alternation through both branches. 
+ const sizes = [1, 7, 15, 33, 257, 1023, 65537]; + const a = crypto.randomBytes(64); + const crcA = await crcOf(algo, a); + for (const size of sizes) { + const b = crypto.randomBytes(size); + const got = crcCombine( + crcA, + await crcOf(algo, b), + BigInt(size), + polyReversed, + dim); + const expected = await crcOf(algo, Buffer.concat([a, b])); + assert.strictEqual(got, expected, `failed at size=${size}`); + } + }); + }); + }); +}); + +describe('combineCrcs', () => { + SPECS.forEach(({ algo, polyReversed, dim }) => { + const label = algo.toUpperCase(); + + describe(`${label} (dim=${dim})`, () => { + it('should return the part CRC unchanged for a single-part input', async () => { + const buf = crypto.randomBytes(13); + const partCrc = await algorithms[algo].digest(buf); + const got = combineCrcs( + [{ value: partCrc, length: buf.length }], + polyReversed, + dim); + assert.strictEqual(got, partCrc); + }); + + it('should match crc(concat) for two parts — base64 in, base64 out', async () => { + const a = crypto.randomBytes(1024); + const b = crypto.randomBytes(2048); + const parts = [ + { value: await algorithms[algo].digest(a), length: a.length }, + { value: await algorithms[algo].digest(b), length: b.length }, + ]; + const got = combineCrcs(parts, polyReversed, dim); + const expected = await algorithms[algo].digest(Buffer.concat([a, b])); + assert.strictEqual(got, expected); + }); + + it('should match crc(concat) for N parts of varied sizes', async () => { + const bufs = [ + crypto.randomBytes(7), + crypto.randomBytes(513), + crypto.randomBytes(1024), + crypto.randomBytes(2049), + crypto.randomBytes(64), + ]; + const parts = []; + for (const buf of bufs) { + parts.push({ + value: await algorithms[algo].digest(buf), + length: buf.length, + }); + } + const got = combineCrcs(parts, polyReversed, dim); + const expected = await algorithms[algo].digest(Buffer.concat(bufs)); + assert.strictEqual(got, expected); + }); + }); + }); +}); \ No newline at end of file From a9b86c3e8355d8e3d3b54a24bfa74a06b8859251 Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Thu, 7 May 2026 17:17:14 +0200 Subject: [PATCH 04/12] CLDSRV-898: CompleteMPU calculate and validate final checksum with checksum header --- lib/api/completeMultipartUpload.js | 153 ++++++++++++- tests/unit/api/completeMultipartUpload.js | 263 +++++++++++++++++++++- 2 files changed, 412 insertions(+), 4 deletions(-) diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index 3be6b302aa..7b4eea6796 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -25,7 +25,11 @@ const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { validatePutVersionId } = require('./apiUtils/object/coldStorage'); const { validateQuotas } = require('./apiUtils/quotas/quotaUtils'); const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); -const { algorithms: checksumAlgorithms } = require('./apiUtils/integrity/validateChecksums'); +const { + algorithms: checksumAlgorithms, + computeCompositeMPUChecksum, + computeFullObjectMPUChecksum, +} = require('./apiUtils/integrity/validateChecksums'); const versionIdUtils = versioning.VersionID; @@ -104,6 +108,136 @@ function validatePerPartChecksums(jsonList, storedParts, mpuSplitter, mpuChecksu return null; } +/** + * Compute the final-object checksum for a CompleteMultipartUpload from the + * stored MPU configuration and per-part checksums. 
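+ * (Mode recap: COMPOSITE re-digests the concatenation of the decoded + * per-part digests and appends '-N'; FULL_OBJECT combines the per-part + * CRCs mathematically via combineCrcs, so no part data is re-read.)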
Returns null when the MPU + * has no checksum configured, when any part is missing its stored + * ChecksumValue (defensive — should not happen in steady state), or when the + * compute primitive itself reports an error. Errors are logged; the caller + * proceeds without a final-object checksum so the response simply omits the + * checksum fields rather than failing the whole CompleteMPU. + * + * @param {array} storedParts - parts from services.getMPUparts (carry ChecksumValue) + * @param {array} filteredPartList - validated, ordered subset matching jsonList + * @param {object} storedMetadata - MPU overview metadata + * @param {string} mpuSplitter - splitter used in part keys + * @param {string} uploadId - for log context + * @param {object} log - werelogs logger + * @returns {object|null} { algorithm, type, value } or null + */ +function computeFinalChecksum(storedParts, filteredPartList, storedMetadata, mpuSplitter, uploadId, log) { + const algorithm = storedMetadata.checksumAlgorithm; + const type = storedMetadata.checksumType; + if (!algorithm || !type) { + return null; + } + + const storedByKey = new Map(); + storedParts.forEach(p => storedByKey.set(p.key, p)); + + const partInputs = []; + const missingPartNumbers = []; + for (const fp of filteredPartList) { + const stored = storedByKey.get(fp.key); + const value = stored && stored.value && stored.value.ChecksumValue; + if (!value) { + missingPartNumbers.push(fp.key.split(mpuSplitter)[1]); + continue; + } + partInputs.push({ value, length: Number.parseInt(fp.size, 10) }); + } + + if (missingPartNumbers.length > 0) { + log.error('one or more MPU parts missing checksum value; ' + + 'skipping final-object checksum computation', { + uploadId, + algorithm, + type, + missingPartNumbers, + }); + return null; + } + + let result; + if (type === 'COMPOSITE') { + result = computeCompositeMPUChecksum(algorithm, partInputs.map(p => p.value)); + } else if (type === 'FULL_OBJECT') { + result = computeFullObjectMPUChecksum(algorithm, partInputs); + } else { + log.error('unknown MPU checksumType; skipping final-object checksum computation', { + uploadId, checksumType: type, + }); + return null; + } + + if (result.error) { + log.error('final-object checksum computation failed', { + uploadId, + checksumErrorCode: result.error.code, + checksumErrorDetails: result.error.details, + }); + return null; + } + + return { algorithm, type, value: result.checksum }; +} + +/** + * Validate the optional x-amz-checksum- header on a CompleteMPU + * request against the computed final-object checksum. + * + * AWS contract: when present, x-amz-checksum- on CompleteMPU is the + * client's assertion of what the final-object checksum should be (not a + * body digest). If it doesn't match the value we computed — including + * when it names a different algorithm than the MPU was created with, or + * when we couldn't compute one at all — return BadDigest. No header → no-op. 
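+ * Illustrative example (placeholder values, mirroring the unit tests): for
+ * an MPU created with sha256/COMPOSITE over three parts whose computed
+ * value is "abc-3", a request carrying "x-amz-checksum-sha256: abc-3"
+ * passes; any other value, or a header naming a different algorithm,
+ * fails with BadDigest.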
+ * + * @param {object} headers - request.headers (lowercased keys) + * @param {object|null} finalChecksum - { algorithm, type, value } or null + * @param {string} uploadId - for log context + * @param {object} log - werelogs logger + * @returns {Error|null} + */ +function validateExpectedFinalChecksum(headers, finalChecksum, uploadId, log) { + let foundAlgo = null; + let foundValue = null; + for (const algo of Object.keys(checksumAlgorithms)) { + const headerName = `x-amz-checksum-${algo}`; + if (headerName in headers) { + if (foundAlgo) { + log.error('multiple x-amz-checksum- headers on CompleteMPU', { + uploadId, + algorithms: [foundAlgo, algo], + }); + return errorInstances.InvalidRequest.customizeDescription( + 'Expecting a single x-amz-checksum- header. ' + + 'Multiple checksum Types are not allowed.'); + } + foundAlgo = algo; + foundValue = headers[headerName]; + } + } + + if (!foundAlgo) { + return null; + } + + if (!finalChecksum || finalChecksum.algorithm !== foundAlgo || finalChecksum.value !== foundValue) { + log.error('expected final-object checksum did not match computed value', { + uploadId, + headerAlgorithm: foundAlgo, + expected: foundValue, + computedAlgorithm: finalChecksum && finalChecksum.algorithm, + computed: finalChecksum && finalChecksum.value, + }); + return errorInstances.BadDigest.customizeDescription( + `The ${foundAlgo.toUpperCase()} you specified did not ` + + 'match the calculated checksum.'); + } + + return null; +} + /* Format of xml request: @@ -166,6 +300,7 @@ function completeMultipartUpload(authInfo, request, log, callback) { hostname, }; let oldByteLength = null; + let finalChecksum = null; const responseHeaders = {}; let versionId; @@ -372,6 +507,18 @@ function completeMultipartUpload(authInfo, request, log, callback) { function processParts(destBucket, objMD, mpuBucket, storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey, filteredPartsObj, totalMPUSize, next) { + // External-handled MPUs (ingestion / external backends) come in + // with completeObjData set and no filteredPartsObj — the data + // store already aggregated the parts, and we have no per-part + // info to feed the compute step. Skip in that case. 
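+            // When the compute step does run, finalChecksum is either null
+            // or { algorithm, type, value }; illustrative shapes:
+            // { algorithm: 'sha256', type: 'COMPOSITE', value: '<b64>-3' }
+            // or { algorithm: 'crc64nvme', type: 'FULL_OBJECT', value: '<b64>' }.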
+ if (filteredPartsObj) { + finalChecksum = computeFinalChecksum(storedParts, filteredPartsObj.partList, storedMetadata, + splitter, uploadId, log); + const expectedErr = validateExpectedFinalChecksum(request.headers, finalChecksum, uploadId, log); + if (expectedErr) { + return next(expectedErr, destBucket); + } + } // if mpu was completed on backend that stored mpu MD externally, // skip MD processing steps if (completeObjData && skipMpuPartProcessing(completeObjData)) { @@ -741,4 +888,6 @@ function completeMultipartUpload(authInfo, request, log, callback) { } module.exports = completeMultipartUpload; -module.exports.validatePerPartChecksums = validatePerPartChecksums; \ No newline at end of file +module.exports.validatePerPartChecksums = validatePerPartChecksums; +module.exports.computeFinalChecksum = computeFinalChecksum; +module.exports.validateExpectedFinalChecksum = validateExpectedFinalChecksum; \ No newline at end of file diff --git a/tests/unit/api/completeMultipartUpload.js b/tests/unit/api/completeMultipartUpload.js index bc515aa4b7..e37781aabc 100644 --- a/tests/unit/api/completeMultipartUpload.js +++ b/tests/unit/api/completeMultipartUpload.js @@ -9,8 +9,12 @@ const initiateMultipartUpload = const objectPutPart = require('../../../lib/api/objectPutPart'); const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); -const { validatePerPartChecksums } = completeMultipartUpload; -const { validateMethodChecksumNoChunking } = +const { + validatePerPartChecksums, + computeFinalChecksum, + validateExpectedFinalChecksum, +} = completeMultipartUpload; +const { validateMethodChecksumNoChunking, algorithms } = require('../../../lib/api/apiUtils/integrity/validateChecksums'); const DummyRequest = require('../DummyRequest'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); @@ -480,3 +484,258 @@ describe('CompleteMultipartUpload body-checksum bypass', () => { assert.strictEqual(err.is.BadDigest, true); }); }); + +describe('computeFinalChecksum', () => { + const log = new DummyRequestLogger(); + const uploadId = UPLOAD_ID; + + function partListFromStored(stored) { + return stored.map(s => ({ + key: s.key, + ETag: `"${s.value.ETag}"`, + size: s.value.Size, + locations: s.value.partLocations, + })); + } + + it('should return null when MPU has no checksumAlgorithm', () => { + const stored = [ + makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), {}, SPLITTER, uploadId, log); + assert.strictEqual(got, null); + }); + + it('should return null when MPU has no checksumType', () => { + const stored = [ + makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: 'sha256' }, SPLITTER, uploadId, log); + assert.strictEqual(got, null); + }); + + it('should return COMPOSITE checksum with -N suffix for SHA256 MPU', () => { + const [d1, d2, d3] = [ + SAMPLE_DIGESTS.sha256[0], + SAMPLE_DIGESTS.sha256[1], + SAMPLE_DIGESTS.sha256[0], + ]; + const stored = [ + makeStoredPart(1, { algorithm: 'sha256', value: d1 }), + makeStoredPart(2, { algorithm: 'sha256', value: d2 }), + makeStoredPart(3, { algorithm: 'sha256', value: d3 }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' }, + SPLITTER, uploadId, log); + assert(got); + 
assert.strictEqual(got.algorithm, 'sha256'); + assert.strictEqual(got.type, 'COMPOSITE'); + assert(got.value.endsWith('-3'), + `expected -N suffix, got ${got.value}`); + // computeCompositeMPUChecksum's deterministic output for these + // exact placeholder digests: + const expected = crypto + .createHash('sha256') + .update(Buffer.concat([d1, d2, d3].map(x => Buffer.from(x, 'base64')))) + .digest('base64'); + assert.strictEqual(got.value, `${expected}-3`); + }); + + ['sha1', 'crc32', 'crc32c'].forEach(algo => { + it(`should compute COMPOSITE checksum for ${algo.toUpperCase()}`, () => { + const [d1, d2] = SAMPLE_DIGESTS[algo]; + const stored = [ + makeStoredPart(1, { algorithm: algo, value: d1 }), + makeStoredPart(2, { algorithm: algo, value: d2 }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: algo, checksumType: 'COMPOSITE' }, + SPLITTER, uploadId, log); + assert(got); + assert.strictEqual(got.algorithm, algo); + assert.strictEqual(got.type, 'COMPOSITE'); + assert(got.value.endsWith('-2')); + }); + }); + + it('should return FULL_OBJECT checksum without -N suffix for CRC64NVME', async () => { + // Real CRCs over real bytes so we can verify against the equivalent + // direct CRC of the concatenation. + const a = crypto.randomBytes(1024); + const b = crypto.randomBytes(2048); + const dA = await algorithms.crc64nvme.digest(a); + const dB = await algorithms.crc64nvme.digest(b); + const stored = [ + { key: `${UPLOAD_ID}${SPLITTER}1`, + value: { ETag: 'e', Size: a.length, + ChecksumAlgorithm: 'crc64nvme', ChecksumValue: dA, + partLocations: [] } }, + { key: `${UPLOAD_ID}${SPLITTER}2`, + value: { ETag: 'e', Size: b.length, + ChecksumAlgorithm: 'crc64nvme', ChecksumValue: dB, + partLocations: [] } }, + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: 'crc64nvme', checksumType: 'FULL_OBJECT' }, + SPLITTER, uploadId, log); + assert(got); + assert.strictEqual(got.algorithm, 'crc64nvme'); + assert.strictEqual(got.type, 'FULL_OBJECT'); + assert(!got.value.includes('-'), + `FULL_OBJECT should have no -N suffix, got ${got.value}`); + const expected = await algorithms.crc64nvme.digest(Buffer.concat([a, b])); + assert.strictEqual(got.value, expected); + }); + + it('should return null and log when a part is missing ChecksumValue', () => { + const stored = [ + makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), + makeStoredPart(2, null), + makeStoredPart(3, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[1] }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' }, + SPLITTER, uploadId, log); + assert.strictEqual(got, null); + }); + + it('should return null when checksumType is unknown', () => { + const stored = [ + makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: 'sha256', checksumType: 'WEIRD' }, + SPLITTER, uploadId, log); + assert.strictEqual(got, null); + }); + + it('should return null when underlying compute reports an error ' + + '(crc64nvme COMPOSITE is not allowed)', () => { + const stored = [ + makeStoredPart(1, { algorithm: 'crc64nvme', + value: SAMPLE_DIGESTS.crc64nvme[0] }), + ]; + const got = computeFinalChecksum( + stored, partListFromStored(stored), + { checksumAlgorithm: 'crc64nvme', checksumType: 'COMPOSITE' }, + SPLITTER, uploadId, log); + 
assert.strictEqual(got, null); + }); + + it('should compute over filteredPartList (subset), not all storedParts', () => { + const [d1, d2, d3] = [ + SAMPLE_DIGESTS.sha256[0], SAMPLE_DIGESTS.sha256[1], + SAMPLE_DIGESTS.sha256[0], + ]; + const stored = [ + makeStoredPart(1, { algorithm: 'sha256', value: d1 }), + makeStoredPart(2, { algorithm: 'sha256', value: d2 }), + makeStoredPart(3, { algorithm: 'sha256', value: d3 }), + ]; + // User completes only parts 1 and 3, dropping 2 (orphan). + const filtered = [stored[0], stored[2]].map(s => ({ + key: s.key, ETag: `"${s.value.ETag}"`, + size: s.value.Size, locations: s.value.partLocations, + })); + const got = computeFinalChecksum( + stored, filtered, + { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' }, + SPLITTER, uploadId, log); + assert(got); + assert(got.value.endsWith('-2'), + `should reflect 2 completed parts, got ${got.value}`); + const expected = crypto + .createHash('sha256') + .update(Buffer.concat([d1, d3].map(x => Buffer.from(x, 'base64')))) + .digest('base64'); + assert.strictEqual(got.value, `${expected}-2`); + }); +}); + +describe('validateExpectedFinalChecksum', () => { + const log = new DummyRequestLogger(); + const uploadId = UPLOAD_ID; + + it('should return null when no x-amz-checksum- header is present', () => { + const err = validateExpectedFinalChecksum( + { 'host': 'example.com' }, + { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, + uploadId, log); + assert.strictEqual(err, null); + }); + + it('should ignore x-amz-checksum-type and x-amz-checksum-algorithm headers', () => { + const err = validateExpectedFinalChecksum( + { + 'x-amz-checksum-type': 'COMPOSITE', + 'x-amz-checksum-algorithm': 'SHA256', + }, + { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, + uploadId, log); + assert.strictEqual(err, null); + }); + + it('should return null when header value matches computed value', () => { + const err = validateExpectedFinalChecksum( + { 'x-amz-checksum-sha256': 'abc-3' }, + { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, + uploadId, log); + assert.strictEqual(err, null); + }); + + it('should return BadDigest when header value differs', () => { + const err = validateExpectedFinalChecksum( + { 'x-amz-checksum-sha256': 'wrong-3' }, + { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, + uploadId, log); + assert(err); + assert.strictEqual(err.is.BadDigest, true); + assert(err.description.includes('SHA256')); + }); + + it('should return BadDigest when header algorithm differs from MPU', () => { + const err = validateExpectedFinalChecksum( + { 'x-amz-checksum-crc32': 'aGVsbG8=' }, + { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, + uploadId, log); + assert(err); + assert.strictEqual(err.is.BadDigest, true); + assert(err.description.includes('CRC32')); + }); + + it('should return BadDigest when header is present but finalChecksum is null', () => { + const err = validateExpectedFinalChecksum( + { 'x-amz-checksum-sha256': 'abc-3' }, + null, uploadId, log); + assert(err); + assert.strictEqual(err.is.BadDigest, true); + }); + + it('should return null when finalChecksum is null and no header present', () => { + const err = validateExpectedFinalChecksum( + { 'host': 'example.com' }, null, uploadId, log); + assert.strictEqual(err, null); + }); + + it('should return InvalidRequest when multiple x-amz-checksum-* headers are sent', () => { + const err = validateExpectedFinalChecksum( + { + 'x-amz-checksum-sha256': 'abc-3', + 'x-amz-checksum-crc32': 'def', + }, + { algorithm: 
'sha256', type: 'COMPOSITE', value: 'abc-3' },
+            uploadId, log);
+        assert(err);
+        assert.strictEqual(err.is.InvalidRequest, true);
+        assert(err.description.includes('Multiple checksum Types'));
+    });
+});

From 8d026354e95c9e6744a7ebd6ed09b482cb9f7856 Mon Sep 17 00:00:00 2001
From: Leif Henriksen
Date: Thu, 7 May 2026 22:09:52 +0200
Subject: [PATCH 05/12] CLDSRV-898: CompleteMPU store FULL_OBJECT checksum in
 object metadata

---
 lib/api/completeMultipartUpload.js        |   9 +-
 tests/unit/api/completeMultipartUpload.js | 168 ++++++++++++++++++++++
 2 files changed, 176 insertions(+), 1 deletion(-)

diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js
index 7b4eea6796..fbf26a2d4e 100644
--- a/lib/api/completeMultipartUpload.js
+++ b/lib/api/completeMultipartUpload.js
@@ -578,7 +578,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
         const keysNotNeeded =
             ['initiator', 'partLocations', 'key',
                 'initiated', 'uploadId', 'content-type', 'expires',
-                'eventualStorageBucket', 'dataStoreName'];
+                'eventualStorageBucket', 'dataStoreName',
+                'checksumAlgorithm', 'checksumType', 'checksumIsDefault'];
         const metadataKeysToPull =
             Object.keys(storedMetadata).filter(item =>
                 keysNotNeeded.indexOf(item) === -1);
@@ -609,6 +610,12 @@ function completeMultipartUpload(authInfo, request, log, callback) {
             overheadField: constants.overheadField,
             log,
         };
+        // Persist the FULL_OBJECT final-object checksum on the new ObjectMD.
+        // COMPOSITE is intentionally skipped to prevent metadata bloat;
+        // persisting it is deferred to S3C-10399.
+        if (finalChecksum && finalChecksum.type === 'FULL_OBJECT') {
+            metaStoreParams.checksum = finalChecksum;
+        }
         // If key already exists
         if (objMD) {
             // Re-use creation-time if we can
diff --git a/tests/unit/api/completeMultipartUpload.js b/tests/unit/api/completeMultipartUpload.js
index e37781aabc..ba9e3a8328 100644
--- a/tests/unit/api/completeMultipartUpload.js
+++ b/tests/unit/api/completeMultipartUpload.js
@@ -9,6 +9,7 @@ const initiateMultipartUpload =
 const objectPutPart = require('../../../lib/api/objectPutPart');
 const completeMultipartUpload =
     require('../../../lib/api/completeMultipartUpload');
+const metadata = require('../../../lib/metadata/wrapper');
 const {
     validatePerPartChecksums,
     computeFinalChecksum,
@@ -739,3 +740,170 @@ describe('validateExpectedFinalChecksum', () => {
         assert(err.description.includes('Multiple checksum Types'));
     });
 });
+
+describe('CompleteMultipartUpload final-object checksum storage', () => {
+    const log = new DummyRequestLogger();
+    const authInfo = makeAuthInfo('accessKey1');
+    const namespace = 'default';
+    const bucketName = 'bucketname-final-checksum';
+    const objectKey = 'testObject';
+    const partBody = Buffer.from('I am a part\n', 'utf8');
+    const partHash = crypto.createHash('md5').update(partBody).digest('hex');
+
+    const bucketPutRequest = {
+        bucketName,
+        namespace,
+        headers: { host: `${bucketName}.s3.amazonaws.com` },
+        url: '/',
+        post: '<?xml version="1.0" encoding="UTF-8"?><CreateBucketConfiguration>' +
+            '<LocationConstraint>scality-internal-mem</LocationConstraint>' +
+            '</CreateBucketConfiguration>',
+        actionImplicitDenies: false,
+    };
+
+    // (algorithm, type) pairs valid for an MPU per AWS rules.
+    // shouldStore reflects Part 3's gating: only FULL_OBJECT is persisted.
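+    // For reference, the gate exercised here is the one added to
+    // completeMultipartUpload.js by this patch:
+    //     if (finalChecksum && finalChecksum.type === 'FULL_OBJECT') {
+    //         metaStoreParams.checksum = finalChecksum;
+    //     }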
+    const STORAGE_MATRIX = [
+        { algorithm: 'crc32', type: 'FULL_OBJECT', shouldStore: true },
+        { algorithm: 'crc32c', type: 'FULL_OBJECT', shouldStore: true },
+        { algorithm: 'crc64nvme', type: 'FULL_OBJECT', shouldStore: true },
+        { algorithm: 'crc32', type: 'COMPOSITE', shouldStore: false },
+        { algorithm: 'crc32c', type: 'COMPOSITE', shouldStore: false },
+        { algorithm: 'sha1', type: 'COMPOSITE', shouldStore: false },
+        { algorithm: 'sha256', type: 'COMPOSITE', shouldStore: false },
+    ];
+
+    function bucketPutP() {
+        return new Promise((resolve, reject) =>
+            bucketPut(authInfo, bucketPutRequest, log,
+                err => err ? reject(err) : resolve()));
+    }
+
+    function initiateMpuP(headers) {
+        return new Promise((resolve, reject) => {
+            initiateMultipartUpload(authInfo, {
+                bucketName, namespace, objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers },
+                url: `/${objectKey}?uploads`,
+                actionImplicitDenies: false,
+            }, log, (err, xml) => {
+                if (err) return reject(err);
+                return parseString(xml, (parseErr, json) => parseErr
+                    ? reject(parseErr)
+                    : resolve(json.InitiateMultipartUploadResult.UploadId[0]));
+            });
+        });
+    }
+
+    function uploadPartP(uploadId, headers = {}) {
+        return new Promise((resolve, reject) => {
+            const partRequest = new DummyRequest({
+                bucketName, namespace, objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers },
+                url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
+                query: { partNumber: '1', uploadId },
+                partHash,
+                actionImplicitDenies: false,
+            }, partBody);
+            objectPutPart(authInfo, partRequest, undefined, log,
+                err => err ? reject(err) : resolve());
+        });
+    }
+
+    function completeMpuP(uploadId, partChecksumXml = '') {
+        const completeBody = '<CompleteMultipartUpload>' +
+            '<Part>' +
+            '<PartNumber>1</PartNumber>' +
+            `<ETag>"${partHash}"</ETag>` +
+            partChecksumXml +
+            '</Part>' +
+            '</CompleteMultipartUpload>';
+        return new Promise((resolve, reject) => {
+            completeMultipartUpload(authInfo, {
+                bucketName, namespace, objectKey,
+                parsedHost: 's3.amazonaws.com',
+                url: `/${objectKey}?uploadId=${uploadId}`,
+                headers: { host: `${bucketName}.s3.amazonaws.com` },
+                query: { uploadId },
+                post: completeBody,
+                actionImplicitDenies: false,
+            }, log, err => err ? reject(err) : resolve());
+        });
+    }
+
+    function fetchObjectMDP() {
+        return new Promise((resolve, reject) =>
+            metadata.getObjectMD(bucketName, objectKey, {}, log,
+                (err, md) => err ? reject(err) : resolve(md)));
+    }
+
+    beforeEach(() => cleanup());
+
+    STORAGE_MATRIX.forEach(({ algorithm, type, shouldStore }) => {
+        const upper = algorithm.toUpperCase();
+        const verb = shouldStore ? 'should persist' : 'should not persist';
+        const tag = TAG_BY_ALGO[algorithm];
+
+        it(`${verb} ${type} ${upper} checksum on the ObjectMD`, async () => {
+            await bucketPutP();
+            const uploadId = await initiateMpuP({
+                'x-amz-checksum-algorithm': upper,
+                'x-amz-checksum-type': type,
+            });
+            // Pre-compute the part's checksum so we can supply it on
+            // UploadPart and (for non-default COMPOSITE MPUs) in the
+            // Complete body.
+            const partChecksum = await algorithms[algorithm].digest(partBody);
+            const uploadHeaders = type === 'COMPOSITE'
+                ? { [`x-amz-checksum-${algorithm}`]: partChecksum }
+                : {};
+            await uploadPartP(uploadId, uploadHeaders);
+            const partChecksumXml = type === 'COMPOSITE'
+                ? `<${tag}>${partChecksum}</${tag}>`
+                : '';
+            await completeMpuP(uploadId, partChecksumXml);
+            const md = await fetchObjectMDP();
+            if (shouldStore) {
+                assert(md.checksum,
+                    `expected ${type} ${upper} checksum on ObjectMD`);
+                assert.strictEqual(md.checksum.checksumAlgorithm, algorithm);
+                assert.strictEqual(md.checksum.checksumType, type);
+                assert(typeof md.checksum.checksumValue === 'string');
+                assert(md.checksum.checksumValue.length > 0);
+            } else {
+                assert.strictEqual(md.checksum, undefined,
+                    `${type} ${upper} should not persist on ObjectMD`);
+            }
+        });
+    });
+
+    it('should persist FULL_OBJECT CRC64NVME checksum for default MPU (no checksum headers)', async () => {
+        // No x-amz-checksum-algorithm / x-amz-checksum-type headers — AWS
+        // defaults to crc64nvme/FULL_OBJECT and still persists the result.
+        await bucketPutP();
+        const uploadId = await initiateMpuP({});
+        await uploadPartP(uploadId);
+        await completeMpuP(uploadId);
+        const md = await fetchObjectMDP();
+        assert(md.checksum, 'default MPU should still persist a checksum');
+        assert.strictEqual(md.checksum.checksumAlgorithm, 'crc64nvme');
+        assert.strictEqual(md.checksum.checksumType, 'FULL_OBJECT');
+    });
+
+    it('should not leak checksumAlgorithm/Type/IsDefault into ObjectMD top-level fields', async () => {
+        // keysNotNeeded keeps these MPU-overview-only keys out of metaHeaders,
+        // which prevents them from sticking around on the final ObjectMD.
+        await bucketPutP();
+        const uploadId = await initiateMpuP({
+            'x-amz-checksum-algorithm': 'CRC32',
+            'x-amz-checksum-type': 'FULL_OBJECT',
+        });
+        await uploadPartP(uploadId);
+        await completeMpuP(uploadId);
+        const md = await fetchObjectMDP();
+        assert.strictEqual(md.checksumAlgorithm, undefined);
+        assert.strictEqual(md.checksumType, undefined);
+        assert.strictEqual(md.checksumIsDefault, undefined);
+    });
+});
+

From 31b6a73a5ec980ecf08e57c88dfb680cbc1e012a Mon Sep 17 00:00:00 2001
From: Leif Henriksen
Date: Thu, 7 May 2026 23:08:02 +0200
Subject: [PATCH 06/12] CLDSRV-898: CompleteMPU set checksum value and type in
 response XML body

---
 lib/api/completeMultipartUpload.js        |   5 +
 tests/unit/api/completeMultipartUpload.js | 158 ++++++++++++++++++++++
 2 files changed, 163 insertions(+)

diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js
index fbf26a2d4e..259bd3fba8 100644
--- a/lib/api/completeMultipartUpload.js
+++ b/lib/api/completeMultipartUpload.js
@@ -879,6 +879,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
             const isVersionedObj = vcfg && vcfg.Status === 'Enabled';

             xmlParams.eTag = `"${aggregateETag}"`;
+            if (finalChecksum) {
+                xmlParams.checksumAlgorithm = finalChecksum.algorithm;
+                xmlParams.checksumValue = finalChecksum.value;
+                xmlParams.checksumType = finalChecksum.type;
+            }
             const xml = convertToXml('completeMultipartUpload', xmlParams);
             pushMetric('completeMultipartUpload', log, {
                 oldByteLength: isVersionedObj ? null : oldByteLength,
diff --git a/tests/unit/api/completeMultipartUpload.js b/tests/unit/api/completeMultipartUpload.js
index ba9e3a8328..0b06395944 100644
--- a/tests/unit/api/completeMultipartUpload.js
+++ b/tests/unit/api/completeMultipartUpload.js
@@ -907,3 +907,161 @@ describe('CompleteMultipartUpload final-object checksum storage', () => {
     });
 });

+describe('CompleteMultipartUpload final-object checksum response', () => {
+    const log = new DummyRequestLogger();
+    const authInfo = makeAuthInfo('accessKey1');
+    const namespace = 'default';
+    const bucketName = 'bucketname-final-checksum-resp';
+    const objectKey = 'testObject';
+    const partBody = Buffer.from('I am a part\n', 'utf8');
+    const partHash = crypto.createHash('md5').update(partBody).digest('hex');
+
+    const bucketPutRequest = {
+        bucketName,
+        namespace,
+        headers: { host: `${bucketName}.s3.amazonaws.com` },
+        url: '/',
+        post: '<?xml version="1.0" encoding="UTF-8"?><CreateBucketConfiguration>' +
+            '<LocationConstraint>scality-internal-mem</LocationConstraint>' +
+            '</CreateBucketConfiguration>',
+        actionImplicitDenies: false,
+    };
+
+    const RESPONSE_MATRIX = [
+        { algorithm: 'crc32', type: 'FULL_OBJECT' },
+        { algorithm: 'crc32c', type: 'FULL_OBJECT' },
+        { algorithm: 'crc64nvme', type: 'FULL_OBJECT' },
+        { algorithm: 'crc32', type: 'COMPOSITE' },
+        { algorithm: 'crc32c', type: 'COMPOSITE' },
+        { algorithm: 'sha1', type: 'COMPOSITE' },
+        { algorithm: 'sha256', type: 'COMPOSITE' },
+    ];
+
+    function bucketPutP() {
+        return new Promise((resolve, reject) =>
+            bucketPut(authInfo, bucketPutRequest, log,
+                err => err ? reject(err) : resolve()));
+    }
+
+    function initiateMpuP(headers) {
+        return new Promise((resolve, reject) => {
+            initiateMultipartUpload(authInfo, {
+                bucketName, namespace, objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers },
+                url: `/${objectKey}?uploads`,
+                actionImplicitDenies: false,
+            }, log, (err, xml) => {
+                if (err) return reject(err);
+                return parseString(xml, (parseErr, json) => parseErr
+                    ? reject(parseErr)
+                    : resolve(json.InitiateMultipartUploadResult.UploadId[0]));
+            });
+        });
+    }
+
+    function uploadPartP(uploadId, headers = {}) {
+        return new Promise((resolve, reject) => {
+            const partRequest = new DummyRequest({
+                bucketName, namespace, objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers },
+                url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
+                query: { partNumber: '1', uploadId },
+                partHash,
+                actionImplicitDenies: false,
+            }, partBody);
+            objectPutPart(authInfo, partRequest, undefined, log,
+                err => err ? reject(err) : resolve());
+        });
+    }
+
+    // Resolves with { xml, headers } so callers can inspect both the
+    // response body and the response headers.
+    function completeMpuP(uploadId, partChecksumXml = '') {
+        const completeBody = '<CompleteMultipartUpload>' +
+            '<Part>' +
+            '<PartNumber>1</PartNumber>' +
+            `<ETag>"${partHash}"</ETag>` +
+            partChecksumXml +
+            '</Part>' +
+            '</CompleteMultipartUpload>';
+        return new Promise((resolve, reject) => {
+            completeMultipartUpload(authInfo, {
+                bucketName, namespace, objectKey,
+                parsedHost: 's3.amazonaws.com',
+                url: `/${objectKey}?uploadId=${uploadId}`,
+                headers: { host: `${bucketName}.s3.amazonaws.com` },
+                query: { uploadId },
+                post: completeBody,
+                actionImplicitDenies: false,
+            }, log, (err, xml, headers) => err
+                ? reject(err)
+                : resolve({ xml, headers }));
+        });
+    }
+
+    function parseXmlP(xmlStr) {
+        return new Promise((resolve, reject) =>
+            parseString(xmlStr, (err, json) => err ? reject(err) : resolve(json)));
+    }
+
+    beforeEach(() => cleanup());
+
+    RESPONSE_MATRIX.forEach(({ algorithm, type }) => {
+        const upper = algorithm.toUpperCase();
+        const tag = TAG_BY_ALGO[algorithm];
+
+        it(`should emit ${type} ${upper} in response XML`, async () => {
+            await bucketPutP();
+            const uploadId = await initiateMpuP({
+                'x-amz-checksum-algorithm': upper,
+                'x-amz-checksum-type': type,
+            });
+            const partChecksum = await algorithms[algorithm].digest(partBody);
+            const uploadHeaders = type === 'COMPOSITE'
+                ? { [`x-amz-checksum-${algorithm}`]: partChecksum }
+                : {};
+            await uploadPartP(uploadId, uploadHeaders);
+            const partChecksumXml = type === 'COMPOSITE'
+                ? `<${tag}>${partChecksum}</${tag}>`
+                : '';
+            const { xml, headers } = await completeMpuP(uploadId, partChecksumXml);
+            const json = await parseXmlP(xml);
+            const result = json.CompleteMultipartUploadResult;
+            assert(result[tag], `expected ${tag} in response XML`);
+            const xmlValue = result[tag][0];
+            assert(typeof xmlValue === 'string' && xmlValue.length > 0);
+            assert.strictEqual(result.ChecksumType[0], type);
+            // COMPOSITE values carry the "-N" suffix; FULL_OBJECT values do not.
+            if (type === 'COMPOSITE') {
+                assert(xmlValue.endsWith('-1'),
+                    `expected -1 suffix for 1-part COMPOSITE, got ${xmlValue}`);
+            } else {
+                assert(!xmlValue.includes('-'),
+                    `FULL_OBJECT value should have no suffix, got ${xmlValue}`);
+            }
+            // AWS-verified: CompleteMPU does NOT emit
+            // x-amz-checksum-* / x-amz-checksum-type response headers.
+            assert.strictEqual(headers[`x-amz-checksum-${algorithm}`], undefined);
+            assert.strictEqual(headers['x-amz-checksum-type'], undefined);
+        });
+    });
+
+    it('should emit FULL_OBJECT CRC64NVME for default MPU (no checksum headers)', async () => {
+        // AWS-verified: a default MPU still surfaces the CRC64NVME
+        // checksum and ChecksumType=FULL_OBJECT in the CompleteMPU response
+        // BODY (not headers).
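+        // Expected response body shape (values illustrative):
+        //   <CompleteMultipartUploadResult>
+        //     ...
+        //     <ChecksumCRC64NVME>Base64Crc==</ChecksumCRC64NVME>
+        //     <ChecksumType>FULL_OBJECT</ChecksumType>
+        //   </CompleteMultipartUploadResult>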
+ await bucketPutP(); + const uploadId = await initiateMpuP({}); + await uploadPartP(uploadId); + const { xml, headers } = await completeMpuP(uploadId); + const json = await parseXmlP(xml); + const result = json.CompleteMultipartUploadResult; + assert(result.ChecksumCRC64NVME, 'default MPU should emit ChecksumCRC64NVME'); + assert.strictEqual(result.ChecksumType[0], 'FULL_OBJECT'); + assert.strictEqual(headers['x-amz-checksum-crc64nvme'], undefined); + assert.strictEqual(headers['x-amz-checksum-type'], undefined); + }); +}); + + From 840c5a54192c0e8f25afc8ea77e0e17ac34ccf9d Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Thu, 7 May 2026 23:33:11 +0200 Subject: [PATCH 07/12] CLDSRV-898: CompleteMPU checksum functional tests --- .../test/object/completeMpuChecksum.js | 152 ++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js diff --git a/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js b/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js new file mode 100644 index 0000000000..0a51cc3530 --- /dev/null +++ b/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js @@ -0,0 +1,152 @@ +'use strict'; + +const assert = require('assert'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + HeadObjectCommand, + DeleteBucketCommand, +} = require('@aws-sdk/client-s3'); + +const withV4 = require('../support/withV4'); +const BucketUtility = require('../../lib/utility/bucket-util'); +const { algorithms } = + require('../../../../../lib/api/apiUtils/integrity/validateChecksums'); + +const bucket = `mpu-complete-checksum-${Date.now()}`; +const partBody = Buffer.from('I am a part body for complete-MPU testing', 'utf8'); + +// All AWS-valid (algorithm, type) pairs for an MPU. CompleteMPU should +// surface the resulting final-object checksum and ChecksumType in the +// response for every combination here, plus the implicit default. 
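+// The pairs absent from this matrix (SHA1/SHA256 with FULL_OBJECT, and
+// CRC64NVME with COMPOSITE) are invalid per AWS and are rejected at
+// CreateMultipartUpload with InvalidRequest, so they are not exercised here.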
+const COMBOS = [ + { algo: 'CRC32', type: 'FULL_OBJECT' }, + { algo: 'CRC32', type: 'COMPOSITE' }, + { algo: 'CRC32C', type: 'FULL_OBJECT' }, + { algo: 'CRC32C', type: 'COMPOSITE' }, + { algo: 'CRC64NVME', type: 'FULL_OBJECT' }, + { algo: 'SHA1', type: 'COMPOSITE' }, + { algo: 'SHA256', type: 'COMPOSITE' }, +]; + +const tagField = algo => `Checksum${algo}`; + +describe('CompleteMultipartUpload final-object checksum', () => + withV4(sigCfg => { + let bucketUtil; + let s3; + + before(async () => { + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + }); + + after(async () => { + await bucketUtil.empty(bucket); + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); + }); + + COMBOS.forEach(({ algo, type }) => { + const field = tagField(algo); + it(`should return ${algo}/${type} on CompleteMPU response`, async () => { + const key = `complete-${algo.toLowerCase()}-${type.toLowerCase()}-${Date.now()}`; + const partChecksum = + await algorithms[algo.toLowerCase()].digest(partBody); + + const create = await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, Key: key, + ChecksumAlgorithm: algo, + ChecksumType: type, + })); + + const uploadPart = await s3.send(new UploadPartCommand({ + Bucket: bucket, Key: key, + UploadId: create.UploadId, + PartNumber: 1, + Body: partBody, + [field]: partChecksum, + })); + + const complete = await s3.send(new CompleteMultipartUploadCommand({ + Bucket: bucket, Key: key, + UploadId: create.UploadId, + MultipartUpload: { + Parts: [{ + PartNumber: 1, + ETag: uploadPart.ETag, + [field]: partChecksum, + }], + }, + })); + + assert(complete[field], + `expected ${field} in CompleteMPU response, got: ${JSON.stringify(complete)}`); + assert.strictEqual(complete.ChecksumType, type); + if (type === 'COMPOSITE') { + assert(complete[field].endsWith('-1'), + `expected -1 suffix for 1-part COMPOSITE, got ${complete[field]}`); + } else { + assert(!complete[field].includes('-'), + `FULL_OBJECT value should have no suffix, got ${complete[field]}`); + } + + // HeadObject with ChecksumMode=ENABLED must surface the same + // value that CompleteMPU returned for FULL_OBJECT MPUs. + // COMPOSITE storage is deferred, so HeadObject leaves the field absent — matching + // cloudserver's current intentional skip. 
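+            // (The COMPOSITE value is still returned on the CompleteMPU
+            // response itself, asserted above; only its persistence is
+            // deferred.)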
+ const head = await s3.send(new HeadObjectCommand({ + Bucket: bucket, Key: key, + ChecksumMode: 'ENABLED', + })); + if (type === 'FULL_OBJECT') { + assert.strictEqual(head[field], complete[field], + `HeadObject ${field} should match CompleteMPU response`); + assert.strictEqual(head.ChecksumType, type); + } else { + assert.strictEqual(head[field], undefined, + `COMPOSITE storage is deferred; HeadObject should not surface ${field}`); + assert.strictEqual(head.ChecksumType, undefined); + } + }); + }); + + it('should return CRC64NVME/FULL_OBJECT on CompleteMPU response when CreateMPU sent no checksum headers', + async () => { + const key = `complete-default-${Date.now()}`; + + const create = await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, Key: key, + })); + + const uploadPart = await s3.send(new UploadPartCommand({ + Bucket: bucket, Key: key, + UploadId: create.UploadId, + PartNumber: 1, + Body: partBody, + })); + + const complete = await s3.send(new CompleteMultipartUploadCommand({ + Bucket: bucket, Key: key, + UploadId: create.UploadId, + MultipartUpload: { + Parts: [{ PartNumber: 1, ETag: uploadPart.ETag }], + }, + })); + + assert(complete.ChecksumCRC64NVME, + `expected ChecksumCRC64NVME for default MPU, got: ${JSON.stringify(complete)}`); + assert.strictEqual(complete.ChecksumType, 'FULL_OBJECT'); + + // Default MPU is FULL_OBJECT — checksum is persisted, so + // HeadObject must return the same value. + const head = await s3.send(new HeadObjectCommand({ + Bucket: bucket, Key: key, + ChecksumMode: 'ENABLED', + })); + assert.strictEqual(head.ChecksumCRC64NVME, complete.ChecksumCRC64NVME); + assert.strictEqual(head.ChecksumType, 'FULL_OBJECT'); + }); + })); From 6a4f500263f0afaaa41eb8b2056441598b9227ef Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Mon, 11 May 2026 20:02:03 +0200 Subject: [PATCH 08/12] CLDSRV-898: fix test dont compare checksums for object override --- .../aws-node-sdk/test/object/mpuVersion.js | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/tests/functional/aws-node-sdk/test/object/mpuVersion.js b/tests/functional/aws-node-sdk/test/object/mpuVersion.js index 0cdeee830f..ebdd4e0f35 100644 --- a/tests/functional/aws-node-sdk/test/object/mpuVersion.js +++ b/tests/functional/aws-node-sdk/test/object/mpuVersion.js @@ -141,14 +141,6 @@ function checkObjMdAndUpdate(objMDBefore, objMDAfter, props) { // eslint-disable-next-line no-param-reassign delete objMDBefore['content-type']; } - if (objMDBefore.checksum && !objMDAfter.checksum) { - // The initial PutObject stores a checksum, but the MPU restore path does not - // (CompleteMultipartUpload checksum storage is not yet implemented). - // Once it is, the restored object should carry a checksum and this workaround - // should be removed. 
- // eslint-disable-next-line no-param-reassign - delete objMDBefore.checksum; - } } function clearUploadIdAndRestoreStatusFromVersions(versions) { @@ -338,7 +330,7 @@ describe('MPU with x-scal-s3-version-id header', () => { assert.deepStrictEqual(versionsAfter, versionsBefore); checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'uploadId', 'microVersionId', 'x-amz-restore', - 'archive', 'dataStoreName', 'originOp']); + 'archive', 'dataStoreName', 'originOp', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); } catch (err) { @@ -371,7 +363,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -409,7 +401,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -446,7 +438,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -481,7 +473,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -519,7 +511,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -562,7 +554,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -602,7 +594,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -640,7 +632,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -686,7 +678,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 
'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -722,7 +714,7 @@ describe('MPU with x-scal-s3-version-id header', () => { checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); + 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); assert(isDeepStrictEqual(objMDAfter, objMDBefore), 'Objects should be deeply equal'); }); From c51df985fb9cad6b95dd95182800c5db0095517e Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Mon, 11 May 2026 20:38:45 +0200 Subject: [PATCH 09/12] CLDSRV-898: fix test dont handle x-amz-checksum- as the checksum of the request body --- tests/functional/raw-node/test/xAmzChecksum.js | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/functional/raw-node/test/xAmzChecksum.js b/tests/functional/raw-node/test/xAmzChecksum.js index b9fa5579e1..198fad2806 100644 --- a/tests/functional/raw-node/test/xAmzChecksum.js +++ b/tests/functional/raw-node/test/xAmzChecksum.js @@ -23,13 +23,11 @@ describe('Test x-amz-checksums', () => { validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=' }, ]; + // CompleteMultipartUpload intentionally not listed here: its + // x-amz-checksum- header is the expected final-object checksum, + // not a body digest, so it's not part of the buffered-body validator + // path tested below. const methods = [ - { - Name: 'CompleteMultipartupload', - Query: 'uploadId=77a4ce46b9bf4ea69d9e0cc3f0bb1aae', - Key: objectKey, - HTTPMethod: 'POST', - }, { Name: 'DeleteObjects', Query: 'delete', From 1cb739f827121b37a7eb97c4577c50512ea15ce3 Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Mon, 11 May 2026 17:27:01 +0200 Subject: [PATCH 10/12] CLDSRV-898: fix lint issues --- lib/api/apiUtils/integrity/crcCombine.js | 26 +- .../apiUtils/integrity/validateChecksums.js | 111 +- lib/api/apiUtils/object/objectAttributes.js | 6 +- lib/api/completeMultipartUpload.js | 1284 ++++++++++------- lib/api/listParts.js | 400 ++--- .../test/object/completeMpuChecksum.js | 181 ++- .../aws-node-sdk/test/object/mpuVersion.js | 474 +++--- .../test/object/objectGetAttributes.js | 637 ++++---- .../functional/raw-node/test/xAmzChecksum.js | 68 +- .../apiUtils/integrity/computeMpuChecksums.js | 15 +- .../unit/api/apiUtils/integrity/crcCombine.js | 37 +- .../api/apiUtils/object/objectAttributes.js | 17 +- tests/unit/api/completeMultipartUpload.js | 688 ++++----- tests/unit/api/objectGetAttributes.js | 47 +- 14 files changed, 2269 insertions(+), 1722 deletions(-) diff --git a/lib/api/apiUtils/integrity/crcCombine.js b/lib/api/apiUtils/integrity/crcCombine.js index 0a616be12b..9dc43b0da1 100644 --- a/lib/api/apiUtils/integrity/crcCombine.js +++ b/lib/api/apiUtils/integrity/crcCombine.js @@ -36,8 +36,13 @@ function gf2MatrixTimes(mat, vecLo, vecHi) { function gf2MatrixSquare(square, mat, dim) { for (let n = 0; n < dim; n += 1) { const r = gf2MatrixTimes(mat, mat[2 * n], mat[2 * n + 1]); + // In-place mutation of the caller's scratch buffer is intentional — + // the callers (crcCombine, ensureChainLen) own `square` and re-use + // it across iterations to avoid re-allocating per squaring step. 
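+        // (Each column n of `square` becomes `mat` applied to column n of
+        // `mat`, so `square` ends up equal to `mat` * `mat` over GF(2):
+        // the operator that advances a CRC through twice as many zero
+        // bits as `mat` does.)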
+ /* eslint-disable no-param-reassign */ square[2 * n] = r[0]; square[2 * n + 1] = r[1]; + /* eslint-enable no-param-reassign */ } } @@ -56,8 +61,8 @@ function getOrInitChain(polyReversed, dim) { // M^1: one-zero-bit operator. Column 0 is the polynomial; column k>0 is // 1 << (k - 1) — what right-shifting a state with bit k set produces. const m1 = new Uint32Array(2 * dim); - m1[0] = Number(polyReversed & 0xFFFFFFFFn); - m1[1] = Number((polyReversed >> 32n) & 0xFFFFFFFFn); + m1[0] = Number(polyReversed & 0xffffffffn); + m1[1] = Number((polyReversed >> 32n) & 0xffffffffn); for (let k = 1; k < dim; k += 1) { const bit = k - 1; if (bit < 32) { @@ -113,8 +118,8 @@ function crcCombine(crc1, crc2, len2, polyReversed, dim) { const state = getOrInitChain(polyReversed, dim); - let cLo = Number(crc1 & 0xFFFFFFFFn); - let cHi = Number((crc1 >> 32n) & 0xFFFFFFFFn); + let cLo = Number(crc1 & 0xffffffffn); + let cHi = Number((crc1 >> 32n) & 0xffffffffn); // Walk the bits of len2 (each bit represents a power-of-two number of // zero bytes to prepend); apply the cached operator for every set bit. @@ -131,8 +136,8 @@ function crcCombine(crc1, crc2, len2, polyReversed, dim) { j += 1; } - const c2Lo = Number(crc2 & 0xFFFFFFFFn); - const c2Hi = Number((crc2 >> 32n) & 0xFFFFFFFFn); + const c2Lo = Number(crc2 & 0xffffffffn); + const c2Hi = Number((crc2 >> 32n) & 0xffffffffn); cLo = (cLo ^ c2Lo) >>> 0; cHi = (cHi ^ c2Hi) >>> 0; @@ -153,7 +158,7 @@ function bigIntToBase64(value, dim) { const buf = Buffer.alloc(nBytes); let v = value; for (let i = nBytes - 1; i >= 0; i -= 1) { - buf[i] = Number(v & 0xFFn); + buf[i] = Number(v & 0xffn); v >>= 8n; } return buf.toString('base64'); @@ -172,12 +177,7 @@ function bigIntToBase64(value, dim) { function combineCrcs(parts, polyReversed, dim) { let combined = base64ToBigInt(parts[0].value); for (let i = 1; i < parts.length; i += 1) { - combined = crcCombine( - combined, - base64ToBigInt(parts[i].value), - BigInt(parts[i].length), - polyReversed, - dim); + combined = crcCombine(combined, base64ToBigInt(parts[i].value), BigInt(parts[i].length), polyReversed, dim); } return bigIntToBase64(combined, dim); } diff --git a/lib/api/apiUtils/integrity/validateChecksums.js b/lib/api/apiUtils/integrity/validateChecksums.js index aff42d1f17..7fb987ab18 100644 --- a/lib/api/apiUtils/integrity/validateChecksums.js +++ b/lib/api/apiUtils/integrity/validateChecksums.js @@ -6,54 +6,61 @@ const { errors: ArsenalErrors, errorInstances } = require('arsenal'); const { config } = require('../../../Config'); const { combineCrcs } = require('./crcCombine'); -const defaultChecksumData = Object.freeze( - { algorithm: 'crc64nvme', isTrailer: false, expected: undefined }); +const defaultChecksumData = Object.freeze({ algorithm: 'crc64nvme', isTrailer: false, expected: undefined }); const errAlgoNotSupported = errorInstances.InvalidRequest.customizeDescription( - 'The algorithm type you specified in x-amz-checksum- header is invalid.'); + 'The algorithm type you specified in x-amz-checksum- header is invalid.', +); const errAlgoNotSupportedSDK = errorInstances.InvalidRequest.customizeDescription( - 'Value for x-amz-sdk-checksum-algorithm header is invalid.'); + 'Value for x-amz-sdk-checksum-algorithm header is invalid.', +); const errMissingCorresponding = errorInstances.InvalidRequest.customizeDescription( 'x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* ' + - 'or x-amz-trailer headers were found.'); + 'or x-amz-trailer headers were found.', +); const 
errMultipleChecksumTypes = errorInstances.InvalidRequest.customizeDescription( - 'Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.'); + 'Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed.', +); const errTrailerAndChecksum = errorInstances.InvalidRequest.customizeDescription( - 'Expecting a single x-amz-checksum- header'); + 'Expecting a single x-amz-checksum- header', +); const errTrailerNotSupported = errorInstances.InvalidRequest.customizeDescription( - 'The value specified in the x-amz-trailer header is not supported'); + 'The value specified in the x-amz-trailer header is not supported', +); const errMPUAlgoNotSupported = errorInstances.InvalidRequest.customizeDescription( 'Checksum algorithm provided is unsupported. ' + - 'Please try again with any of the valid types: ' + - '[CRC32, CRC32C, CRC64NVME, SHA1, SHA256]'); + 'Please try again with any of the valid types: ' + + '[CRC32, CRC32C, CRC64NVME, SHA1, SHA256]', +); const errMPUTypeInvalid = errorInstances.InvalidRequest.customizeDescription( - 'Value for x-amz-checksum-type header is invalid.'); + 'Value for x-amz-checksum-type header is invalid.', +); const errMPUTypeWithoutAlgo = errorInstances.InvalidRequest.customizeDescription( - 'The x-amz-checksum-type header can only be used ' + - 'with the x-amz-checksum-algorithm header.'); + 'The x-amz-checksum-type header can only be used ' + 'with the x-amz-checksum-algorithm header.', +); const checksumedMethods = Object.freeze({ // CompleteMPU's x-amz-checksum- is the final-object checksum, // not a body digest. Validated in completeMultipartUpload.js instead. // 'completeMultipartUpload': true, - 'multiObjectDelete': true, - 'bucketPutACL': true, - 'bucketPutCors': true, - 'bucketPutEncryption': true, - 'bucketPutLifecycle': true, - 'bucketPutLogging': true, - 'bucketPutNotification': true, - 'bucketPutPolicy': true, - 'bucketPutReplication': true, - 'bucketPutTagging': true, - 'bucketPutVersioning': true, - 'bucketPutWebsite': true, - 'objectPutACL': true, - 'objectPutLegalHold': true, - 'bucketPutObjectLock': true, // PutObjectLockConfiguration - 'objectPutRetention': true, - 'objectPutTagging': true, - 'objectRestore': true, + multiObjectDelete: true, + bucketPutACL: true, + bucketPutCors: true, + bucketPutEncryption: true, + bucketPutLifecycle: true, + bucketPutLogging: true, + bucketPutNotification: true, + bucketPutPolicy: true, + bucketPutReplication: true, + bucketPutTagging: true, + bucketPutVersioning: true, + bucketPutWebsite: true, + objectPutACL: true, + objectPutLegalHold: true, + bucketPutObjectLock: true, // PutObjectLockConfiguration + objectPutRetention: true, + objectPutTagging: true, + objectRestore: true, }); const ChecksumError = Object.freeze({ @@ -101,7 +108,7 @@ const algorithms = Object.freeze({ return Buffer.from(result).toString('base64'); }, isValidDigest: expected => typeof expected === 'string' && expected.length === 12 && base64Regex.test(expected), - createHash: () => new CrtCrc64Nvme() + createHash: () => new CrtCrc64Nvme(), }, crc32: { xmlTag: 'ChecksumCRC32', @@ -114,7 +121,7 @@ const algorithms = Object.freeze({ return uint32ToBase64(result >>> 0); }, isValidDigest: expected => typeof expected === 'string' && expected.length === 8 && base64Regex.test(expected), - createHash: () => new Crc32() + createHash: () => new Crc32(), }, crc32c: { xmlTag: 'ChecksumCRC32C', @@ -124,7 +131,7 @@ const algorithms = Object.freeze({ }, digestFromHash: hash => uint32ToBase64(hash.digest() >>> 
0), isValidDigest: expected => typeof expected === 'string' && expected.length === 8 && base64Regex.test(expected), - createHash: () => new Crc32c() + createHash: () => new Crc32c(), }, sha1: { xmlTag: 'ChecksumSHA1', @@ -134,7 +141,7 @@ const algorithms = Object.freeze({ }, digestFromHash: hash => hash.digest('base64'), isValidDigest: expected => typeof expected === 'string' && expected.length === 28 && base64Regex.test(expected), - createHash: () => crypto.createHash('sha1') + createHash: () => crypto.createHash('sha1'), }, sha256: { xmlTag: 'ChecksumSHA256', @@ -144,8 +151,8 @@ const algorithms = Object.freeze({ }, digestFromHash: hash => hash.digest('base64'), isValidDigest: expected => typeof expected === 'string' && expected.length === 44 && base64Regex.test(expected), - createHash: () => crypto.createHash('sha256') - } + createHash: () => crypto.createHash('sha256'), + }, }); /** @@ -172,7 +179,7 @@ async function validateXAmzChecksums(headers, body) { if (xAmzChecksumCnt === 0 && 'x-amz-sdk-checksum-algorithm' in headers) { return { error: ChecksumError.MissingCorresponding, - details: { expected: headers['x-amz-sdk-checksum-algorithm'] } + details: { expected: headers['x-amz-sdk-checksum-algorithm'] }, }; } else if (xAmzChecksumCnt === 0) { return { error: ChecksumError.MissingChecksum, details: null }; @@ -181,7 +188,7 @@ async function validateXAmzChecksums(headers, body) { // No x-amz-sdk-checksum-algorithm we expect one x-amz-checksum-[crc64nvme, crc32, crc32C, sha1, sha256]. const algo = checksumHeaders[0].slice('x-amz-checksum-'.length); if (!(algo in algorithms)) { - return { error: ChecksumError.AlgoNotSupported, details: { algorithm: algo } };; + return { error: ChecksumError.AlgoNotSupported, details: { algorithm: algo } }; } const expected = headers[`x-amz-checksum-${algo}`]; @@ -271,7 +278,7 @@ function getChecksumDataFromHeaders(headers) { if (checksumHeader === undefined && !('x-amz-trailer' in headers) && 'x-amz-sdk-checksum-algorithm' in headers) { return { error: ChecksumError.MissingCorresponding, - details: { expected: headers['x-amz-sdk-checksum-algorithm'] } + details: { expected: headers['x-amz-sdk-checksum-algorithm'] }, }; } @@ -374,7 +381,8 @@ function arsenalErrorFromChecksumError(err) { case ChecksumError.XAmzMismatch: { const algoUpper = err.details.algorithm.toUpperCase(); return errorInstances.BadDigest.customizeDescription( - `The ${algoUpper} you specified did not match the calculated checksum.`); + `The ${algoUpper} you specified did not match the calculated checksum.`, + ); } case ChecksumError.AlgoNotSupported: return errAlgoNotSupported; @@ -386,7 +394,8 @@ function arsenalErrorFromChecksumError(err) { return errMultipleChecksumTypes; case ChecksumError.MalformedChecksum: return errorInstances.InvalidRequest.customizeDescription( - `Value for x-amz-checksum-${err.details.algorithm} header is invalid.`); + `Value for x-amz-checksum-${err.details.algorithm} header is invalid.`, + ); case ChecksumError.MD5Invalid: return ArsenalErrors.InvalidDigest; case ChecksumError.TrailerAlgoMismatch: @@ -397,7 +406,8 @@ function arsenalErrorFromChecksumError(err) { return ArsenalErrors.MalformedTrailerError; case ChecksumError.TrailerChecksumMalformed: return errorInstances.InvalidRequest.customizeDescription( - `Value for x-amz-checksum-${err.details.algorithm} trailing header is invalid.`); + `Value for x-amz-checksum-${err.details.algorithm} trailing header is invalid.`, + ); case ChecksumError.TrailerAndChecksum: return errTrailerAndChecksum; case 
ChecksumError.TrailerNotSupported: @@ -411,7 +421,8 @@ function arsenalErrorFromChecksumError(err) { case ChecksumError.MPUInvalidCombination: return errorInstances.InvalidRequest.customizeDescription( `The ${err.details.type} checksum type cannot be used ` + - `with the ${err.details.algorithm.toUpperCase()} checksum algorithm.`); + `with the ${err.details.algorithm.toUpperCase()} checksum algorithm.`, + ); default: return ArsenalErrors.BadDigest; } @@ -503,8 +514,10 @@ function getChecksumDataFromMPUHeaders(headers) { } // Validate algorithm + type combination - if ((type === 'FULL_OBJECT' && !fullObjectAlgorithms.has(algo)) || - (type === 'COMPOSITE' && !compositeAlgorithms.has(algo))) { + if ( + (type === 'FULL_OBJECT' && !fullObjectAlgorithms.has(algo)) || + (type === 'COMPOSITE' && !compositeAlgorithms.has(algo)) + ) { return { error: ChecksumError.MPUInvalidCombination, details: { algorithm: algo, type } }; } @@ -534,9 +547,9 @@ function getChecksumDataFromMPUHeaders(headers) { // Bit-reversed polynomials used by the right-shift CRC implementations that // the @aws-crypto/* and @aws-sdk/crc64-nvme-crt packages produce. const FULL_OBJECT_POLYS = Object.freeze({ - crc32: { polyReversed: 0xEDB88320n, dim: 32 }, - crc32c: { polyReversed: 0x82F63B78n, dim: 32 }, - crc64nvme: { polyReversed: 0x9A6C9329AC4BC9B5n, dim: 64 }, + crc32: { polyReversed: 0xedb88320n, dim: 32 }, + crc32c: { polyReversed: 0x82f63b78n, dim: 32 }, + crc64nvme: { polyReversed: 0x9a6c9329ac4bc9b5n, dim: 64 }, }); // Algorithms whose digest is synchronous, which is the full set AWS allows diff --git a/lib/api/apiUtils/object/objectAttributes.js b/lib/api/apiUtils/object/objectAttributes.js index 052cd12a59..2582ca9f25 100644 --- a/lib/api/apiUtils/object/objectAttributes.js +++ b/lib/api/apiUtils/object/objectAttributes.js @@ -69,11 +69,7 @@ function buildAttributesXml(objectMD, userMetadata, requestedAttrs, xml, log) { case 'ObjectParts': { const partCount = getPartCountFromMd5(objectMD); if (partCount) { - xml.push( - '', - `${partCount}`, - '', - ); + xml.push('', `${partCount}`, ''); } break; } diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index 259bd3fba8..fe922f47cc 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -10,17 +10,18 @@ const { data } = require('../data/wrapper'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const constants = require('../../constants'); const { config } = require('../Config'); -const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning } - = require('./apiUtils/object/versioning'); +const { + versioningPreprocessing, + checkQueryVersionId, + decodeVID, + overwritingVersioning, +} = require('./apiUtils/object/versioning'); const services = require('../services'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); -const locationConstraintCheck - = require('./apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const { skipMpuPartProcessing } = storage.data.external.backendUtils; -const { validateAndFilterMpuParts, generateMpuPartStorageInfo } = - s3middleware.processMpuParts; -const locationKeysHaveChanged - = require('./apiUtils/object/locationKeysHaveChanged'); +const { validateAndFilterMpuParts, generateMpuPartStorageInfo } = s3middleware.processMpuParts; +const locationKeysHaveChanged = 
require('./apiUtils/object/locationKeysHaveChanged'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { validatePutVersionId } = require('./apiUtils/object/coldStorage'); const { validateQuotas } = require('./apiUtils/quotas/quotaUtils'); @@ -36,8 +37,7 @@ const versionIdUtils = versioning.VersionID; let splitter = constants.splitter; const REPLICATION_ACTION = 'MPU'; -const allChecksumXmlTags = Object.values(checksumAlgorithms) - .map(algo => algo.xmlTag); +const allChecksumXmlTags = Object.values(checksumAlgorithms).map(algo => algo.xmlTag); /** * Validate per-part checksums in a CompleteMultipartUpload request body @@ -62,9 +62,7 @@ function validatePerPartChecksums(jsonList, storedParts, mpuSplitter, mpuChecksu const mpuAlgo = mpuChecksum.algorithm; const expectedTag = mpuAlgo && checksumAlgorithms[mpuAlgo] ? checksumAlgorithms[mpuAlgo].xmlTag : null; // Skip enforcement if the MPU's algorithm is unknown (shouldn't happen). - const requireForEachPart = mpuChecksum.type === 'COMPOSITE' - && !mpuChecksum.isDefault - && expectedTag !== null; + const requireForEachPart = mpuChecksum.type === 'COMPOSITE' && !mpuChecksum.isDefault && expectedTag !== null; const storedByPartNumber = new Map(); storedParts.forEach(item => { @@ -83,8 +81,8 @@ function validatePerPartChecksums(jsonList, storedParts, mpuSplitter, mpuChecksu if (tag !== expectedTag) { const algoLabel = tag.replace(/^Checksum/, '').toLowerCase(); return errorInstances.BadDigest.customizeDescription( - `The ${algoLabel} you specified for part ${partNumber} ` + - 'did not match what we received.'); + `The ${algoLabel} you specified for part ${partNumber} ` + 'did not match what we received.', + ); } } @@ -95,14 +93,16 @@ function validatePerPartChecksums(jsonList, storedParts, mpuSplitter, mpuChecksu if (!storedValue || providedValue !== storedValue) { return errorInstances.InvalidPart.customizeDescription( 'One or more of the specified parts could not be found. ' + - 'The part may not have been uploaded, or the specified ' + - 'entity tag may not match the part\'s entity tag.'); + 'The part may not have been uploaded, or the specified ' + + "entity tag may not match the part's entity tag.", + ); } } else if (requireForEachPart) { return errorInstances.InvalidRequest.customizeDescription( `The upload was created using a ${mpuAlgo} checksum. ` + - 'The complete request must include the checksum for each ' + - `part. It was missing for part ${partNumber} in the request.`); + 'The complete request must include the checksum for each ' + + `part. 
It was missing for part ${partNumber} in the request.`, + ); } } return null; @@ -148,8 +148,7 @@ function computeFinalChecksum(storedParts, filteredPartList, storedMetadata, mpu } if (missingPartNumbers.length > 0) { - log.error('one or more MPU parts missing checksum value; ' + - 'skipping final-object checksum computation', { + log.error('one or more MPU parts missing checksum value; ' + 'skipping final-object checksum computation', { uploadId, algorithm, type, @@ -160,12 +159,16 @@ function computeFinalChecksum(storedParts, filteredPartList, storedMetadata, mpu let result; if (type === 'COMPOSITE') { - result = computeCompositeMPUChecksum(algorithm, partInputs.map(p => p.value)); + result = computeCompositeMPUChecksum( + algorithm, + partInputs.map(p => p.value), + ); } else if (type === 'FULL_OBJECT') { result = computeFullObjectMPUChecksum(algorithm, partInputs); } else { log.error('unknown MPU checksumType; skipping final-object checksum computation', { - uploadId, checksumType: type, + uploadId, + checksumType: type, }); return null; } @@ -210,8 +213,8 @@ function validateExpectedFinalChecksum(headers, finalChecksum, uploadId, log) { algorithms: [foundAlgo, algo], }); return errorInstances.InvalidRequest.customizeDescription( - 'Expecting a single x-amz-checksum- header. ' + - 'Multiple checksum Types are not allowed.'); + 'Expecting a single x-amz-checksum- header. ' + 'Multiple checksum Types are not allowed.', + ); } foundAlgo = algo; foundValue = headers[headerName]; @@ -231,8 +234,8 @@ function validateExpectedFinalChecksum(headers, finalChecksum, uploadId, log) { computed: finalChecksum && finalChecksum.value, }); return errorInstances.BadDigest.customizeDescription( - `The ${foundAlgo.toUpperCase()} you specified did not ` + - 'match the calculated checksum.'); + `The ${foundAlgo.toUpperCase()} you specified did not ` + 'match the calculated checksum.', + ); } return null; @@ -256,8 +259,7 @@ function validateExpectedFinalChecksum(headers, finalChecksum, uploadId, log) { */ - - /* +/* Format of xml response: { - if (err || !result || !result.CompleteMultipartUpload - || !result.CompleteMultipartUpload.Part) { + if (err || !result || !result.CompleteMultipartUpload || !result.CompleteMultipartUpload.Part) { return next(errors.MalformedXML); } const jsonList = result.CompleteMultipartUpload; @@ -334,113 +335,149 @@ function completeMultipartUpload(authInfo, request, log, callback) { }); } - return async.waterfall([ - function validateDestBucket(next) { - const metadataValParams = { - objectKey, - authInfo, - bucketName, - // Required permissions for this action - // at the destinationBucket level are same as objectPut - requestType: request.apiMethods || 'completeMultipartUpload', - versionId, - request, - }; - standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next); - }, - function validateMultipart(destBucket, objMD, next) { - if (objMD) { - oldByteLength = objMD['content-length']; - } - - if (isPutVersion) { - const error = validatePutVersionId(objMD, putVersionId, log); - if (error) { - return next(error, destBucket); + return async.waterfall( + [ + function validateDestBucket(next) { + const metadataValParams = { + objectKey, + authInfo, + bucketName, + // Required permissions for this action + // at the destinationBucket level are same as objectPut + requestType: request.apiMethods || 'completeMultipartUpload', + versionId, + request, + }; + standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, 
next); + }, + function validateMultipart(destBucket, objMD, next) { + if (objMD) { + oldByteLength = objMD['content-length']; } - } - return services.metadataValidateMultipart(metadataValParams, - (err, mpuBucket, mpuOverview, storedMetadata) => { - if (err) { - log.error('error validating request', { error: err }); - return next(err, destBucket); + if (isPutVersion) { + const error = validatePutVersionId(objMD, putVersionId, log); + if (error) { + return next(error, destBucket); } - // Validate x-amz-checksum-type header (if present) matches - // the checksum type the MPU was created with. - // x-amz-checksum-algorithm is not validated: AWS ignores - // a mismatch on this header for CompleteMultipartUpload. - const headerType = request.headers['x-amz-checksum-type']; - if (headerType) { - const headerTypeUpper = headerType.toUpperCase(); - if (headerTypeUpper !== 'COMPOSITE' && headerTypeUpper !== 'FULL_OBJECT') { - const typeErr = errorInstances.InvalidRequest - .customizeDescription( 'Value for x-amz-checksum-type header is invalid.'); - return next(typeErr, destBucket); + } + + return services.metadataValidateMultipart( + metadataValParams, + (err, mpuBucket, mpuOverview, storedMetadata) => { + if (err) { + log.error('error validating request', { error: err }); + return next(err, destBucket); } - const mpuType = storedMetadata.checksumType; - if (!mpuType || headerTypeUpper !== mpuType.toUpperCase()) { - const typeErr = errorInstances.InvalidRequest - .customizeDescription( + // Validate x-amz-checksum-type header (if present) matches + // the checksum type the MPU was created with. + // x-amz-checksum-algorithm is not validated: AWS ignores + // a mismatch on this header for CompleteMultipartUpload. + const headerType = request.headers['x-amz-checksum-type']; + if (headerType) { + const headerTypeUpper = headerType.toUpperCase(); + if (headerTypeUpper !== 'COMPOSITE' && headerTypeUpper !== 'FULL_OBJECT') { + const typeErr = errorInstances.InvalidRequest.customizeDescription( + 'Value for x-amz-checksum-type header is invalid.', + ); + return next(typeErr, destBucket); + } + const mpuType = storedMetadata.checksumType; + if (!mpuType || headerTypeUpper !== mpuType.toUpperCase()) { + const typeErr = errorInstances.InvalidRequest.customizeDescription( `The upload was created using the ${mpuType} ` + - 'checksum mode. The complete request must ' + - 'use the same checksum mode.'); - return next(typeErr, destBucket); + 'checksum mode. 
The complete request must ' + + 'use the same checksum mode.', + ); + return next(typeErr, destBucket); + } } - } - return next(null, destBucket, objMD, mpuBucket, - storedMetadata); - }); - }, - function parsePartsList(destBucket, objMD, mpuBucket, - storedMetadata, next) { - const location = storedMetadata.controllingLocationConstraint; - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; - } - // Reconstruct mpuOverviewKey to point to metadata - // originally stored when mpu initiated - const mpuOverviewKey = - `overview${splitter}${objectKey}${splitter}${uploadId}`; - if (request.post) { - return parseXml(request.post, (err, jsonList) => { - if (err) { - log.error('error parsing XML', { error: err }); - return next(err, destBucket); - } - return next(null, destBucket, objMD, mpuBucket, - jsonList, storedMetadata, location, mpuOverviewKey); - }); - } - return next(errors.MalformedXML, destBucket); - }, - function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList, - storedMetadata, location, mpuOverviewKey, next) { - return services.metadataMarkMPObjectForCompletion({ - bucketName: mpuBucket.getName(), - objectKey, - uploadId, - splitter, + return next(null, destBucket, objMD, mpuBucket, storedMetadata); + }, + ); + }, + function parsePartsList(destBucket, objMD, mpuBucket, storedMetadata, next) { + const location = storedMetadata.controllingLocationConstraint; + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; + } + // Reconstruct mpuOverviewKey to point to metadata + // originally stored when mpu initiated + const mpuOverviewKey = `overview${splitter}${objectKey}${splitter}${uploadId}`; + if (request.post) { + return parseXml(request.post, (err, jsonList) => { + if (err) { + log.error('error parsing XML', { error: err }); + return next(err, destBucket); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + ); + }); + } + return next(errors.MalformedXML, destBucket); + }, + function markOverviewForCompletion( + destBucket, + objMD, + mpuBucket, + jsonList, storedMetadata, - }, log, err => { - if (err) { - log.error('error marking MPU object for completion', { + location, + mpuOverviewKey, + next, + ) { + return services.metadataMarkMPObjectForCompletion( + { bucketName: mpuBucket.getName(), objectKey, uploadId, - error: err, - }); - return next(err); - } - return next(null, destBucket, objMD, mpuBucket, - jsonList, storedMetadata, location, mpuOverviewKey); - }); - }, - function retrieveParts(destBucket, objMD, mpuBucket, jsonList, - storedMetadata, location, mpuOverviewKey, next) { - return services.getMPUparts(mpuBucket.getName(), uploadId, log, - (err, result) => { + splitter, + storedMetadata, + }, + log, + err => { + if (err) { + log.error('error marking MPU object for completion', { + bucketName: mpuBucket.getName(), + objectKey, + uploadId, + error: err, + }); + return next(err); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + ); + }, + ); + }, + function retrieveParts( + destBucket, + objMD, + mpuBucket, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + next, + ) { + return services.getMPUparts(mpuBucket.getName(), uploadId, log, (err, result) => { if (err) { log.error('error getting parts', { error: err }); return next(err, destBucket); @@ 
-452,224 +489,380 @@ function completeMultipartUpload(authInfo, request, log, callback) { type: storedMetadata.checksumType, isDefault: storedMetadata.checksumIsDefault, }; - const checksumErr = validatePerPartChecksums( - jsonList, storedParts, splitter, mpuChecksum); + const checksumErr = validatePerPartChecksums(jsonList, storedParts, splitter, mpuChecksum); if (checksumErr) { log.debug('per-part checksum validation failed', { error: checksumErr, }); return next(checksumErr, destBucket); } - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize); + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + totalMPUSize, + ); }); - }, - function completeExternalMpu(destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize, next) { - const mdInfo = { storedParts, mpuOverviewKey, splitter }; - const mpuInfo = - { objectKey, uploadId, jsonList, bucketName, destBucket }; - const originalIdentityImpDenies = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - return data.completeMPU(request, mpuInfo, mdInfo, location, - null, null, null, locationConstraintCheck, log, - (err, completeObjData) => { + }, + function completeExternalMpu( + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + totalMPUSize, + next, + ) { + const mdInfo = { storedParts, mpuOverviewKey, splitter }; + const mpuInfo = { objectKey, uploadId, jsonList, bucketName, destBucket }; + const originalIdentityImpDenies = request.actionImplicitDenies; // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityImpDenies; - if (err) { - log.error('error completing MPU externally', { error: err }); - return next(err, destBucket); + delete request.actionImplicitDenies; + return data.completeMPU( + request, + mpuInfo, + mdInfo, + location, + null, + null, + null, + locationConstraintCheck, + log, + (err, completeObjData) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityImpDenies; + if (err) { + log.error('error completing MPU externally', { error: err }); + return next(err, destBucket); + } + // if mpu not handled externally, completeObjData will be null + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + totalMPUSize, + ); + }, + ); + }, + function validateAndFilterParts( + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + totalMPUSize, + next, + ) { + if (completeObjData) { + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + completeObjData.filteredPartsObj, + totalMPUSize, + ); } - // if mpu not handled externally, completeObjData will be null - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - totalMPUSize); - }); - }, - function validateAndFilterParts(destBucket, objMD, mpuBucket, - storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey, - totalMPUSize, next) { - if (completeObjData) { - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, 
mpuOverviewKey, - completeObjData.filteredPartsObj, totalMPUSize); - } - const filteredPartsObj = validateAndFilterMpuParts(storedParts, - jsonList, mpuOverviewKey, splitter, log); - if (filteredPartsObj.error) { - return next(filteredPartsObj.error, destBucket); - } - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - filteredPartsObj, totalMPUSize); - }, - function processParts(destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - filteredPartsObj, totalMPUSize, next) { - // External-handled MPUs (ingestion / external backends) come in - // with completeObjData set and no filteredPartsObj — the data - // store already aggregated the parts, and we have no per-part - // info to feed the compute step. Skip in that case. - if (filteredPartsObj) { - finalChecksum = computeFinalChecksum(storedParts, filteredPartsObj.partList, storedMetadata, - splitter, uploadId, log); - const expectedErr = validateExpectedFinalChecksum(request.headers, finalChecksum, uploadId, log); - if (expectedErr) { - return next(expectedErr, destBucket); + const filteredPartsObj = validateAndFilterMpuParts( + storedParts, + jsonList, + mpuOverviewKey, + splitter, + log, + ); + if (filteredPartsObj.error) { + return next(filteredPartsObj.error, destBucket); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + filteredPartsObj, + totalMPUSize, + ); + }, + function processParts( + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + filteredPartsObj, + totalMPUSize, + next, + ) { + // External-handled MPUs (ingestion / external backends) come in + // with completeObjData set and no filteredPartsObj — the data + // store already aggregated the parts, and we have no per-part + // info to feed the compute step. Skip in that case. 
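
// Aside: a minimal sketch, not part of this patch, of the property the
// FULL_OBJECT compute step below relies on: CRC-family checksums are
// streamable across part boundaries, so a whole-object digest exists even
// though the body was uploaded in pieces. Assumes Node.js v22+ for
// zlib.crc32; uint32ToBase64 is re-declared here to keep the snippet
// self-contained.
const zlib = require('zlib');
const assert = require('assert');

const part1 = Buffer.from('part one of an MPU, ');
const part2 = Buffer.from('part two of an MPU');
// Resuming part 2 from part 1's running CRC equals the CRC of the whole body.
const whole = zlib.crc32(Buffer.concat([part1, part2]));
const chained = zlib.crc32(part2, zlib.crc32(part1));
assert.strictEqual(whole, chained);

const uint32ToBase64 = num => {
    const buf = Buffer.alloc(4);
    buf.writeUInt32BE(num >>> 0); // >>> 0 coerce number to uint32
    return buf.toString('base64');
};
console.log(uint32ToBase64(whole)); // the x-amz-checksum-crc32 wire form
// In production only each part's finished digest is stored, not the running
// state, which is why computeFullObjectMPUChecksum instead combines digests
// via the GF(2) matrices derived from FULL_OBJECT_POLYS.
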
+ if (filteredPartsObj) { + finalChecksum = computeFinalChecksum( + storedParts, + filteredPartsObj.partList, + storedMetadata, + splitter, + uploadId, + log, + ); + const expectedErr = validateExpectedFinalChecksum(request.headers, finalChecksum, uploadId, log); + if (expectedErr) { + return next(expectedErr, destBucket); + } + } + // if mpu was completed on backend that stored mpu MD externally, + // skip MD processing steps + if (completeObjData && skipMpuPartProcessing(completeObjData)) { + const dataLocations = [ + { + key: completeObjData.key, + size: completeObjData.contentLength, + start: 0, + dataStoreVersionId: completeObjData.dataStoreVersionId, + dataStoreName: storedMetadata.dataStoreName, + dataStoreETag: completeObjData.eTag, + dataStoreType: completeObjData.dataStoreType, + }, + ]; + const calculatedSize = completeObjData.contentLength; + return next( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + completeObjData.eTag, + calculatedSize, + dataLocations, + [mpuOverviewKey], + null, + completeObjData, + totalMPUSize, + ); } - } - // if mpu was completed on backend that stored mpu MD externally, - // skip MD processing steps - if (completeObjData && skipMpuPartProcessing(completeObjData)) { - const dataLocations = [ - { - key: completeObjData.key, - size: completeObjData.contentLength, - start: 0, - dataStoreVersionId: completeObjData.dataStoreVersionId, - dataStoreName: storedMetadata.dataStoreName, - dataStoreETag: completeObjData.eTag, - dataStoreType: completeObjData.dataStoreType, - }, - ]; - const calculatedSize = completeObjData.contentLength; - return next(null, destBucket, objMD, mpuBucket, storedMetadata, - completeObjData.eTag, calculatedSize, dataLocations, - [mpuOverviewKey], null, completeObjData, totalMPUSize); - } - const partsInfo = - generateMpuPartStorageInfo(filteredPartsObj.partList); - if (partsInfo.error) { - return next(partsInfo.error, destBucket); - } - const { keysToDelete, extraPartLocations } = filteredPartsObj; - const { aggregateETag, dataLocations, calculatedSize } = partsInfo; + const partsInfo = generateMpuPartStorageInfo(filteredPartsObj.partList); + if (partsInfo.error) { + return next(partsInfo.error, destBucket); + } + const { keysToDelete, extraPartLocations } = filteredPartsObj; + const { aggregateETag, dataLocations, calculatedSize } = partsInfo; - if (completeObjData) { - const dataLocations = [ - { - key: completeObjData.key, - size: calculatedSize, - start: 0, - dataStoreName: storedMetadata.dataStoreName, - dataStoreETag: aggregateETag, - dataStoreType: completeObjData.dataStoreType, - }, + if (completeObjData) { + const dataLocations = [ + { + key: completeObjData.key, + size: calculatedSize, + start: 0, + dataStoreName: storedMetadata.dataStoreName, + dataStoreETag: aggregateETag, + dataStoreType: completeObjData.dataStoreType, + }, + ]; + return next( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + aggregateETag, + calculatedSize, + dataLocations, + keysToDelete, + extraPartLocations, + completeObjData, + totalMPUSize, + ); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + aggregateETag, + calculatedSize, + dataLocations, + keysToDelete, + extraPartLocations, + null, + totalMPUSize, + ); + }, + function prepForStoring( + destBucket, + objMD, + mpuBucket, + storedMetadata, + aggregateETag, + calculatedSize, + dataLocations, + keysToDelete, + extraPartLocations, + completeObjData, + totalMPUSize, + next, + ) { + // Store full object size for server access logs + if 
(request.serverAccessLog) { + // eslint-disable-next-line no-param-reassign + request.serverAccessLog.objectSize = calculatedSize; + } + const metaHeaders = {}; + const keysNotNeeded = [ + 'initiator', + 'partLocations', + 'key', + 'initiated', + 'uploadId', + 'content-type', + 'expires', + 'eventualStorageBucket', + 'dataStoreName', + 'checksumAlgorithm', + 'checksumType', + 'checksumIsDefault', ]; - return next(null, destBucket, objMD, mpuBucket, storedMetadata, - aggregateETag, calculatedSize, dataLocations, keysToDelete, - extraPartLocations, completeObjData, totalMPUSize); - } - return next(null, destBucket, objMD, mpuBucket, storedMetadata, - aggregateETag, calculatedSize, dataLocations, keysToDelete, - extraPartLocations, null, totalMPUSize); - }, - function prepForStoring(destBucket, objMD, mpuBucket, storedMetadata, - aggregateETag, calculatedSize, dataLocations, keysToDelete, - extraPartLocations, completeObjData, totalMPUSize, next) { - // Store full object size for server access logs - if (request.serverAccessLog) { - // eslint-disable-next-line no-param-reassign - request.serverAccessLog.objectSize = calculatedSize; - } - const metaHeaders = {}; - const keysNotNeeded = - ['initiator', 'partLocations', 'key', - 'initiated', 'uploadId', 'content-type', 'expires', - 'eventualStorageBucket', 'dataStoreName', - 'checksumAlgorithm', 'checksumType', 'checksumIsDefault']; - const metadataKeysToPull = - Object.keys(storedMetadata).filter(item => - keysNotNeeded.indexOf(item) === -1); - metadataKeysToPull.forEach(item => { - metaHeaders[item] = storedMetadata[item]; - }); + const metadataKeysToPull = Object.keys(storedMetadata).filter( + item => keysNotNeeded.indexOf(item) === -1, + ); + metadataKeysToPull.forEach(item => { + metaHeaders[item] = storedMetadata[item]; + }); - const droppedMPUSize = totalMPUSize - calculatedSize; + const droppedMPUSize = totalMPUSize - calculatedSize; - const metaStoreParams = { - authInfo, - objectKey, - metaHeaders, - uploadId, - dataStoreName: storedMetadata.dataStoreName, - contentType: storedMetadata['content-type'], - cacheControl: storedMetadata['cache-control'], - contentDisposition: storedMetadata['content-disposition'], - contentEncoding: storedMetadata['content-encoding'], - expires: storedMetadata.expires, - contentMD5: aggregateETag, - size: calculatedSize, - multipart: true, - isDeleteMarker: false, - replicationInfo: getReplicationInfo(config, - objectKey, destBucket, false, calculatedSize, REPLICATION_ACTION), - originOp: 's3:ObjectCreated:CompleteMultipartUpload', - overheadField: constants.overheadField, - log, - }; - // Persist FULL_OBJECT final-object checksum on the new ObjectMD. - // COMPOSITE is intentionally skipped to prevent metadata bloat, - // to be done in S3C-10399. 
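
// Aside: the COMPOSITE convention mentioned above, sketched with plain Node
// crypto (an illustration only, not the patch's computeCompositeMPUChecksum):
// hash the concatenation of the parts' raw, base64-decoded digests and append
// the part count, mirroring the familiar md5-of-md5s aggregate ETag.
const crypto = require('crypto');

function compositeSha256(partDigestsB64) {
    const raw = Buffer.concat(partDigestsB64.map(d => Buffer.from(d, 'base64')));
    return `${crypto.createHash('sha256').update(raw).digest('base64')}-${partDigestsB64.length}`;
}

const p1 = crypto.createHash('sha256').update('part one').digest('base64');
const p2 = crypto.createHash('sha256').update('part two').digest('base64');
console.log(compositeSha256([p1, p2])); // ends in "-2" for a two-part upload
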
- if (finalChecksum && finalChecksum.type === 'FULL_OBJECT') { - metaStoreParams.checksum = finalChecksum; - } - // If key already exists - if (objMD) { - // Re-use creation-time if we can - if (objMD['creation-time']) { - metaStoreParams.creationTime = objMD['creation-time']; - // Otherwise fallback to last-modified + const metaStoreParams = { + authInfo, + objectKey, + metaHeaders, + uploadId, + dataStoreName: storedMetadata.dataStoreName, + contentType: storedMetadata['content-type'], + cacheControl: storedMetadata['cache-control'], + contentDisposition: storedMetadata['content-disposition'], + contentEncoding: storedMetadata['content-encoding'], + expires: storedMetadata.expires, + contentMD5: aggregateETag, + size: calculatedSize, + multipart: true, + isDeleteMarker: false, + replicationInfo: getReplicationInfo( + config, + objectKey, + destBucket, + false, + calculatedSize, + REPLICATION_ACTION, + ), + originOp: 's3:ObjectCreated:CompleteMultipartUpload', + overheadField: constants.overheadField, + log, + }; + // Persist FULL_OBJECT final-object checksum on the new ObjectMD. + // COMPOSITE is intentionally skipped to prevent metadata bloat, + // to be done in S3C-10399. + if (finalChecksum && finalChecksum.type === 'FULL_OBJECT') { + metaStoreParams.checksum = finalChecksum; + } + // If key already exists + if (objMD) { + // Re-use creation-time if we can + if (objMD['creation-time']) { + metaStoreParams.creationTime = objMD['creation-time']; + // Otherwise fallback to last-modified + } else { + metaStoreParams.creationTime = objMD['last-modified']; + } + // If its a new key, create a new timestamp } else { - metaStoreParams.creationTime = objMD['last-modified']; + metaStoreParams.creationTime = new Date().toJSON(); + } + if (storedMetadata['x-amz-tagging']) { + metaStoreParams.tagging = storedMetadata['x-amz-tagging']; + } + if (storedMetadata.retentionMode && storedMetadata.retentionDate) { + metaStoreParams.retentionMode = storedMetadata.retentionMode; + metaStoreParams.retentionDate = storedMetadata.retentionDate; + } + if (storedMetadata.legalHold) { + metaStoreParams.legalHold = storedMetadata.legalHold; + } + const serverSideEncryption = storedMetadata['x-amz-server-side-encryption']; + let pseudoCipherBundle = null; + if (serverSideEncryption) { + const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id']; + pseudoCipherBundle = { + algorithm: serverSideEncryption, + masterKeyId: kmsKey, + }; + setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey); } - // If its a new key, create a new timestamp - } else { - metaStoreParams.creationTime = new Date().toJSON(); - } - if (storedMetadata['x-amz-tagging']) { - metaStoreParams.tagging = storedMetadata['x-amz-tagging']; - } - if (storedMetadata.retentionMode && storedMetadata.retentionDate) { - metaStoreParams.retentionMode = storedMetadata.retentionMode; - metaStoreParams.retentionDate = storedMetadata.retentionDate; - } - if (storedMetadata.legalHold) { - metaStoreParams.legalHold = storedMetadata.legalHold; - } - const serverSideEncryption = storedMetadata['x-amz-server-side-encryption']; - let pseudoCipherBundle = null; - if (serverSideEncryption) { - const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id']; - pseudoCipherBundle = { - algorithm: serverSideEncryption, - masterKeyId: kmsKey, - }; - setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey); - } - if (authInfo.getCanonicalID() !== destBucket.getOwner()) { - metaStoreParams.bucketOwnerId = destBucket.getOwner(); - } + 
if (authInfo.getCanonicalID() !== destBucket.getOwner()) { + metaStoreParams.bucketOwnerId = destBucket.getOwner(); + } - // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. - if (isPutVersion) { - const options = overwritingVersioning(objMD, metaStoreParams); - return process.nextTick(() => next(null, destBucket, dataLocations, - metaStoreParams, mpuBucket, keysToDelete, aggregateETag, - objMD, extraPartLocations, pseudoCipherBundle, - completeObjData, options, droppedMPUSize)); - } + // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. + if (isPutVersion) { + const options = overwritingVersioning(objMD, metaStoreParams); + return process.nextTick(() => + next( + null, + destBucket, + dataLocations, + metaStoreParams, + mpuBucket, + keysToDelete, + aggregateETag, + objMD, + extraPartLocations, + pseudoCipherBundle, + completeObjData, + options, + droppedMPUSize, + ), + ); + } - if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) { - // Ensure we trigger a "delete" event in the oplog for the previously archived object - metaStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject'; - } + if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) { + // Ensure we trigger a "delete" event in the oplog for the previously archived object + metaStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject'; + } - return versioningPreprocessing(bucketName, - destBucket, objectKey, objMD, log, (err, options) => { + return versioningPreprocessing(bucketName, destBucket, objectKey, objMD, log, (err, options) => { if (err) { // TODO: check AWS error when user requested a specific // version before any versions have been put @@ -692,214 +885,295 @@ function completeMultipartUpload(authInfo, request, log, callback) { } } - return next(null, destBucket, dataLocations, - metaStoreParams, mpuBucket, keysToDelete, aggregateETag, - objMD, extraPartLocations, pseudoCipherBundle, - completeObjData, options, droppedMPUSize); + return next( + null, + destBucket, + dataLocations, + metaStoreParams, + mpuBucket, + keysToDelete, + aggregateETag, + objMD, + extraPartLocations, + pseudoCipherBundle, + completeObjData, + options, + droppedMPUSize, + ); }); - }, - function storeAsNewObj(destinationBucket, dataLocations, - metaStoreParams, mpuBucket, keysToDelete, aggregateETag, objMD, - extraPartLocations, pseudoCipherBundle, - completeObjData, options, droppedMPUSize, next) { - const dataToDelete = options.dataToDelete; - /* eslint-disable no-param-reassign */ - metaStoreParams.versionId = options.versionId; - metaStoreParams.versioning = options.versioning; - metaStoreParams.isNull = options.isNull; - metaStoreParams.deleteNullKey = options.deleteNullKey; - if (options.extraMD) { - Object.assign(metaStoreParams, options.extraMD); - } - /* eslint-enable no-param-reassign */ - - // For external backends (where completeObjData is not - // null), the backend key does not change for new versions - // of the same object (or rewrites for nonversioned - // buckets), hence the deduplication sanity check does not - // make sense for external backends. - if (objMD && !completeObjData) { - // An object with the same key already exists, check - // if it has been created by the same MPU upload by - // checking if any of its internal location keys match - // the new keys. 
In such case, it must be a duplicate - // from a retry of a previous failed completion - // attempt, hence do the following: - // - // - skip writing the new metadata key to avoid - // creating a new version pointing to the same data - // keys - // - // - skip old data locations deletion since the old - // data location keys overlap the new ones (in - // principle they should be fully identical as there - // is no reuse of previous versions' data keys in - // the normal process) - note that the previous - // failed completion attempt may have left orphan - // data keys but we lost track of them so we cannot - // delete them now - // - // - proceed to the deletion of overview and part - // metadata keys, which are likely to have failed in - // the previous MPU completion attempt - // - if (!locationKeysHaveChanged(objMD.location, dataLocations)) { - log.info('MPU complete request replay detected', { - method: 'completeMultipartUpload.storeAsNewObj', - bucketName: destinationBucket.getName(), - objectKey: metaStoreParams.objectKey, - uploadId: metaStoreParams.uploadId, - }); - return next(null, mpuBucket, keysToDelete, aggregateETag, - extraPartLocations, destinationBucket, - // pass the original version ID as generatedVersionId - objMD.versionId, droppedMPUSize); + }, + function storeAsNewObj( + destinationBucket, + dataLocations, + metaStoreParams, + mpuBucket, + keysToDelete, + aggregateETag, + objMD, + extraPartLocations, + pseudoCipherBundle, + completeObjData, + options, + droppedMPUSize, + next, + ) { + const dataToDelete = options.dataToDelete; + /* eslint-disable no-param-reassign */ + metaStoreParams.versionId = options.versionId; + metaStoreParams.versioning = options.versioning; + metaStoreParams.isNull = options.isNull; + metaStoreParams.deleteNullKey = options.deleteNullKey; + if (options.extraMD) { + Object.assign(metaStoreParams, options.extraMD); } - } - return services.metadataStoreObject(destinationBucket.getName(), - dataLocations, pseudoCipherBundle, metaStoreParams, - (err, res) => { - if (err) { - log.error('error storing object metadata', { error: err }); - return next(err, destinationBucket); + /* eslint-enable no-param-reassign */ + + // For external backends (where completeObjData is not + // null), the backend key does not change for new versions + // of the same object (or rewrites for nonversioned + // buckets), hence the deduplication sanity check does not + // make sense for external backends. + if (objMD && !completeObjData) { + // An object with the same key already exists, check + // if it has been created by the same MPU upload by + // checking if any of its internal location keys match + // the new keys. 
In such case, it must be a duplicate + // from a retry of a previous failed completion + // attempt, hence do the following: + // + // - skip writing the new metadata key to avoid + // creating a new version pointing to the same data + // keys + // + // - skip old data locations deletion since the old + // data location keys overlap the new ones (in + // principle they should be fully identical as there + // is no reuse of previous versions' data keys in + // the normal process) - note that the previous + // failed completion attempt may have left orphan + // data keys but we lost track of them so we cannot + // delete them now + // + // - proceed to the deletion of overview and part + // metadata keys, which are likely to have failed in + // the previous MPU completion attempt + // + if (!locationKeysHaveChanged(objMD.location, dataLocations)) { + log.info('MPU complete request replay detected', { + method: 'completeMultipartUpload.storeAsNewObj', + bucketName: destinationBucket.getName(), + objectKey: metaStoreParams.objectKey, + uploadId: metaStoreParams.uploadId, + }); + return next( + null, + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + // pass the original version ID as generatedVersionId + objMD.versionId, + droppedMPUSize, + ); } + } + return services.metadataStoreObject( + destinationBucket.getName(), + dataLocations, + pseudoCipherBundle, + metaStoreParams, + (err, res) => { + if (err) { + log.error('error storing object metadata', { error: err }); + return next(err, destinationBucket); + } - setExpirationHeaders(responseHeaders, { - lifecycleConfig: destinationBucket.getLifecycleConfiguration(), - objectParams: { - key: objectKey, - date: res.lastModified, - tags: res.tags, - }, - }); + setExpirationHeaders(responseHeaders, { + lifecycleConfig: destinationBucket.getLifecycleConfiguration(), + objectParams: { + key: objectKey, + date: res.lastModified, + tags: res.tags, + }, + }); - const generatedVersionId = res ? res.versionId : undefined; - // in cases where completing mpu overwrites a previous - // null version when versioning is suspended or versioning - // is not enabled, need to delete pre-existing data - // unless the preexisting object and the completed mpu - // are on external backends - if (dataToDelete) { - const newDataStoreName = - Array.isArray(dataLocations) && dataLocations[0] ? - dataLocations[0].dataStoreName : null; - return data.batchDelete(dataToDelete, - request.method, - newDataStoreName, log, err => { + const generatedVersionId = res ? res.versionId : undefined; + // in cases where completing mpu overwrites a previous + // null version when versioning is suspended or versioning + // is not enabled, need to delete pre-existing data + // unless the preexisting object and the completed mpu + // are on external backends + if (dataToDelete) { + const newDataStoreName = + Array.isArray(dataLocations) && dataLocations[0] + ? 
dataLocations[0].dataStoreName + : null; + return data.batchDelete(dataToDelete, request.method, newDataStoreName, log, err => { if (err) { return next(err); } - return next(null, mpuBucket, keysToDelete, - aggregateETag, extraPartLocations, - destinationBucket, generatedVersionId, - droppedMPUSize); + return next( + null, + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + generatedVersionId, + droppedMPUSize, + ); }); - } - return next(null, mpuBucket, keysToDelete, aggregateETag, - extraPartLocations, destinationBucket, - generatedVersionId, droppedMPUSize); - }); - }, - function deletePartsMetadata(mpuBucket, keysToDelete, aggregateETag, - extraPartLocations, destinationBucket, generatedVersionId, droppedMPUSize, next) { - services.batchDeleteObjectMetadata(mpuBucket.getName(), - keysToDelete, log, err => { + } + return next( + null, + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + generatedVersionId, + droppedMPUSize, + ); + }, + ); + }, + function deletePartsMetadata( + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + generatedVersionId, + droppedMPUSize, + next, + ) { + services.batchDeleteObjectMetadata(mpuBucket.getName(), keysToDelete, log, err => { if (err) { if (err.is?.DeleteConflict) { // DeleteConflict should trigger automatic retry // Convert to InternalError to make it retryable const customErr = errorInstances.InternalError.customizeDescription( - 'conflict deleting MPU parts metadata' + 'conflict deleting MPU parts metadata', ); - return next(customErr, extraPartLocations, - destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize); + return next( + customErr, + extraPartLocations, + destinationBucket, + aggregateETag, + generatedVersionId, + droppedMPUSize, + ); } // For NoSuchKey and other errors, return them as-is // NoSuchKey is non-retryable, InternalError and others are retryable - return next(err, extraPartLocations, - destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize); + return next( + err, + extraPartLocations, + destinationBucket, + aggregateETag, + generatedVersionId, + droppedMPUSize, + ); } - return next(null, extraPartLocations, - destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize); - }); - }, - function batchDeleteExtraParts(extraPartLocations, destinationBucket, - aggregateETag, generatedVersionId, droppedMPUSize, next) { - if (extraPartLocations && extraPartLocations.length > 0) { - return data.batchDelete(extraPartLocations, request.method, null, log, err => { - if (err) { - // Extra part deletion failure should not fail the operation - // The S3 object was created successfully and MPU metadata was cleaned up - // Orphaned extra parts are acceptable since the main operation succeeded - log.warn('failed to delete extra parts, keeping orphan but returning success', { - method: 'completeMultipartUpload', - extraPartLocationsCount: extraPartLocations.length, - error: err, - }); - } - return next(null, destinationBucket, aggregateETag, - generatedVersionId, droppedMPUSize); + return next( + null, + extraPartLocations, + destinationBucket, + aggregateETag, + generatedVersionId, + droppedMPUSize, + ); }); + }, + function batchDeleteExtraParts( + extraPartLocations, + destinationBucket, + aggregateETag, + generatedVersionId, + droppedMPUSize, + next, + ) { + if (extraPartLocations && extraPartLocations.length > 0) { + return data.batchDelete(extraPartLocations, request.method, null, log, err => 
{ + if (err) { + // Extra part deletion failure should not fail the operation + // The S3 object was created successfully and MPU metadata was cleaned up + // Orphaned extra parts are acceptable since the main operation succeeded + log.warn('failed to delete extra parts, keeping orphan but returning success', { + method: 'completeMultipartUpload', + extraPartLocationsCount: extraPartLocations.length, + error: err, + }); + } + return next(null, destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize); + }); + } + return next(null, destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize); + }, + function updateQuotas(destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize, next) { + return validateQuotas( + request, + destinationBucket, + request.accountQuotas, + ['objectDelete'], + 'objectDelete', + -droppedMPUSize, + false, + log, + err => { + if (err) { + // Ignore error, as the data has been deleted already: only inflight count + // has not been updated, and will be eventually consistent anyway + log.warn('failed to update inflights', { + method: 'completeMultipartUpload', + error: err, + }); + } + return next(null, destinationBucket, aggregateETag, generatedVersionId); + }, + ); + }, + ], + (err, destinationBucket, aggregateETag, generatedVersionId) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); + if (err) { + return callback(err, null, corsHeaders); } - return next(null, destinationBucket, aggregateETag, - generatedVersionId, droppedMPUSize); - }, - function updateQuotas(destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize, next) { - return validateQuotas(request, destinationBucket, request.accountQuotas, - ['objectDelete'], 'objectDelete', -droppedMPUSize, false, log, err => { - if (err) { - // Ignore error, as the data has been deleted already: only inflight count - // has not been updated, and will be eventually consistent anyway - log.warn('failed to update inflights', { - method: 'completeMultipartUpload', - error: err, - }); - } - return next(null, destinationBucket, aggregateETag, - generatedVersionId); - }); - }, - ], (err, destinationBucket, aggregateETag, generatedVersionId) => { - const corsHeaders = - collectCorsHeaders(request.headers.origin, request.method, - destinationBucket); - if (err) { - return callback(err, null, corsHeaders); - } - if (generatedVersionId) { - corsHeaders['x-amz-version-id'] = - versionIdUtils.encode(generatedVersionId); - } - Object.assign(responseHeaders, corsHeaders); + if (generatedVersionId) { + corsHeaders['x-amz-version-id'] = versionIdUtils.encode(generatedVersionId); + } + Object.assign(responseHeaders, corsHeaders); - const vcfg = destinationBucket.getVersioningConfiguration(); - const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; + const vcfg = destinationBucket.getVersioningConfiguration(); + const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; - xmlParams.eTag = `"${aggregateETag}"`; - if (finalChecksum) { - xmlParams.checksumAlgorithm = finalChecksum.algorithm; - xmlParams.checksumValue = finalChecksum.value; - xmlParams.checksumType = finalChecksum.type; - } - const xml = convertToXml('completeMultipartUpload', xmlParams); - pushMetric('completeMultipartUpload', log, { - oldByteLength: isVersionedObj ? 
null : oldByteLength, - authInfo, - canonicalID: destinationBucket.getOwner(), - bucket: bucketName, - keys: [objectKey], - versionId: generatedVersionId, - numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1, - location: destinationBucket.getLocationConstraint(), - }); - return callback(null, xml, responseHeaders); - }); + xmlParams.eTag = `"${aggregateETag}"`; + if (finalChecksum) { + xmlParams.checksumAlgorithm = finalChecksum.algorithm; + xmlParams.checksumValue = finalChecksum.value; + xmlParams.checksumType = finalChecksum.type; + } + const xml = convertToXml('completeMultipartUpload', xmlParams); + pushMetric('completeMultipartUpload', log, { + oldByteLength: isVersionedObj ? null : oldByteLength, + authInfo, + canonicalID: destinationBucket.getOwner(), + bucket: bucketName, + keys: [objectKey], + versionId: generatedVersionId, + numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1, + location: destinationBucket.getLocationConstraint(), + }); + return callback(null, xml, responseHeaders); + }, + ); } module.exports = completeMultipartUpload; module.exports.validatePerPartChecksums = validatePerPartChecksums; module.exports.computeFinalChecksum = computeFinalChecksum; -module.exports.validateExpectedFinalChecksum = validateExpectedFinalChecksum; \ No newline at end of file +module.exports.validateExpectedFinalChecksum = validateExpectedFinalChecksum; diff --git a/lib/api/listParts.js b/lib/api/listParts.js index 22857456f5..f5c7b5faaf 100644 --- a/lib/api/listParts.js +++ b/lib/api/listParts.js @@ -5,8 +5,7 @@ const { errors, s3middleware } = require('arsenal'); const constants = require('../../constants'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const locationConstraintCheck = - require('./apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const services = require('../services'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const escapeForXml = s3middleware.escapeForXml; @@ -58,8 +57,7 @@ function buildXML(xmlParams, xml, encodingFn) { xmlParams.forEach(param => { if (param.value !== undefined) { xml.push(`<${param.tag}>${encodingFn(param.value)}`); - } else if (param.tag !== 'NextPartNumberMarker' && - param.tag !== 'PartNumberMarker') { + } else if (param.tag !== 'NextPartNumberMarker' && param.tag !== 'PartNumberMarker') { xml.push(`<${param.tag}/>`); } }); @@ -113,19 +111,19 @@ function listParts(authInfo, request, log, callback) { const objectKey = request.objectKey; const uploadId = request.query.uploadId; const encoding = request.query['encoding-type']; - let maxParts = Number.parseInt(request.query['max-parts'], 10) ? - Number.parseInt(request.query['max-parts'], 10) : 1000; + let maxParts = Number.parseInt(request.query['max-parts'], 10) + ? Number.parseInt(request.query['max-parts'], 10) + : 1000; if (maxParts < 0) { - monitoring.promMetrics('GET', bucketName, 400, - 'listMultipartUploadParts'); + monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts'); return callback(errors.InvalidArgument); } if (maxParts > constants.listingHardLimit) { maxParts = constants.listingHardLimit; } - const partNumberMarker = - Number.parseInt(request.query['part-number-marker'], 10) ? - Number.parseInt(request.query['part-number-marker'], 10) : 0; + const partNumberMarker = Number.parseInt(request.query['part-number-marker'], 10) + ? 
Number.parseInt(request.query['part-number-marker'], 10) + : 0; const metadataValMPUparams = { authInfo, bucketName, @@ -146,203 +144,217 @@ function listParts(authInfo, request, log, callback) { let splitter = constants.splitter; const responseHeaders = {}; - async.waterfall([ - function checkDestBucketVal(next) { - standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, destinationBucket) => { - if (err) { - return next(err, destinationBucket, null); - } - if (destinationBucket.policies) { - // TODO: Check bucket policies to see if user is granted - // permission or forbidden permission to take - // given action. - // If permitted, add 'bucketPolicyGoAhead' - // attribute to params for validating at MPU level. - // This is GH Issue#76 - metadataValMPUparams.requestType = - 'bucketPolicyGoAhead'; - } - return next(null, destinationBucket); - }); - }, - function waterfall2(destBucket, next) { - metadataValMPUparams.log = log; - services.metadataValidateMultipart(metadataValMPUparams, - (err, mpuBucket, mpuOverviewObj) => { + async.waterfall( + [ + function checkDestBucketVal(next) { + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, destinationBucket) => { + if (err) { + return next(err, destinationBucket, null); + } + if (destinationBucket.policies) { + // TODO: Check bucket policies to see if user is granted + // permission or forbidden permission to take + // given action. + // If permitted, add 'bucketPolicyGoAhead' + // attribute to params for validating at MPU level. + // This is GH Issue#76 + metadataValMPUparams.requestType = 'bucketPolicyGoAhead'; + } + return next(null, destinationBucket); + }, + ); + }, + function waterfall2(destBucket, next) { + metadataValMPUparams.log = log; + services.metadataValidateMultipart(metadataValMPUparams, (err, mpuBucket, mpuOverviewObj) => { if (err) { return next(err, destBucket, null); } return next(null, destBucket, mpuBucket, mpuOverviewObj); }); - }, - function waterfall3(destBucket, mpuBucket, mpuOverviewObj, next) { - const mpuInfo = { - objectKey, - uploadId, - bucketName, - partNumberMarker, - maxParts, - mpuOverviewObj, - destBucket, - }; - const originalIdentityImpDenies = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - return data.listParts(mpuInfo, request, locationConstraintCheck, - log, (err, backendPartList) => { + }, + function waterfall3(destBucket, mpuBucket, mpuOverviewObj, next) { + const mpuInfo = { + objectKey, + uploadId, + bucketName, + partNumberMarker, + maxParts, + mpuOverviewObj, + destBucket, + }; + const originalIdentityImpDenies = request.actionImplicitDenies; // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityImpDenies; - if (err) { - return next(err, destBucket); + delete request.actionImplicitDenies; + return data.listParts(mpuInfo, request, locationConstraintCheck, log, (err, backendPartList) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityImpDenies; + if (err) { + return next(err, destBucket); + } + // if external backend doesn't handle mpu, backendPartList + // will be null + return next(null, destBucket, mpuBucket, mpuOverviewObj, backendPartList); + }); + }, + function waterfall4(destBucket, mpuBucket, mpuOverviewObj, backendPartList, next) { + // if parts were returned from cloud backend, they were not + // stored in Scality S3 metadata, so 
this step can be skipped + if (backendPartList) { + return next(null, destBucket, mpuBucket, backendPartList, mpuOverviewObj); } - // if external backend doesn't handle mpu, backendPartList - // will be null - return next(null, destBucket, mpuBucket, mpuOverviewObj, - backendPartList); - }); - }, - function waterfall4(destBucket, mpuBucket, mpuOverviewObj, - backendPartList, next) { - // if parts were returned from cloud backend, they were not - // stored in Scality S3 metadata, so this step can be skipped - if (backendPartList) { - return next(null, destBucket, mpuBucket, backendPartList, - mpuOverviewObj); - } - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; - } - const getPartsParams = { - uploadId, - mpuBucketName: mpuBucket.getName(), - maxParts, - partNumberMarker, - log, - splitter, - }; - return services.getSomeMPUparts(getPartsParams, - (err, storedParts) => { - if (err) { - return next(err, destBucket, null); + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; } - return next(null, destBucket, mpuBucket, storedParts, - mpuOverviewObj); - }); - }, function waterfall5(destBucket, mpuBucket, storedParts, - mpuOverviewObj, next) { - const encodingFn = encoding === 'url' - ? querystring.escape : escapeForXml; - const isTruncated = storedParts.IsTruncated; - const splitterLen = splitter.length; - const partListing = storedParts.Contents.map(item => { - const value = item.value; - const partChecksum = getPartChecksum(item); - return { - partNumber: getPartNumber(item, splitter, splitterLen), - lastModified: value.LastModified, - ETag: value.ETag, - size: value.Size, - checksumAlgorithm: partChecksum.checksumAlgorithm, - checksumValue: partChecksum.checksumValue, + const getPartsParams = { + uploadId, + mpuBucketName: mpuBucket.getName(), + maxParts, + partNumberMarker, + log, + splitter, }; - }); - const lastPartShown = partListing.length > 0 ? - partListing[partListing.length - 1].partNumber : undefined; + return services.getSomeMPUparts(getPartsParams, (err, storedParts) => { + if (err) { + return next(err, destBucket, null); + } + return next(null, destBucket, mpuBucket, storedParts, mpuOverviewObj); + }); + }, + function waterfall5(destBucket, mpuBucket, storedParts, mpuOverviewObj, next) { + const encodingFn = encoding === 'url' ? querystring.escape : escapeForXml; + const isTruncated = storedParts.IsTruncated; + const splitterLen = splitter.length; + const partListing = storedParts.Contents.map(item => { + const value = item.value; + const partChecksum = getPartChecksum(item); + return { + partNumber: getPartNumber(item, splitter, splitterLen), + lastModified: value.LastModified, + ETag: value.ETag, + size: value.Size, + checksumAlgorithm: partChecksum.checksumAlgorithm, + checksumValue: partChecksum.checksumValue, + }; + }); + const lastPartShown = + partListing.length > 0 ? 
partListing[partListing.length - 1].partNumber : undefined; - setExpirationHeaders(responseHeaders, { - lifecycleConfig: destBucket.getLifecycleConfiguration(), - mpuParams: { - key: mpuOverviewObj.key, - date: mpuOverviewObj.initiated, - }, - }); + setExpirationHeaders(responseHeaders, { + lifecycleConfig: destBucket.getLifecycleConfiguration(), + mpuParams: { + key: mpuOverviewObj.key, + date: mpuOverviewObj.initiated, + }, + }); + + const xml = []; + xml.push( + '', + '', + ); + buildXML( + [ + { tag: 'Bucket', value: bucketName }, + { tag: 'Key', value: objectKey }, + { tag: 'UploadId', value: uploadId }, + ], + xml, + encodingFn, + ); + const showChecksum = + !mpuOverviewObj.checksumIsDefault && + mpuOverviewObj.checksumAlgorithm && + mpuOverviewObj.checksumType; + if (showChecksum) { + buildXML( + [ + { tag: 'ChecksumAlgorithm', value: mpuOverviewObj.checksumAlgorithm.toUpperCase() }, + { tag: 'ChecksumType', value: mpuOverviewObj.checksumType }, + ], + xml, + encodingFn, + ); + } + xml.push(''); + buildXML( + [ + { tag: 'ID', value: mpuOverviewObj.initiatorID }, + { tag: 'DisplayName', value: mpuOverviewObj.initiatorDisplayName }, + ], + xml, + encodingFn, + ); + xml.push(''); + xml.push(''); + buildXML( + [ + { tag: 'ID', value: mpuOverviewObj.ownerID }, + { tag: 'DisplayName', value: mpuOverviewObj.ownerDisplayName }, + ], + xml, + encodingFn, + ); + xml.push(''); + buildXML( + [ + { tag: 'StorageClass', value: mpuOverviewObj.storageClass }, + { tag: 'PartNumberMarker', value: partNumberMarker || undefined }, + // print only if it's truncated + { tag: 'NextPartNumberMarker', value: isTruncated ? parseInt(lastPartShown, 10) : undefined }, + { tag: 'MaxParts', value: maxParts }, + { tag: 'IsTruncated', value: isTruncated ? 'true' : 'false' }, + ], + xml, + encodingFn, + ); - const xml = []; - xml.push( - '', - '' - ); - buildXML([ - { tag: 'Bucket', value: bucketName }, - { tag: 'Key', value: objectKey }, - { tag: 'UploadId', value: uploadId }, - ], xml, encodingFn); - const showChecksum = !mpuOverviewObj.checksumIsDefault && - mpuOverviewObj.checksumAlgorithm && - mpuOverviewObj.checksumType; - if (showChecksum) { - buildXML([ - { tag: 'ChecksumAlgorithm', - value: mpuOverviewObj.checksumAlgorithm.toUpperCase() }, - { tag: 'ChecksumType', value: mpuOverviewObj.checksumType }, - ], xml, encodingFn); + partListing.forEach(part => { + const partChecksumXML = showChecksum + ? getPartChecksumXML(part.checksumAlgorithm, part.checksumValue) + : undefined; + xml.push(''); + buildXML( + [ + { tag: 'PartNumber', value: part.partNumber }, + { tag: 'LastModified', value: part.lastModified }, + { tag: 'ETag', value: `"${part.ETag}"` }, + { tag: 'Size', value: part.size }, + ], + xml, + encodingFn, + ); + if (partChecksumXML) { + buildXML([partChecksumXML], xml, encodingFn); + } + xml.push(''); + }); + xml.push(''); + pushMetric('listMultipartUploadParts', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('GET', bucketName, '200', 'listMultipartUploadParts'); + next(null, destBucket, xml.join('')); + }, + ], + (err, destinationBucket, xml) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); + if (err) { + // The 200 metric is emitted on success in the final waterfall + // step; only count failures here to avoid double-counting. 
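
// Aside: a self-contained approximation of the buildXML helper these blocks
// call (behavior inferred from its definition near the top of listParts.js;
// arsenal's escapeForXml is stubbed here): defined values render as
// <Tag>value</Tag>, undefined values render as an empty element, and the two
// part-number marker tags are omitted entirely when absent.
const escapeForXml = value =>
    String(value).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');

function buildXML(xmlParams, xml, encodingFn) {
    xmlParams.forEach(param => {
        if (param.value !== undefined) {
            xml.push(`<${param.tag}>${encodingFn(param.value)}</${param.tag}>`);
        } else if (param.tag !== 'NextPartNumberMarker' && param.tag !== 'PartNumberMarker') {
            xml.push(`<${param.tag}/>`); // empty element for other absent tags
        }
    });
}

const sample = [];
buildXML([
    { tag: 'MaxParts', value: 1000 },
    { tag: 'PartNumberMarker', value: undefined }, // dropped, per the special case
], sample, escapeForXml);
console.log(sample.join('')); // <MaxParts>1000</MaxParts>
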
+                monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts');
+            }
-            }
-            xml.push('<Initiator>');
-            buildXML([
-                { tag: 'ID', value: mpuOverviewObj.initiatorID },
-                { tag: 'DisplayName',
-                    value: mpuOverviewObj.initiatorDisplayName },
-            ], xml, encodingFn);
-            xml.push('</Initiator>');
-            xml.push('<Owner>');
-            buildXML([
-                { tag: 'ID', value: mpuOverviewObj.ownerID },
-                { tag: 'DisplayName', value: mpuOverviewObj.ownerDisplayName },
-            ], xml, encodingFn);
-            xml.push('</Owner>');
-            buildXML([
-                { tag: 'StorageClass', value: mpuOverviewObj.storageClass },
-                { tag: 'PartNumberMarker', value: partNumberMarker ||
-                    undefined },
-                // print only if it's truncated
-                { tag: 'NextPartNumberMarker', value: isTruncated ?
-                    parseInt(lastPartShown, 10) : undefined },
-                { tag: 'MaxParts', value: maxParts },
-                { tag: 'IsTruncated', value: isTruncated ? 'true' : 'false' },
-            ], xml, encodingFn);
+            Object.assign(responseHeaders, corsHeaders);
-            partListing.forEach(part => {
-                const partChecksumXML = showChecksum ?
-                    getPartChecksumXML(
-                        part.checksumAlgorithm, part.checksumValue) :
-                    undefined;
-                xml.push('<Part>');
-                buildXML([
-                    { tag: 'PartNumber', value: part.partNumber },
-                    { tag: 'LastModified', value: part.lastModified },
-                    { tag: 'ETag', value: `"${part.ETag}"` },
-                    { tag: 'Size', value: part.size },
-                ], xml, encodingFn);
-                if (partChecksumXML) {
-                    buildXML([partChecksumXML], xml, encodingFn);
-                }
-                xml.push('</Part>');
-            });
-            xml.push('</ListPartsResult>');
-            pushMetric('listMultipartUploadParts', log, {
-                authInfo,
-                bucket: bucketName,
-            });
-            monitoring.promMetrics(
-                'GET', bucketName, '200', 'listMultipartUploadParts');
-            next(null, destBucket, xml.join(''));
+            return callback(err, xml, responseHeaders);
+        },
-    ], (err, destinationBucket, xml) => {
-        const corsHeaders = collectCorsHeaders(request.headers.origin,
-            request.method, destinationBucket);
-        if (err) {
-            // The 200 metric is emitted on success in the final waterfall
-            // step; only count failures here to avoid double-counting.
- monitoring.promMetrics('GET', bucketName, 400, - 'listMultipartUploadParts'); - } - Object.assign(responseHeaders, corsHeaders); - - return callback(err, xml, responseHeaders); - }); + ); return undefined; } diff --git a/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js b/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js index 0a51cc3530..a638bbdb1d 100644 --- a/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js +++ b/tests/functional/aws-node-sdk/test/object/completeMpuChecksum.js @@ -12,8 +12,7 @@ const { const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { algorithms } = - require('../../../../../lib/api/apiUtils/integrity/validateChecksums'); +const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/validateChecksums'); const bucket = `mpu-complete-checksum-${Date.now()}`; const partBody = Buffer.from('I am a part body for complete-MPU testing', 'utf8'); @@ -53,100 +52,138 @@ describe('CompleteMultipartUpload final-object checksum', () => const field = tagField(algo); it(`should return ${algo}/${type} on CompleteMPU response`, async () => { const key = `complete-${algo.toLowerCase()}-${type.toLowerCase()}-${Date.now()}`; - const partChecksum = - await algorithms[algo.toLowerCase()].digest(partBody); - - const create = await s3.send(new CreateMultipartUploadCommand({ - Bucket: bucket, Key: key, - ChecksumAlgorithm: algo, - ChecksumType: type, - })); - - const uploadPart = await s3.send(new UploadPartCommand({ - Bucket: bucket, Key: key, - UploadId: create.UploadId, - PartNumber: 1, - Body: partBody, - [field]: partChecksum, - })); - - const complete = await s3.send(new CompleteMultipartUploadCommand({ - Bucket: bucket, Key: key, - UploadId: create.UploadId, - MultipartUpload: { - Parts: [{ - PartNumber: 1, - ETag: uploadPart.ETag, - [field]: partChecksum, - }], - }, - })); - - assert(complete[field], - `expected ${field} in CompleteMPU response, got: ${JSON.stringify(complete)}`); + const partChecksum = await algorithms[algo.toLowerCase()].digest(partBody); + + const create = await s3.send( + new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: key, + ChecksumAlgorithm: algo, + ChecksumType: type, + }), + ); + + const uploadPart = await s3.send( + new UploadPartCommand({ + Bucket: bucket, + Key: key, + UploadId: create.UploadId, + PartNumber: 1, + Body: partBody, + [field]: partChecksum, + }), + ); + + const complete = await s3.send( + new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: key, + UploadId: create.UploadId, + MultipartUpload: { + Parts: [ + { + PartNumber: 1, + ETag: uploadPart.ETag, + [field]: partChecksum, + }, + ], + }, + }), + ); + + assert(complete[field], `expected ${field} in CompleteMPU response, got: ${JSON.stringify(complete)}`); assert.strictEqual(complete.ChecksumType, type); if (type === 'COMPOSITE') { - assert(complete[field].endsWith('-1'), - `expected -1 suffix for 1-part COMPOSITE, got ${complete[field]}`); + assert( + complete[field].endsWith('-1'), + `expected -1 suffix for 1-part COMPOSITE, got ${complete[field]}`, + ); } else { - assert(!complete[field].includes('-'), - `FULL_OBJECT value should have no suffix, got ${complete[field]}`); + assert( + !complete[field].includes('-'), + `FULL_OBJECT value should have no suffix, got ${complete[field]}`, + ); } // HeadObject with ChecksumMode=ENABLED must surface the same // value that CompleteMPU returned for FULL_OBJECT MPUs. 
// COMPOSITE storage is deferred, so HeadObject leaves the field absent — matching // cloudserver's current intentional skip. - const head = await s3.send(new HeadObjectCommand({ - Bucket: bucket, Key: key, - ChecksumMode: 'ENABLED', - })); + const head = await s3.send( + new HeadObjectCommand({ + Bucket: bucket, + Key: key, + ChecksumMode: 'ENABLED', + }), + ); if (type === 'FULL_OBJECT') { - assert.strictEqual(head[field], complete[field], - `HeadObject ${field} should match CompleteMPU response`); + assert.strictEqual( + head[field], + complete[field], + `HeadObject ${field} should match CompleteMPU response`, + ); assert.strictEqual(head.ChecksumType, type); } else { - assert.strictEqual(head[field], undefined, - `COMPOSITE storage is deferred; HeadObject should not surface ${field}`); + assert.strictEqual( + head[field], + undefined, + `COMPOSITE storage is deferred; HeadObject should not surface ${field}`, + ); assert.strictEqual(head.ChecksumType, undefined); } }); }); - it('should return CRC64NVME/FULL_OBJECT on CompleteMPU response when CreateMPU sent no checksum headers', + it( + 'should return CRC64NVME/FULL_OBJECT on CompleteMPU response ' + 'when CreateMPU sent no checksum headers', async () => { const key = `complete-default-${Date.now()}`; - const create = await s3.send(new CreateMultipartUploadCommand({ - Bucket: bucket, Key: key, - })); - - const uploadPart = await s3.send(new UploadPartCommand({ - Bucket: bucket, Key: key, - UploadId: create.UploadId, - PartNumber: 1, - Body: partBody, - })); - - const complete = await s3.send(new CompleteMultipartUploadCommand({ - Bucket: bucket, Key: key, - UploadId: create.UploadId, - MultipartUpload: { - Parts: [{ PartNumber: 1, ETag: uploadPart.ETag }], - }, - })); - - assert(complete.ChecksumCRC64NVME, - `expected ChecksumCRC64NVME for default MPU, got: ${JSON.stringify(complete)}`); + const create = await s3.send( + new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: key, + }), + ); + + const uploadPart = await s3.send( + new UploadPartCommand({ + Bucket: bucket, + Key: key, + UploadId: create.UploadId, + PartNumber: 1, + Body: partBody, + }), + ); + + const complete = await s3.send( + new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: key, + UploadId: create.UploadId, + MultipartUpload: { + Parts: [{ PartNumber: 1, ETag: uploadPart.ETag }], + }, + }), + ); + + assert( + complete.ChecksumCRC64NVME, + `expected ChecksumCRC64NVME for default MPU, got: ${JSON.stringify(complete)}`, + ); assert.strictEqual(complete.ChecksumType, 'FULL_OBJECT'); // Default MPU is FULL_OBJECT — checksum is persisted, so // HeadObject must return the same value. 
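[Editor's note, not part of the patch] The FULL_OBJECT/COMPOSITE split these assertions exercise can be stated compactly with the validateChecksums helpers this series adds. A minimal sketch only: the require path mirrors the tests above, the helper names are illustrative, and the COMPOSITE formula matches the computeCompositeMPUChecksum unit test later in this series.

const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/validateChecksums');

// FULL_OBJECT: the reported value is the digest of the part bodies
// concatenated in upload order; no '-N' part-count suffix is appended.
async function expectedFullObjectCrc64(partBodies) {
    return algorithms.crc64nvme.digest(Buffer.concat(partBodies));
}

// COMPOSITE: the digest runs over the concatenated base64-decoded part
// checksums instead, and a '-<partCount>' suffix is appended.
async function expectedCompositeChecksum(algo, partChecksums) {
    const concat = Buffer.concat(partChecksums.map(c => Buffer.from(c, 'base64')));
    return `${await algorithms[algo].digest(concat)}-${partChecksums.length}`;
}

That split is why the assertions here only require HeadObject to surface FULL_OBJECT values; COMPOSITE persistence is deferred, as the comment above notes.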
- const head = await s3.send(new HeadObjectCommand({ - Bucket: bucket, Key: key, - ChecksumMode: 'ENABLED', - })); + const head = await s3.send( + new HeadObjectCommand({ + Bucket: bucket, + Key: key, + ChecksumMode: 'ENABLED', + }), + ); assert.strictEqual(head.ChecksumCRC64NVME, complete.ChecksumCRC64NVME); assert.strictEqual(head.ChecksumType, 'FULL_OBJECT'); - }); + }, + ); })); diff --git a/tests/functional/aws-node-sdk/test/object/mpuVersion.js b/tests/functional/aws-node-sdk/test/object/mpuVersion.js index ebdd4e0f35..259a4a1537 100644 --- a/tests/functional/aws-node-sdk/test/object/mpuVersion.js +++ b/tests/functional/aws-node-sdk/test/object/mpuVersion.js @@ -25,9 +25,7 @@ const checkError = require('../../lib/utility/checkError'); const { getMetadata, fakeMetadataArchive, isNullKeyMetadataV1 } = require('../utils/init'); const { hasColdStorage } = require('../../lib/utility/test-utils'); -const { - LOCATION_NAME_DMF, -} = require('../../../../constants'); +const { LOCATION_NAME_DMF } = require('../../../../constants'); const log = new DummyRequestLogger(); @@ -59,11 +57,11 @@ async function putMPUVersion(s3, bucketName, objectName, vId) { args.request.headers['x-scal-s3-version-id'] = vId; return next(args); }, - { step: 'build' } + { step: 'build' }, ); } const resCreation = await s3.send(command); - + const uploadId = resCreation.UploadId; const uploadParams = { Body: 'okok', @@ -80,11 +78,11 @@ async function putMPUVersion(s3, bucketName, objectName, vId) { args.request.headers['x-scal-s3-version-id'] = vId; return next(args); }, - { step: 'build' } + { step: 'build' }, ); } const uploadRes = await s3.send(uploadCommand); - + const completeParams = { Bucket: bucketName, Key: objectName, @@ -92,9 +90,9 @@ async function putMPUVersion(s3, bucketName, objectName, vId) { Parts: [ { ETag: uploadRes.ETag, - PartNumber: 1 + PartNumber: 1, }, - ] + ], }, UploadId: uploadId, }; @@ -106,7 +104,7 @@ async function putMPUVersion(s3, bucketName, objectName, vId) { args.request.headers['x-scal-s3-version-id'] = vId; return next(args); }, - { step: 'build' } + { step: 'build' }, ); } return await s3.send(completeCommand); @@ -123,8 +121,11 @@ function checkVersionsAndUpdate(versionsBefore, versionsAfter, indexes) { /* eslint-disable no-param-reassign */ versionsBefore[i].value.Size = versionsAfter[i].value.Size; // Also update uploadId if it exists and is different since now aws sdk returns it as well - if (versionsAfter[i].value.uploadId && versionsBefore[i].value.uploadId && - versionsAfter[i].value.uploadId !== versionsBefore[i].value.uploadId) { + if ( + versionsAfter[i].value.uploadId && + versionsBefore[i].value.uploadId && + versionsAfter[i].value.uploadId !== versionsBefore[i].value.uploadId + ) { versionsBefore[i].value.uploadId = versionsAfter[i].value.uploadId; } /* eslint-enable no-param-reassign */ @@ -171,13 +172,15 @@ describe('MPU with x-scal-s3-version-id header', () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; await new Promise((resolve, reject) => { - metadata.setup(err => err ? reject(err) : resolve()); + metadata.setup(err => (err ? 
reject(err) : resolve())); }); await s3.send(new CreateBucketCommand({ Bucket: bucketName })); - await s3.send(new CreateBucketCommand({ - Bucket: bucketNameMD, - ObjectLockEnabledForBucket: true - })); + await s3.send( + new CreateBucketCommand({ + Bucket: bucketNameMD, + ObjectLockEnabledForBucket: true, + }), + ); }); afterEach(async () => { @@ -191,14 +194,14 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; try { await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); - + try { await putMPUVersion(s3, bucketName, objectName, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P'); throw new Error('Expected InvalidArgument error'); @@ -227,17 +230,21 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; try { await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); - + try { - await putMPUVersion(s3, bucketName, objectName, - '393833343735313131383832343239393939393952473030312020313031'); + await putMPUVersion( + s3, + bucketName, + objectName, + '393833343735313131383832343239393939393952473030312020313031', + ); throw new Error('Expected NoSuchVersion error'); } catch (err) { checkError(err, 'NoSuchVersion', 404); @@ -255,7 +262,7 @@ describe('MPU with x-scal-s3-version-id header', () => { try { await s3.send(new PutObjectCommand(params)); - + try { await putMPUVersion(s3, bucketName, objectName, ''); throw new Error('Expected InvalidObjectState error'); @@ -276,7 +283,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; let vId; @@ -286,12 +293,14 @@ describe('MPU with x-scal-s3-version-id header', () => { const deleteRes = await s3.send(new DeleteObjectCommand(params)); vId = deleteRes.VersionId; - - putMPUVersion(s3, bucketName, objectName, vId).then(() => { - throw new Error('Expected MethodNotAllowed error'); - }).catch(err => { - checkError(err, 'MethodNotAllowed', 405); - }); + + putMPUVersion(s3, bucketName, objectName, vId) + .then(() => { + throw new Error('Expected MethodNotAllowed error'); + }) + .catch(err => { + checkError(err, 'MethodNotAllowed', 405); + }); } catch (err) { if (err.message === 'Expected MethodNotAllowed error') { throw err; @@ -309,28 +318,35 @@ describe('MPU with x-scal-s3-version-id header', () => { try { await putMPU(s3, bucketName, objectName); - + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); - + objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); versionsBefore = versionRes1.Versions; await putMPUVersion(s3, bucketName, objectName, ''); - + objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); - + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsAfter = versionRes2.Versions; - + clearUploadIdAndRestoreStatusFromVersions(versionsBefore); clearUploadIdAndRestoreStatusFromVersions(versionsAfter); - + assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'uploadId', 'microVersionId', 'x-amz-restore', - 'archive', 'dataStoreName', 'originOp', 
'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'originOp', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); } catch (err) { @@ -339,33 +355,41 @@ describe('MPU with x-scal-s3-version-id header', () => { }); it('should overwrite an object', async () => { - const params = { Bucket: bucketName, Key: objectName }; + const params = { Bucket: bucketName, Key: objectName }; - await s3.send(new PutObjectCommand(params)); + await s3.send(new PutObjectCommand(params)); - await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); - - const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); - - const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); - const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); - await putMPUVersion(s3, bucketName, objectName, ''); + const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); - const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); - const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); - const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions); + await putMPUVersion(s3, bucketName, objectName, ''); - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); - assert.deepStrictEqual(versionsAfter, versionsBefore); + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); - - assert.deepStrictEqual(objMDAfter, objMDBefore); + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); + + assert.deepStrictEqual(objMDAfter, objMDBefore); }); it('should overwrite a version', async () => { @@ -373,17 +397,17 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; await s3.send(new PutBucketVersioningCommand(vParams)); - + const putRes = await s3.send(new PutObjectCommand(params)); const vId = putRes.VersionId; await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); @@ -392,16 +416,24 @@ describe('MPU with x-scal-s3-version-id header', () => { await putMPUVersion(s3, bucketName, objectName, vId); const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); - + const versionRes2 = await 
metadataListObjectPromise(bucketName, mdListingParams, log); const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions); checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -410,17 +442,17 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; await s3.send(new PutBucketVersioningCommand(vParams)); - + const putRes = await s3.send(new PutObjectCommand(params)); const vId = putRes.VersionId; await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); @@ -429,16 +461,24 @@ describe('MPU with x-scal-s3-version-id header', () => { await putMPUVersion(s3, bucketName, objectName, ''); const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); - + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions); checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -447,14 +487,14 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; - + await s3.send(new PutObjectCommand(params)); await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); - + await fakeMetadataArchivePromise(bucketName, objectName, 'null', archive); const objMDBefore = await getMetadataPromise(bucketName, objectName, 'null'); @@ -471,9 +511,17 @@ describe('MPU with x-scal-s3-version-id header', () => { checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -482,13 +530,13 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + 
}, }; const params = { Bucket: bucketName, Key: objectName }; - + await s3.send(new PutObjectCommand(params)); await s3.send(new PutBucketVersioningCommand(vParams)); - + const putRes = await s3.send(new PutObjectCommand(params)); const vId = putRes.VersionId; @@ -509,9 +557,17 @@ describe('MPU with x-scal-s3-version-id header', () => { checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -520,21 +576,21 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const sParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Suspended', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; - + await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); await s3.send(new PutBucketVersioningCommand(sParams)); await s3.send(new PutObjectCommand(params)); - + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); @@ -552,9 +608,17 @@ describe('MPU with x-scal-s3-version-id header', () => { checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -563,22 +627,22 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; - + await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); - + const putRes = await s3.send(new PutObjectCommand(params)); const vId = putRes.VersionId; await s3.send(new PutObjectCommand(params)); - + await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); @@ -592,9 +656,17 @@ describe('MPU with x-scal-s3-version-id header', () => { checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 
'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -603,18 +675,18 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); - + const putRes = await s3.send(new PutObjectCommand(params)); const vId = putRes.VersionId; await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); @@ -623,16 +695,24 @@ describe('MPU with x-scal-s3-version-id header', () => { await putMPUVersion(s3, bucketName, objectName, vId); const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); - + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsAfter = clearUploadIdAndRestoreStatusFromVersions(versionRes2.Versions); checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -641,31 +721,31 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const sParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Suspended', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; await s3.send(new PutBucketVersioningCommand(vParams)); await s3.send(new PutObjectCommand(params)); - + const putRes = await s3.send(new PutObjectCommand(params)); const vId = putRes.VersionId; await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); - + await s3.send(new PutBucketVersioningCommand(sParams)); - + await putMPUVersion(s3, bucketName, objectName, vId); const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); @@ -676,9 +756,17 @@ describe('MPU with x-scal-s3-version-id header', () => { checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert.deepStrictEqual(objMDAfter, objMDBefore); }); @@ -687,21 +775,21 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; await s3.send(new 
PutObjectCommand(params)); - + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); - + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); const versionsBefore = clearUploadIdAndRestoreStatusFromVersions(versionRes1.Versions); - + const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); - + await s3.send(new PutBucketVersioningCommand(vParams)); - + await putMPUVersion(s3, bucketName, objectName, 'null'); const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); @@ -712,9 +800,17 @@ describe('MPU with x-scal-s3-version-id header', () => { checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName', 'checksum']); + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'checksum', + ]); assert(isDeepStrictEqual(objMDAfter, objMDBefore), 'Objects should be deeply equal'); }); @@ -726,12 +822,12 @@ describe('MPU with x-scal-s3-version-id header', () => { restoreRequestedAt: new Date(0), restoreRequestedDays: 5, restoreCompletedAt: new Date(10), - restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)), + restoreWillExpireAt: new Date(10 + 5 * 24 * 60 * 60 * 1000), }; await s3.send(new PutObjectCommand(params)); - + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archiveCompleted); - + try { await putMPUVersion(s3, bucketName, objectName, ''); throw new Error('Expected InvalidObjectState error'); @@ -740,45 +836,45 @@ describe('MPU with x-scal-s3-version-id header', () => { } }); - [ - 'non versioned', - 'versioned', - 'suspended' - ].forEach(versioning => { + ['non versioned', 'versioned', 'suspended'].forEach(versioning => { it(`should update restore metadata while keeping storage class (${versioning})`, async () => { const params = { Bucket: bucketName, Key: objectName }; if (versioning === 'versioned') { - await s3.send(new PutBucketVersioningCommand({ - Bucket: bucketName, - VersioningConfiguration: { Status: 'Enabled' } - })); + await s3.send( + new PutBucketVersioningCommand({ + Bucket: bucketName, + VersioningConfiguration: { Status: 'Enabled' }, + }), + ); } else if (versioning === 'suspended') { - await s3.send(new PutBucketVersioningCommand({ - Bucket: bucketName, - VersioningConfiguration: { Status: 'Suspended' } - })); + await s3.send( + new PutBucketVersioningCommand({ + Bucket: bucketName, + VersioningConfiguration: { Status: 'Suspended' }, + }), + ); } - + await s3.send(new PutObjectCommand(params)); - + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); await metadataListObjectPromise(bucketName, mdListingParams, log); - + await putMPUVersion(s3, bucketName, objectName, ''); const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); - + const listRes = await s3.send(new ListObjectsCommand({ Bucket: bucketName })); assert.strictEqual(listRes.Contents.length, 1); assert.strictEqual(listRes.Contents[0].StorageClass, LOCATION_NAME_DMF); - + const headRes = await s3.send(new HeadObjectCommand(params)); assert.strictEqual(headRes.StorageClass, LOCATION_NAME_DMF); - + const getRes = 
await s3.send(new GetObjectCommand(params)); assert.strictEqual(getRes.StorageClass, LOCATION_NAME_DMF); @@ -786,10 +882,14 @@ describe('MPU with x-scal-s3-version-id header', () => { assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, - objMDBefore.archive.restoreRequestedAt); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays, - objMDBefore.archive.restoreRequestedDays); + assert.deepStrictEqual( + objMDAfter.archive.restoreRequestedAt, + objMDBefore.archive.restoreRequestedAt, + ); + assert.deepStrictEqual( + objMDAfter.archive.restoreRequestedDays, + objMDBefore.archive.restoreRequestedDays, + ); assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); assert(objMDAfter.archive.restoreCompletedAt); @@ -798,18 +898,17 @@ describe('MPU with x-scal-s3-version-id header', () => { }); }); - it('should "copy" all but non data-related metadata (data encryption, data size...)', async () => { const params = { Bucket: bucketNameMD, - Key: objectName + Key: objectName, }; const putParams = { ...params, Metadata: { 'custom-user-md': 'custom-md', }, - WebsiteRedirectLocation: 'http://custom-redirect' + WebsiteRedirectLocation: 'http://custom-redirect', }; const aclParams = { ...params, @@ -819,51 +918,51 @@ describe('MPU with x-scal-s3-version-id header', () => { const tagParams = { ...params, Tagging: { - TagSet: [{ - Key: 'tag1', - Value: 'value1' - }, { - Key: 'tag2', - Value: 'value2' - }] - } + TagSet: [ + { + Key: 'tag1', + Value: 'value1', + }, + { + Key: 'tag2', + Value: 'value2', + }, + ], + }, }; const legalHoldParams = { ...params, LegalHold: { - Status: 'ON' + Status: 'ON', }, }; const acl = { - 'Canned': '', - 'FULL_CONTROL': [ + Canned: '', + FULL_CONTROL: [ // canonicalID of user Bart '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', ], - 'WRITE_ACP': [], - 'READ': [], - 'READ_ACP': [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], }; const tags = { tag1: 'value1', tag2: 'value2' }; const replicationInfo = { - 'status': 'COMPLETED', - 'backends': [ - { - 'site': 'azure-normal', - 'status': 'COMPLETED', - 'dataStoreVersionId': '', - }, - ], - 'content': [ - 'DATA', - 'METADATA', + status: 'COMPLETED', + backends: [ + { + site: 'azure-normal', + status: 'COMPLETED', + dataStoreVersionId: '', + }, ], - 'destination': 'arn:aws:s3:::versioned', - 'storageClass': 'azure-normal', - 'role': 'arn:aws:iam::root:role/s3-replication-role', - 'storageType': 'azure', - 'dataStoreVersionId': '', - 'isNFS': null, + content: ['DATA', 'METADATA'], + destination: 'arn:aws:s3:::versioned', + storageClass: 'azure-normal', + role: 'arn:aws:iam::root:role/s3-replication-role', + storageType: 'azure', + dataStoreVersionId: '', + isNFS: null, }; await s3.send(new PutObjectCommand(putParams)); await s3.send(new PutObjectAclCommand(aclParams)); @@ -882,7 +981,6 @@ describe('MPU with x-scal-s3-version-id header', () => { objMD['content-encoding'] = 'testencoding'; objMD['x-amz-server-side-encryption'] = 'aws:kms'; - await metadataPutObjectMDPromise(bucketNameMD, objectName, objMD, undefined, log); await putMPUVersion(s3, bucketNameMD, objectName, ''); @@ -903,7 +1001,7 @@ describe('MPU with x-scal-s3-version-id header', () => { // data's etag inside x-amz-restore assert.strictEqual(finalObjMD['content-md5'], 'testmd5'); assert.strictEqual(typeof finalObjMD['x-amz-restore']['content-md5'], 
'string'); - + // removing legal hold to be able to clean the bucket after the test legalHoldParams.LegalHold.Status = 'OFF'; await s3.send(new PutObjectLegalHoldCommand(legalHoldParams)); diff --git a/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js b/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js index e80d9a0d31..520decd4b6 100644 --- a/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js +++ b/tests/functional/aws-node-sdk/test/object/objectGetAttributes.js @@ -31,9 +31,14 @@ describe('objectGetAttributes', () => { beforeEach(async () => { await s3.send(new CreateBucketCommand({ Bucket: bucket })); - await s3.send(new PutObjectCommand({ - Bucket: bucket, Key: key, Body: body, ChecksumAlgorithm: 'CRC64NVME', - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + ChecksumAlgorithm: 'CRC64NVME', + }), + ); }); afterEach(async () => { @@ -43,12 +48,14 @@ describe('objectGetAttributes', () => { it('should fail with a wrong bucket owner header', async () => { try { - await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag'], - ExpectedBucketOwner: 'wrongAccountId', - })); + await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag'], + ExpectedBucketOwner: 'wrongAccountId', + }), + ); assert.fail('Expected AccessDenied error'); } catch (err) { assert.strictEqual(err.name, 'AccessDenied'); @@ -58,11 +65,13 @@ describe('objectGetAttributes', () => { it('should fail because attributes header is missing', async () => { try { - await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: [], - })); + await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: [], + }), + ); assert.fail('Expected InvalidArgument error'); } catch (err) { assert.strictEqual(err.name, 'InvalidArgument'); @@ -72,11 +81,13 @@ describe('objectGetAttributes', () => { it('should fail because attribute name is invalid', async () => { try { - await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['InvalidAttribute'], - })); + await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['InvalidAttribute'], + }), + ); assert.fail('Expected InvalidArgument error'); } catch (err) { assert.strictEqual(err.name, 'InvalidArgument'); @@ -86,11 +97,13 @@ describe('objectGetAttributes', () => { it('should return NoSuchKey for non-existent object', async () => { try { - await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: 'nonexistent', - ObjectAttributes: ['ETag'], - })); + await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: 'nonexistent', + ObjectAttributes: ['ETag'], + }), + ); assert.fail('Expected NoSuchKey error'); } catch (err) { assert.strictEqual(err.name, 'NoSuchKey'); @@ -99,11 +112,13 @@ describe('objectGetAttributes', () => { }); it('should return all attributes', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag', 'ObjectParts', 'StorageClass', 'ObjectSize'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag', 'ObjectParts', 'StorageClass', 'ObjectSize'], + }), + ); assert.strictEqual(data.ETag, expectedMD5); assert.strictEqual(data.StorageClass, 'STANDARD'); @@ -113,21 +128,25 @@ 
describe('objectGetAttributes', () => { }); it('should return ETag', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag'], + }), + ); assert.strictEqual(data.ETag, expectedMD5); }); it('should return ChecksumCRC64NVME for object', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['Checksum'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['Checksum'], + }), + ); assert(data.Checksum, 'Checksum should be present'); assert(data.Checksum.ChecksumCRC64NVME, 'ChecksumCRC64NVME should be present'); @@ -135,11 +154,13 @@ describe('objectGetAttributes', () => { }); it('should not return Checksum when not requested', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag', 'ObjectSize'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag', 'ObjectSize'], + }), + ); assert(data.ETag, 'ETag should be present'); assert(data.ObjectSize, 'ObjectSize should be present'); @@ -148,42 +169,50 @@ describe('objectGetAttributes', () => { it("shouldn't return ObjectParts for non-MPU objects", async () => { // Requesting only ObjectParts for a non-MPU object break AWS SDK v3 - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ObjectParts', 'ETag'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ObjectParts', 'ETag'], + }), + ); assert.strictEqual(data.ObjectParts, undefined, "ObjectParts shouldn't be present"); assert.strictEqual(data.ETag, expectedMD5); }); it('should return StorageClass', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['StorageClass'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['StorageClass'], + }), + ); assert.strictEqual(data.StorageClass, 'STANDARD'); }); it('should return ObjectSize', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ObjectSize'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ObjectSize'], + }), + ); assert.strictEqual(data.ObjectSize, body.length); }); it('should return LastModified', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag'], + }), + ); assert(data.LastModified, 'LastModified should be present'); assert(data.LastModified instanceof Date, 'LastModified should be a Date'); @@ -206,31 +235,37 @@ describe('Test get object attributes with multipart upload', () => { await s3.send(new CreateBucketCommand({ Bucket: bucket })); - const createResult = await s3.send(new CreateMultipartUploadCommand({ - Bucket: bucket, - Key: mpuKey, - })); + const createResult = await s3.send( + new 
CreateMultipartUploadCommand({ + Bucket: bucket, + Key: mpuKey, + }), + ); const uploadId = createResult.UploadId; const partData = Buffer.alloc(partSize, 'a'); const parts = []; for (let i = 1; i <= partCount; i++) { - const uploadResult = await s3.send(new UploadPartCommand({ - Bucket: bucket, - Key: mpuKey, - PartNumber: i, - UploadId: uploadId, - Body: partData, - })); + const uploadResult = await s3.send( + new UploadPartCommand({ + Bucket: bucket, + Key: mpuKey, + PartNumber: i, + UploadId: uploadId, + Body: partData, + }), + ); parts.push({ PartNumber: i, ETag: uploadResult.ETag }); } - await s3.send(new CompleteMultipartUploadCommand({ - Bucket: bucket, - Key: mpuKey, - UploadId: uploadId, - MultipartUpload: { Parts: parts }, - })); + await s3.send( + new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: mpuKey, + UploadId: uploadId, + MultipartUpload: { Parts: parts }, + }), + ); }); after(async () => { @@ -239,22 +274,26 @@ describe('Test get object attributes with multipart upload', () => { }); it('should return TotalPartsCount for MPU object', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: mpuKey, - ObjectAttributes: ['ObjectParts'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: mpuKey, + ObjectAttributes: ['ObjectParts'], + }), + ); assert(data.ObjectParts, 'ObjectParts should be present'); assert.strictEqual(data.ObjectParts.TotalPartsCount, partCount); }); it('should return TotalPartsCount along with other attributes for MPU object', async () => { - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: bucket, - Key: mpuKey, - ObjectAttributes: ['ETag', 'ObjectParts', 'ObjectSize', 'StorageClass'], - })); + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: bucket, + Key: mpuKey, + ObjectAttributes: ['ETag', 'ObjectParts', 'ObjectSize', 'StorageClass'], + }), + ); assert(data.ETag, 'ETag should be present'); assert(data.ETag.includes(`-${partCount}`), `ETag should indicate MPU with ${partCount} parts`); @@ -286,64 +325,76 @@ describe('objectGetAttributes with user metadata', () => { }); it('should return specific user metadata when requested', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - 'custom-key': 'custom-value', - 'another-key': 'another-value', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-custom-key'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + 'custom-key': 'custom-value', + 'another-key': 'another-value', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-custom-key'], + }), + ); assert.strictEqual(response['x-amz-meta-custom-key'], 'custom-value'); }); it('should return multiple user metadata when requested', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - foo: 'foo-value', - bar: 'bar-value', - baz: 'baz-value', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-foo', 'x-amz-meta-bar'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + foo: 'foo-value', + 
bar: 'bar-value', + baz: 'baz-value', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-foo', 'x-amz-meta-bar'], + }), + ); assert.strictEqual(response['x-amz-meta-foo'], 'foo-value'); assert.strictEqual(response['x-amz-meta-bar'], 'bar-value'); }); it('should return only all user metadata when x-amz-meta-* is requested', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - key1: 'value1', - key2: 'value2', - key3: 'value3', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-*'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + key1: 'value1', + key2: 'value2', + key3: 'value3', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-*'], + }), + ); assert.strictEqual(response['x-amz-meta-key1'], 'value1'); assert.strictEqual(response['x-amz-meta-key2'], 'value2'); @@ -352,75 +403,91 @@ describe('objectGetAttributes with user metadata', () => { }); it('should return empty response when object has no user metadata and x-amz-meta-* is requested', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag', 'x-amz-meta-*'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag', 'x-amz-meta-*'], + }), + ); const metadataKeys = Object.keys(response).filter(k => k.startsWith('x-amz-meta-')); assert.strictEqual(metadataKeys.length, 0); }); it('should return empty response when requested metadata key does not exist', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - existing: 'value', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag', 'x-amz-meta-nonexistent'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + existing: 'value', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag', 'x-amz-meta-nonexistent'], + }), + ); assert.strictEqual(response['x-amz-meta-nonexistent'], undefined); }); it('should return empty response when only a non-existing metadata key is requested', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - existing: 'value', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-nonexistent'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + existing: 'value', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-nonexistent'], + }), + ); 
assert.strictEqual(response['x-amz-meta-nonexistent'], undefined); }); it('should return user metadata along with standard attributes', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - custom: 'custom-value', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['ETag', 'x-amz-meta-custom', 'ObjectSize'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + custom: 'custom-value', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['ETag', 'x-amz-meta-custom', 'ObjectSize'], + }), + ); assert.strictEqual(response.ETag, expectedMD5); assert.strictEqual(response.ObjectSize, body.length); @@ -428,22 +495,26 @@ describe('objectGetAttributes with user metadata', () => { }); it('should return all metadata once wildcard is provided', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - key1: 'value1', - key2: 'value2', - key3: 'value3', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-key1'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + key1: 'value1', + key2: 'value2', + key3: 'value3', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-key1'], + }), + ); assert.strictEqual(response['x-amz-meta-key1'], 'value1'); assert.strictEqual(response['x-amz-meta-key2'], 'value2'); @@ -451,42 +522,50 @@ describe('objectGetAttributes with user metadata', () => { }); it('should handle duplicate wildcard requests without duplicating results', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - key1: 'value1', - key2: 'value2', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-*'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + key1: 'value1', + key2: 'value2', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-*', 'x-amz-meta-*'], + }), + ); assert.strictEqual(response['x-amz-meta-key1'], 'value1'); assert.strictEqual(response['x-amz-meta-key2'], 'value2'); }); it('should handle duplicate specific metadata requests without duplicating results', async () => { - await s3.send(new PutObjectCommand({ - Bucket: bucket, - Key: key, - Body: body, - Metadata: { - key1: 'value1', - key2: 'value2', - }, - })); - - const response = await s3.send(new GetObjectAttributesExtendedCommand({ - Bucket: bucket, - Key: key, - ObjectAttributes: ['x-amz-meta-key1', 'x-amz-meta-key1'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + key1: 'value1', + key2: 'value2', + }, + }), + ); + + const response = await s3.send( + new GetObjectAttributesExtendedCommand({ + Bucket: bucket, + Key: key, + ObjectAttributes: ['x-amz-meta-key1', 'x-amz-meta-key1'], + }), + ); 
assert.strictEqual(response['x-amz-meta-key1'], 'value1'); assert.strictEqual(response['x-amz-meta-key2'], undefined); @@ -523,18 +602,22 @@ describe('objectGetAttributes with checksum', () => { const sdkAlgorithm = name.toUpperCase(); it(`should return ${xmlTag} when object has ${name} checksum`, async () => { - await s3.send(new PutObjectCommand({ - Bucket: checksumBucket, - Key: checksumKey, - Body: checksumBody, - ChecksumAlgorithm: sdkAlgorithm, - })); - - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: checksumBucket, - Key: checksumKey, - ObjectAttributes: ['Checksum'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: checksumBucket, + Key: checksumKey, + Body: checksumBody, + ChecksumAlgorithm: sdkAlgorithm, + }), + ); + + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: checksumBucket, + Key: checksumKey, + ObjectAttributes: ['Checksum'], + }), + ); assert(data.Checksum, 'Checksum should be present'); assert.strictEqual(data.Checksum[xmlTag], expectedDigests[name]); @@ -542,18 +625,22 @@ describe('objectGetAttributes with checksum', () => { }); it(`should return ${xmlTag} along with other attributes`, async () => { - await s3.send(new PutObjectCommand({ - Bucket: checksumBucket, - Key: checksumKey, - Body: checksumBody, - ChecksumAlgorithm: sdkAlgorithm, - })); - - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: checksumBucket, - Key: checksumKey, - ObjectAttributes: ['ETag', 'Checksum', 'ObjectSize'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: checksumBucket, + Key: checksumKey, + Body: checksumBody, + ChecksumAlgorithm: sdkAlgorithm, + }), + ); + + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: checksumBucket, + Key: checksumKey, + ObjectAttributes: ['ETag', 'Checksum', 'ObjectSize'], + }), + ); assert(data.ETag, 'ETag should be present'); assert(data.ObjectSize, 'ObjectSize should be present'); @@ -564,18 +651,22 @@ describe('objectGetAttributes with checksum', () => { }); it('should not return Checksum when not requested', async () => { - await s3.send(new PutObjectCommand({ - Bucket: checksumBucket, - Key: checksumKey, - Body: checksumBody, - ChecksumAlgorithm: 'CRC64NVME', - })); - - const data = await s3.send(new GetObjectAttributesCommand({ - Bucket: checksumBucket, - Key: checksumKey, - ObjectAttributes: ['ETag', 'ObjectSize'], - })); + await s3.send( + new PutObjectCommand({ + Bucket: checksumBucket, + Key: checksumKey, + Body: checksumBody, + ChecksumAlgorithm: 'CRC64NVME', + }), + ); + + const data = await s3.send( + new GetObjectAttributesCommand({ + Bucket: checksumBucket, + Key: checksumKey, + ObjectAttributes: ['ETag', 'ObjectSize'], + }), + ); assert(data.ETag, 'ETag should be present'); assert(data.ObjectSize, 'ObjectSize should be present'); diff --git a/tests/functional/raw-node/test/xAmzChecksum.js b/tests/functional/raw-node/test/xAmzChecksum.js index 198fad2806..956a529663 100644 --- a/tests/functional/raw-node/test/xAmzChecksum.js +++ b/tests/functional/raw-node/test/xAmzChecksum.js @@ -19,8 +19,9 @@ describe('Test x-amz-checksums', () => { { name: 'CRC64NVME', objDataDigest: 'jC+ERbTL/Dw=', validWrong: 'AAAAAAAAAAA=' }, { name: 'SHA1', objDataDigest: 'hvfkN/qlp/zhXR3cuerq6jd2Z7g=', validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAA=' }, { - name: 'SHA256', objDataDigest: 'ypeBEsobvcr6wjGzmiPcTaeG7/gUfE5yuYB3ha/uSLs=', - validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=' + name: 'SHA256', + objDataDigest: 
'ypeBEsobvcr6wjGzmiPcTaeG7/gUfE5yuYB3ha/uSLs=', + validWrong: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=', }, ]; // CompleteMultipartUpload intentionally not listed here: its @@ -151,7 +152,7 @@ describe('Test x-amz-checksums', () => { ...headers, }, }, - authCredentials + authCredentials, ), res => { let data = ''; @@ -165,14 +166,13 @@ describe('Test x-amz-checksums', () => { } done(); }); - } + }, ); req.on('error', err => { assert.ifError(err); }); - req.once('drain', () => { req.end(); }); @@ -186,13 +186,14 @@ describe('Test x-amz-checksums', () => { for (const algo of algos) { for (const method of methods) { itSkipIfAWS( - `${method.Name} should respond BadDigest ` + - `with invalid x-amz-checksum-${algo.name.toLowerCase()}`, done => { + `${method.Name} should respond BadDigest ` + `with invalid x-amz-checksum-${algo.name.toLowerCase()}`, + done => { const headers = { [`x-amz-checksum-${algo.name.toLowerCase()}`]: algo.validWrong, }; doTest(headers, method, 400, ['BadDigest'], done); - }); + }, + ); } } @@ -223,20 +224,19 @@ describe('Test x-amz-checksums', () => { ); }); - itSkipIfAWS( - 'should respond InvalidRequest if the value of x-amz-sdk-checksum-algorithm is invalid', done => { - const headers = { - 'x-amz-sdk-checksum-algorithm': 'BAD', - [`x-amz-checksum-${algos[0].name.toLowerCase()}`]: algos[0].objDataDigest, - }; - doTest( - headers, - methods[0], - 400, - ['InvalidRequest', 'Value for x-amz-sdk-checksum-algorithm header is invalid.'], - done, - ); - }); + itSkipIfAWS('should respond InvalidRequest if the value of x-amz-sdk-checksum-algorithm is invalid', done => { + const headers = { + 'x-amz-sdk-checksum-algorithm': 'BAD', + [`x-amz-checksum-${algos[0].name.toLowerCase()}`]: algos[0].objDataDigest, + }; + doTest( + headers, + methods[0], + 400, + ['InvalidRequest', 'Value for x-amz-sdk-checksum-algorithm header is invalid.'], + done, + ); + }); itSkipIfAWS('should respond InvalidRequest with if invalid x-amz-checksum- value', done => { const headers = { @@ -252,7 +252,8 @@ describe('Test x-amz-checksums', () => { }); itSkipIfAWS( - 'should respond InvalidRequest with if missing x-amz-checksum- for x-amz-sdk-checksum-algorithm ', done => { + 'should respond InvalidRequest with if missing x-amz-checksum- for x-amz-sdk-checksum-algorithm ', + done => { const headers = { 'x-amz-sdk-checksum-algorithm': 'SHA1', }; @@ -260,17 +261,22 @@ describe('Test x-amz-checksums', () => { headers, methods[0], 400, - ['InvalidRequest', 'x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* ' + - 'or x-amz-trailer headers were found.'], + [ + 'InvalidRequest', + 'x-amz-sdk-checksum-algorithm specified, but no corresponding x-amz-checksum-* ' + + 'or x-amz-trailer headers were found.', + ], done, ); - }); + }, + ); for (const algo of algos) { for (const method of methods) { itSkipIfAWS( `${method.Name} should not respond BadDigest if ` + - `x-amz-checksum-${algo.name.toLowerCase()} is correct`, done => { + `x-amz-checksum-${algo.name.toLowerCase()} is correct`, + done => { const url = `http://localhost:8000/${bucket}/${method.Key}?${method.Query}`; const req = new HttpRequestAuthV4( url, @@ -284,7 +290,7 @@ describe('Test x-amz-checksums', () => { [`x-amz-checksum-${algo.name.toLowerCase()}`]: algo.objDataDigest, }, }, - authCredentials + authCredentials, ), res => { let data = ''; @@ -299,14 +305,13 @@ describe('Test x-amz-checksums', () => { assert(!data.includes('did not match the calculated checksum')); done(); }); - } + }, ); req.on('error', err => { 
assert.ifError(err); }); - req.once('drain', () => { req.end(); }); @@ -315,7 +320,8 @@ describe('Test x-amz-checksums', () => { assert.ifError(err); req.end(); }); - }); + }, + ); } } }); diff --git a/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js b/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js index 319f5b69af..3640238dc5 100644 --- a/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js +++ b/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js @@ -29,8 +29,7 @@ describe('computeCompositeMPUChecksum', () => { const label = algo.toUpperCase(); it(`should match ${label}(decode(c1) || ... || decode(cN)) + "-N"`, () => { const partChecksums = parts.map(p => algorithms[algo].digest(p)); - const expectedConcat = Buffer.concat( - partChecksums.map(c => Buffer.from(c, 'base64'))); + const expectedConcat = Buffer.concat(partChecksums.map(c => Buffer.from(c, 'base64'))); const expected = `${algorithms[algo].digest(expectedConcat)}-3`; const got = computeCompositeMPUChecksum(algo, partChecksums); @@ -102,9 +101,12 @@ describe('computeFullObjectMPUChecksum', () => { it(`should return the part CRC unchanged for a single-part ${label} MPU`, async () => { const buf = crypto.randomBytes(15); const partCrc = await algorithms[algo].digest(buf); - const got = computeFullObjectMPUChecksum(algo, [{ - value: partCrc, length: buf.length, - }]); + const got = computeFullObjectMPUChecksum(algo, [ + { + value: partCrc, + length: buf.length, + }, + ]); assert.strictEqual(got.error, null); assert.strictEqual(got.checksum, partCrc); }); @@ -121,8 +123,7 @@ describe('computeFullObjectMPUChecksum', () => { }); it('should return an error object on unsupported algorithm', () => { - const got = computeFullObjectMPUChecksum( - 'sha256', [{ value: 'AAAA', length: 4 }]); + const got = computeFullObjectMPUChecksum('sha256', [{ value: 'AAAA', length: 4 }]); assert.strictEqual(got.checksum, null); assert(got.error); assert.strictEqual(got.error.code, 'MPUAlgoNotSupported'); diff --git a/tests/unit/api/apiUtils/integrity/crcCombine.js b/tests/unit/api/apiUtils/integrity/crcCombine.js index fd5c6e367e..48f8239a21 100644 --- a/tests/unit/api/apiUtils/integrity/crcCombine.js +++ b/tests/unit/api/apiUtils/integrity/crcCombine.js @@ -7,9 +7,9 @@ const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/valida // Reversed polynomial + bit width for each algorithm we use the combine // routine with. Same values that validateChecksums.js feeds in. 
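// Editor's note: the computeCompositeMPUChecksum tests above all assert one
// rule — base64-decode each part digest, concatenate the raw bytes, digest
// the concatenation with the same algorithm, and append "-N". A minimal
// sketch of that rule for the SHA family (`compositeDigest` is a
// hypothetical name; `crypto` is the node module this file already requires
// for randomBytes):
function compositeDigest(hashName, partChecksumsBase64) {
    const concat = Buffer.concat(partChecksumsBase64.map(c => Buffer.from(c, 'base64')));
    const digest = crypto.createHash(hashName).update(concat).digest('base64');
    return `${digest}-${partChecksumsBase64.length}`;
}
// e.g. compositeDigest('sha256', [d1, d2, d3]) reproduces the "-3" value the
// loop above expects; the CRC algorithms go through their own digest helpers.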
const SPECS = [ - { algo: 'crc32', polyReversed: 0xEDB88320n, dim: 32 }, - { algo: 'crc32c', polyReversed: 0x82F63B78n, dim: 32 }, - { algo: 'crc64nvme', polyReversed: 0x9A6C9329AC4BC9B5n, dim: 64 }, + { algo: 'crc32', polyReversed: 0xedb88320n, dim: 32 }, + { algo: 'crc32c', polyReversed: 0x82f63b78n, dim: 32 }, + { algo: 'crc64nvme', polyReversed: 0x9a6c9329ac4bc9b5n, dim: 64 }, ]; function base64ToBigInt(b64) { @@ -60,12 +60,7 @@ describe('crcCombine', () => { it('should mask the result to `dim` bits', async () => { const a = crypto.randomBytes(256); const b = crypto.randomBytes(256); - const got = crcCombine( - await crcOf(algo, a), - await crcOf(algo, b), - BigInt(b.length), - polyReversed, - dim); + const got = crcCombine(await crcOf(algo, a), await crcOf(algo, b), BigInt(b.length), polyReversed, dim); assert.strictEqual(got & mask, got); assert.strictEqual(got >> BigInt(dim), 0n); }); @@ -84,8 +79,7 @@ describe('crcCombine', () => { // Right-fold: combine(A, combine(B, C), len(B)+len(C)) const bc = crcCombine(crcB, crcC, BigInt(c.length), polyReversed, dim); - const right = crcCombine( - crcA, bc, BigInt(b.length + c.length), polyReversed, dim); + const right = crcCombine(crcA, bc, BigInt(b.length + c.length), polyReversed, dim); assert.strictEqual(left, right); const expected = await crcOf(algo, Buffer.concat([a, b, c])); @@ -95,12 +89,7 @@ describe('crcCombine', () => { it('should handle single-byte chunks', async () => { const a = crypto.randomBytes(1); const b = crypto.randomBytes(1); - const got = crcCombine( - await crcOf(algo, a), - await crcOf(algo, b), - 1n, - polyReversed, - dim); + const got = crcCombine(await crcOf(algo, a), await crcOf(algo, b), 1n, polyReversed, dim); const expected = await crcOf(algo, Buffer.concat([a, b])); assert.strictEqual(got, expected); }); @@ -113,12 +102,7 @@ describe('crcCombine', () => { const crcA = await crcOf(algo, a); for (const size of sizes) { const b = crypto.randomBytes(size); - const got = crcCombine( - crcA, - await crcOf(algo, b), - BigInt(size), - polyReversed, - dim); + const got = crcCombine(crcA, await crcOf(algo, b), BigInt(size), polyReversed, dim); const expected = await crcOf(algo, Buffer.concat([a, b])); assert.strictEqual(got, expected, `failed at size=${size}`); } @@ -135,10 +119,7 @@ describe('combineCrcs', () => { it('should return the part CRC unchanged for a single-part input', async () => { const buf = crypto.randomBytes(13); const partCrc = await algorithms[algo].digest(buf); - const got = combineCrcs( - [{ value: partCrc, length: buf.length }], - polyReversed, - dim); + const got = combineCrcs([{ value: partCrc, length: buf.length }], polyReversed, dim); assert.strictEqual(got, partCrc); }); @@ -175,4 +156,4 @@ describe('combineCrcs', () => { }); }); }); -}); \ No newline at end of file +}); diff --git a/tests/unit/api/apiUtils/object/objectAttributes.js b/tests/unit/api/apiUtils/object/objectAttributes.js index 2586f71a7f..a8ce74e197 100644 --- a/tests/unit/api/apiUtils/object/objectAttributes.js +++ b/tests/unit/api/apiUtils/object/objectAttributes.js @@ -1,7 +1,7 @@ const assert = require('assert'); const { parseAttributesHeaders, - buildAttributesXml + buildAttributesXml, } = require('../../../../../lib/api/apiUtils/object/objectAttributes'); const { algorithms } = require('../../../../../lib/api/apiUtils/integrity/validateChecksums'); const { DummyRequestLogger } = require('../../../helpers'); @@ -51,16 +51,15 @@ describe('parseAttributesHeaders', () => { }); }); - describe('buildXmlAttributes', () => { 
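// Editor's note (refers to the crcCombine tests just above): the invariant
// they all reduce to is crc(A || B) === crcCombine(crc(A), crc(B), len(B)),
// with the reversed-polynomial/width pair taken from SPECS. A self-checking
// sketch under the assumption that it runs alongside that file's local
// helpers (`crcOf`, `crcCombine`, node's `crypto`, `assert`):
async function checkCombineInvariant(algo, polyReversed, dim) {
    const a = crypto.randomBytes(64);
    const b = crypto.randomBytes(64);
    // Combine the two independent CRCs instead of re-scanning A || B.
    const combined = crcCombine(
        await crcOf(algo, a),
        await crcOf(algo, b),
        BigInt(b.length),
        polyReversed,
        dim,
    );
    const direct = await crcOf(algo, Buffer.concat([a, b]));
    assert.strictEqual(combined, direct);
}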
const objectMD = { 'content-md5': '16e37e19194511993498801d4692795f', 'content-length': 5000, 'x-amz-storage-class': 'STANDARD', - 'restoreStatus': { + restoreStatus: { inProgress: false, - expiryDate: 'Fri, 20 Feb 2026 12:00:00 GMT' - } + expiryDate: 'Fri, 20 Feb 2026 12:00:00 GMT', + }, }; const userMetadata = { @@ -164,9 +163,11 @@ describe('buildXmlAttributes', () => { const expectedDigests = {}; before(async () => { - await Promise.all(Object.keys(algorithms).map(async name => { - expectedDigests[name] = await algorithms[name].digest(testData); - })); + await Promise.all( + Object.keys(algorithms).map(async name => { + expectedDigests[name] = await algorithms[name].digest(testData); + }), + ); }); it('should not generate Checksum XML when checksumAlgorithm is unknown', () => { diff --git a/tests/unit/api/completeMultipartUpload.js b/tests/unit/api/completeMultipartUpload.js index 0b06395944..08fdf2eb3b 100644 --- a/tests/unit/api/completeMultipartUpload.js +++ b/tests/unit/api/completeMultipartUpload.js @@ -4,19 +4,15 @@ const async = require('async'); const { parseString } = require('xml2js'); const { bucketPut } = require('../../../lib/api/bucketPut'); -const initiateMultipartUpload = - require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const objectPutPart = require('../../../lib/api/objectPutPart'); -const completeMultipartUpload = - require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); const metadata = require('../../../lib/metadata/wrapper'); +const { validatePerPartChecksums, computeFinalChecksum, validateExpectedFinalChecksum } = completeMultipartUpload; const { - validatePerPartChecksums, - computeFinalChecksum, - validateExpectedFinalChecksum, -} = completeMultipartUpload; -const { validateMethodChecksumNoChunking, algorithms } = - require('../../../lib/api/apiUtils/integrity/validateChecksums'); + validateMethodChecksumNoChunking, + algorithms, +} = require('../../../lib/api/apiUtils/integrity/validateChecksums'); const DummyRequest = require('../DummyRequest'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); @@ -37,25 +33,24 @@ const TAG_BY_ALGO = { // digest lengths so the test data looks realistic, though the validator // itself doesn't enforce length. const SAMPLE_DIGESTS = { - crc32: ['AQIDBA==', 'BQYHCA=='], - crc32c: ['CQoLDA==', 'DQ4PEA=='], + crc32: ['AQIDBA==', 'BQYHCA=='], + crc32c: ['CQoLDA==', 'DQ4PEA=='], crc64nvme: ['AQIDBAUGBwg=', 'CQoLDA0ODxA='], - sha1: ['YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=', 'YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmI='], - sha256: ['YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=', - 'YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmI='], + sha1: ['YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=', 'YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmI='], + sha256: ['YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=', 'YmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmJiYmI='], }; // Every AWS-valid (algorithm, type) combination, plus the implicit default. // See validateChecksums.getChecksumDataFromMPUHeaders for the source of truth. 
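// Editor's note: a compact restatement of the rule MATRIX below enumerates
// (a sketch, not the production validator): CRC32/CRC32C accept both
// checksum types, CRC64NVME is FULL_OBJECT-only, and SHA1/SHA256 are
// COMPOSITE-only.
function isValidMpuChecksumCombo(algorithm, type) {
    const allowedTypes = {
        crc32: ['COMPOSITE', 'FULL_OBJECT'],
        crc32c: ['COMPOSITE', 'FULL_OBJECT'],
        crc64nvme: ['FULL_OBJECT'],
        sha1: ['COMPOSITE'],
        sha256: ['COMPOSITE'],
    };
    return (allowedTypes[algorithm] || []).includes(type);
}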
const MATRIX = [ - { algorithm: 'crc32', type: 'COMPOSITE', isDefault: false }, - { algorithm: 'crc32', type: 'FULL_OBJECT', isDefault: false }, - { algorithm: 'crc32c', type: 'COMPOSITE', isDefault: false }, - { algorithm: 'crc32c', type: 'FULL_OBJECT', isDefault: false }, + { algorithm: 'crc32', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'crc32', type: 'FULL_OBJECT', isDefault: false }, + { algorithm: 'crc32c', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'crc32c', type: 'FULL_OBJECT', isDefault: false }, { algorithm: 'crc64nvme', type: 'FULL_OBJECT', isDefault: false }, { algorithm: 'crc64nvme', type: 'FULL_OBJECT', isDefault: true }, - { algorithm: 'sha1', type: 'COMPOSITE', isDefault: false }, - { algorithm: 'sha256', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'sha1', type: 'COMPOSITE', isDefault: false }, + { algorithm: 'sha256', type: 'COMPOSITE', isDefault: false }, ]; function makeStoredPart(partNumber, checksum) { @@ -99,21 +94,14 @@ describe('validatePerPartChecksums', () => { const [d1, d2] = SAMPLE_DIGESTS[algorithm]; const mpuChecksum = { algorithm, type, isDefault }; - const stored = [ - makeStoredPart(1, { algorithm, value: d1 }), - makeStoredPart(2, { algorithm, value: d2 }), - ]; + const stored = [makeStoredPart(1, { algorithm, value: d1 }), makeStoredPart(2, { algorithm, value: d2 })]; describe(label, () => { it('should accept when every part includes the matching checksum', () => { const jsonList = { - Part: [ - makeJsonPart(1, 'etag1', { [tag]: d1 }), - makeJsonPart(2, 'etag2', { [tag]: d2 }), - ], + Part: [makeJsonPart(1, 'etag1', { [tag]: d1 }), makeJsonPart(2, 'etag2', { [tag]: d2 })], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); assert.strictEqual(err, null); }); @@ -127,26 +115,21 @@ describe('validatePerPartChecksums', () => { makeJsonPart(2, 'etag2', { [tag]: d2 }), ], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); assert(err); assert.strictEqual(err.is.BadDigest, true); // AWS-style message: "The {algo} you specified for part {N} did not match what we received." assert.strictEqual( err.description, - `The ${wrongAlgo} you specified for part 1 did ` + - 'not match what we received.'); + `The ${wrongAlgo} you specified for part 1 did ` + 'not match what we received.', + ); }); it('should return InvalidPart when the matching field has the wrong value', () => { const jsonList = { - Part: [ - makeJsonPart(1, 'etag1', { [tag]: d1 }), - makeJsonPart(2, 'etag2', { [tag]: d1 }), - ], + Part: [makeJsonPart(1, 'etag1', { [tag]: d1 }), makeJsonPart(2, 'etag2', { [tag]: d1 })], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); assert(err); assert.strictEqual(err.is.InvalidPart, true); // AWS reuses its generic InvalidPart message — no algorithm @@ -154,9 +137,10 @@ describe('validatePerPartChecksums', () => { assert.strictEqual( err.description, 'One or more of the specified parts could not be ' + - 'found. The part may not have been uploaded, or ' + - 'the specified entity tag may not match the ' + - 'part\'s entity tag.'); + 'found. 
The part may not have been uploaded, or ' + + 'the specified entity tag may not match the ' + + "part's entity tag.", + ); }); const requiresPerPart = type === 'COMPOSITE' && !isDefault; @@ -165,13 +149,9 @@ describe('validatePerPartChecksums', () => { : 'should accept a parts list missing per-part checksums'; it(missingLabel, () => { const jsonList = { - Part: [ - makeJsonPart(1, 'etag1', { [tag]: d1 }), - makeJsonPart(2, 'etag2'), - ], + Part: [makeJsonPart(1, 'etag1', { [tag]: d1 }), makeJsonPart(2, 'etag2')], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); if (requiresPerPart) { assert(err); assert.strictEqual(err.is.InvalidRequest, true); @@ -192,8 +172,7 @@ describe('validatePerPartChecksums', () => { type: 'COMPOSITE', isDefault: false, }; - const err = validatePerPartChecksums( - { Part: [] }, [], SPLITTER, mpuChecksum); + const err = validatePerPartChecksums({ Part: [] }, [], SPLITTER, mpuChecksum); assert.strictEqual(err, null); }); @@ -203,8 +182,7 @@ describe('validatePerPartChecksums', () => { type: 'FULL_OBJECT', isDefault: true, }; - const err = validatePerPartChecksums( - {}, [], SPLITTER, mpuChecksum); + const err = validatePerPartChecksums({}, [], SPLITTER, mpuChecksum); assert.strictEqual(err, null); }); @@ -220,13 +198,9 @@ describe('validatePerPartChecksums', () => { makeStoredPart(2, { algorithm: 'crc32', value: d2 }), ]; const jsonList = { - Part: [ - makeJsonPart(1, 'etag1', { ChecksumCRC32: d1 }), - makeJsonPart(2, 'etag2'), - ], + Part: [makeJsonPart(1, 'etag1', { ChecksumCRC32: d1 }), makeJsonPart(2, 'etag2')], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); assert.strictEqual(err, null); }); @@ -240,13 +214,9 @@ describe('validatePerPartChecksums', () => { }; const stored = [makeStoredPart(1, null), makeStoredPart(2, null)]; const jsonList = { - Part: [ - makeJsonPart(1, 'etag1'), - makeJsonPart(2, 'etag2'), - ], + Part: [makeJsonPart(1, 'etag1'), makeJsonPart(2, 'etag2')], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); assert.strictEqual(err, null); }); @@ -264,8 +234,7 @@ describe('validatePerPartChecksums', () => { }), ], }; - const err = validatePerPartChecksums( - jsonList, stored, SPLITTER, mpuChecksum); + const err = validatePerPartChecksums(jsonList, stored, SPLITTER, mpuChecksum); assert(err); assert.strictEqual(err.is.InvalidPart, true); }); @@ -284,7 +253,8 @@ describe('CompleteMultipartUpload x-amz-checksum-type header', () => { namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, url: '/', - post: '' + 'scality-internal-mem' + '', @@ -292,45 +262,51 @@ describe('CompleteMultipartUpload x-amz-checksum-type header', () => { }; function setupMpu(initiateHeaders, cb) { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => { - const initiateRequest = { - bucketName, - namespace, - objectKey, - headers: { - host: `${bucketName}.s3.amazonaws.com`, - ...initiateHeaders, - }, - url: `/${objectKey}?uploads`, - actionImplicitDenies: false, - }; - initiateMultipartUpload(authInfo, initiateRequest, log, next); - }, - (xml, corsHeaders, next) => parseString(xml, next), - (json, next) => { - const uploadId = 
json.InitiateMultipartUploadResult.UploadId[0]; - const partBody = Buffer.from('I am a part\n', 'utf8'); - const partHash = crypto.createHash('md5').update(partBody).digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { partNumber: '1', uploadId }, - partHash, - actionImplicitDenies: false, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, - err => next(err, uploadId, partHash)); - }, - ], cb); + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => { + const initiateRequest = { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + ...initiateHeaders, + }, + url: `/${objectKey}?uploads`, + actionImplicitDenies: false, + }; + initiateMultipartUpload(authInfo, initiateRequest, log, next); + }, + (xml, corsHeaders, next) => parseString(xml, next), + (json, next) => { + const uploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partBody = Buffer.from('I am a part\n', 'utf8'); + const partHash = crypto.createHash('md5').update(partBody).digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { partNumber: '1', uploadId }, + partHash, + actionImplicitDenies: false, + }, + partBody, + ); + objectPutPart(authInfo, partRequest, undefined, log, err => next(err, uploadId, partHash)); + }, + ], + cb, + ); } function makeCompleteRequest(uploadId, partHash, extraHeaders) { - const completeBody = '' + + const completeBody = + '' + '' + '1' + `"${partHash}"` + @@ -403,8 +379,9 @@ describe('CompleteMultipartUpload x-amz-checksum-type header', () => { assert.strictEqual( completeErr.description, 'The upload was created using the FULL_OBJECT checksum ' + - 'mode. The complete request must use the same checksum ' + - 'mode.'); + 'mode. The complete request must use the same checksum ' + + 'mode.', + ); done(); }); }); @@ -423,9 +400,7 @@ describe('CompleteMultipartUpload x-amz-checksum-type header', () => { completeMultipartUpload(authInfo, req, log, completeErr => { assert(completeErr); assert.strictEqual(completeErr.is.InvalidRequest, true); - assert.strictEqual( - completeErr.description, - 'Value for x-amz-checksum-type header is invalid.'); + assert.strictEqual(completeErr.description, 'Value for x-amz-checksum-type header is invalid.'); done(); }); }); @@ -452,38 +427,43 @@ describe('CompleteMultipartUpload x-amz-checksum-type header', () => { describe('CompleteMultipartUpload body-checksum bypass', () => { const log = new DummyRequestLogger(); - it('validateMethodChecksumNoChunking returns null for completeMultipartUpload ' + - 'even when x-amz-checksum-sha256 does not match the body digest', async () => { - const body = Buffer.from( - '1' + - '"abc"'); - // A syntactically valid SHA256 base64 digest that is NOT the digest of `body` - // (it's the digest of the empty string). On CompleteMPU this header carries - // the expected final-object checksum, not a body checksum, so pre-validation - // must skip it. 
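// Editor's note: a sketch of the gate exercised here and in the next test,
// assuming `checksumedMethods` (named in the test below) is a plain
// membership list — the exact structure in validateChecksums.js is not
// shown in this patch:
const hypotheticalChecksumedMethods = new Set(['multiObjectDelete']);
function skipsBodyChecksum(apiMethod) {
    // completeMultipartUpload is not in the list, so its x-amz-checksum-*
    // header is validated later against the computed final-object checksum
    // rather than against a digest of the request body.
    return !hypotheticalChecksumedMethods.has(apiMethod);
}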
- const finalObjectChecksum = - '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='; - const request = { - apiMethod: 'completeMultipartUpload', - headers: { 'x-amz-checksum-sha256': finalObjectChecksum }, - }; - const err = await validateMethodChecksumNoChunking(request, body, log); - assert.strictEqual(err, null); - }); - - it('validateMethodChecksumNoChunking still rejects body mismatch for methods ' + - 'that remain in checksumedMethods (sanity check)', async () => { - const body = Buffer.from('{"Objects":[]}'); - const finalObjectChecksum = - '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='; - const request = { - apiMethod: 'multiObjectDelete', - headers: { 'x-amz-checksum-sha256': finalObjectChecksum }, - }; - const err = await validateMethodChecksumNoChunking(request, body, log); - assert(err, 'expected an error for body checksum mismatch'); - assert.strictEqual(err.is.BadDigest, true); - }); + it( + 'validateMethodChecksumNoChunking returns null for completeMultipartUpload ' + + 'even when x-amz-checksum-sha256 does not match the body digest', + async () => { + const body = Buffer.from( + '1' + + '"abc"', + ); + // A syntactically valid SHA256 base64 digest that is NOT the digest of `body` + // (it's the digest of the empty string). On CompleteMPU this header carries + // the expected final-object checksum, not a body checksum, so pre-validation + // must skip it. + const finalObjectChecksum = '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='; + const request = { + apiMethod: 'completeMultipartUpload', + headers: { 'x-amz-checksum-sha256': finalObjectChecksum }, + }; + const err = await validateMethodChecksumNoChunking(request, body, log); + assert.strictEqual(err, null); + }, + ); + + it( + 'validateMethodChecksumNoChunking still rejects body mismatch for methods ' + + 'that remain in checksumedMethods (sanity check)', + async () => { + const body = Buffer.from('{"Objects":[]}'); + const finalObjectChecksum = '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='; + const request = { + apiMethod: 'multiObjectDelete', + headers: { 'x-amz-checksum-sha256': finalObjectChecksum }, + }; + const err = await validateMethodChecksumNoChunking(request, body, log); + assert(err, 'expected an error for body checksum mismatch'); + assert.strictEqual(err.is.BadDigest, true); + }, + ); }); describe('computeFinalChecksum', () => { @@ -500,44 +480,43 @@ describe('computeFinalChecksum', () => { } it('should return null when MPU has no checksumAlgorithm', () => { - const stored = [ - makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), - ]; - const got = computeFinalChecksum( - stored, partListFromStored(stored), {}, SPLITTER, uploadId, log); + const stored = [makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] })]; + const got = computeFinalChecksum(stored, partListFromStored(stored), {}, SPLITTER, uploadId, log); assert.strictEqual(got, null); }); it('should return null when MPU has no checksumType', () => { - const stored = [ - makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), - ]; + const stored = [makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] })]; const got = computeFinalChecksum( - stored, partListFromStored(stored), - { checksumAlgorithm: 'sha256' }, SPLITTER, uploadId, log); + stored, + partListFromStored(stored), + { checksumAlgorithm: 'sha256' }, + SPLITTER, + uploadId, + log, + ); assert.strictEqual(got, null); }); it('should return COMPOSITE checksum with -N suffix for SHA256 MPU', () => { - const [d1, d2, 
d3] = [ - SAMPLE_DIGESTS.sha256[0], - SAMPLE_DIGESTS.sha256[1], - SAMPLE_DIGESTS.sha256[0], - ]; + const [d1, d2, d3] = [SAMPLE_DIGESTS.sha256[0], SAMPLE_DIGESTS.sha256[1], SAMPLE_DIGESTS.sha256[0]]; const stored = [ makeStoredPart(1, { algorithm: 'sha256', value: d1 }), makeStoredPart(2, { algorithm: 'sha256', value: d2 }), makeStoredPart(3, { algorithm: 'sha256', value: d3 }), ]; const got = computeFinalChecksum( - stored, partListFromStored(stored), + stored, + partListFromStored(stored), { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert(got); assert.strictEqual(got.algorithm, 'sha256'); assert.strictEqual(got.type, 'COMPOSITE'); - assert(got.value.endsWith('-3'), - `expected -N suffix, got ${got.value}`); + assert(got.value.endsWith('-3'), `expected -N suffix, got ${got.value}`); // computeCompositeMPUChecksum's deterministic output for these // exact placeholder digests: const expected = crypto @@ -555,9 +534,13 @@ describe('computeFinalChecksum', () => { makeStoredPart(2, { algorithm: algo, value: d2 }), ]; const got = computeFinalChecksum( - stored, partListFromStored(stored), + stored, + partListFromStored(stored), { checksumAlgorithm: algo, checksumType: 'COMPOSITE' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert(got); assert.strictEqual(got.algorithm, algo); assert.strictEqual(got.type, 'COMPOSITE'); @@ -573,24 +556,39 @@ describe('computeFinalChecksum', () => { const dA = await algorithms.crc64nvme.digest(a); const dB = await algorithms.crc64nvme.digest(b); const stored = [ - { key: `${UPLOAD_ID}${SPLITTER}1`, - value: { ETag: 'e', Size: a.length, - ChecksumAlgorithm: 'crc64nvme', ChecksumValue: dA, - partLocations: [] } }, - { key: `${UPLOAD_ID}${SPLITTER}2`, - value: { ETag: 'e', Size: b.length, - ChecksumAlgorithm: 'crc64nvme', ChecksumValue: dB, - partLocations: [] } }, + { + key: `${UPLOAD_ID}${SPLITTER}1`, + value: { + ETag: 'e', + Size: a.length, + ChecksumAlgorithm: 'crc64nvme', + ChecksumValue: dA, + partLocations: [], + }, + }, + { + key: `${UPLOAD_ID}${SPLITTER}2`, + value: { + ETag: 'e', + Size: b.length, + ChecksumAlgorithm: 'crc64nvme', + ChecksumValue: dB, + partLocations: [], + }, + }, ]; const got = computeFinalChecksum( - stored, partListFromStored(stored), + stored, + partListFromStored(stored), { checksumAlgorithm: 'crc64nvme', checksumType: 'FULL_OBJECT' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert(got); assert.strictEqual(got.algorithm, 'crc64nvme'); assert.strictEqual(got.type, 'FULL_OBJECT'); - assert(!got.value.includes('-'), - `FULL_OBJECT should have no -N suffix, got ${got.value}`); + assert(!got.value.includes('-'), `FULL_OBJECT should have no -N suffix, got ${got.value}`); const expected = await algorithms.crc64nvme.digest(Buffer.concat([a, b])); assert.strictEqual(got.value, expected); }); @@ -602,41 +600,44 @@ describe('computeFinalChecksum', () => { makeStoredPart(3, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[1] }), ]; const got = computeFinalChecksum( - stored, partListFromStored(stored), + stored, + partListFromStored(stored), { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert.strictEqual(got, null); }); it('should return null when checksumType is unknown', () => { - const stored = [ - makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }), - ]; + const stored = [makeStoredPart(1, { algorithm: 'sha256', 
value: SAMPLE_DIGESTS.sha256[0] })]; const got = computeFinalChecksum( - stored, partListFromStored(stored), + stored, + partListFromStored(stored), { checksumAlgorithm: 'sha256', checksumType: 'WEIRD' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert.strictEqual(got, null); }); - it('should return null when underlying compute reports an error ' + - '(crc64nvme COMPOSITE is not allowed)', () => { - const stored = [ - makeStoredPart(1, { algorithm: 'crc64nvme', - value: SAMPLE_DIGESTS.crc64nvme[0] }), - ]; + it('should return null when underlying compute reports an error ' + '(crc64nvme COMPOSITE is not allowed)', () => { + const stored = [makeStoredPart(1, { algorithm: 'crc64nvme', value: SAMPLE_DIGESTS.crc64nvme[0] })]; const got = computeFinalChecksum( - stored, partListFromStored(stored), + stored, + partListFromStored(stored), { checksumAlgorithm: 'crc64nvme', checksumType: 'COMPOSITE' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert.strictEqual(got, null); }); it('should compute over filteredPartList (subset), not all storedParts', () => { - const [d1, d2, d3] = [ - SAMPLE_DIGESTS.sha256[0], SAMPLE_DIGESTS.sha256[1], - SAMPLE_DIGESTS.sha256[0], - ]; + const [d1, d2, d3] = [SAMPLE_DIGESTS.sha256[0], SAMPLE_DIGESTS.sha256[1], SAMPLE_DIGESTS.sha256[0]]; const stored = [ makeStoredPart(1, { algorithm: 'sha256', value: d1 }), makeStoredPart(2, { algorithm: 'sha256', value: d2 }), @@ -644,16 +645,21 @@ describe('computeFinalChecksum', () => { ]; // User completes only parts 1 and 3, dropping 2 (orphan). const filtered = [stored[0], stored[2]].map(s => ({ - key: s.key, ETag: `"${s.value.ETag}"`, - size: s.value.Size, locations: s.value.partLocations, + key: s.key, + ETag: `"${s.value.ETag}"`, + size: s.value.Size, + locations: s.value.partLocations, })); const got = computeFinalChecksum( - stored, filtered, + stored, + filtered, { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' }, - SPLITTER, uploadId, log); + SPLITTER, + uploadId, + log, + ); assert(got); - assert(got.value.endsWith('-2'), - `should reflect 2 completed parts, got ${got.value}`); + assert(got.value.endsWith('-2'), `should reflect 2 completed parts, got ${got.value}`); const expected = crypto .createHash('sha256') .update(Buffer.concat([d1, d3].map(x => Buffer.from(x, 'base64')))) @@ -668,9 +674,11 @@ describe('validateExpectedFinalChecksum', () => { it('should return null when no x-amz-checksum- header is present', () => { const err = validateExpectedFinalChecksum( - { 'host': 'example.com' }, + { host: 'example.com' }, { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, - uploadId, log); + uploadId, + log, + ); assert.strictEqual(err, null); }); @@ -681,7 +689,9 @@ describe('validateExpectedFinalChecksum', () => { 'x-amz-checksum-algorithm': 'SHA256', }, { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, - uploadId, log); + uploadId, + log, + ); assert.strictEqual(err, null); }); @@ -689,7 +699,9 @@ describe('validateExpectedFinalChecksum', () => { const err = validateExpectedFinalChecksum( { 'x-amz-checksum-sha256': 'abc-3' }, { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, - uploadId, log); + uploadId, + log, + ); assert.strictEqual(err, null); }); @@ -697,7 +709,9 @@ describe('validateExpectedFinalChecksum', () => { const err = validateExpectedFinalChecksum( { 'x-amz-checksum-sha256': 'wrong-3' }, { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, - uploadId, log); + uploadId, + log, + ); assert(err); 
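// Editor's note: the contract this describe block pins down, restated as a
// sketch (an assumed shape, not the production validateExpectedFinalChecksum):
// at most one x-amz-checksum-<algo> value header may appear; if present it
// must name the computed algorithm and match its value, else BadDigest; two
// different value headers yield InvalidRequest; x-amz-checksum-algorithm
// alone asserts nothing.
function sketchValidateExpected(headers, finalChecksum) {
    const valueHeaders = Object.keys(headers).filter(
        h => h.startsWith('x-amz-checksum-') &&
            h !== 'x-amz-checksum-algorithm' &&
            h !== 'x-amz-checksum-type',
    );
    if (valueHeaders.length === 0) {
        return null; // caller asserted nothing about the final checksum
    }
    if (valueHeaders.length > 1) {
        return 'InvalidRequest'; // "Multiple checksum Types"
    }
    const algo = valueHeaders[0].slice('x-amz-checksum-'.length);
    if (!finalChecksum || algo !== finalChecksum.algorithm
        || headers[valueHeaders[0]] !== finalChecksum.value) {
        return 'BadDigest';
    }
    return null;
}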
assert.strictEqual(err.is.BadDigest, true); assert(err.description.includes('SHA256')); @@ -707,23 +721,22 @@ describe('validateExpectedFinalChecksum', () => { const err = validateExpectedFinalChecksum( { 'x-amz-checksum-crc32': 'aGVsbG8=' }, { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, - uploadId, log); + uploadId, + log, + ); assert(err); assert.strictEqual(err.is.BadDigest, true); assert(err.description.includes('CRC32')); }); it('should return BadDigest when header is present but finalChecksum is null', () => { - const err = validateExpectedFinalChecksum( - { 'x-amz-checksum-sha256': 'abc-3' }, - null, uploadId, log); + const err = validateExpectedFinalChecksum({ 'x-amz-checksum-sha256': 'abc-3' }, null, uploadId, log); assert(err); assert.strictEqual(err.is.BadDigest, true); }); it('should return null when finalChecksum is null and no header present', () => { - const err = validateExpectedFinalChecksum( - { 'host': 'example.com' }, null, uploadId, log); + const err = validateExpectedFinalChecksum({ host: 'example.com' }, null, uploadId, log); assert.strictEqual(err, null); }); @@ -734,7 +747,9 @@ describe('validateExpectedFinalChecksum', () => { 'x-amz-checksum-crc32': 'def', }, { algorithm: 'sha256', type: 'COMPOSITE', value: 'abc-3' }, - uploadId, log); + uploadId, + log, + ); assert(err); assert.strictEqual(err.is.InvalidRequest, true); assert(err.description.includes('Multiple checksum Types')); @@ -755,7 +770,8 @@ describe('CompleteMultipartUpload final-object checksum storage', () => { namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, url: '/', - post: '' + 'scality-internal-mem' + '', @@ -765,77 +781,97 @@ describe('CompleteMultipartUpload final-object checksum storage', () => { // (algorithm, type) pairs valid for an MPU per AWS rules. // shouldStore reflects Part 3's gating: only FULL_OBJECT is persisted. const STORAGE_MATRIX = [ - { algorithm: 'crc32', type: 'FULL_OBJECT', shouldStore: true }, - { algorithm: 'crc32c', type: 'FULL_OBJECT', shouldStore: true }, + { algorithm: 'crc32', type: 'FULL_OBJECT', shouldStore: true }, + { algorithm: 'crc32c', type: 'FULL_OBJECT', shouldStore: true }, { algorithm: 'crc64nvme', type: 'FULL_OBJECT', shouldStore: true }, - { algorithm: 'crc32', type: 'COMPOSITE', shouldStore: false }, - { algorithm: 'crc32c', type: 'COMPOSITE', shouldStore: false }, - { algorithm: 'sha1', type: 'COMPOSITE', shouldStore: false }, - { algorithm: 'sha256', type: 'COMPOSITE', shouldStore: false }, + { algorithm: 'crc32', type: 'COMPOSITE', shouldStore: false }, + { algorithm: 'crc32c', type: 'COMPOSITE', shouldStore: false }, + { algorithm: 'sha1', type: 'COMPOSITE', shouldStore: false }, + { algorithm: 'sha256', type: 'COMPOSITE', shouldStore: false }, ]; function bucketPutP() { return new Promise((resolve, reject) => - bucketPut(authInfo, bucketPutRequest, log, - err => err ? reject(err) : resolve())); + bucketPut(authInfo, bucketPutRequest, log, err => (err ? reject(err) : resolve())), + ); } function initiateMpuP(headers) { return new Promise((resolve, reject) => { - initiateMultipartUpload(authInfo, { - bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, - url: `/${objectKey}?uploads`, - actionImplicitDenies: false, - }, log, (err, xml) => { - if (err) return reject(err); - return parseString(xml, (parseErr, json) => parseErr - ? 
reject(parseErr) - : resolve(json.InitiateMultipartUploadResult.UploadId[0])); - }); + initiateMultipartUpload( + authInfo, + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, + url: `/${objectKey}?uploads`, + actionImplicitDenies: false, + }, + log, + (err, xml) => { + if (err) { + return reject(err); + } + return parseString(xml, (parseErr, json) => + parseErr ? reject(parseErr) : resolve(json.InitiateMultipartUploadResult.UploadId[0]), + ); + }, + ); }); } function uploadPartP(uploadId, headers = {}) { return new Promise((resolve, reject) => { - const partRequest = new DummyRequest({ - bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { partNumber: '1', uploadId }, - partHash, - actionImplicitDenies: false, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, - err => err ? reject(err) : resolve()); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { partNumber: '1', uploadId }, + partHash, + actionImplicitDenies: false, + }, + partBody, + ); + objectPutPart(authInfo, partRequest, undefined, log, err => (err ? reject(err) : resolve())); }); } function completeMpuP(uploadId, partChecksumXml = '') { - const completeBody = '' + + const completeBody = + '' + '' + '1' + - `"${partHash}"` + - partChecksumXml + + `"${partHash}"${partChecksumXml}` + '' + ''; return new Promise((resolve, reject) => { - completeMultipartUpload(authInfo, { - bucketName, namespace, objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${uploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId }, - post: completeBody, - actionImplicitDenies: false, - }, log, err => err ? reject(err) : resolve()); + completeMultipartUpload( + authInfo, + { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${uploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId }, + post: completeBody, + actionImplicitDenies: false, + }, + log, + err => (err ? reject(err) : resolve()), + ); }); } function fetchObjectMDP() { return new Promise((resolve, reject) => - metadata.getObjectMD(bucketName, objectKey, {}, log, - (err, md) => err ? reject(err) : resolve(md))); + metadata.getObjectMD(bucketName, objectKey, {}, log, (err, md) => (err ? reject(err) : resolve(md))), + ); } beforeEach(() => cleanup()); @@ -854,25 +890,19 @@ describe('CompleteMultipartUpload final-object checksum storage', () => { // Pre-compute the part's checksum so we can supply it on // UploadPart and (for COMPOSITE non-default) in the Complete body. const partChecksum = await algorithms[algorithm].digest(partBody); - const uploadHeaders = type === 'COMPOSITE' - ? { [`x-amz-checksum-${algorithm}`]: partChecksum } - : {}; + const uploadHeaders = type === 'COMPOSITE' ? { [`x-amz-checksum-${algorithm}`]: partChecksum } : {}; await uploadPartP(uploadId, uploadHeaders); - const partChecksumXml = type === 'COMPOSITE' - ? `<${tag}>${partChecksum}` - : ''; + const partChecksumXml = type === 'COMPOSITE' ? 
`<${tag}>${partChecksum}` : ''; await completeMpuP(uploadId, partChecksumXml); const md = await fetchObjectMDP(); if (shouldStore) { - assert(md.checksum, - `expected ${type} ${upper} checksum on ObjectMD`); + assert(md.checksum, `expected ${type} ${upper} checksum on ObjectMD`); assert.strictEqual(md.checksum.checksumAlgorithm, algorithm); assert.strictEqual(md.checksum.checksumType, type); assert(typeof md.checksum.checksumValue === 'string'); assert(md.checksum.checksumValue.length > 0); } else { - assert.strictEqual(md.checksum, undefined, - `${type} ${upper} should not persist on ObjectMD`); + assert.strictEqual(md.checksum, undefined, `${type} ${upper} should not persist on ObjectMD`); } }); }); @@ -921,7 +951,8 @@ describe('CompleteMultipartUpload final-object checksum response', () => { namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, url: '/', - post: '' + 'scality-internal-mem' + '', @@ -929,80 +960,99 @@ describe('CompleteMultipartUpload final-object checksum response', () => { }; const RESPONSE_MATRIX = [ - { algorithm: 'crc32', type: 'FULL_OBJECT' }, - { algorithm: 'crc32c', type: 'FULL_OBJECT' }, + { algorithm: 'crc32', type: 'FULL_OBJECT' }, + { algorithm: 'crc32c', type: 'FULL_OBJECT' }, { algorithm: 'crc64nvme', type: 'FULL_OBJECT' }, - { algorithm: 'crc32', type: 'COMPOSITE' }, - { algorithm: 'crc32c', type: 'COMPOSITE' }, - { algorithm: 'sha1', type: 'COMPOSITE' }, - { algorithm: 'sha256', type: 'COMPOSITE' }, + { algorithm: 'crc32', type: 'COMPOSITE' }, + { algorithm: 'crc32c', type: 'COMPOSITE' }, + { algorithm: 'sha1', type: 'COMPOSITE' }, + { algorithm: 'sha256', type: 'COMPOSITE' }, ]; function bucketPutP() { return new Promise((resolve, reject) => - bucketPut(authInfo, bucketPutRequest, log, - err => err ? reject(err) : resolve())); + bucketPut(authInfo, bucketPutRequest, log, err => (err ? reject(err) : resolve())), + ); } function initiateMpuP(headers) { return new Promise((resolve, reject) => { - initiateMultipartUpload(authInfo, { - bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, - url: `/${objectKey}?uploads`, - actionImplicitDenies: false, - }, log, (err, xml) => { - if (err) return reject(err); - return parseString(xml, (parseErr, json) => parseErr - ? reject(parseErr) - : resolve(json.InitiateMultipartUploadResult.UploadId[0])); - }); + initiateMultipartUpload( + authInfo, + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, + url: `/${objectKey}?uploads`, + actionImplicitDenies: false, + }, + log, + (err, xml) => { + if (err) { + return reject(err); + } + return parseString(xml, (parseErr, json) => + parseErr ? reject(parseErr) : resolve(json.InitiateMultipartUploadResult.UploadId[0]), + ); + }, + ); }); } function uploadPartP(uploadId, headers = {}) { return new Promise((resolve, reject) => { - const partRequest = new DummyRequest({ - bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { partNumber: '1', uploadId }, - partHash, - actionImplicitDenies: false, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, - err => err ? 
reject(err) : resolve()); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com`, ...headers }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { partNumber: '1', uploadId }, + partHash, + actionImplicitDenies: false, + }, + partBody, + ); + objectPutPart(authInfo, partRequest, undefined, log, err => (err ? reject(err) : resolve())); }); } // Resolves with { xml, headers } so callers can inspect both the // response body and the response headers. function completeMpuP(uploadId, partChecksumXml = '') { - const completeBody = '' + + const completeBody = + '' + '' + '1' + - `"${partHash}"` + - partChecksumXml + + `"${partHash}"${partChecksumXml}` + '' + ''; return new Promise((resolve, reject) => { - completeMultipartUpload(authInfo, { - bucketName, namespace, objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${uploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId }, - post: completeBody, - actionImplicitDenies: false, - }, log, (err, xml, headers) => err - ? reject(err) - : resolve({ xml, headers })); + completeMultipartUpload( + authInfo, + { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${uploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId }, + post: completeBody, + actionImplicitDenies: false, + }, + log, + (err, xml, headers) => (err ? reject(err) : resolve({ xml, headers })), + ); }); } function parseXmlP(xmlStr) { return new Promise((resolve, reject) => - parseString(xmlStr, (err, json) => err ? reject(err) : resolve(json))); + parseString(xmlStr, (err, json) => (err ? reject(err) : resolve(json))), + ); } beforeEach(() => cleanup()); @@ -1018,13 +1068,9 @@ describe('CompleteMultipartUpload final-object checksum response', () => { 'x-amz-checksum-type': type, }); const partChecksum = await algorithms[algorithm].digest(partBody); - const uploadHeaders = type === 'COMPOSITE' - ? { [`x-amz-checksum-${algorithm}`]: partChecksum } - : {}; + const uploadHeaders = type === 'COMPOSITE' ? { [`x-amz-checksum-${algorithm}`]: partChecksum } : {}; await uploadPartP(uploadId, uploadHeaders); - const partChecksumXml = type === 'COMPOSITE' - ? `<${tag}>${partChecksum}` - : ''; + const partChecksumXml = type === 'COMPOSITE' ? `<${tag}>${partChecksum}` : ''; const { xml, headers } = await completeMpuP(uploadId, partChecksumXml); const json = await parseXmlP(xml); const result = json.CompleteMultipartUploadResult; @@ -1034,11 +1080,9 @@ describe('CompleteMultipartUpload final-object checksum response', () => { assert.strictEqual(result.ChecksumType[0], type); // COMPOSITE values carry the "-N" suffix; FULL_OBJECT do not. if (type === 'COMPOSITE') { - assert(xmlValue.endsWith('-1'), - `expected -1 suffix for 1-part COMPOSITE, got ${xmlValue}`); + assert(xmlValue.endsWith('-1'), `expected -1 suffix for 1-part COMPOSITE, got ${xmlValue}`); } else { - assert(!xmlValue.includes('-'), - `FULL_OBJECT value should have no suffix, got ${xmlValue}`); + assert(!xmlValue.includes('-'), `FULL_OBJECT value should have no suffix, got ${xmlValue}`); } // AWS-verified: CompleteMPU does NOT emit // x-amz-checksum-* / x-amz-checksum-type response headers. 
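// Editor's note: that header-side contract, factored into one reusable
// assertion (a sketch — the tests below inline the equivalent checks header
// by header; `algorithms` is already imported at the top of this file):
function assertNoChecksumResponseHeaders(responseHeaders) {
    // CompleteMPU reports the final checksum only in the XML body.
    assert.strictEqual(responseHeaders['x-amz-checksum-type'], undefined);
    Object.keys(algorithms).forEach(algo => {
        assert.strictEqual(responseHeaders[`x-amz-checksum-${algo}`], undefined);
    });
}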
@@ -1063,5 +1107,3 @@ describe('CompleteMultipartUpload final-object checksum response', () => { assert.strictEqual(headers['x-amz-checksum-type'], undefined); }); }); - - diff --git a/tests/unit/api/objectGetAttributes.js b/tests/unit/api/objectGetAttributes.js index d3e38f4568..7244efd0ec 100644 --- a/tests/unit/api/objectGetAttributes.js +++ b/tests/unit/api/objectGetAttributes.js @@ -25,15 +25,18 @@ const postBody = Buffer.from(body, 'utf8'); const expectedMD5 = 'fc3ff98e8c6a0d3087d515c0473f8677'; // Promisify helper for functions with non-standard callback signatures -const promisify = fn => (...args) => new Promise((resolve, reject) => { - fn(...args, (err, ...results) => { - if (err) { - reject(err); - } else { - resolve(results); - } - }); -}); +const promisify = + fn => + (...args) => + new Promise((resolve, reject) => { + fn(...args, (err, ...results) => { + if (err) { + reject(err); + } else { + resolve(results); + } + }); + }); const bucketPutAsync = promisify(bucketPut); const bucketPutVersioningAsync = promisify(bucketPutVersioning); @@ -106,7 +109,7 @@ describe('objectGetAttributes API', () => { assert.strictEqual( err.description, 'The x-amz-object-attributes header specifying the attributes ' + - 'to be retrieved is either missing or empty', + 'to be retrieved is either missing or empty', ); } }); @@ -176,12 +179,7 @@ describe('objectGetAttributes API', () => { }); it('should return all attributes', async () => { - const testGetRequest = createGetAttributesRequest([ - 'ETag', - 'ObjectParts', - 'StorageClass', - 'ObjectSize', - ]); + const testGetRequest = createGetAttributesRequest(['ETag', 'ObjectParts', 'StorageClass', 'ObjectSize']); const { xml, responseHeaders } = await objectGetAttributes(authInfo, testGetRequest, log); assert(xml, 'Response XML should be present'); @@ -298,8 +296,7 @@ describe('objectGetAttributes API with multipart upload', () => { completeParts.push(`${i}"${partHash}"`); } - const completeBody = - `${completeParts.join('')}`; + const completeBody = `${completeParts.join('')}`; const completeRequest = { bucketName, @@ -654,9 +651,11 @@ describe('objectGetAttributes API with checksum', () => { const expectedDigests = {}; before(async () => { - await Promise.all(Object.keys(algorithms).map(async name => { - expectedDigests[name] = await algorithms[name].digest(postBody); - })); + await Promise.all( + Object.keys(algorithms).map(async name => { + expectedDigests[name] = await algorithms[name].digest(postBody); + }), + ); }); beforeEach(async () => { @@ -730,11 +729,7 @@ describe('objectGetAttributes API with checksum', () => { const { xml } = await objectGetAttributes(authInfo, testGetRequest, log); const result = await parseStringPromise(xml); - assert.strictEqual( - result.GetObjectAttributesResponse.Checksum, - undefined, - 'Checksum should not be present', - ); + assert.strictEqual(result.GetObjectAttributesResponse.Checksum, undefined, 'Checksum should not be present'); }); it('should not return Checksum when not requested', async () => { From 87ee513f8df5af2760d225d8b93f2bad59d68a38 Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Mon, 11 May 2026 16:04:38 +0200 Subject: [PATCH 11/12] TMP use arsenal branch --- package.json | 2 +- yarn.lock | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/package.json b/package.json index d512509634..638153ed17 100644 --- a/package.json +++ b/package.json @@ -33,7 +33,7 @@ "@azure/storage-blob": "^12.28.0", "@hapi/joi": "^17.1.1", "@smithy/node-http-handler": "^3.0.0", - "arsenal": 
"git+https://github.com/scality/Arsenal#8.4.1", + "arsenal": "git+https://github.com/scality/Arsenal#improvement/ARSN-580-handle-checksums-in-complete-mpu", "async": "2.6.4", "bucketclient": "scality/bucketclient#8.2.7", "bufferutil": "^4.0.8", diff --git a/yarn.lock b/yarn.lock index 0c6ce95e2a..a0b1b96790 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6143,9 +6143,9 @@ arraybuffer.prototype.slice@^1.0.4: optionalDependencies: ioctl "^2.0.2" -"arsenal@git+https://github.com/scality/Arsenal#8.4.1": - version "8.4.1" - resolved "git+https://github.com/scality/Arsenal#6b3b58b152ac23d29176ab1f24f49f8eda3145b2" +"arsenal@git+https://github.com/scality/Arsenal#improvement/ARSN-580-handle-checksums-in-complete-mpu": + version "8.4.2" + resolved "git+https://github.com/scality/Arsenal#a817398ac4110ef4502e7de44d75aafec6f29c6f" dependencies: "@aws-sdk/client-kms" "^3.975.0" "@aws-sdk/client-s3" "^3.975.0" From 4f3d566644fce60f086bb75e7f2439e6bc9ee0ca Mon Sep 17 00:00:00 2001 From: Leif Henriksen Date: Tue, 12 May 2026 16:08:34 +0200 Subject: [PATCH 12/12] CLDSRV-898: await checksum algorithm --- .../apiUtils/integrity/validateChecksums.js | 4 +- lib/api/completeMultipartUpload.js | 194 +++++++++++------- .../apiUtils/integrity/computeMpuChecksums.js | 16 +- tests/unit/api/completeMultipartUpload.js | 62 +++--- 4 files changed, 158 insertions(+), 118 deletions(-) diff --git a/lib/api/apiUtils/integrity/validateChecksums.js b/lib/api/apiUtils/integrity/validateChecksums.js index 7fb987ab18..8f0ae233cd 100644 --- a/lib/api/apiUtils/integrity/validateChecksums.js +++ b/lib/api/apiUtils/integrity/validateChecksums.js @@ -571,13 +571,13 @@ const COMPOSITE_ALGOS = new Set(['crc32', 'crc32c', 'sha1', 'sha256']); * @returns {{ checksum: string, error: null } * | { checksum: null, error: { code: string, details: object } }} */ -function computeCompositeMPUChecksum(algorithm, partChecksumsBase64) { +async function computeCompositeMPUChecksum(algorithm, partChecksumsBase64) { if (!COMPOSITE_ALGOS.has(algorithm)) { return { checksum: null, error: { code: ChecksumError.MPUAlgoNotSupported, details: { algorithm } } }; } const concat = Buffer.concat(partChecksumsBase64.map(c => Buffer.from(c, 'base64'))); - const digest = algorithms[algorithm].digest(concat); + const digest = await algorithms[algorithm].digest(concat); return { checksum: `${digest}-${partChecksumsBase64.length}`, error: null, diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index fe922f47cc..0961a247d3 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -125,7 +125,7 @@ function validatePerPartChecksums(jsonList, storedParts, mpuSplitter, mpuChecksu * @param {object} log - werelogs logger * @returns {object|null} { algorithm, type, value } or null */ -function computeFinalChecksum(storedParts, filteredPartList, storedMetadata, mpuSplitter, uploadId, log) { +async function computeFinalChecksum(storedParts, filteredPartList, storedMetadata, mpuSplitter, uploadId, log) { const algorithm = storedMetadata.checksumAlgorithm; const type = storedMetadata.checksumType; if (!algorithm || !type) { @@ -159,7 +159,7 @@ function computeFinalChecksum(storedParts, filteredPartList, storedMetadata, mpu let result; if (type === 'COMPOSITE') { - result = computeCompositeMPUChecksum( + result = await computeCompositeMPUChecksum( algorithm, partInputs.map(p => p.value), ); @@ -624,74 +624,126 @@ function completeMultipartUpload(authInfo, request, log, callback) { totalMPUSize, next, ) { + let 
nextCalled = false; + const callNext = (...args) => { + if (nextCalled) { + return undefined; + } + nextCalled = true; + return next(...args); + }; // External-handled MPUs (ingestion / external backends) come in // with completeObjData set and no filteredPartsObj — the data // store already aggregated the parts, and we have no per-part // info to feed the compute step. Skip in that case. - if (filteredPartsObj) { - finalChecksum = computeFinalChecksum( - storedParts, - filteredPartsObj.partList, - storedMetadata, - splitter, - uploadId, - log, - ); - const expectedErr = validateExpectedFinalChecksum(request.headers, finalChecksum, uploadId, log); - if (expectedErr) { - return next(expectedErr, destBucket); - } - } - // if mpu was completed on backend that stored mpu MD externally, - // skip MD processing steps - if (completeObjData && skipMpuPartProcessing(completeObjData)) { - const dataLocations = [ - { - key: completeObjData.key, - size: completeObjData.contentLength, - start: 0, - dataStoreVersionId: completeObjData.dataStoreVersionId, - dataStoreName: storedMetadata.dataStoreName, - dataStoreETag: completeObjData.eTag, - dataStoreType: completeObjData.dataStoreType, - }, - ]; - const calculatedSize = completeObjData.contentLength; - return next( - null, - destBucket, - objMD, - mpuBucket, - storedMetadata, - completeObjData.eTag, - calculatedSize, - dataLocations, - [mpuOverviewKey], - null, - completeObjData, - totalMPUSize, - ); + if (!filteredPartsObj) { + return continueProcessParts(null); } + computeFinalChecksum( + storedParts, + filteredPartsObj.partList, + storedMetadata, + splitter, + uploadId, + log, + ).then( + fc => { + try { + finalChecksum = fc; + const expectedErr = validateExpectedFinalChecksum( + request.headers, + finalChecksum, + uploadId, + log, + ); + if (expectedErr) { + return callNext(expectedErr, destBucket); + } + return continueProcessParts(null); + } catch (resolveErr) { + log.error('unexpected throw after final-checksum compute', { + uploadId, + error: resolveErr, + }); + return callNext(resolveErr, destBucket); + } + }, + computeErr => { + log.error('final-object checksum compute threw', { + uploadId, + error: computeErr, + }); + return callNext(computeErr, destBucket); + }, + ); + return undefined; - const partsInfo = generateMpuPartStorageInfo(filteredPartsObj.partList); - if (partsInfo.error) { - return next(partsInfo.error, destBucket); - } - const { keysToDelete, extraPartLocations } = filteredPartsObj; - const { aggregateETag, dataLocations, calculatedSize } = partsInfo; + function continueProcessParts() { + // if mpu was completed on backend that stored mpu MD externally, + // skip MD processing steps + if (completeObjData && skipMpuPartProcessing(completeObjData)) { + const dataLocations = [ + { + key: completeObjData.key, + size: completeObjData.contentLength, + start: 0, + dataStoreVersionId: completeObjData.dataStoreVersionId, + dataStoreName: storedMetadata.dataStoreName, + dataStoreETag: completeObjData.eTag, + dataStoreType: completeObjData.dataStoreType, + }, + ]; + const calculatedSize = completeObjData.contentLength; + return callNext( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + completeObjData.eTag, + calculatedSize, + dataLocations, + [mpuOverviewKey], + null, + completeObjData, + totalMPUSize, + ); + } - if (completeObjData) { - const dataLocations = [ - { - key: completeObjData.key, - size: calculatedSize, - start: 0, - dataStoreName: storedMetadata.dataStoreName, - dataStoreETag: aggregateETag, - dataStoreType: 
diff --git a/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js b/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js
index 3640238dc5..67ff80a9b5 100644
--- a/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js
+++ b/tests/unit/api/apiUtils/integrity/computeMpuChecksums.js
@@ -27,34 +27,34 @@ describe('computeCompositeMPUChecksum', () => {
     COMPOSITE_ALGOS.forEach(algo => {
         const label = algo.toUpperCase();
 
-        it(`should match ${label}(decode(c1) || ... || decode(cN)) + "-N"`, () => {
+        it(`should match ${label}(decode(c1) || ... || decode(cN)) + "-N"`, async () => {
             const partChecksums = parts.map(p => algorithms[algo].digest(p));
             const expectedConcat = Buffer.concat(partChecksums.map(c => Buffer.from(c, 'base64')));
             const expected = `${algorithms[algo].digest(expectedConcat)}-3`;
-            const got = computeCompositeMPUChecksum(algo, partChecksums);
+            const got = await computeCompositeMPUChecksum(algo, partChecksums);
             assert.strictEqual(got.error, null);
             assert.strictEqual(got.checksum, expected);
         });
     });
 
-    it('should return N=1 for a single part', () => {
+    it('should return N=1 for a single part', async () => {
         const partChecksums = [algorithms.sha256.digest(parts[0])];
-        const got = computeCompositeMPUChecksum('sha256', partChecksums);
+        const got = await computeCompositeMPUChecksum('sha256', partChecksums);
         assert.strictEqual(got.error, null);
         assert(got.checksum.endsWith('-1'));
     });
 
-    it('should return an error object on unsupported algorithm', () => {
-        const got = computeCompositeMPUChecksum('md5', ['AAAA']);
+    it('should return an error object on unsupported algorithm', async () => {
+        const got = await computeCompositeMPUChecksum('md5', ['AAAA']);
         assert.strictEqual(got.checksum, null);
         assert(got.error);
         assert.strictEqual(got.error.code, 'MPUAlgoNotSupported');
         assert.deepStrictEqual(got.error.details, { algorithm: 'md5' });
     });
 
-    it('should return an error object for crc64nvme (not allowed for COMPOSITE)', () => {
-        const got = computeCompositeMPUChecksum('crc64nvme', ['AQIDBAUGBwg=']);
+    it('should return an error object for crc64nvme (not allowed for COMPOSITE)', async () => {
+        const got = await computeCompositeMPUChecksum('crc64nvme', ['AQIDBAUGBwg=']);
         assert.strictEqual(got.checksum, null);
         assert.strictEqual(got.error.code, 'MPUAlgoNotSupported');
     });
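
Callers migrating to the awaited API follow the same shape as the tests above.
A usage sketch, assuming an algorithms map like the one in
validateChecksums.js (compositeOf and partBuffers are illustrative names, not
part of the patch):

```js
// Part digests may be plain strings or promises; Promise.all accepts both.
async function compositeOf(algo, partBuffers) {
    const partChecksums = await Promise.all(partBuffers.map(b => algorithms[algo].digest(b)));
    const { checksum, error } = await computeCompositeMPUChecksum(algo, partChecksums);
    if (error) {
        throw new Error(`composite checksum failed: ${error.code}`);
    }
    return checksum;
}
```
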
diff --git a/tests/unit/api/completeMultipartUpload.js b/tests/unit/api/completeMultipartUpload.js
index 08fdf2eb3b..6e422ab60a 100644
--- a/tests/unit/api/completeMultipartUpload.js
+++ b/tests/unit/api/completeMultipartUpload.js
@@ -428,7 +428,7 @@ describe('CompleteMultipartUpload body-checksum bypass', () => {
     const log = new DummyRequestLogger();
 
     it(
-        'validateMethodChecksumNoChunking returns null for completeMultipartUpload ' +
+        'should skip body-checksum validation for completeMultipartUpload ' +
            'even when x-amz-checksum-sha256 does not match the body digest',
        async () => {
            const body = Buffer.from(
@@ -450,8 +450,7 @@
     );
 
     it(
-        'validateMethodChecksumNoChunking still rejects body mismatch for methods ' +
-            'that remain in checksumedMethods (sanity check)',
+        'should still reject body mismatch for methods that remain in checksumedMethods (sanity check)',
        async () => {
            const body = Buffer.from('{"Objects":[]}');
            const finalObjectChecksum = '47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=';
@@ -479,15 +478,15 @@ describe('computeFinalChecksum', () => {
         }));
     }
 
-    it('should return null when MPU has no checksumAlgorithm', () => {
+    it('should return null when MPU has no checksumAlgorithm', async () => {
         const stored = [makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] })];
-        const got = computeFinalChecksum(stored, partListFromStored(stored), {}, SPLITTER, uploadId, log);
+        const got = await computeFinalChecksum(stored, partListFromStored(stored), {}, SPLITTER, uploadId, log);
         assert.strictEqual(got, null);
     });
 
-    it('should return null when MPU has no checksumType', () => {
+    it('should return null when MPU has no checksumType', async () => {
         const stored = [makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] })];
-        const got = computeFinalChecksum(
+        const got = await computeFinalChecksum(
             stored,
             partListFromStored(stored),
             { checksumAlgorithm: 'sha256' },
@@ -498,14 +497,14 @@
         assert.strictEqual(got, null);
     });
 
-    it('should return COMPOSITE checksum with -N suffix for SHA256 MPU', () => {
+    it('should return COMPOSITE checksum with -N suffix for SHA256 MPU', async () => {
         const [d1, d2, d3] = [SAMPLE_DIGESTS.sha256[0], SAMPLE_DIGESTS.sha256[1], SAMPLE_DIGESTS.sha256[0]];
         const stored = [
             makeStoredPart(1, { algorithm: 'sha256', value: d1 }),
             makeStoredPart(2, { algorithm: 'sha256', value: d2 }),
             makeStoredPart(3, { algorithm: 'sha256', value: d3 }),
         ];
-        const got = computeFinalChecksum(
+        const got = await computeFinalChecksum(
             stored,
             partListFromStored(stored),
             { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' },
@@ -527,13 +526,13 @@
     });
 
     ['sha1', 'crc32', 'crc32c'].forEach(algo => {
-        it(`should compute COMPOSITE checksum for ${algo.toUpperCase()}`, () => {
+        it(`should compute COMPOSITE checksum for ${algo.toUpperCase()}`, async () => {
             const [d1, d2] = SAMPLE_DIGESTS[algo];
             const stored = [
                 makeStoredPart(1, { algorithm: algo, value: d1 }),
                 makeStoredPart(2, { algorithm: algo, value: d2 }),
             ];
-            const got = computeFinalChecksum(
+            const got = await computeFinalChecksum(
                 stored,
                 partListFromStored(stored),
                 { checksumAlgorithm: algo, checksumType: 'COMPOSITE' },
@@ -577,7 +576,7 @@
             },
         },
     ];
-    const got = computeFinalChecksum(
+    const got = await computeFinalChecksum(
         stored,
         partListFromStored(stored),
         { checksumAlgorithm: 'crc64nvme', checksumType: 'FULL_OBJECT' },
@@ -593,13 +592,13 @@
     assert.strictEqual(got.value, expected);
     });
 
-    it('should return null and log when a part is missing ChecksumValue', () => {
+    it('should return null and log when a part is missing ChecksumValue', async () => {
         const stored = [
             makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] }),
             makeStoredPart(2, null),
             makeStoredPart(3, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[1] }),
         ];
-        const got = computeFinalChecksum(
+        const got = await computeFinalChecksum(
             stored,
             partListFromStored(stored),
             { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' },
@@ -610,9 +609,9 @@
         assert.strictEqual(got, null);
     });
 
-    it('should return null when checksumType is unknown', () => {
+    it('should return null when checksumType is unknown', async () => {
         const stored = [makeStoredPart(1, { algorithm: 'sha256', value: SAMPLE_DIGESTS.sha256[0] })];
-        const got = computeFinalChecksum(
+        const got = await computeFinalChecksum(
             stored,
             partListFromStored(stored),
             { checksumAlgorithm: 'sha256', checksumType: 'WEIRD' },
@@ -623,20 +622,23 @@
         assert.strictEqual(got, null);
     });
 
-    it('should return null when underlying compute reports an error ' + '(crc64nvme COMPOSITE is not allowed)', () => {
-        const stored = [makeStoredPart(1, { algorithm: 'crc64nvme', value: SAMPLE_DIGESTS.crc64nvme[0] })];
-        const got = computeFinalChecksum(
-            stored,
-            partListFromStored(stored),
-            { checksumAlgorithm: 'crc64nvme', checksumType: 'COMPOSITE' },
-            SPLITTER,
-            uploadId,
-            log,
-        );
-        assert.strictEqual(got, null);
-    });
+    it(
+        'should return null when underlying compute reports an error ' +
+            '(crc64nvme COMPOSITE is not allowed)',
+        async () => {
+            const stored = [makeStoredPart(1, { algorithm: 'crc64nvme', value: SAMPLE_DIGESTS.crc64nvme[0] })];
+            const got = await computeFinalChecksum(
+                stored,
+                partListFromStored(stored),
+                { checksumAlgorithm: 'crc64nvme', checksumType: 'COMPOSITE' },
+                SPLITTER,
+                uploadId,
+                log,
+            );
+            assert.strictEqual(got, null);
+        },
+    );
 
-    it('should compute over filteredPartList (subset), not all storedParts', () => {
+    it('should compute over filteredPartList (subset), not all storedParts', async () => {
         const [d1, d2, d3] = [SAMPLE_DIGESTS.sha256[0], SAMPLE_DIGESTS.sha256[1], SAMPLE_DIGESTS.sha256[0]];
         const stored = [
             makeStoredPart(1, { algorithm: 'sha256', value: d1 }),
             makeStoredPart(2, { algorithm: 'sha256', value: d2 }),
             makeStoredPart(3, { algorithm: 'sha256', value: d3 }),
         ];
         const filtered = stored.slice(0, 2).map(s => ({
             partNumber: s.partNumber,
             size: s.value.Size,
             locations: s.value.partLocations,
         }));
-        const got = computeFinalChecksum(
+        const got = await computeFinalChecksum(
             stored,
             filtered,
             { checksumAlgorithm: 'sha256', checksumType: 'COMPOSITE' },