Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 40 additions & 0 deletions .claude/rules/test-verification.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
<rules category="testing">
<rule severity="HIGH" enforcement="STRICT">
<name>Wait for user test confirmation before pushing fixes</name>
<description>
Tests in this project run only in the example React Native app — the
assistant cannot execute them. After fixing a bug or adding a feature
that would normally be validated by running tests, commit locally and
WAIT for the user to manually run the test suite (`bun ios` /
`bun android` and exercise the relevant suite) and confirm it passes
before pushing to the remote.
</description>
<requirements>
<requirement>After committing a fix that requires runtime validation, do NOT immediately `git push`.</requirement>
<requirement>Tell the user the commit is ready and ask them to run the test that exercises the fix.</requirement>
<requirement>Wait for explicit user confirmation ("tests pass", "all green", "ship it", etc.) before pushing.</requirement>
<requirement>If the user reports a failure, iterate locally with new commits — do NOT push interim fixes either.</requirement>
<requirement>Once the user confirms, batch-push all the validated commits in one `git push`.</requirement>
</requirements>
<appliesWhen>
<case>The change touches C++ that only the example app can exercise.</case>
<case>The change modifies behavior that an existing example-app test asserts.</case>
<case>The change adds new example-app tests whose pass/fail is unknown.</case>
<case>A previous push had a confirmed test failure and you are pushing the followup.</case>
</appliesWhen>
<doesNotApplyWhen>
<case>The change is purely documentation, plan files, or `.claude/` config — no runtime impact.</case>
<case>The user explicitly says "push it" or "ship now" before running tests.</case>
<case>The branch has never been pushed and you are creating the first PR push (still ask first if any unverified runtime change is included).</case>
</doesNotApplyWhen>
<rationale>
The /pr and /commit skills are written for projects where the assistant
can run tests itself. This project's testing model puts that step on the
user. Pushing unverified fixes ships potentially broken code to the
remote, pollutes PR history with revert-style "fix the fix" commits,
and burns user trust. The cost of waiting is a single round-trip
message; the cost of not waiting is a force-push or noise on the PR.
</rationale>
<mustAcknowledge>true</mustAcknowledge>
</rule>
</rules>
2 changes: 1 addition & 1 deletion example/ios/Podfile.lock
Original file line number Diff line number Diff line change
Expand Up @@ -2815,7 +2815,7 @@ SPEC CHECKSUMS:
NitroMmkv: afbc5b2fbf963be567c6c545aa1efcf6a9cec68e
NitroModules: 11bba9d065af151eae51e38a6425e04c3b223ff3
OpenSSL-Universal: 9110d21982bb7e8b22a962b6db56a8aa805afde7
QuickCrypto: 51001a1827a20257b7c159d8c527306cdb578b5a
QuickCrypto: f8ed40d88a6dcacc4451d22004c2fd22b8d07f79
RCT-Folly: 846fda9475e61ec7bcbf8a3fe81edfcaeb090669
RCTDeprecation: c4b9e2fd0ab200e3af72b013ed6113187c607077
RCTRequired: e97dd5dafc1db8094e63bc5031e0371f092ae92a
Expand Down
2 changes: 1 addition & 1 deletion example/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
"react-native-mmkv": "4.0.1",
"react-native-nitro-modules": "0.33.2",
"react-native-quick-base64": "2.2.2",
"react-native-quick-crypto": "1.1.0",
"react-native-quick-crypto": "workspace:*",
"react-native-safe-area-context": "5.6.2",
"react-native-screens": "4.18.0",
"react-native-vector-icons": "10.3.0",
Expand Down
93 changes: 93 additions & 0 deletions example/src/tests/cipher/cipher_tests.ts
Original file line number Diff line number Diff line change
Expand Up @@ -399,3 +399,96 @@ test(SUITE, 'GCM tampered auth tag throws error', () => {

expect(() => decipher.final()).to.throw();
});

// --- setAAD byte-offset regression tests ---
// Pre-fix, setAAD passed `buffer.buffer` to native, ignoring byteOffset /
// byteLength on sliced Buffers. That meant a sliced AAD authenticated the
// wrong bytes — a silent AEAD integrity violation.

test(
  SUITE,
  'GCM setAAD with sliced Buffer authenticates the slice (not backing)',
  () => {
    // Fresh 32-byte key and 12-byte GCM nonce per run.
    const key = Buffer.from(randomFillSync(new Uint8Array(32)));
    const iv = randomFillSync(new Uint8Array(12));
    const plaintext = Buffer.from('test data for AAD slice');

    // Surround a known 16-byte AAD region with distinct noise so that
    // authenticating anything other than the exact slice is detectable.
    const prefix = Buffer.from('PREFIX_NOISE_');
    const aadWindow = Buffer.from('aad-payload-1234'); // 16-byte AAD window
    const suffix = Buffer.from('_SUFFIX_NOISE');
    const backing = Buffer.concat([prefix, aadWindow, suffix]);
    const aadSlice = backing.subarray(prefix.length, prefix.length + 16);
    expect(aadSlice.byteLength).to.equal(16);
    expect(aadSlice.toString('utf8')).to.equal('aad-payload-1234');

    // Encrypt using the sliced AAD.
    const enc = createCipheriv('aes-256-gcm', key, Buffer.from(iv));
    enc.setAAD(aadSlice);
    const ciphertext = Buffer.concat([enc.update(plaintext), enc.final()]);
    const tag = enc.getAuthTag();

    // Decrypt with a standalone Buffer carrying the same 16 logical bytes
    // (byteOffset = 0, no surrounding noise). If setAAD honored the slice
    // on encrypt, authentication must succeed here.
    const dec = createDecipheriv('aes-256-gcm', key, Buffer.from(iv));
    dec.setAAD(Buffer.from('aad-payload-1234'));
    dec.setAuthTag(tag);
    const roundTripped = Buffer.concat([dec.update(ciphertext), dec.final()]);
    expect(roundTripped.toString('utf8')).to.equal(
      plaintext.toString('utf8'),
    );
  },
);

test(
  SUITE,
  'GCM setAAD with sliced Buffer rejects wrong AAD on decrypt',
  () => {
    // Mirror of the positive slice test: encrypt with the sliced AAD, then
    // decrypt with different AAD bytes — final() must throw.
    const key = Buffer.from(randomFillSync(new Uint8Array(32)));
    const iv = randomFillSync(new Uint8Array(12));
    const plaintext = Buffer.from('test data for AAD slice');

    const backing = Buffer.concat([
      Buffer.from('PREFIX_NOISE_'),
      Buffer.from('aad-payload-1234'),
      Buffer.from('_SUFFIX_NOISE'),
    ]);
    // 13-byte prefix, 16-byte AAD window: bytes [13, 29).
    const aadSlice = backing.subarray(13, 29);

    const enc = createCipheriv('aes-256-gcm', key, Buffer.from(iv));
    enc.setAAD(aadSlice);
    const ciphertext = Buffer.concat([enc.update(plaintext), enc.final()]);
    const tag = enc.getAuthTag();

    // Decrypt with WRONG AAD bytes — authentication must fail on final().
    const dec = createDecipheriv('aes-256-gcm', key, Buffer.from(iv));
    dec.setAAD(Buffer.from('aad-payload-DIFF'));
    dec.setAuthTag(tag);
    dec.update(ciphertext);
    expect(() => dec.final()).to.throw();
  },
);
165 changes: 164 additions & 1 deletion example/src/tests/cipher/xsalsa20_tests.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
import { Buffer, randomFillSync, xsalsa20 } from 'react-native-quick-crypto';
import {
Buffer,
createCipheriv,
createDecipheriv,
randomFillSync,
xsalsa20,
} from 'react-native-quick-crypto';
import { expect } from 'chai';
import { test } from '../util';

Expand All @@ -24,3 +30,160 @@ test(SUITE, 'xsalsa20', () => {
// test decrypted == data
expect(decrypted).eql(data);
});

// --- Streaming regression tests ---
//
// XSalsa20 is a stream cipher: chunked update() calls must advance the
// keystream, NOT restart it from block 0 every time. The previous
// implementation called crypto_stream_xor() on each update(), which restarted
// the keystream and produced a two-time pad if the caller streamed >1 chunk.
//
// These tests pin that fix in place by checking streaming equivalence with
// the one-shot xsalsa20() function, which is the correct reference output.

// Fixed 32-byte XSalsa20 key (64 hex chars) shared by the streaming tests.
const STREAM_KEY = Buffer.from(
  'a8a7d6a5d4a3d2a1a09f9e9d9c8b8a89a8a7d6a5d4a3d2a1a09f9e9d9c8b8a89',
  'hex',
);
// Fixed 24-byte XSalsa20 nonce (48 hex chars) shared by the streaming tests.
const STREAM_NONCE = Buffer.from(
  '111213141516171821222324252627283132333435363738',
  'hex',
);

// Block-aligned split: two 64-byte chunks (full Salsa20 blocks).
test(SUITE, 'xsalsa20 streaming equivalence — block-aligned split', () => {
  // 128 deterministic bytes: 0, 1, 2, …, 127.
  const payload = Buffer.from(Array.from({ length: 128 }, (_, i) => i & 0xff));

  // Reference output from the one-shot API.
  const reference = xsalsa20(
    new Uint8Array(STREAM_KEY),
    new Uint8Array(STREAM_NONCE),
    new Uint8Array(payload),
  );

  // Stream the same payload as two full 64-byte Salsa20 blocks.
  const cipher = createCipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
  const streamed = Buffer.concat([
    cipher.update(payload.subarray(0, 64)),
    cipher.update(payload.subarray(64)),
    cipher.final(),
  ]);

  expect(new Uint8Array(streamed)).eql(reference);
});

// Mid-block split: 30 + 70 bytes, neither chunk is a multiple of 64.
test(SUITE, 'xsalsa20 streaming equivalence — mid-block split', () => {
  // 100 deterministic bytes, split 30 + 70 — neither chunk is a multiple
  // of the 64-byte Salsa20 block size.
  const payload = Buffer.from(
    Array.from({ length: 100 }, (_, i) => (i * 7 + 3) & 0xff),
  );

  const reference = xsalsa20(
    new Uint8Array(STREAM_KEY),
    new Uint8Array(STREAM_NONCE),
    new Uint8Array(payload),
  );

  const cipher = createCipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
  const streamed = Buffer.concat([
    cipher.update(payload.subarray(0, 30)),
    cipher.update(payload.subarray(30)),
    cipher.final(),
  ]);

  expect(new Uint8Array(streamed)).eql(reference);
});

// Many small chunks crossing several block boundaries.
test(SUITE, 'xsalsa20 streaming equivalence — many small chunks', () => {
const data = Buffer.alloc(257);
for (let i = 0; i < data.length; i++) data[i] = (i * 13 + 5) & 0xff;

const oneShot = xsalsa20(
new Uint8Array(STREAM_KEY),
new Uint8Array(STREAM_NONCE),
new Uint8Array(data),
);

const cipher = createCipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
const chunkSizes = [1, 7, 16, 31, 33, 64, 65, 40];
const parts: Buffer[] = [];
let offset = 0;
for (const size of chunkSizes) {
const end = Math.min(offset + size, data.length);
if (end > offset) parts.push(cipher.update(data.subarray(offset, end)));
offset = end;
}
if (offset < data.length) parts.push(cipher.update(data.subarray(offset)));
parts.push(cipher.final());
const streamed = Buffer.concat(parts);

expect(new Uint8Array(streamed)).eql(oneShot);
});

// Regression: identical plaintext in two consecutive update() calls MUST
// produce different ciphertexts because the keystream advances. The previous
// (buggy) implementation reset the keystream on every update(), so both
// chunks would have been bitwise identical — a two-time-pad break.
test(SUITE, 'xsalsa20 keystream advances across update() calls', () => {
  // Feed the same 64-byte plaintext block twice through one cipher.
  const repeated = Buffer.alloc(64, 0xaa);

  const cipher = createCipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
  const first = cipher.update(repeated);
  const second = cipher.update(repeated);
  cipher.final();

  expect(first.length).to.equal(repeated.length);
  expect(second.length).to.equal(repeated.length);
  // If the bug returns, first === second (catastrophic).
  expect(first.equals(second)).to.equal(false);
});

// Edge case: a chunk that exactly drains the leftover keystream to the block
// boundary, followed by a subsequent update. Catches a regression where
// `leftover_offset` doesn't wrap to the sentinel correctly.
test(
  SUITE,
  'xsalsa20 streaming equivalence — drain-to-boundary then continue',
  () => {
    // 60 + 4 + 100 = 164 bytes. After the 60-byte chunk, leftover_offset=60;
    // the 4-byte chunk drains exactly to 64 (sentinel); the 100-byte chunk
    // must then start cleanly on a fresh block boundary.
    const payload = Buffer.from(
      Array.from({ length: 164 }, (_, i) => (i * 5 + 19) & 0xff),
    );

    const reference = xsalsa20(
      new Uint8Array(STREAM_KEY),
      new Uint8Array(STREAM_NONCE),
      new Uint8Array(payload),
    );

    const cipher = createCipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
    const streamed = Buffer.concat([
      cipher.update(payload.subarray(0, 60)),
      cipher.update(payload.subarray(60, 64)),
      cipher.update(payload.subarray(64)),
      cipher.final(),
    ]);

    expect(new Uint8Array(streamed)).eql(reference);
  },
);

// Streaming round-trip: encrypt and decrypt streamed across multiple
// update() calls. Decryption is just XOR with the same keystream, so this
// also exercises the streaming state on the decrypt side.
test(SUITE, 'xsalsa20 streaming round-trip across two cipher instances', () => {
  // 200 deterministic bytes of plaintext.
  const original = Buffer.from(
    Array.from({ length: 200 }, (_, i) => (i * 11 + 17) & 0xff),
  );

  // Encrypt in three uneven chunks…
  const enc = createCipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
  const sealed = Buffer.concat([
    enc.update(original.subarray(0, 50)),
    enc.update(original.subarray(50, 130)),
    enc.update(original.subarray(130)),
    enc.final(),
  ]);

  // …then decrypt with a different chunking. Decryption is just XOR with
  // the same keystream, so this exercises the decrypt-side stream state too.
  const dec = createDecipheriv('xsalsa20', STREAM_KEY, STREAM_NONCE);
  const opened = Buffer.concat([
    dec.update(sealed.subarray(0, 17)),
    dec.update(sealed.subarray(17, 99)),
    dec.update(sealed.subarray(99)),
    dec.final(),
  ]);

  expect(opened.equals(original)).to.equal(true);
});
42 changes: 42 additions & 0 deletions example/src/tests/dh/dh_tests.ts
Original file line number Diff line number Diff line change
Expand Up @@ -144,3 +144,45 @@ test(SUITE, 'verifyError should return 0 for created DH', () => {
const dh = crypto.createDiffieHellman(prime, 2);
assert.strictEqual(dh.verifyError, 0);
});

// --- Peer public-key validation (security audit Phase 0.3) ---
//
// Without an explicit DH_check_pub_key call, EVP_PKEY_derive_set_peer
// silently accepts a peer pubkey of 0, 1, or p-1 and produces a
// "shared secret" of 0, 1, or +/-1 — the small-subgroup attack.

test(SUITE, 'computeSecret should reject peer public key of 0', () => {
  const party = crypto.getDiffieHellman('modp14');
  party.generateKeys();
  // A zero public key must be rejected before any derivation runs.
  assert.throws(() => party.computeSecret(Buffer.from([0])), /too small/i);
});

test(SUITE, 'computeSecret should reject peer public key of 1', () => {
  const party = crypto.getDiffieHellman('modp14');
  party.generateKeys();
  // A public key of 1 yields a constant shared secret — must be rejected.
  assert.throws(() => party.computeSecret(Buffer.from([1])), /too small/i);
});

test(SUITE, 'computeSecret should reject peer public key of p-1', () => {
  const party = crypto.getDiffieHellman('modp14');
  party.generateKeys();
  // modp14's prime ends in 0xFF, so flipping the low bit of the trailing
  // byte yields exactly p-1.
  const pMinusOne = Buffer.from(MODP14_PRIME, 'hex');
  const last = pMinusOne.length - 1;
  pMinusOne[last] = pMinusOne[last]! ^ 0x01;
  assert.throws(() => party.computeSecret(pMinusOne), /too large/i);
});

test(SUITE, 'computeSecret should reject peer public key equal to p', () => {
  const party = crypto.getDiffieHellman('modp14');
  party.generateKeys();
  // The prime itself (congruent to 0 mod p) is equally invalid.
  assert.throws(
    () => party.computeSecret(Buffer.from(MODP14_PRIME, 'hex')),
    /too large|invalid/i,
  );
});
Loading
Loading