diff --git a/barretenberg/cpp/scripts/audit/audit_scopes/merkle_tree_audit_scope.md b/barretenberg/cpp/scripts/audit/audit_scopes/merkle_tree_audit_scope.md index aa2014a7e321..8903716cb126 100644 --- a/barretenberg/cpp/scripts/audit/audit_scopes/merkle_tree_audit_scope.md +++ b/barretenberg/cpp/scripts/audit/audit_scopes/merkle_tree_audit_scope.md @@ -1,7 +1,7 @@ # External Audit Scope: merkle_tree Repository: https://github.com/AztecProtocol/aztec-packages -Commit hash: Most recent commit on branch 'next' +Commit hash: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 ## Files to Audit Note: Paths relative to `aztec-packages/barretenberg/cpp/src/barretenberg` diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp index 3041efed572f..73a0012cb873 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_chonk.cpp @@ -28,8 +28,14 @@ ChonkStart::Response ChonkStart::execute(BBApiRequest& request) && BB_BENCH_NAME(MSGPACK_SCHEMA_NAME); request.ivc_in_progress = std::make_shared(num_circuits); - request.ivc_stack_depth = 0; + + // Clear any stale loaded-circuit state from a previous session so that + // ChonkAccumulate cannot silently reuse a circuit loaded before this ChonkStart. + request.loaded_circuit_name.clear(); + request.loaded_circuit_constraints.reset(); + request.loaded_circuit_vk.clear(); + return Response{}; } @@ -63,6 +69,15 @@ ChonkAccumulate::Response ChonkAccumulate::execute(BBApiRequest& request) && acir_format::WitnessVector witness_data = acir_format::witness_buf_to_witness_vector(std::move(witness)); acir_format::AcirProgram program{ std::move(request.loaded_circuit_constraints.value()), std::move(witness_data) }; + // Clear loaded state immediately after moving out of it. 
This ensures that if any subsequent + // step throws, the request won't appear to still have a valid circuit loaded (the optional + // would be in a moved-from state, which is technically has_value()==true but poisoned). + auto loaded_vk = std::move(request.loaded_circuit_vk); + auto circuit_name = std::move(request.loaded_circuit_name); + request.loaded_circuit_constraints.reset(); + request.loaded_circuit_vk.clear(); + request.loaded_circuit_name.clear(); + const acir_format::ProgramMetadata metadata{ .ivc = request.ivc_in_progress }; auto circuit = acir_format::create_circuit(program, metadata); @@ -71,9 +86,9 @@ ChonkAccumulate::Response ChonkAccumulate::execute(BBApiRequest& request) && if (request.vk_policy == VkPolicy::RECOMPUTE) { precomputed_vk = nullptr; } else if (request.vk_policy == VkPolicy::DEFAULT || request.vk_policy == VkPolicy::CHECK) { - if (!request.loaded_circuit_vk.empty()) { - validate_vk_size(request.loaded_circuit_vk); - precomputed_vk = from_buffer>(request.loaded_circuit_vk); + if (!loaded_vk.empty()) { + validate_vk_size(loaded_vk); + precomputed_vk = from_buffer>(loaded_vk); if (request.vk_policy == VkPolicy::CHECK) { auto prover_instance = std::make_shared(circuit); @@ -81,7 +96,7 @@ ChonkAccumulate::Response ChonkAccumulate::execute(BBApiRequest& request) && // Dereference to compare VK contents if (*precomputed_vk != *computed_vk) { - throw_or_abort("VK check failed for circuit '" + request.loaded_circuit_name + + throw_or_abort("VK check failed for circuit '" + circuit_name + "': provided VK does not match computed VK"); } } @@ -90,16 +105,13 @@ ChonkAccumulate::Response ChonkAccumulate::execute(BBApiRequest& request) && throw_or_abort("Invalid VK policy. 
Valid options: default, check, recompute"); } - info("ChonkAccumulate - accumulating circuit '", request.loaded_circuit_name, "'"); + info("ChonkAccumulate - accumulating circuit '", circuit_name, "'"); if (detail::use_memory_profile) { - detail::GLOBAL_MEMORY_PROFILE.set_circuit_name(request.loaded_circuit_name); + detail::GLOBAL_MEMORY_PROFILE.set_circuit_name(circuit_name); } request.ivc_in_progress->accumulate(circuit, precomputed_vk); request.ivc_stack_depth++; - request.loaded_circuit_constraints.reset(); - request.loaded_circuit_vk.clear(); - return Response{}; } diff --git a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp index c7ae47eacc1e..9f3e67b6c578 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp @@ -44,6 +44,9 @@ typename BatchedHonkTranslatorVerifier_::OinkResult BatchedHonkTranslator mega_zk_verifier_instance = std::make_shared(mega_zk_vk_and_hash); // Derive num_public_inputs from the Oink-only MegaZK proof. 
+ if (mega_zk_proof.size() < ProofLength::Oink::LENGTH_WITHOUT_PUB_INPUTS) { + throw_or_abort("MegaZK Oink proof too short to derive num_public_inputs"); + } const size_t num_public_inputs = mega_zk_proof.size() - ProofLength::Oink::LENGTH_WITHOUT_PUB_INPUTS; OinkVerifier oink_verifier{ mega_zk_verifier_instance, transcript, num_public_inputs }; diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp index 4758275484eb..6d00c2e0ae2b 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp @@ -732,7 +732,7 @@ void Chonk::update_native_verifier_accumulator(const VerifierInputs& queue_entry info("Chonk accumulate: hash of verifier accumulator computed natively set in previous kernel IO: ", native_verifier_accum_hash); } - has_last_app_been_accumulated = num_circuits_accumulated + 1 == num_circuits - 4; + has_last_app_been_accumulated = num_circuits_accumulated + 1 == num_circuits - 3; is_previous_circuit_a_kernel = queue_entry.is_kernel; info("======= END OF DEBUGGING INFO FOR NATIVE FOLDING STEP ======="); diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp index 60b60fe5da2d..9dcdb0f2467b 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp @@ -449,8 +449,9 @@ TEST_F(ChonkTests, MsgpackProofFromFileOrBuffer) std::generate(random_bytes.begin(), random_bytes.end(), []() { return static_cast(rand() % 256); }); std::copy(random_bytes.begin(), random_bytes.end(), buffer.data()); - // Expect deserialization to fail with error msgpack::v1::type_error with description "std::bad_cast" - EXPECT_THROW(ChonkProof::from_msgpack_buffer(buffer), msgpack::v1::type_error); + // Expect deserialization to fail (either msgpack parse error, type mismatch, trailing data, + // or non-canonical field encoding) + 
EXPECT_ANY_THROW(ChonkProof::from_msgpack_buffer(buffer)); } }; diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.cpp index 9da0e122ccee..ce2fd9984769 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.cpp @@ -44,7 +44,9 @@ ChonkProof_ ChonkProof_::from_field_elements(const std // MegaZK Oink proof size = total - all other fixed-size components. // This correctly accounts for any ACIR public inputs prepended to the oink portion. constexpr size_t fixed_total = merge_size + eccvm_size + ipa_size + joint_size; - BB_ASSERT_GTE(fields.size(), fixed_total, "ChonkProof::from_field_elements: proof too short"); + if (fields.size() < fixed_total) { + throw_or_abort("ChonkProof::from_field_elements: proof too short"); + } const size_t mega_zk_oink_length = fields.size() - fixed_total; auto it = fields.begin(); diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.hpp b/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.hpp index dc8a85a0c7c4..3d43d7ddc443 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.hpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk_proof.hpp @@ -124,10 +124,14 @@ template struct ChonkProof_ { static ChonkProof_ from_msgpack_buffer(const msgpack::sbuffer& buffer) requires(!IsRecursive) { - msgpack::object_handle oh = msgpack::unpack(buffer.data(), buffer.size()); - msgpack::object obj = oh.get(); + std::size_t offset = 0; + msgpack::object_handle oh = msgpack::unpack(buffer.data(), buffer.size(), offset); + if (offset != buffer.size()) { + throw_or_abort("ChonkProof::from_msgpack_buffer: trailing data (" + std::to_string(buffer.size() - offset) + + " extra bytes)"); + } ChonkProof_ proof; - obj.convert(proof); + oh.get().convert(proof); return proof; } diff --git a/barretenberg/cpp/src/barretenberg/chonk/proof_compression.hpp 
b/barretenberg/cpp/src/barretenberg/chonk/proof_compression.hpp index dbbd6e3777a0..0607a80b96cd 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/proof_compression.hpp +++ b/barretenberg/cpp/src/barretenberg/chonk/proof_compression.hpp @@ -59,7 +59,9 @@ class ProofCompressor { static uint256_t read_u256(const std::vector& data, size_t& pos) { - BB_ASSERT(pos + 32 <= data.size()); + if (pos + 32 > data.size()) { + throw_or_abort("proof_compression: read_u256 out of bounds"); + } uint256_t val{ 0, 0, 0, 0 }; for (int i = 31; i >= 0; --i) { val.data[i / 8] |= static_cast(data[pos++]) << (8 * (i % 8)); @@ -423,10 +425,14 @@ class ProofCompressor { */ static size_t compressed_mega_num_public_inputs(size_t compressed_bytes) { - BB_ASSERT(compressed_bytes % 32 == 0); + if (compressed_bytes % 32 != 0) { + throw_or_abort("proof_compression: compressed size not aligned to 32 bytes"); + } size_t total_elements = compressed_bytes / 32; size_t fixed_elements = compressed_element_count(0); - BB_ASSERT(total_elements >= fixed_elements); + if (total_elements < fixed_elements) { + throw_or_abort("proof_compression: compressed proof too short"); + } return total_elements - fixed_elements; } @@ -493,7 +499,9 @@ class ProofCompressor { size_t mega_num_pub_inputs = proof.hiding_oink_proof.size() - ProofLength::Oink::LENGTH_WITHOUT_PUB_INPUTS; walk_chonk_proof(bn254_scalar, bn254_comm, grumpkin_scalar, grumpkin_comm, mega_num_pub_inputs); - BB_ASSERT(offset == flat.size()); + if (offset != flat.size()) { + throw_or_abort("proof_compression: compress did not consume all proof elements"); + } return out; } @@ -505,7 +513,9 @@ class ProofCompressor { // BN254 callbacks auto bn254_scalar = [&]() { uint256_t raw = read_u256(compressed, pos); - BB_ASSERT(raw < Fr::modulus); + if (raw >= Fr::modulus) { + throw_or_abort("proof_compression: BN254 scalar out of range"); + } flat.emplace_back(raw); }; @@ -523,11 +533,15 @@ class ProofCompressor { return; } - BB_ASSERT(x_val < Fq::modulus); + 
if (x_val >= Fq::modulus) { + throw_or_abort("proof_compression: BN254 x-coordinate out of range"); + } Fq x(x_val); Fq y_squared = x * x * x + Bn254G1Params::b; auto [is_square, y] = y_squared.sqrt(); - BB_ASSERT(is_square); + if (!is_square) { + throw_or_abort("proof_compression: BN254 point not on curve"); + } if (y_is_negative(y) != sign) { y = -y; @@ -555,11 +569,15 @@ class ProofCompressor { return; } - BB_ASSERT(x_val < Fr::modulus); + if (x_val >= Fr::modulus) { + throw_or_abort("proof_compression: Grumpkin x-coordinate out of range"); + } Fr x(x_val); Fr y_squared = x * x * x + grumpkin::G1Params::b; auto [is_square, y] = y_squared.sqrt(); - BB_ASSERT(is_square); + if (!is_square) { + throw_or_abort("proof_compression: Grumpkin point not on curve"); + } if (y_is_negative(y) != sign) { y = -y; @@ -571,7 +589,9 @@ class ProofCompressor { auto grumpkin_scalar = [&]() { uint256_t raw = read_u256(compressed, pos); - BB_ASSERT(raw < Fq::modulus); + if (raw >= Fq::modulus) { + throw_or_abort("proof_compression: Grumpkin scalar out of range"); + } Fq fq_val(raw); auto [lo, hi] = split_fq(fq_val); flat.emplace_back(lo); @@ -579,7 +599,9 @@ class ProofCompressor { }; walk_chonk_proof(bn254_scalar, bn254_comm, grumpkin_scalar, grumpkin_comm, mega_num_public_inputs); - BB_ASSERT(pos == compressed.size()); + if (pos != compressed.size()) { + throw_or_abort("proof_compression: decompression did not consume all bytes"); + } return ChonkProof::from_field_elements(flat); } }; diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp index cc844ecd54f2..fe0b8bc09c0b 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp @@ -475,6 +475,7 @@ template class IPA scalar_multiplication::pippenger_unsafe(data.s_vec, { &srs_elements[0], /*size*/ poly_length }); } if (G_zero != data.G_zero_from_prover) { 
+ info("IPA verification failed: G_0 mismatch"); return false; } @@ -669,8 +670,14 @@ template class IPA requires(!Curve::is_stdlib_type) { const size_t num_claims = opening_claims.size(); - BB_ASSERT(num_claims == transcripts.size()); - BB_ASSERT(num_claims > 0); + if (num_claims != transcripts.size()) { + info("IPA batch verification failed: claims/transcripts size mismatch"); + return false; + } + if (num_claims == 0) { + info("IPA batch verification failed: no claims provided"); + return false; + } // Phase 1: Per-proof transcript processing (sequential, each proof is cheap) std::vector C_zeros(num_claims); @@ -770,6 +777,12 @@ template class IPA static bool full_verify_recursive(const VK& vk, const OpeningClaim& opening_claim, auto& transcript) requires Curve::is_stdlib_type { + // Check SRS size up front before any circuit construction + if (vk.get_monomial_points().size() < poly_length) { + throw_or_abort("IPA recursive verification: not enough SRS points (need " + std::to_string(poly_length) + + ", have " + std::to_string(vk.get_monomial_points().size()) + ")"); + } + add_claim_to_hash_buffer(opening_claim, transcript); VerifierAccumulator verifier_accumulator = reduce_verify_internal_recursive(opening_claim, transcript); auto round_challenges_inv = verifier_accumulator.u_challenges_inv; @@ -806,7 +819,6 @@ template class IPA // Compute G_zero // In the native verifier, this uses pippenger. Here we use batch_mul. std::vector srs_elements = vk.get_monomial_points(); - BB_ASSERT_GTE(srs_elements.size(), poly_length, "Not enough SRS points for IPA!"); srs_elements.resize(poly_length); Commitment computed_G_zero = Commitment::batch_mul(srs_elements, s_vec); // check the computed G_zero and the claimed G_zero are the same. 
diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp index ffdeaf763a4c..81e93b837ff6 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp @@ -151,7 +151,11 @@ template class KZG { // Validate the final MSM size if expected size is provided if (expected_final_msm_size != 0) { - BB_ASSERT_EQ(batch_opening_claim.commitments.size(), expected_final_msm_size); + if (batch_opening_claim.commitments.size() != expected_final_msm_size) { + throw_or_abort("KZG verification: unexpected final MSM size " + + std::to_string(batch_opening_claim.commitments.size()) + " (expected " + + std::to_string(expected_final_msm_size) + ")"); + } } // Compute C + [W]₁ ⋅ z diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp index 054e8d935f76..7f01ea9f37ee 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp @@ -371,7 +371,9 @@ template class ShpleminiVerifier_ { // Used in ECCVM and BatchedHonkTranslator. The nu power offset in batch_sumcheck_round_claims // assumes ZK claims (NUM_SMALL_IPA_EVALUATIONS) precede sumcheck round claims in the batching order. if (committed_sumcheck) { - BB_ASSERT(HasZK, "committed sumcheck requires ZK for correct nu power indexing"); + if constexpr (!HasZK) { + throw_or_abort("Shplemini: committed sumcheck requires ZK for correct nu power indexing"); + } batch_sumcheck_round_claims(commitments, scalars, constant_term_accumulator, @@ -522,10 +524,11 @@ template class ShpleminiVerifier_ { // next duplicate; the original at original_start + i is unaffected since // we erase higher-index ranges first. 
if constexpr (!Curve::is_stdlib_type) { - BB_ASSERT(commitments[duplicate_start] == commitments[original_start + i], - "remove_repeated_commitments: commitment mismatch at duplicate index " + - std::to_string(duplicate_start) + " vs original index " + - std::to_string(original_start + i)); + if (commitments[duplicate_start] != commitments[original_start + i]) { + throw_or_abort("remove_repeated_commitments: commitment mismatch at duplicate index " + + std::to_string(duplicate_start) + " vs original index " + + std::to_string(original_start + i)); + } } scalars.erase(scalars.begin() + static_cast(duplicate_start)); commitments.erase(commitments.begin() + static_cast(duplicate_start)); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp index 9a17539fa203..d4cb38105c85 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.hpp @@ -359,7 +359,9 @@ template class ShplonkVerifier_ { , commitments({ quotient }) , scalars{ Fr{ 1 } } { - BB_ASSERT_GT(num_claims, 1U, "Using Shplonk with just one claim. Should use batch reduction."); + if (num_claims <= 1U) { + throw_or_abort("Using Shplonk with just one claim. 
Should use batch reduction."); + } const size_t num_commitments = commitments.size(); commitments.reserve(num_commitments); scalars.reserve(num_commitments); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index a3e101135878..31936b73b6db 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== @@ -560,6 +560,12 @@ std::optional ContentAddressedAppendOnlyTree::find_lea ReadTransaction& tx, bool updateNodesByIndexCache) const { + // Note: reads at leaf_index >= max_size_ are treated as out-of-range and rejected. This helper + // returns std::nullopt only for valid in-range positions whose subtree is unwritten. + if (leaf_index >= max_size_) { + throw std::runtime_error(format("Leaf index ", leaf_index, " out of range for tree of depth ", depth_)); + } + fr hash = requestContext.root; // std::cout << "Finding leaf hash for root " << hash << " at index " << leaf_index << std::endl; index_t mask = static_cast(1) << (depth_ - 1); @@ -575,8 +581,6 @@ std::optional ContentAddressedAppendOnlyTree::find_lea } // std::cout << "Found root at depth " << i << " : " << hash << std::endl; - // TODO(#17755): This does not consider maximum leaf index and will wrap around to give incorrect values. - // e.g. if leaf_index = maximum + 1, returns the leaf at index + 1. 
See #17684 // Do we need to go right or left bool is_right = static_cast(leaf_index & mask); // std::cout << "Mask " << mask << " depth " << depth_ << std::endl; @@ -673,6 +677,9 @@ ContentAddressedAppendOnlyTree::OptionalSiblingPath Conten return path; } +// Leaf read semantics +// - If leaf_index >= max_size_ -> failure (index out of tree range) +// - If leaf_index < max_size_ but leaf not written -> return 0 with success = true template void ContentAddressedAppendOnlyTree::get_leaf(const index_t& leaf_index, bool includeUncommitted, @@ -682,9 +689,9 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ execute_and_report( [=, this](TypedResponse& response) { ReadTransactionPtr tx = store_->create_read_transaction(); - if (max_size_ < leaf_index) { - // TODO(#17755): Throw error to world state -> TS? (native_world_state_instance.ts -> call() - // translates this to null) + if (max_size_ <= leaf_index) { + // Note: leaf_index >= max_size_ is out of tree range and returns failure. Valid in-range but + // unwritten leaves return zero with success = true. response.message = format("Unable to get leaf at index ", leaf_index, ", leaf index out of tree range."); response.success = false; @@ -700,9 +707,6 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ } else { // We have an unwritten leaf, so return a 0 value, which is not a failure: response.inner.leaf = fr::zero(); - // TODO(#17755): The below is now redundant (we will always set response.success = true at this - // point), but keeping it in case we want to handle specific (non out of range index) errors e.g. 
no - // root response.success = true; response.message = format("Failed to find leaf hash at index ", leaf_index); } @@ -712,6 +716,10 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ workers_->enqueue(job); } +// Leaf read semantics +// - If leaf_index >= max_size_ -> failure (index out of tree range) +// - If leaf_index >= blockData.size -> failure (index out of block range) +// - If leaf_index < blockData.size but traversal encounters an unwritten subtree -> return 0 with success = true template void ContentAddressedAppendOnlyTree::get_leaf(const index_t& leaf_index, const block_number_t& blockNumber, @@ -733,9 +741,8 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ blockNumber, ", failed to get block data.")); } - if (max_size_ < leaf_index) { - // TODO(#17755): Throw error to world state -> TS? (native_world_state_instance.ts -> call() - // translates this to null) + if (max_size_ <= leaf_index) { + // Note: leaf_index >= max_size_ is out of tree range and returns failure. response.message = format("Unable to get leaf at index ", leaf_index, " for block ", @@ -744,10 +751,10 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ response.success = false; return; } - if (blockData.size < leaf_index) { + if (blockData.size <= leaf_index) { // Note: this failure does diverge from the below behaviour (unwritten leaf at valid index returns a // 0 value with success = true) but is intentional for snapshot-reliant trees where failure is - // expected when reading unwritten leaves. Should be considered with #17755. + // expected when reading unwritten leaves. 
response.message = format("Unable to get leaf at index ", leaf_index, " for block ", @@ -767,9 +774,6 @@ void ContentAddressedAppendOnlyTree::get_leaf(const index_ } else { // We have an unwritten leaf, so return a 0 value, which is not a failure: response.inner.leaf = fr::zero(); - // TODO(#17755): The below is now redundant (we will always set response.success = true at this - // point), but keeping it in case we want to handle specific (non out of range index) errors e.g. no - // root response.success = true; response.message = format("Failed to find leaf hash at index ", leaf_index, " for block number ", blockNumber); @@ -997,11 +1001,6 @@ void ContentAddressedAppendOnlyTree::rollback(const Rollba workers_->enqueue(job); } -// TODO(PhilWindle): One possible optimisation is for the following 3 functions -// checkpoint, commit_checkpoint and revert_checkpoint to not use the thread pool -// It is not stricly necessary for these operations to use it. The balance is whether -// the cost of using it outweighs the benefit or checkpointing/reverting all tree concurrently - template void ContentAddressedAppendOnlyTree::checkpoint(const CheckpointCallback& on_completion) { diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index 1518b2d2bfa5..ef5f713561b6 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -1025,11 +1025,16 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, retrieves_historic_leaves) uint64_t block_tree_size = 0; for (uint32_t i = 0; i < num_blocks; i++) { - tree.get_meta_data(i + 1, false, [&](auto response) -> void { block_tree_size = response.inner.meta.size; }); + 
Signal meta_signal; + tree.get_meta_data(i + 1, false, [&](auto response) -> void { + block_tree_size = response.inner.meta.size; + meta_signal.signal_level(); + }); + meta_signal.wait_for_level(); for (uint32_t j = 0; j < num_blocks; j++) { index_t indexToQuery = j * batch_size; fr expectedLeaf = j <= i ? get_value(indexToQuery) : fr::zero(); - check_historic_leaf(tree, i + 1, expectedLeaf, indexToQuery, indexToQuery <= block_tree_size, true, false); + check_historic_leaf(tree, i + 1, expectedLeaf, indexToQuery, indexToQuery < block_tree_size, true, false); } } } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp index 371771eca23a..4d50e837c56e 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash_path.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash_path.hpp index 5282dfa7a8a4..18ea67a29f55 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash_path.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/hash_path.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git 
a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp index a5b76c12a3b5..8dd6f0373232 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp index 8155930f4b4b..6ffeb94066a1 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index 7328b0e377e6..6127f8040630 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -1,5 +1,5 @@ // === AUDIT STATUS 
=== -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index e06f835ddbd4..30f68061b706 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/memory_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/memory_tree.hpp index fe8dc8947e37..bf2a5759cef0 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/memory_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/memory_tree.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== @@ -115,6 +115,9 @@ template fr_sibling_path MemoryTree::get template fr MemoryTree::update_element(size_t index, fr const& value) { + if (index >= total_size_) { + throw_or_abort("update_element: index out of range"); + } size_t offset 
= 0; size_t layer_size = total_size_; fr current = value; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp index 3beafb369ce1..a04918875c16 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/array_store.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== @@ -97,4 +97,4 @@ template class ArrayStore { std::vector>>> map; TreeMeta meta; }; -} // namespace bb::crypto::merkle_tree \ No newline at end of file +} // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index 49a4dc1110cd..c9f87a05858e 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp 
b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp index 908d3ccde1cb..40ef06125ed4 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== @@ -89,7 +89,6 @@ template class ContentAddressedCache { TreeMeta meta_; // Captures the cache's node hashes at the time of checkpoint. If the node does not exist in the cache, the // optional will == nullopt - // TODO (PhilWindle): Consider where a more optimal approach is a single unordered map, instead of 1 per level std::vector>> nodes_by_index_; // Captures the cache's leaf pre-images at the time of checkpoint. 
Again, if the leaf does not exist in the // cache, the optional will == nullopt diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp index 80920bc445ec..3127c46808bc 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_leaf.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_leaf.hpp index 825ecd43cac5..de4180b00e1b 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_leaf.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_leaf.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_memory_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_memory_tree.hpp index ceba88035a7e..b13f0f7500d8 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_memory_tree.hpp +++ 
b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/nullifier_tree/nullifier_memory_tree.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp index e586b3a951cd..2e36dd514428 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp index 4e8f9292122b..ac055746fc9d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/signal.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp index 0cbdb90f20f1..2af3fb57c206 100644 --- 
a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Complete, auditors: [Nishat], commit: 22d6fc368da0fbe5412f4f7b2890a052aa48d803 } // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp index 837b652939b5..8cc47323fe59 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_impl.hpp @@ -830,8 +830,13 @@ template void field::msgpack_unpack(auto o) data[i] = reversed[i]; } + // Reject non-canonical field encodings (values >= modulus) to ensure strict parsing. + if (uint256_t{ data[0], data[1], data[2], data[3] } >= modulus) { + throw_or_abort("msgpack field deserialization: non-canonical encoding (value >= modulus)"); + } + // Finally, the field is converted back to Montgomery form, just like in the old format. - *this = to_montgomery_form_reduced(); + *this = to_montgomery_form(); } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/prime_field.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/fields/prime_field.test.cpp index 483547c7040d..e69eb7f2130d 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/prime_field.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/prime_field.test.cpp @@ -615,6 +615,55 @@ TYPED_TEST(PrimeFieldTest, Msgpack) EXPECT_EQ(actual, expected); } +// This test requires exception support; in WASM builds (BB_NO_EXCEPTIONS), +// throw_or_abort calls abort() instead of throwing, so EXPECT_THROW cannot work. 
+#ifndef BB_NO_EXCEPTIONS +TYPED_TEST(PrimeFieldTest, MsgpackRejectsNonCanonical) +{ + using F = TypeParam; + + // Use a small value so that (value + modulus) is guaranteed to fit in 256 bits, + // even for fields with moduli close to 2^256 (e.g., secp256k1, secp256r1). + F a = F(1); + msgpack::sbuffer buffer; + msgpack::pack(buffer, a); + + // Find the 32-byte binary payload inside the msgpack buffer. + // pack_bin(32) produces: 0xc4 0x20 <32 bytes> + uint8_t* buf = reinterpret_cast(buffer.data()); + size_t buf_size = buffer.size(); + uint8_t* field_bytes = nullptr; + for (size_t i = 0; i + 33 <= buf_size; i++) { + if (buf[i] == 0xc4 && buf[i + 1] == 0x20) { + field_bytes = &buf[i + 2]; + break; + } + } + ASSERT_NE(field_bytes, nullptr) << "Could not find 32-byte bin payload in msgpack buffer"; + + // Replace the payload with (1 + modulus), a non-canonical encoding of 1. + uint256_t non_canonical = uint256_t(1) + F::modulus; + for (int i = 31; i >= 0; i--) { + field_bytes[i] = static_cast(non_canonical.data[0] & 0xFF); + non_canonical >>= 8; + } + + // Deserializing the mutated buffer must throw + EXPECT_THROW( + { + msgpack::object_handle oh = msgpack::unpack(buffer.data(), buffer.size()); + F result; + oh.get().convert(result); + }, + std::runtime_error); +} +#else +TYPED_TEST(PrimeFieldTest, MsgpackRejectsNonCanonical) +{ + GTEST_SKIP() << "Skipping: throw_or_abort calls abort() when BB_NO_EXCEPTIONS is defined"; +} +#endif + // ================================ // Montgomery Form Conversion Tests (254-bit fields) // ================================ diff --git a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp index 43cfc758497b..61a12eef5b18 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp @@ -171,7 +171,9 @@ template class BaseTranscript { template T deserialize_from_buffer(const Proof& proof_data, 
size_t& offset) const { constexpr size_t element_fr_size = Codec::template calc_num_fields(); - BB_ASSERT_LTE(offset + element_fr_size, proof_data.size()); + if (offset + element_fr_size > proof_data.size()) { + throw_or_abort("Transcript: deserialize_from_buffer out of bounds"); + } auto element_frs = std::span{ proof_data }.subspan(offset, element_fr_size); offset += element_fr_size; @@ -295,6 +297,7 @@ template class BaseTranscript { template std::vector get_dyadic_powers_of_challenge(const std::string& label, size_t num_challenges) { + BB_ASSERT(num_challenges > 0, "get_dyadic_powers_of_challenge called with num_challenges=0"); ChallengeType challenge = get_challenge(label); std::vector pows(num_challenges); pows[0] = challenge; @@ -368,7 +371,9 @@ template class BaseTranscript { template T receive_from_prover(const std::string& label) { const size_t element_size = Codec::template calc_num_fields(); - BB_ASSERT_LTE(num_frs_read + element_size, proof_data.size()); + if (num_frs_read + element_size > proof_data.size()) { + throw_or_abort("Transcript: receive_from_prover out of bounds (proof too short)"); + } auto element_frs = std::span{ proof_data }.subspan(num_frs_read, element_size); // Track Fiat-Shamir round transitions: if we were generating challenges, diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp index 15aaa68cb892..f0fedbe501d4 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp @@ -29,6 +29,10 @@ * AccumulatorTransferFailsAtFirstTransferRow — row 9 (left boundary of transfer chain) * AccumulatorTransferFailsAtLastTransferRow — row 8185 (right boundary before zero-init) * + * Non-native field relation (TranslatorNonNativeFieldRelation) — 1 test: + * NonNativeFieldRejectsAccumulatorAlias — #2492 regression: acc += p, quot -= 1 
caught + * by higher carry check (subrelation 1) + * * Pipeline correctness — 1 test: * InRangeValueInMaskingFlowsToOrderedTail — trace FF(42) from wire masking through * concatenation into ordered poly tail @@ -157,6 +161,26 @@ ValidTranslatorState build_valid_accumulator_transfer_state() pp.accumulators_binary_limbs_2[Flavor::RESULT_ROW], pp.accumulators_binary_limbs_3[Flavor::RESULT_ROW] }; + // Populate evaluation_input_x and batching_challenge_v limbs + native values + // (needed by TranslatorNonNativeFieldRelation; harmless for other relations) + static constexpr size_t NUM_LIMB_BITS = Flavor::CircuitBuilder::NUM_LIMB_BITS; + auto uint_input_x = uint256_t(evaluation_input_x); + params.evaluation_input_x = { uint_input_x.slice(0, NUM_LIMB_BITS), + uint_input_x.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2), + uint_input_x.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3), + uint_input_x.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4), + uint_input_x }; + auto v_power = BF::one(); + for (size_t i = 0; i < 4; i++) { + v_power *= batching_challenge_v; + auto uint_v_power = uint256_t(v_power); + params.batching_challenge_v.at(i) = { uint_v_power.slice(0, NUM_LIMB_BITS), + uint_v_power.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2), + uint_v_power.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3), + uint_v_power.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4), + uint_v_power }; + } + return { std::move(key), params }; } @@ -689,3 +713,91 @@ TEST_F(TranslatorRelationFailureTests, PermutationFailsOnConcatenatedBlockBounda RelationChecker::check>(pp, params, "TranslatorPermutationRelation"); EXPECT_FALSE(failures.empty()) << "Permutation should fail after block boundary corruption"; } + +// ======================== Non-Native Field Relation: Accumulator Alias ======================== + +/** + * @brief Regression for AztecProtocol/barretenberg#2492: verify that the accumulator alias + * attack (current_acc += q, quotient -= 1) is caught by the higher carry check. 
+ * + * @details The Fq accumulation formula holds in integers: + * prev * x + stuff - quotient * p - current_acc = 0 + * Replacing current_acc with current_acc + p and quotient with quotient - 1 preserves this + * equation, plus its mod-2^272 and mod-r projections. However, the limb-level carry witnesses + * (relation_wide_limbs) become stale: the new intermediate sums at limbs 2-3 no longer match + * the old high carry value, and subrelation 1 (higher mod 2^136 check) detects the mismatch. + */ +TEST_F(TranslatorRelationFailureTests, NonNativeFieldRejectsAccumulatorAlias) +{ + using BF = typename Flavor::BF; + static constexpr size_t NUM_LIMB_BITS = Flavor::CircuitBuilder::NUM_LIMB_BITS; + + auto [key, params] = build_valid_accumulator_transfer_state(); + auto& pp = key.proving_key->polynomials; + + // Baseline: all three NonNativeField subrelations pass + auto baseline = RelationChecker::check>( + pp, params, "TranslatorNonNativeFieldRelation"); + EXPECT_TRUE(baseline.empty()) << "Baseline non-native field should pass"; + + constexpr size_t ROW = Flavor::RESULT_ROW; // 8 + + // --- Read old accumulator and quotient at RESULT_ROW --- + // Accumulator limbs are at the even row (current acc = row ROW, previous acc = row ROW+1 via shift) + auto read_limbs = [](const auto& l0, const auto& l1, const auto& l2, const auto& l3, size_t row) { + return uint256_t(l0[row]) | (uint256_t(l1[row]) << NUM_LIMB_BITS) | + (uint256_t(l2[row]) << (2 * NUM_LIMB_BITS)) | (uint256_t(l3[row]) << (3 * NUM_LIMB_BITS)); + }; + + uint256_t old_acc = read_limbs(pp.accumulators_binary_limbs_0, + pp.accumulators_binary_limbs_1, + pp.accumulators_binary_limbs_2, + pp.accumulators_binary_limbs_3, + ROW); + + // Quotient: limbs 0,1 from quotient_low at rows ROW, ROW+1; limbs 2,3 from quotient_high at rows ROW, ROW+1 + uint256_t old_quot = uint256_t(pp.quotient_low_binary_limbs[ROW]) | + (uint256_t(pp.quotient_low_binary_limbs[ROW + 1]) << NUM_LIMB_BITS) | + 
(uint256_t(pp.quotient_high_binary_limbs[ROW]) << (2 * NUM_LIMB_BITS)) | + (uint256_t(pp.quotient_high_binary_limbs[ROW + 1]) << (3 * NUM_LIMB_BITS)); + + // --- Apply the alias mutation: acc += p, quotient -= 1 --- + const uint256_t p_mod = BF::modulus; + uint256_t new_acc = old_acc + p_mod; + uint256_t new_quot = old_quot - uint256_t(1); + + auto split = [](const uint256_t& val) -> std::array { + return { FF(val.slice(0, NUM_LIMB_BITS)), + FF(val.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), + FF(val.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), + FF(val.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)) }; + }; + + auto new_acc_limbs = split(new_acc); + auto new_quot_limbs = split(new_quot); + + // Write mutated accumulator limbs + pp.accumulators_binary_limbs_0.at(ROW) = new_acc_limbs[0]; + pp.accumulators_binary_limbs_1.at(ROW) = new_acc_limbs[1]; + pp.accumulators_binary_limbs_2.at(ROW) = new_acc_limbs[2]; + pp.accumulators_binary_limbs_3.at(ROW) = new_acc_limbs[3]; + + // Write mutated quotient limbs + pp.quotient_low_binary_limbs.at(ROW) = new_quot_limbs[0]; + pp.quotient_low_binary_limbs.at(ROW + 1) = new_quot_limbs[1]; + pp.quotient_high_binary_limbs.at(ROW) = new_quot_limbs[2]; + pp.quotient_high_binary_limbs.at(ROW + 1) = new_quot_limbs[3]; + + // Deliberately do NOT update relation_wide_limbs (carry witnesses) — the stale carries + // should cause the higher mod-2^136 check to fail. + + auto failures = RelationChecker::check>( + pp, params, "TranslatorNonNativeFieldRelation"); + + // The higher carry check (subrelation 1) must fail at RESULT_ROW. + EXPECT_TRUE(failures.contains(1)) << "Subrelation 1 (higher carry check) should reject the alias"; + EXPECT_EQ(failures.at(1), static_cast(ROW)) << "Failure should be at RESULT_ROW"; + + // The native-field check (subrelation 2) should still pass — the mod-r projection is preserved. 
+ EXPECT_FALSE(failures.contains(2)) << "Subrelation 2 (native check) should pass under the alias mutation"; +} diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp index 5029e47908a2..bab635aa1c00 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp @@ -239,10 +239,12 @@ typename TranslatorVerifier_::ReductionResult TranslatorVerifier_ size_t UltraVerifier_::compute_ return static_cast(Flavor::VIRTUAL_LOG_N); } else { // Non-padded: use actual circuit size from VK (native only) - return static_cast(verifier_instance->get_vk()->log_circuit_size); + const size_t log_circuit_size = static_cast(verifier_instance->get_vk()->log_circuit_size); + BB_ASSERT_GTE( + log_circuit_size, static_cast(1), "VK log_circuit_size is 0, which is invalid for any circuit"); + return log_circuit_size; } } diff --git a/barretenberg/rust/barretenberg-rs/src/types.rs b/barretenberg/rust/barretenberg-rs/src/types.rs index bb6fd3a0c3af..6ad04b26ecd7 100644 --- a/barretenberg/rust/barretenberg-rs/src/types.rs +++ b/barretenberg/rust/barretenberg-rs/src/types.rs @@ -7,10 +7,10 @@ use serde::{Deserialize, Serialize}; pub struct Fr(pub [u8; 32]); impl Fr { - /// Create a new field element from a u64 value (little-endian encoding) + /// Create a new field element from a u64 value (big-endian encoding, matching C++ msgpack format) pub fn from_u64(value: u64) -> Self { let mut bytes = [0u8; 32]; - bytes[0..8].copy_from_slice(&value.to_le_bytes()); + bytes[24..32].copy_from_slice(&value.to_be_bytes()); Fr(bytes) } diff --git a/yarn-project/ivc-integration/src/batch_verifier_test_helpers.ts b/yarn-project/ivc-integration/src/batch_verifier_test_helpers.ts index 18f7417230bc..787d7895d204 100644 --- a/yarn-project/ivc-integration/src/batch_verifier_test_helpers.ts +++ 
b/yarn-project/ivc-integration/src/batch_verifier_test_helpers.ts @@ -5,8 +5,8 @@ import type { IVCProofVerificationResult } from '@aztec/stdlib/interfaces/server export function corruptProofFields(fields: Uint8Array[]): Uint8Array[] { const corrupted = fields.map(f => Uint8Array.from(f)); corrupted[2] = Uint8Array.from(corrupted[2]); - corrupted[2][0] ^= 0xff; - corrupted[2][1] ^= 0xff; + corrupted[2][30] ^= 0xff; + corrupted[2][31] ^= 0xff; return corrupted; }