From 663a77488a3bb2a9c184514fe1046e964eeaa776 Mon Sep 17 00:00:00 2001 From: Raju Krishnamoorthy Date: Mon, 8 Sep 2025 04:41:47 -0400 Subject: [PATCH 1/6] chore: audit of lookup relations in the ECCVM (#16712) No content, just documentation for the lookup relation in the ECCVM Co-authored-by: notnotraju --- .../relations/ecc_vm/ecc_lookup_relation.hpp | 90 +++++++++---------- 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp index 469a7029018a..06cf7bdba3b8 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp @@ -103,31 +103,50 @@ template class ECCVMLookupRelationImpl { } return Accumulator(1); } - + /** + * @brief Returns the fingerprint of `(precompute_pc, compressed_slice, (2 * compressed_slice - 15)[P])`, where [P] + * is the point corresponding to `precompute_pc` and `compressed_slice`∈{0, ..., 15}. + */ template static Accumulator compute_write_term(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; static_assert(write_index < WRITE_TERMS); - - // what are we looking up? - // we want to map: - // 1: point pc - // 2: point slice - // 3: point x - // 4: point y - // for each point in our point table, we want to map `slice` to (x, -y) AND `slice + 8` to (x, y) + // write_index == 0 means our wNAF digit is positive (i.e., ∈{1, 3..., 15}). 
+ // write_index == 1 means our wNAF digit is negative (i.e., ∈{-15, -13..., -1}) // round starts at 0 and increments to 7 // point starts at 15[P] and decrements to [P] // a slice value of 0 maps to -15[P] - // 1 -> -13[P] - // 7 -> -[P] - // 8 -> P - // 15 -> 15[P] - // negative points map pc, round, x, -y - // positive points map pc, 15 - (round * 2), x, y + + // we have computed `(15 - 2 * round)[P] =: (precompute_tx, precompute_ty)`. + // `round`∈{0, 1..., 7} + // if write_index == 0, we want to write (pc, 15 - 2 * round, precompute_tx, precompute_ty) + // if write_index == 1, we want to write (pc, round, precompute_tx, -precompute_ty) + // to sum up, both: + // (pc, round, precompute_tx, -precompute_ty) _and_ + // (pc, 15 - 2 * round, precompute_tx, precompute_ty) + // will be written to the lookup table. + // + // therefore, if `pc` corresponds to the elliptic curve point [P], we will write: + // | pc | 0 | -15[P].x | -15[P].y | + // | pc | 1 | -13[P].x | -13[P].y | + // | pc | 2 | -11[P].x | -11[P].y | + // | pc | 3 | -9[P].x | -9[P].y | + // | pc | 4 | -7[P].x | -7[P].y | + // | pc | 5 | -5[P].x | -5[P].y | + // | pc | 6 | -3[P].x | -3[P].y | + // | pc | 7 | -1[P].x | -1[P].y | + // | pc | 8 | [P].x | [P].y | + // | pc | 9 | 3[P].x | 3[P].y | + // | pc | 10 | 5[P].x | 5[P].y | + // | pc | 11 | 7[P].x | 7[P].y | + // | pc | 12 | 9[P].x | 9[P].y | + // | pc | 13 | 11[P].x | 11[P].y | + // | pc | 14 | 13[P].x | 13[P].y | + // | pc | 15 | 15[P].x | 15[P].y | + const auto& precompute_pc = View(in.precompute_pc); const auto& tx = View(in.precompute_tx); const auto& ty = View(in.precompute_ty); @@ -137,31 +156,6 @@ template class ECCVMLookupRelationImpl { const auto& beta_sqr = params.beta_sqr; const auto& beta_cube = params.beta_cube; - // slice value : (wnaf value) : lookup term - // 0 : -15 : 0 - // 1 : -13 : 1 - // 7 : -1 : 7 - // 8 : 1 : 0 - // 9 : 3 : 1 - // 15 : 15 : 7 - - // slice value : negative term : positive term - // 0 : 0 : 7 - // 1 : 1 : 6 - // 2 
: 2 : 5 - // 3 : 3 : 4 - // 7 : 7 : 0 - - // | 0 | 15[P].x | 15[P].y | 0, -15[P].x, -15[P].y | 15, 15[P].x, 15[P].y | - // | 1 | 13[P].x | 13[P].y | 1, -13[P].x, -13[P].y | 14, 13[P].x, 13[P].y - // | 2 | 11[P].x | 11[P].y - // | 3 | 9[P].x | 9[P].y - // | 4 | 7[P].x | 7[P].y - // | 5 | 5[P].x | 5[P].y - // | 6 | 3[P].x | 3[P].y - // | 7 | 1[P].x | 1[P].y | 7, -[P].x, -[P].y | 8 , [P].x, [P].y | - - // todo optimize this? if constexpr (write_index == 0) { const auto positive_slice_value = -(precompute_round) + 15; const auto positive_term = @@ -180,8 +174,8 @@ template class ECCVMLookupRelationImpl { { using View = typename Accumulator::View; - // read term: - // pc, slice, x, y + // read term: (pc, compressed_slice, (2 * compressed_slice - 15)[P]) + // (the latter term is of course represented via an x and y coordinate.) static_assert(read_index < READ_TERMS); const auto& gamma = params.gamma; const auto& beta = params.beta; @@ -202,12 +196,12 @@ template class ECCVMLookupRelationImpl { const auto& msm_y3 = View(in.msm_y3); const auto& msm_y4 = View(in.msm_y4); - // how do we get pc value + // Recall that `pc` stands for point-counter. We recall how to compute the current pc. + // // row pc = value of pc after msm - // row count = num processed points in round - // size_of_msm = msm_size - // value of pc at start of msm = msm_pc - msm_size_of_msm - // value of current pc = msm_pc - msm_size_of_msm + msm_count + (0,1,2,3) + // msm_count = number of (128-bit) multiplications processed so far in current MSM round (NOT INCLUDING current + // row) current_pc = msm_pc - msm_count next_pc = current_pc - {0, 1, 2, 3}, depending on how many adds are + // performed in the current row. 
const auto current_pc = msm_pc - msm_count; if constexpr (read_index == 0) { @@ -254,4 +248,4 @@ template class ECCVMLookupRelationImpl { template using ECCVMLookupRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb From 0a951fc0169a131f5fb688019540f3f869b11782 Mon Sep 17 00:00:00 2001 From: maramihali Date: Mon, 8 Sep 2025 10:19:38 +0100 Subject: [PATCH 2/6] feat: translator handles no-op range (#16628) Context: To achieve a fully CIVC zero-knowledge proof, the size of the EccOpQueue must be capped to a fixed size (correlated to the maximum size of Translator). We achieve this by appending the last ecc op subtable at a fixed offset. In turn, this implies that the translator circuit builder has to handle a sequence of no-ops when building the accumulation trace to show that the evaluation of the batched ecc op polynomial at the random point, performed non-natively within the Translator circuit, is consistent with what is produced by ECCVM. The check is done between the result from ECCVM and what is produced in the Translator circuit at `RESULT_ROW` (pointed to by a Lagrange polynomial during proving and residing at the beginning of the Translator trace). In Translator, when encountering a genuine ecc op, the accumulation result is updated at an even row and then copied across the odd row. In the case of no-ops, we do not want to change the accumulation result, but we still need to continue passing it across even and odd rows and assert this was done properly via relations to ensure RESULT_ROW contains the correct result. 
--- ...test_civc_standalone_vks_havent_changed.sh | 3 +- .../graph_description_goblin.test.cpp | 30 +- .../translator_circuit_checker.cpp | 743 +++++----- .../barretenberg/client_ivc/client_ivc.cpp | 9 + .../cpp/src/barretenberg/constants.hpp | 7 + .../src/barretenberg/goblin/mock_circuits.hpp | 26 +- .../barretenberg/op_queue/ecc_op_queue.hpp | 14 +- .../op_queue/ecc_op_queue.test.cpp | 14 +- .../barretenberg/op_queue/ecc_ops_table.hpp | 50 +- .../op_queue/ecc_ops_table.test.cpp | 42 +- .../translator_decomposition_relation.hpp | 18 +- ...translator_decomposition_relation_impl.hpp | 1202 +++++++++-------- .../translator_extra_relations.hpp | 15 +- .../translator_extra_relations_impl.hpp | 52 +- .../translator_non_native_field_relation.hpp | 6 +- ...nslator_non_native_field_relation_impl.hpp | 10 +- .../translator_relation_consistency.test.cpp | 121 +- .../stdlib/encryption/ecdsa/ecdsa_impl.hpp | 4 +- .../goblin_recursive_verifier.test.cpp | 54 +- .../translator_recursive_verifier.test.cpp | 44 +- .../mega_circuit_builder.cpp | 9 +- .../relation_correctness.test.cpp | 6 +- .../translator_vm/translator.test.cpp | 91 +- .../translator_circuit_builder.cpp | 42 +- .../translator_circuit_builder.hpp | 6 +- .../translator_circuit_builder.test.cpp | 28 +- .../translator_vm/translator_flavor.hpp | 2 +- .../translator_vm/translator_proving_key.hpp | 2 +- .../barretenberg/ultra_honk/merge_prover.cpp | 9 +- 29 files changed, 1496 insertions(+), 1163 deletions(-) diff --git a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh index 98ada8cbd720..00c3da733e5c 100755 --- a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh @@ -13,8 +13,7 @@ cd .. 
# - Generate a hash for versioning: sha256sum bb-civc-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-civc-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-civc-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 - -pinned_short_hash="ec9b5be3" +pinned_short_hash="d6f612e1" pinned_civc_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-civc-inputs-${pinned_short_hash}.tar.gz" function compress_and_upload { diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp index 984a457f6ff5..c2d8f044e70f 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp @@ -40,37 +40,25 @@ class BoomerangGoblinRecursiveVerifierTests : public testing::Test { * * @return ProverOutput */ - static ProverOutput create_goblin_prover_output(const size_t NUM_CIRCUITS = 3) + static ProverOutput create_goblin_prover_output() { Goblin goblin; - // Construct and accumulate multiple circuits - for (size_t idx = 0; idx < NUM_CIRCUITS - 1; ++idx) { - MegaCircuitBuilder builder{ goblin.op_queue }; - GoblinMockCircuits::construct_simple_circuit(builder); - goblin.prove_merge(); - } - - auto goblin_transcript = std::make_shared(); + GoblinMockCircuits::construct_and_merge_mock_circuits(goblin, 5); - Goblin goblin_final; - goblin_final.op_queue = goblin.op_queue; - MegaCircuitBuilder builder{ goblin_final.op_queue }; - GoblinMockCircuits::construct_simple_circuit(builder, /*last_circuit=*/true); - goblin_final.op_queue->merge(); + // Merge the ecc ops from the newly constructed circuit + auto goblin_proof = goblin.prove(MergeSettings::APPEND); // Subtable values 
and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; - auto t_current = goblin_final.op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = goblin_final.op_queue->construct_previous_ultra_ops_table_columns(); - CommitmentKey pcs_commitment_key(goblin_final.op_queue->get_ultra_ops_table_num_rows()); + auto t_current = goblin.op_queue->construct_current_ultra_ops_subtable_columns(); + auto T_prev = goblin.op_queue->construct_previous_ultra_ops_table_columns(); + CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = pcs_commitment_key.commit(T_prev[idx]); } // Output is a goblin proof plus ECCVM/Translator verification keys - return { goblin_final.prove(), - { std::make_shared(), std::make_shared() }, - merge_commitments }; + return { goblin_proof, { std::make_shared(), std::make_shared() }, merge_commitments }; } }; @@ -96,7 +84,7 @@ TEST_F(BoomerangGoblinRecursiveVerifierTests, graph_description_basic) } GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments); + GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); output.points_accumulator.set_public(); // Construct and verify a proof for the Goblin Recursive Verifier circuit { diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp index 9c16cb0c33d4..7a3f7fb0d2e5 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/translator_circuit_checker.cpp @@ -85,389 +85,408 @@ bool 
TranslatorCircuitChecker::check(const Builder& circuit) return mini_accumulator; }; - // TODO(https: // github.com/AztecProtocol/barretenberg/issues/1367): Report all failures more explicitly and - // consider making use of relations. - - for (size_t i = 2; i < circuit.num_gates - 1; i += 2) { - { - // Get the values of P.x - Fr op_code = circuit.get_variable(op_wire[i]); - Fr p_x_lo = circuit.get_variable(x_lo_y_hi_wire[i]); - Fr p_x_hi = circuit.get_variable(x_hi_z_1_wire[i]); - Fr p_x_0 = circuit.get_variable(p_x_0_p_x_1_wire[i]); - Fr p_x_1 = circuit.get_variable(p_x_0_p_x_1_wire[i + 1]); - Fr p_x_2 = circuit.get_variable(p_x_2_p_x_3_wire[i]); - Fr p_x_3 = circuit.get_variable(p_x_2_p_x_3_wire[i + 1]); - const std::vector p_x_binary_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; - - // P.y - Fr p_y_lo = circuit.get_variable(y_lo_z_2_wire[i]); - Fr p_y_hi = circuit.get_variable(x_lo_y_hi_wire[i + 1]); - Fr p_y_0 = circuit.get_variable(p_y_0_p_y_1_wire[i]); - Fr p_y_1 = circuit.get_variable(p_y_0_p_y_1_wire[i + 1]); - Fr p_y_2 = circuit.get_variable(p_y_2_p_y_3_wire[i]); - Fr p_y_3 = circuit.get_variable(p_y_2_p_y_3_wire[i + 1]); - const std::vector p_y_binary_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; - // z1, z2 - Fr z_1 = circuit.get_variable(x_hi_z_1_wire[i + 1]); - Fr z_2 = circuit.get_variable(y_lo_z_2_wire[i + 1]); - - Fr z_1_lo = circuit.get_variable(z_lo_wire[i]); - Fr z_2_lo = circuit.get_variable(z_lo_wire[i + 1]); - Fr z_1_hi = circuit.get_variable(z_hi_wire[i]); - Fr z_2_hi = circuit.get_variable(z_hi_wire[i + 1]); - - const std::vector z_1_binary_limbs = { z_1_lo, z_1_hi }; - const std::vector z_2_binary_limbs = { z_2_lo, z_2_hi }; - // Relation limbs - Fr low_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i]); - Fr high_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i + 1]); - - // Current accumulator (updated value) - const std::vector current_accumulator_binary_limbs = { - 
circuit.get_variable(accumulators_binary_limbs_0_wire[i]), - circuit.get_variable(accumulators_binary_limbs_1_wire[i]), - circuit.get_variable(accumulators_binary_limbs_2_wire[i]), - circuit.get_variable(accumulators_binary_limbs_3_wire[i]), - }; - - // Previous accumulator - const std::vector previous_accumulator_binary_limbs = { - circuit.get_variable(accumulators_binary_limbs_0_wire[i + 1]), - circuit.get_variable(accumulators_binary_limbs_1_wire[i + 1]), - circuit.get_variable(accumulators_binary_limbs_2_wire[i + 1]), - circuit.get_variable(accumulators_binary_limbs_3_wire[i + 1]), - }; + auto check_binary_limbs_equality = [&](const std::vector& first, const std::vector& second, size_t gate) { + for (const auto [first_limb, second_limb] : zip_view(first, second)) { + if (first_limb != second_limb) { + return report_fail("Binary limbs are not equal = ", gate); + } + } + return true; + }; - // Quotient - const std::vector quotient_binary_limbs = { - circuit.get_variable(quotient_low_binary_limbs[i]), - circuit.get_variable(quotient_low_binary_limbs[i + 1]), - circuit.get_variable(quotient_high_binary_limbs[i]), - circuit.get_variable(quotient_high_binary_limbs[i + 1]), + auto check_accumulator_transfer = [&](const std::vector& previous_accumulator, size_t gate) { + if (gate % 2 != 1) { + return report_fail("accumulator transfer should only be checked at odd gates = ", gate); + } + if (gate + 1 < circuit.num_gates - 1) { + // Check that the next gate's current accumulator equals this gate's previous accumulator + const std::vector next_gate_current_accumulator = { + circuit.get_variable(accumulators_binary_limbs_0_wire[gate + 1]), + circuit.get_variable(accumulators_binary_limbs_1_wire[gate + 1]), + circuit.get_variable(accumulators_binary_limbs_2_wire[gate + 1]), + circuit.get_variable(accumulators_binary_limbs_3_wire[gate + 1]), }; + if (!check_binary_limbs_equality(next_gate_current_accumulator, previous_accumulator, gate + 1)) { + return false; + } + } else { 
+ // Check accumulator starts at zero + for (const auto& limb : previous_accumulator) { + if (limb != Fr(0)) { + return report_fail("accumulator doesn't start with 0 = ", gate + 1); + } + } + } + return true; + }; - const size_t NUM_MICRO_LIMBS = Builder::NUM_MICRO_LIMBS; + auto check_no_op = + [&](const std::vector& current_accumulator, const std::vector& previous_accumulator, size_t gate) { + if (!check_binary_limbs_equality(current_accumulator, previous_accumulator, gate)) { + return false; + } + return check_accumulator_transfer(previous_accumulator, gate + 1); + }; - // Get micro chunks for checking decomposition and range - auto p_x_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - auto p_y_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - auto z_1_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - }; + // TODO(https: // github.com/AztecProtocol/barretenberg/issues/1367): Report all failures more explicitly and + // consider making use of relations. 
- auto z_2_micro_chunks = { + for (size_t i = 2; i < circuit.num_gates - 1; i += 2) { - get_sequential_micro_chunks(i + 1, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + // Get the values of P.x + Fr op_code = circuit.get_variable(op_wire[i]); + + // Current accumulator (updated value) + const std::vector current_accumulator_binary_limbs = { + circuit.get_variable(accumulators_binary_limbs_0_wire[i]), + circuit.get_variable(accumulators_binary_limbs_1_wire[i]), + circuit.get_variable(accumulators_binary_limbs_2_wire[i]), + circuit.get_variable(accumulators_binary_limbs_3_wire[i]), + }; + + // Previous accumulator + const std::vector previous_accumulator_binary_limbs = { + circuit.get_variable(accumulators_binary_limbs_0_wire[i + 1]), + circuit.get_variable(accumulators_binary_limbs_1_wire[i + 1]), + circuit.get_variable(accumulators_binary_limbs_2_wire[i + 1]), + circuit.get_variable(accumulators_binary_limbs_3_wire[i + 1]), + }; + + if (op_code == 0) { + if (!check_no_op(current_accumulator_binary_limbs, previous_accumulator_binary_limbs, i)) { + return false; }; + continue; + } - auto current_accumulator_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - }; - auto quotient_micro_chunks = { - get_sequential_micro_chunks(i, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, 
NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - }; + Fr p_x_lo = circuit.get_variable(x_lo_y_hi_wire[i]); + Fr p_x_hi = circuit.get_variable(x_hi_z_1_wire[i]); + Fr p_x_0 = circuit.get_variable(p_x_0_p_x_1_wire[i]); + Fr p_x_1 = circuit.get_variable(p_x_0_p_x_1_wire[i + 1]); + Fr p_x_2 = circuit.get_variable(p_x_2_p_x_3_wire[i]); + Fr p_x_3 = circuit.get_variable(p_x_2_p_x_3_wire[i + 1]); + const std::vector p_x_binary_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; + + // P.y + Fr p_y_lo = circuit.get_variable(y_lo_z_2_wire[i]); + Fr p_y_hi = circuit.get_variable(x_lo_y_hi_wire[i + 1]); + Fr p_y_0 = circuit.get_variable(p_y_0_p_y_1_wire[i]); + Fr p_y_1 = circuit.get_variable(p_y_0_p_y_1_wire[i + 1]); + Fr p_y_2 = circuit.get_variable(p_y_2_p_y_3_wire[i]); + Fr p_y_3 = circuit.get_variable(p_y_2_p_y_3_wire[i + 1]); + const std::vector p_y_binary_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; + // z1, z2 + Fr z_1 = circuit.get_variable(x_hi_z_1_wire[i + 1]); + Fr z_2 = circuit.get_variable(y_lo_z_2_wire[i + 1]); + + Fr z_1_lo = circuit.get_variable(z_lo_wire[i]); + Fr z_2_lo = circuit.get_variable(z_lo_wire[i + 1]); + Fr z_1_hi = circuit.get_variable(z_hi_wire[i]); + Fr z_2_hi = circuit.get_variable(z_hi_wire[i + 1]); + + const std::vector z_1_binary_limbs = { z_1_lo, z_1_hi }; + const std::vector z_2_binary_limbs = { z_2_lo, z_2_hi }; + // Relation limbs + Fr low_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i]); + Fr high_wide_relation_limb = circuit.get_variable(relation_wide_limbs_wire[i + 1]); + + // Quotient + const std::vector quotient_binary_limbs = { + circuit.get_variable(quotient_low_binary_limbs[i]), + circuit.get_variable(quotient_low_binary_limbs[i + 1]), + circuit.get_variable(quotient_high_binary_limbs[i]), + circuit.get_variable(quotient_high_binary_limbs[i + 1]), + }; + + const size_t NUM_MICRO_LIMBS = Builder::NUM_MICRO_LIMBS; + + // Get micro chunks for checking 
decomposition and range + auto p_x_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + auto p_y_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + auto z_1_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + }; + + auto z_2_micro_chunks = { + + get_sequential_micro_chunks(i + 1, WireIds::Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + + auto current_accumulator_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + }; + auto quotient_micro_chunks = { + get_sequential_micro_chunks(i, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, 
NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, WireIds::QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + }; + + // Lambda for checking the correctness of decomposition of values in the Queue into limbs for + // checking the relation + auto check_wide_limb_into_binary_limb_relation = [](const std::vector& wide_limbs, + const std::vector& binary_limbs) { + BB_ASSERT_EQ(wide_limbs.size() * 2, binary_limbs.size()); + for (size_t i = 0; i < wide_limbs.size(); i++) { + if ((binary_limbs[i * 2] + Fr(Builder::SHIFT_1) * binary_limbs[i * 2 + 1]) != wide_limbs[i]) { + return false; + } + } + return true; + }; + // Check that everything has been decomposed correctly + // P.xₗₒ = P.xₗₒ_0 + SHIFT_1 * P.xₗₒ_1 + // P.xₕᵢ = P.xₕᵢ_0 + SHIFT_1 * P.xₕᵢ_1 + // z_1 = z_1ₗₒ + SHIFT_1 * z_1ₕᵢ + // z_2 = z_2ₗₒ + SHIFT_2 * z_1ₕᵢ + if (!(check_wide_limb_into_binary_limb_relation({ p_x_lo, p_x_hi }, p_x_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ p_y_lo, p_y_hi }, p_y_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ z_1 }, z_1_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ z_2 }, z_2_binary_limbs))) { + + return report_fail("wide limb decomposition failied at row = ", i); + } - // Lambda for checking the correctness of decomposition of values in the Queue into limbs for - // checking the relation - auto check_wide_limb_into_binary_limb_relation = [](const std::vector& wide_limbs, - const std::vector& binary_limbs) { - BB_ASSERT_EQ(wide_limbs.size() * 2, binary_limbs.size()); - for (size_t i = 0; i < wide_limbs.size(); i++) { - if ((binary_limbs[i * 2] + Fr(Builder::SHIFT_1) * binary_limbs[i * 2 + 1]) != wide_limbs[i]) { + enum LimbSeriesType { STANDARD_COORDINATE, Z_SCALAR, QUOTIENT }; + + // Check that limbs have been decomposed into microlimbs correctly + // value = ∑ (2ˡ)ⁱ⋅ chunkᵢ, where 2ˡ is the shift + auto 
check_micro_limb_decomposition_correctness = [&accumulate_limb_from_micro_chunks]( + const std::vector& binary_limbs, + const std::vector>& micro_limbs, + const LimbSeriesType limb_series_type) { + // Shifts for decompositions + constexpr auto SHIFT_12_TO_14 = Fr(4); + constexpr auto SHIFT_10_TO_14 = Fr(16); + constexpr auto SHIFT_8_TO_14 = Fr(64); + constexpr auto SHIFT_4_TO_14 = Fr(1024); + + BB_ASSERT_EQ(binary_limbs.size(), micro_limbs.size()); + // First check that all the microlimbs are properly range constrained + for (auto& micro_limb_series : micro_limbs) { + for (auto& micro_limb : micro_limb_series) { + if (uint256_t(micro_limb) > Builder::MAX_MICRO_LIMB_SIZE) { return false; } } - return true; - }; - // Check that everything has been decomposed correctly - // P.xₗₒ = P.xₗₒ_0 + SHIFT_1 * P.xₗₒ_1 - // P.xₕᵢ = P.xₕᵢ_0 + SHIFT_1 * P.xₕᵢ_1 - // z_1 = z_1ₗₒ + SHIFT_1 * z_1ₕᵢ - // z_2 = z_2ₗₒ + SHIFT_2 * z_1ₕᵢ - if (!(check_wide_limb_into_binary_limb_relation({ p_x_lo, p_x_hi }, p_x_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ p_y_lo, p_y_hi }, p_y_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ z_1 }, z_1_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ z_2 }, z_2_binary_limbs))) { - - return report_fail("wide limb decomposition failied at row = ", i); } - - enum LimbSeriesType { STANDARD_COORDINATE, Z_SCALAR, QUOTIENT }; - - // Check that limbs have been decomposed into microlimbs correctly - // value = ∑ (2ˡ)ⁱ⋅ chunkᵢ, where 2ˡ is the shift - auto check_micro_limb_decomposition_correctness = [&accumulate_limb_from_micro_chunks]( - const std::vector& binary_limbs, - const std::vector>& micro_limbs, - const LimbSeriesType limb_series_type) { - // Shifts for decompositions - constexpr auto SHIFT_12_TO_14 = Fr(4); - constexpr auto SHIFT_10_TO_14 = Fr(16); - constexpr auto SHIFT_8_TO_14 = Fr(64); - constexpr auto SHIFT_4_TO_14 = Fr(1024); - - BB_ASSERT_EQ(binary_limbs.size(), micro_limbs.size()); - // First check 
that all the microlimbs are properly range constrained - for (auto& micro_limb_series : micro_limbs) { - for (auto& micro_limb : micro_limb_series) { - if (uint256_t(micro_limb) > Builder::MAX_MICRO_LIMB_SIZE) { - return false; - } - } + // For low limbs the last microlimb is used with the shift, so we skip it when reconstructing + // the limb + const size_t SKIPPED_FOR_LOW_LIMBS = 1; + for (size_t i = 0; i < binary_limbs.size() - 1; i++) { + if (binary_limbs[i] != accumulate_limb_from_micro_chunks(micro_limbs[i], SKIPPED_FOR_LOW_LIMBS)) { + return false; } - // For low limbs the last microlimb is used with the shift, so we skip it when reconstructing - // the limb - const size_t SKIPPED_FOR_LOW_LIMBS = 1; - for (size_t i = 0; i < binary_limbs.size() - 1; i++) { - if (binary_limbs[i] != accumulate_limb_from_micro_chunks(micro_limbs[i], SKIPPED_FOR_LOW_LIMBS)) { - return false; - } - // Check last additional constraint (68->70) - if (micro_limbs[i][NUM_MICRO_LIMBS - 1] != (SHIFT_12_TO_14 * micro_limbs[i][NUM_MICRO_LIMBS - 2])) { - return false; - } + // Check last additional constraint (68->70) + if (micro_limbs[i][NUM_MICRO_LIMBS - 1] != (SHIFT_12_TO_14 * micro_limbs[i][NUM_MICRO_LIMBS - 2])) { + return false; } + } - const size_t SKIPPED_FOR_STANDARD = 2; - const size_t SKIPPED_FOR_Z_SCALARS = 1; - const size_t SKIPPED_FOR_QUOTIENT = 2; - switch (limb_series_type) { - case STANDARD_COORDINATE: - // For standard Fq value the highest limb is 50 bits, so we skip the top 2 microlimbs - if (binary_limbs[binary_limbs.size() - 1] != - accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_STANDARD)) { - return false; - } - // Check last additional constraint (50->56) - if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD] != - (SHIFT_8_TO_14 * - micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD - 1])) { - - return false; - } - break; - // For z top limbs we need as many microlimbs as for 
the low limbs - case Z_SCALAR: - if (binary_limbs[binary_limbs.size() - 1] != - accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], - SKIPPED_FOR_Z_SCALARS)) { - return false; - } - // Check last additional constraint (60->70) - if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS] != - (SHIFT_4_TO_14 * - micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS - 1])) { - return false; - } - break; - // Quotient also doesn't need the top 2 - case QUOTIENT: - if (binary_limbs[binary_limbs.size() - 1] != - accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_QUOTIENT)) { - return false; - } - // Check last additional constraint (52->56) - if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT] != - (SHIFT_10_TO_14 * - micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT - 1])) { - return false; - } - break; - default: - abort(); + const size_t SKIPPED_FOR_STANDARD = 2; + const size_t SKIPPED_FOR_Z_SCALARS = 1; + const size_t SKIPPED_FOR_QUOTIENT = 2; + switch (limb_series_type) { + case STANDARD_COORDINATE: + // For standard Fq value the highest limb is 50 bits, so we skip the top 2 microlimbs + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_STANDARD)) { + return false; } + // Check last additional constraint (50->56) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD] != + (SHIFT_8_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD - 1])) { - return true; - }; - // Check all micro limb decompositions - if (!check_micro_limb_decomposition_correctness(p_x_binary_limbs, p_x_micro_chunks, STANDARD_COORDINATE)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(p_y_binary_limbs, p_y_micro_chunks, STANDARD_COORDINATE)) { - return false; - } - if 
(!check_micro_limb_decomposition_correctness(z_1_binary_limbs, z_1_micro_chunks, Z_SCALAR)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(z_2_binary_limbs, z_2_micro_chunks, Z_SCALAR)) { - return false; - } - if (!check_micro_limb_decomposition_correctness( - current_accumulator_binary_limbs, current_accumulator_micro_chunks, STANDARD_COORDINATE)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(quotient_binary_limbs, quotient_micro_chunks, QUOTIENT)) { - return false; - } - - // The logic we are trying to enforce is: - // current_accumulator = previous_accumulator ⋅ x + op_code + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ - // v⁴ mod Fq To ensure this we transform the relation into the form: previous_accumulator ⋅ x + op + - // P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - current_accumulator = 0 However, we - // don't have integers. Despite that, we can approximate integers for a certain range, if we know - // that there will not be any overflows. For now we set the range to 2²⁷² ⋅ r. We can evaluate the - // logic modulo 2²⁷² with range constraints and r is native. - // - // previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - - // current_accumulator = 0 => - // 1. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + quotient ⋅ (-p mod - // 2²⁷²) - current_accumulator = 0 mod 2²⁷² - // 2. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - - // current_accumulator = 0 mod r - // - // The second relation is straightforward and easy to check. The first, not so much. We have to - // evaluate certain bit chunks of the equation and ensure that they are zero. 
For example, for the - // lowest limb it would be (inclusive ranges): - // - // previous_accumulator[0:67] ⋅ x[0:67] + op + P.x[0:67] ⋅ v[0:67] + P.y[0:67] ⋅ v²[0:67] + - // z_1[0:67] ⋅ v³[0:67] + z_2[0:67] ⋅ v⁴[0:67] + quotient[0:67] ⋅ (-p mod 2²⁷²)[0:67] - - // current_accumulator[0:67] = intermediate_value; (we don't take parts of op, because it's supposed - // to be between 0 and 3) - // - // We could check that this intermediate_value is equal to 0 mod 2⁶⁸ by dividing it by 2⁶⁸ and - // constraining it. For efficiency, we actually compute wider evaluations for 136 bits, which - // require us to also obtain and shift products of [68:135] by [0:67] and [0:67] by [68:135] bits. - // The result of division goes into the next evaluation (the same as a carry flag would) - // So the lowest wide limb is : (∑everything[0:67]⋅everything[0:67] + - // 2⁶⁸⋅(∑everything[0:67]⋅everything[68:135]))/ 2¹³⁶ - // - // The high is: - // (low_limb + ∑everything[0:67]⋅everything[136:203] + ∑everything[68:135]⋅everything[68:135] + - // 2⁶⁸(∑everything[0:67]⋅everything[204:271] + ∑everything[68:135]⋅everything[136:203])) / 2¹³⁶ - // - // We also limit computation on limbs of op, z_1 and z_2, since we know that op has only the lowest - // limb and z_1 and z_2 have only the two lowest limbs - constexpr std::array NEGATIVE_MODULUS_LIMBS = Builder::NEGATIVE_MODULUS_LIMBS; - const uint256_t SHIFT_1 = Builder::SHIFT_1; - const uint256_t SHIFT_2 = Builder::SHIFT_2; - const uint256_t SHIFT_3 = Builder::SHIFT_3; - Fr low_wide_limb_relation_check = - - (previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[0] + op_code + - relation_inputs.v_limbs[0] * p_x_0 + relation_inputs.v_squared_limbs[0] * p_y_0 + - relation_inputs.v_cubed_limbs[0] * z_1_lo + relation_inputs.v_quarted_limbs[0] * z_2_lo + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[0] - current_accumulator_binary_limbs[0]) + - (previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[0] + - relation_inputs.v_limbs[1] 
* p_x_0 + relation_inputs.v_squared_limbs[1] * p_y_0 + - relation_inputs.v_cubed_limbs[1] * z_1_lo + relation_inputs.v_quarted_limbs[1] * z_2_lo + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[0] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[1] + - relation_inputs.v_limbs[0] * p_x_1 + relation_inputs.v_squared_limbs[0] * p_y_1 + - relation_inputs.v_cubed_limbs[0] * z_1_hi + relation_inputs.v_quarted_limbs[0] * z_2_hi + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[1] - current_accumulator_binary_limbs[1]) * - Fr(SHIFT_1); - if (low_wide_limb_relation_check != (low_wide_relation_limb * SHIFT_2)) { - return false; - } - Fr high_wide_relation_limb_check = - low_wide_relation_limb + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[0] + - previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[1] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[2] + relation_inputs.v_limbs[2] * p_x_0 + - relation_inputs.v_limbs[1] * p_x_1 + relation_inputs.v_limbs[0] * p_x_2 + - relation_inputs.v_squared_limbs[2] * p_y_0 + relation_inputs.v_squared_limbs[1] * p_y_1 + - relation_inputs.v_squared_limbs[0] * p_y_2 + relation_inputs.v_cubed_limbs[2] * z_1_lo + - relation_inputs.v_cubed_limbs[1] * z_1_hi + relation_inputs.v_quarted_limbs[2] * z_2_lo + - relation_inputs.v_quarted_limbs[1] * z_2_hi + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[0] + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[1] + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[2] - current_accumulator_binary_limbs[2] + - (previous_accumulator_binary_limbs[3] * relation_inputs.x_limbs[0] + - previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[1] + - previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[2] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[3] + - relation_inputs.v_limbs[3] * p_x_0 + relation_inputs.v_limbs[2] * p_x_1 + - relation_inputs.v_limbs[1] * p_x_2 + relation_inputs.v_limbs[0] * 
p_x_3 + - relation_inputs.v_squared_limbs[3] * p_y_0 + relation_inputs.v_squared_limbs[2] * p_y_1 + - relation_inputs.v_squared_limbs[1] * p_y_2 + relation_inputs.v_squared_limbs[0] * p_y_3 + - relation_inputs.v_cubed_limbs[3] * z_1_lo + relation_inputs.v_cubed_limbs[2] * z_1_hi + - relation_inputs.v_quarted_limbs[3] * z_2_lo + relation_inputs.v_quarted_limbs[2] * z_2_hi + - quotient_binary_limbs[3] * NEGATIVE_MODULUS_LIMBS[0] + - quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[1] + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[2] + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[3] - current_accumulator_binary_limbs[3]) * - SHIFT_1; - if (high_wide_relation_limb_check != (high_wide_relation_limb * SHIFT_2)) { - return false; - } - // Apart from checking the correctness of the evaluation modulo 2²⁷² we also need to ensure that the - // logic works in our scalar field. For this we reconstruct the scalar field values from individual - // limbs - auto reconstructed_p_x = (p_x_0 + p_x_1 * SHIFT_1 + p_x_2 * SHIFT_2 + p_x_3 * SHIFT_3); - auto reconstructed_p_y = (p_y_0 + p_y_1 * SHIFT_1 + p_y_2 * SHIFT_2 + p_y_3 * SHIFT_3); - auto reconstructed_current_accumulator = - (current_accumulator_binary_limbs[0] + current_accumulator_binary_limbs[1] * SHIFT_1 + - current_accumulator_binary_limbs[2] * SHIFT_2 + current_accumulator_binary_limbs[3] * SHIFT_3); - auto reconstructed_previous_accumulator = - (previous_accumulator_binary_limbs[0] + previous_accumulator_binary_limbs[1] * SHIFT_1 + - previous_accumulator_binary_limbs[2] * SHIFT_2 + previous_accumulator_binary_limbs[3] * SHIFT_3); - - auto reconstructed_z1 = (z_1_lo + z_1_hi * SHIFT_1); - auto reconstructed_z2 = (z_2_lo + z_2_hi * SHIFT_1); - auto reconstructed_quotient = (quotient_binary_limbs[0] + quotient_binary_limbs[1] * SHIFT_1 + - quotient_binary_limbs[2] * SHIFT_2 + quotient_binary_limbs[3] * SHIFT_3); - - // Check the relation - if (!(reconstructed_previous_accumulator * 
reconstructed_evaluation_input_x + op_code + - reconstructed_p_x * reconstructed_batching_evaluation_v + - reconstructed_p_y * reconstructed_batching_evaluation_v2 + - reconstructed_z1 * reconstructed_batching_evaluation_v3 + - reconstructed_z2 * reconstructed_batching_evaluation_v4 + - reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) - .is_zero()) { - return false; - }; - } - { - size_t odd_gate_index = i + 1; - // Check the accumulator is copied correctly - const std::vector current_accumulator_binary_limbs_copy = { - circuit.get_variable(accumulators_binary_limbs_0_wire[odd_gate_index]), - circuit.get_variable(accumulators_binary_limbs_1_wire[odd_gate_index]), - circuit.get_variable(accumulators_binary_limbs_2_wire[odd_gate_index]), - circuit.get_variable(accumulators_binary_limbs_3_wire[odd_gate_index]), - }; - if (odd_gate_index < circuit.num_gates - 1) { - size_t next_even_gate_index = i + 2; - const std::vector current_accumulator_binary_limbs = { - circuit.get_variable(accumulators_binary_limbs_0_wire[next_even_gate_index]), - circuit.get_variable(accumulators_binary_limbs_1_wire[next_even_gate_index]), - circuit.get_variable(accumulators_binary_limbs_2_wire[next_even_gate_index]), - circuit.get_variable(accumulators_binary_limbs_3_wire[next_even_gate_index]), - }; - - for (size_t j = 0; j < current_accumulator_binary_limbs.size(); j++) { - if (current_accumulator_binary_limbs_copy[j] != current_accumulator_binary_limbs[j]) { - return report_fail("accumulator copy failed at row = ", odd_gate_index); - } + return false; } - } else { - // Check accumulator starts at zero - for (const auto& limb : current_accumulator_binary_limbs_copy) { - if (limb != Fr(0)) { - return report_fail("accumulator doesn't start with 0 = ", odd_gate_index); - } + break; + // For z top limbs we need as many microlimbs as for the low limbs + case Z_SCALAR: + if (binary_limbs[binary_limbs.size() - 1] != + 
accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_Z_SCALARS)) { + return false; + } + // Check last additional constraint (60->70) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS] != + (SHIFT_4_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS - 1])) { + return false; } + break; + // Quotient also doesn't need the top 2 + case QUOTIENT: + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_QUOTIENT)) { + return false; + } + // Check last additional constraint (52->56) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT] != + (SHIFT_10_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT - 1])) { + return false; + } + break; + default: + abort(); } + + return true; + }; + // Check all micro limb decompositions + if (!check_micro_limb_decomposition_correctness(p_x_binary_limbs, p_x_micro_chunks, STANDARD_COORDINATE)) { + return false; } - } + if (!check_micro_limb_decomposition_correctness(p_y_binary_limbs, p_y_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(z_1_binary_limbs, z_1_micro_chunks, Z_SCALAR)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(z_2_binary_limbs, z_2_micro_chunks, Z_SCALAR)) { + return false; + } + if (!check_micro_limb_decomposition_correctness( + current_accumulator_binary_limbs, current_accumulator_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(quotient_binary_limbs, quotient_micro_chunks, QUOTIENT)) { + return false; + } + + // The logic we are trying to enforce is: + // current_accumulator = previous_accumulator ⋅ x + op_code + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ + // v⁴ mod Fq To ensure this we transform the relation into the form: 
previous_accumulator ⋅ x + op + + // P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - current_accumulator = 0 However, we + // don't have integers. Despite that, we can approximate integers for a certain range, if we know + // that there will not be any overflows. For now we set the range to 2²⁷² ⋅ r. We can evaluate the + // logic modulo 2²⁷² with range constraints and r is native. + // + // previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - + // current_accumulator = 0 => + // 1. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + quotient ⋅ (-p mod + // 2²⁷²) - current_accumulator = 0 mod 2²⁷² + // 2. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - + // current_accumulator = 0 mod r + // + // The second relation is straightforward and easy to check. The first, not so much. We have to + // evaluate certain bit chunks of the equation and ensure that they are zero. For example, for the + // lowest limb it would be (inclusive ranges): + // + // previous_accumulator[0:67] ⋅ x[0:67] + op + P.x[0:67] ⋅ v[0:67] + P.y[0:67] ⋅ v²[0:67] + + // z_1[0:67] ⋅ v³[0:67] + z_2[0:67] ⋅ v⁴[0:67] + quotient[0:67] ⋅ (-p mod 2²⁷²)[0:67] - + // current_accumulator[0:67] = intermediate_value; (we don't take parts of op, because it's supposed + // to be between 0 and 3) + // + // We could check that this intermediate_value is equal to 0 mod 2⁶⁸ by dividing it by 2⁶⁸ and + // constraining it. For efficiency, we actually compute wider evaluations for 136 bits, which + // require us to also obtain and shift products of [68:135] by [0:67] and [0:67] by [68:135] bits. 
+ // The result of division goes into the next evaluation (the same as a carry flag would) + // So the lowest wide limb is : (∑everything[0:67]⋅everything[0:67] + + // 2⁶⁸⋅(∑everything[0:67]⋅everything[68:135]))/ 2¹³⁶ + // + // The high is: + // (low_limb + ∑everything[0:67]⋅everything[136:203] + ∑everything[68:135]⋅everything[68:135] + + // 2⁶⁸(∑everything[0:67]⋅everything[204:271] + ∑everything[68:135]⋅everything[136:203])) / 2¹³⁶ + // + // We also limit computation on limbs of op, z_1 and z_2, since we know that op has only the lowest + // limb and z_1 and z_2 have only the two lowest limbs + constexpr std::array NEGATIVE_MODULUS_LIMBS = Builder::NEGATIVE_MODULUS_LIMBS; + const uint256_t SHIFT_1 = Builder::SHIFT_1; + const uint256_t SHIFT_2 = Builder::SHIFT_2; + const uint256_t SHIFT_3 = Builder::SHIFT_3; + Fr low_wide_limb_relation_check = + + (previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[0] + op_code + + relation_inputs.v_limbs[0] * p_x_0 + relation_inputs.v_squared_limbs[0] * p_y_0 + + relation_inputs.v_cubed_limbs[0] * z_1_lo + relation_inputs.v_quarted_limbs[0] * z_2_lo + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[0] - current_accumulator_binary_limbs[0]) + + (previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[0] + relation_inputs.v_limbs[1] * p_x_0 + + relation_inputs.v_squared_limbs[1] * p_y_0 + relation_inputs.v_cubed_limbs[1] * z_1_lo + + relation_inputs.v_quarted_limbs[1] * z_2_lo + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[0] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[1] + relation_inputs.v_limbs[0] * p_x_1 + + relation_inputs.v_squared_limbs[0] * p_y_1 + relation_inputs.v_cubed_limbs[0] * z_1_hi + + relation_inputs.v_quarted_limbs[0] * z_2_hi + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[1] - + current_accumulator_binary_limbs[1]) * + Fr(SHIFT_1); + if (low_wide_limb_relation_check != (low_wide_relation_limb * SHIFT_2)) { + return false; + } + Fr 
high_wide_relation_limb_check = + low_wide_relation_limb + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[0] + + previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[1] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[2] + relation_inputs.v_limbs[2] * p_x_0 + + relation_inputs.v_limbs[1] * p_x_1 + relation_inputs.v_limbs[0] * p_x_2 + + relation_inputs.v_squared_limbs[2] * p_y_0 + relation_inputs.v_squared_limbs[1] * p_y_1 + + relation_inputs.v_squared_limbs[0] * p_y_2 + relation_inputs.v_cubed_limbs[2] * z_1_lo + + relation_inputs.v_cubed_limbs[1] * z_1_hi + relation_inputs.v_quarted_limbs[2] * z_2_lo + + relation_inputs.v_quarted_limbs[1] * z_2_hi + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[0] + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[1] + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[2] - current_accumulator_binary_limbs[2] + + (previous_accumulator_binary_limbs[3] * relation_inputs.x_limbs[0] + + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[1] + + previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[2] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[3] + relation_inputs.v_limbs[3] * p_x_0 + + relation_inputs.v_limbs[2] * p_x_1 + relation_inputs.v_limbs[1] * p_x_2 + + relation_inputs.v_limbs[0] * p_x_3 + relation_inputs.v_squared_limbs[3] * p_y_0 + + relation_inputs.v_squared_limbs[2] * p_y_1 + relation_inputs.v_squared_limbs[1] * p_y_2 + + relation_inputs.v_squared_limbs[0] * p_y_3 + relation_inputs.v_cubed_limbs[3] * z_1_lo + + relation_inputs.v_cubed_limbs[2] * z_1_hi + relation_inputs.v_quarted_limbs[3] * z_2_lo + + relation_inputs.v_quarted_limbs[2] * z_2_hi + quotient_binary_limbs[3] * NEGATIVE_MODULUS_LIMBS[0] + + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[1] + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[2] + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[3] - current_accumulator_binary_limbs[3]) * + SHIFT_1; + if 
(high_wide_relation_limb_check != (high_wide_relation_limb * SHIFT_2)) { + return false; + } + // Apart from checking the correctness of the evaluation modulo 2²⁷² we also need to ensure that the + // logic works in our scalar field. For this we reconstruct the scalar field values from individual + // limbs + auto reconstructed_p_x = (p_x_0 + p_x_1 * SHIFT_1 + p_x_2 * SHIFT_2 + p_x_3 * SHIFT_3); + auto reconstructed_p_y = (p_y_0 + p_y_1 * SHIFT_1 + p_y_2 * SHIFT_2 + p_y_3 * SHIFT_3); + auto reconstructed_current_accumulator = + (current_accumulator_binary_limbs[0] + current_accumulator_binary_limbs[1] * SHIFT_1 + + current_accumulator_binary_limbs[2] * SHIFT_2 + current_accumulator_binary_limbs[3] * SHIFT_3); + auto reconstructed_previous_accumulator = + (previous_accumulator_binary_limbs[0] + previous_accumulator_binary_limbs[1] * SHIFT_1 + + previous_accumulator_binary_limbs[2] * SHIFT_2 + previous_accumulator_binary_limbs[3] * SHIFT_3); + + auto reconstructed_z1 = (z_1_lo + z_1_hi * SHIFT_1); + auto reconstructed_z2 = (z_2_lo + z_2_hi * SHIFT_1); + auto reconstructed_quotient = (quotient_binary_limbs[0] + quotient_binary_limbs[1] * SHIFT_1 + + quotient_binary_limbs[2] * SHIFT_2 + quotient_binary_limbs[3] * SHIFT_3); + + // Check the relation + if (!(reconstructed_previous_accumulator * reconstructed_evaluation_input_x + op_code + + reconstructed_p_x * reconstructed_batching_evaluation_v + + reconstructed_p_y * reconstructed_batching_evaluation_v2 + + reconstructed_z1 * reconstructed_batching_evaluation_v3 + + reconstructed_z2 * reconstructed_batching_evaluation_v4 + + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) + .is_zero()) { + return false; + }; + + if (!check_accumulator_transfer(previous_accumulator_binary_limbs, i + 1)) { + return false; + } + }; return true; -}; +} }; // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp 
b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp index 15381108199c..c4f43ca23e71 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp @@ -268,6 +268,9 @@ ClientIVC::perform_recursive_verification_and_databus_consistency_checks( pairing_points.aggregate(nested_pairing_points); if (is_hiding_kernel) { pairing_points.aggregate(decider_pairing_points); + // Placeholder for randomness at the end of the hiding circuit (to be handled in subsequent PR) + circuit.queue_ecc_no_op(); + circuit.queue_ecc_no_op(); } return { output_verifier_accumulator, pairing_points, merged_table_commitments }; @@ -310,6 +313,12 @@ void ClientIVC::complete_kernel_circuit_logic(ClientCircuit& circuit) // to ensure the op queue wires in translator are shiftable, i.e. their 0th coefficient is 0. (The tail kernel // subtable is at the top of the final aggregate table since it is the last to be prepended). if (is_tail_kernel) { + BB_ASSERT_EQ(circuit.op_queue->get_current_subtable_size(), + 0U, + "tail kernel ecc ops table should be empty at this point"); + circuit.queue_ecc_no_op(); + // Placeholder for randomness at the beginning of tail circuit + circuit.queue_ecc_no_op(); circuit.queue_ecc_no_op(); } circuit.queue_ecc_eq(); diff --git a/barretenberg/cpp/src/barretenberg/constants.hpp b/barretenberg/cpp/src/barretenberg/constants.hpp index 69943225f0e9..3897a7fb79fe 100644 --- a/barretenberg/cpp/src/barretenberg/constants.hpp +++ b/barretenberg/cpp/src/barretenberg/constants.hpp @@ -8,6 +8,13 @@ namespace bb { // permutation argument polynomials (sigmas, ids) are unique, e.g. id[i][j] == id[m][n] iff (i == m && j == n) constexpr uint32_t PERMUTATION_ARGUMENT_VALUE_SEPARATOR = 1 << 28; +// The fixed size of the Translator trace where each accumulation gate, corresponding to one UltraOp, will occupy two +// rows. 
+static constexpr uint32_t CONST_TRANSLATOR_MINI_CIRCUIT_LOG_SIZE = 14; + +// -1 as each op occupies two rows in Translator trace +static constexpr uint32_t CONST_OP_QUEUE_LOG_SIZE = CONST_TRANSLATOR_MINI_CIRCUIT_LOG_SIZE - 1; + // The log of the max circuit size assumed in order to achieve constant sized Honk proofs // TODO(https://github.com/AztecProtocol/barretenberg/issues/1046): Remove the need for const sized proofs static constexpr uint32_t CONST_PROOF_SIZE_LOG_N = 28; diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp index 3b14db1a55f1..c6c5a0f297fc 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp @@ -12,6 +12,7 @@ #include "barretenberg/crypto/merkle_tree/memory_store.hpp" #include "barretenberg/crypto/merkle_tree/merkle_tree.hpp" #include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/goblin/goblin.hpp" #include "barretenberg/srs/global_crs.hpp" #include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" #include "barretenberg/stdlib/hash/keccak/keccak.hpp" @@ -136,19 +137,34 @@ class GoblinMockCircuits { * * @param builder */ - static void construct_simple_circuit(MegaBuilder& builder, bool last_circuit = false) + static void construct_simple_circuit(MegaBuilder& builder) { BB_BENCH(); - // The last circuit to be accumulated must contain a no-op - if (last_circuit) { - builder.queue_ecc_no_op(); - } add_some_ecc_op_gates(builder); MockCircuits::construct_arithmetic_circuit(builder); bb::stdlib::recursion::honk::DefaultIO::add_default(builder); } + static void construct_and_merge_mock_circuits(Goblin& goblin, const size_t num_circuits = 3) + { + for (size_t idx = 0; idx < num_circuits - 1; ++idx) { + MegaCircuitBuilder builder{ goblin.op_queue }; + if (idx == num_circuits - 2) { + // Last circuit appended needs to begin with a no-op for translator to be shiftable + 
builder.queue_ecc_no_op(); + } + construct_simple_circuit(builder); + goblin.prove_merge(); + // Pop the merge proof from the queue, Goblin will be verified at the end + goblin.merge_verification_queue.pop_front(); + } + MegaCircuitBuilder builder{ goblin.op_queue }; + GoblinMockCircuits::construct_simple_circuit(builder); + builder.queue_ecc_no_op(); + builder.queue_ecc_no_op(); + } + /** * @brief Construct a mock kernel circuit * @details Construct an arbitrary circuit meant to represent the aztec private function execution kernel. Recursive diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp index 3ca4e070b853..baac62d40cf0 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp @@ -48,6 +48,7 @@ class ECCOpQueue { EccvmRowTracker eccvm_row_tracker; public: + static const size_t OP_QUEUE_SIZE = 1 << CONST_OP_QUEUE_LOG_SIZE; /** * @brief Instantiate an initial ECC op subtable. 
*/ @@ -63,6 +64,8 @@ class ECCOpQueue { ultra_ops_table.create_new_subtable(); } + size_t get_current_subtable_size() const { return ultra_ops_table.get_current_subtable_size(); } + void merge(MergeSettings settings = MergeSettings::PREPEND, std::optional ultra_fixed_offset = std::nullopt) { eccvm_ops_table.merge(settings); @@ -75,7 +78,8 @@ class ECCOpQueue { return ultra_ops_table.construct_table_columns(); } - // Construct polys corresponding to the columns of the aggregate ultra ops table, excluding the most recent subtable + // Construct polys corresponding to the columns of the aggregate ultra ops table, excluding the most recent + // subtable std::array, ULTRA_TABLE_WIDTH> construct_previous_ultra_ops_table_columns() const { return ultra_ops_table.construct_previous_table_columns(); @@ -97,8 +101,8 @@ class ECCOpQueue { size_t get_current_ultra_ops_subtable_num_rows() const { return ultra_ops_table.current_ultra_subtable_size(); } size_t get_previous_ultra_ops_table_num_rows() const { return ultra_ops_table.previous_ultra_table_size(); } - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1339): Consider making the ultra and eccvm ops getters - // more memory efficient + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1339): Consider making the ultra and eccvm ops + // getters more memory efficient // Get the full table of ECCVM ops in contiguous memory; construct it if it has not been constructed already std::vector& get_eccvm_ops() @@ -225,8 +229,8 @@ class ECCOpQueue { /** * @brief Writes randomness to the ultra ops table but adds no eccvm operations. 
* - * @details This method is used to add randomness to the ultra ops table with the aim of randomising the commitment - * and evaluations of its corresponding columns + * @details This method is used to add randomness to the ultra ops table with the aim of randomising the + * commitment and evaluations of its corresponding columns * @return UltraOp */ UltraOp random_op_ultra_only() diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp index 0a185968cc74..f6a95676bcda 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp @@ -54,7 +54,9 @@ class ECCOpQueueTest { } else { // APPEND merge performs concatenation directly to end of previous table or at a specified fixed offset const size_t prev_table_size = op_queue->get_previous_ultra_ops_table_num_rows(); // k - const size_t shift_magnitude = ultra_fixed_offset.value_or(prev_table_size); + const size_t shift_magnitude = ultra_fixed_offset.has_value() + ? 
ultra_fixed_offset.value() * bb::UltraEccOpsTable::NUM_ROWS_PER_OP + : prev_table_size; // k // T(x) = T_prev(x) + x^k * t_current(x), where k is the shift magnitude const Fr prev_table_eval = prev_table_poly.evaluate(eval_challenge); // T_prev(x) const Fr shifted_subtable_eval = @@ -74,10 +76,12 @@ class ECCOpQueueTest { auto ultra_table = op_queue->get_ultra_ops(); auto eccvm_table = op_queue->get_eccvm_ops(); - EXPECT_EQ(eccvm_table.size(), ultra_table.size()); - - for (auto [ultra_op, eccvm_op] : zip_view(ultra_table, eccvm_table)) { - EXPECT_EQ(ultra_op.op_code.value(), eccvm_op.op_code.value()); + size_t j = 0; + for (const auto& ultra_op : ultra_table) { + if (ultra_op.op_code.value() == 0) { + continue; + } + EXPECT_EQ(ultra_op.op_code.value(), eccvm_table[j++].op_code.value()); } }; }; diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp index 17ccbf859bb7..19cbf6ffe7ae 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp @@ -74,6 +74,8 @@ struct UltraOp { Fr z_2; bool return_is_infinity; + bool operator==(const UltraOp& other) const = default; + /** * @brief Get the point in standard form i.e. as two coordinates x and y in the base field or as a point at * infinity whose coordinates are set to (0,0). 
@@ -130,6 +132,7 @@ template class EccOpsTable { } size_t num_subtables() const { return table.size(); } + size_t get_current_subtable_size() const { return current_subtable.size(); } auto& get() const { return table; } @@ -246,7 +249,7 @@ class UltraEccOpsTable { // The last subtable in deque is the fixed-location one last_subtable_size = table.get().back().size() * NUM_ROWS_PER_OP; } - return std::max(base_size, fixed_append_offset.value() + last_subtable_size); + return std::max(base_size, (fixed_append_offset.value() * NUM_ROWS_PER_OP) + last_subtable_size); } return base_size; } @@ -270,7 +273,46 @@ class UltraEccOpsTable { } } - std::vector get_reconstructed() const { return table.get_reconstructed(); } + size_t get_current_subtable_size() const { return table.get_current_subtable_size(); } + + std::vector get_reconstructed() const + { + if (has_fixed_append && fixed_append_offset.has_value()) { + return get_reconstructed_with_fixed_append(); + } + return table.get_reconstructed(); + } + std::vector get_reconstructed_with_fixed_append() const + { + + ASSERT(get_current_subtable_size() == 0, + "current subtable should be merged before reconstructing the full table of operations."); + + std::vector reconstructed_table; + reconstructed_table.reserve(1 << CONST_OP_QUEUE_LOG_SIZE); + + for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; subtable_idx++) { + const auto& subtable = table.get()[subtable_idx]; + for (const auto& op : subtable) { + reconstructed_table.push_back(op); + } + } + + // Add zeros if fixed offset is larger than current size + if (has_fixed_append && fixed_append_offset.has_value()) { + size_t current_size = reconstructed_table.size(); + size_t target_offset = fixed_append_offset.value(); + // Fill gap with no-ops if needed + reconstructed_table.insert(reconstructed_table.end(), target_offset - current_size, UltraOp{ /*no-op*/ }); + } + + // Add the final subtable (appended at fixed location) + const auto& final_subtable = 
table.get()[table.num_subtables() - 1]; + for (const auto& op : final_subtable) { + reconstructed_table.push_back(op); + } + return reconstructed_table; + } // Construct the columns of the full ultra ecc ops table ColumnPolynomials construct_table_columns() const @@ -342,7 +384,7 @@ class UltraEccOpsTable { // Process all prepended subtables (all except last) size_t i = 0; - for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; ++subtable_idx) { + for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; subtable_idx++) { const auto& subtable = table.get()[subtable_idx]; for (const auto& op : subtable) { write_op_to_polynomials(column_polynomials, op, i); @@ -351,7 +393,7 @@ class UltraEccOpsTable { } // Place the appended subtable at the fixed offset - size_t append_position = fixed_append_offset.value_or(i); + size_t append_position = fixed_append_offset.has_value() ? fixed_append_offset.value() * NUM_ROWS_PER_OP : i; const auto& appended_subtable = table.get()[table.num_subtables() - 1]; size_t j = append_position; diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp index aed854b8981f..cf4511632361 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp @@ -267,8 +267,9 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) // Define a fixed offset at which to append the table (must be greater than the total size of the prepended tables) const size_t fixed_offset = 20; + const size_t fixed_offset_num_rows = fixed_offset * ULTRA_ROWS_PER_OP; const size_t prepended_size = (subtable_op_counts[0] + subtable_op_counts[1]) * ULTRA_ROWS_PER_OP; - ASSERT(fixed_offset > prepended_size); + ASSERT(fixed_offset_num_rows > prepended_size); // Construct the ultra ops table for (size_t i = 0; i < NUM_SUBTABLES; ++i) { @@ -290,7 +291,7 @@ 
TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) EXPECT_EQ(ultra_ops_table.size(), expected_num_ops); // Check that the polynomials have the correct size (including gap) - size_t expected_poly_size = fixed_offset + (subtable_op_counts[2] * ULTRA_ROWS_PER_OP); + size_t expected_poly_size = fixed_offset_num_rows + (subtable_op_counts[2] * ULTRA_ROWS_PER_OP); EXPECT_EQ(ultra_ops_table.ultra_table_size(), expected_poly_size); // Construct polynomials corresponding to the columns of the ultra ops table @@ -316,7 +317,7 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) // Check gap from offset to appended subtable is filled with zeros for (auto ultra_op_poly : ultra_ops_table_polynomials) { - for (size_t row = prepended_size; row < fixed_offset; ++row) { + for (size_t row = prepended_size; row < fixed_offset_num_rows; ++row) { EXPECT_EQ(ultra_op_poly.at(row), Fr::zero()); } } @@ -325,10 +326,41 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) std::vector> appended_subtables = { subtables[2] }; EccOpsTableTest::MockUltraOpsTable expected_appended_table(appended_subtables); for (auto [ultra_op_poly, expected_poly] : zip_view(ultra_ops_table_polynomials, expected_appended_table.columns)) { - for (size_t row = 0; row < subtable_op_counts[2] * ULTRA_ROWS_PER_OP; ++row) { - EXPECT_EQ(ultra_op_poly.at(fixed_offset + row), expected_poly[row]); + for (size_t row = 0; row < subtable_op_counts[2] * ULTRA_ROWS_PER_OP; row++) { + EXPECT_EQ(ultra_op_poly.at(fixed_offset_num_rows + row), expected_poly[row]); } } + + // Mimic get_reconstructed by unifying all the ops from subtables into a single vector with the appropriate append + // offset + { + std::vector expected_reconstructed; + expected_reconstructed.reserve(expected_num_ops + fixed_offset); + + // Order: subtable[1], subtable[0], no-ops range, subtable[2] + for (const auto& op : subtables[1]) { + expected_reconstructed.push_back(op); + } + for (const auto& op : subtables[0]) { + 
expected_reconstructed.push_back(op); + } + + // Add the range of noops + UltraOp no_op = {}; + size_t size_before = expected_reconstructed.size(); + for (size_t i = size_before; i < fixed_offset; i++) { + expected_reconstructed.push_back(no_op); + } + + for (const auto& op : subtables[2]) { + expected_reconstructed.push_back(op); + } + + EXPECT_EQ(expected_reconstructed.size(), ultra_ops_table.get_reconstructed().size()); + + // Compare to the op-queue's reconstruction (should include the gap as no-ops) + EXPECT_EQ(expected_reconstructed, ultra_ops_table.get_reconstructed()); + } } // Ensure EccvmOpsTable correctly constructs a concatenated table from successively prepended subtables diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp index 6fba92a17fa6..00af4d0bf823 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation.hpp @@ -16,12 +16,12 @@ template class TranslatorDecompositionRelationImpl { // 1 + polynomial degree of this relation static constexpr size_t RELATION_LENGTH = - 3; // degree(lagrange_even_in_minicircuit_in_minicircuit(a - a_0 - a_1*2¹⁴ ... - a_l⋅2¹⁴ˡ )) = 2 + 4; // degree(lagrange_even_in_minicircuit_in_minicircuit(a - a_0 - a_1*2¹⁴ ... 
- a_l⋅2¹⁴ˡ )op) = 3 static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 3, // decomposition of P.x limb 0 into microlimbs subrelation - 3, // decomposition of P.x limb 1 into microlimbs subrelation - 3, // decomposition of P.x limb 2 into microlimbs subrelation - 3, // decomposition of P.x limb 3 into microlimbs subrelation + 4, // decomposition of accumulator limb 0 into microlimbs subrelation + 4, // decomposition of accumulator limb 1 into microlimbs subrelation + 4, // decomposition of accumulator limb 2 into microlimbs subrelation + 4, // decomposition of accumulator limb 3 into microlimbs subrelation 3, // decomposition of P.y limb 0 into microlimbs subrelation 3, // decomposition of P.y limb 1 into microlimbs subrelation 3, // decomposition of P.y limb 2 into microlimbs subrelation @@ -30,10 +30,10 @@ template class TranslatorDecompositionRelationImpl { 3, // decomposition of z2 limb 0 into microlimbs subrelation 3, // decomposition of z1 limb 1 into microlimbs subrelation 3, // decomposition of z2 limb 1 into microlimbs subrelation - 3, // decomposition of accumulator limb 0 into microlimbs subrelation - 3, // decomposition of accumulator limb 1 into microlimbs subrelation - 3, // decomposition of accumulator limb 2 into microlimbs subrelation - 3, // decomposition of accumulator limb 3 into microlimbs subrelation + 3, // decomposition of P.x limb 0 into microlimbs subrelation + 3, // decomposition of P.x limb 1 into microlimbs subrelation + 3, // decomposition of P.x limb 2 into microlimbs subrelation + 3, // decomposition of P.x limb 3 into microlimbs subrelation 3, // decomposition of quotient limb 0 into microlimbs subrelation 3, // decomposition of quotient limb 1 into microlimbs subrelation 3, // decomposition of quotient limb 2 into microlimbs subrelation diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp 
b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp index 52a8d47b717d..6ccaa6b737b4 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_decomposition_relation_impl.hpp @@ -37,588 +37,624 @@ void TranslatorDecompositionRelationImpl::accumulate(ContainerOverSubrelatio static constexpr size_t NUM_LIMB_BITS = 68; // Number of bits in a standard limb used for bigfield operations static constexpr size_t NUM_MICRO_LIMB_BITS = 14; // Number of bits in a standard limb used for bigfield operations - // Value to multiply an element by to perform an appropriate shift - static auto LIMB_SHIFT = FF(uint256_t(1) << NUM_LIMB_BITS); - - // Values to multiply an element by to perform an appropriate shift - static auto MICRO_LIMB_SHIFT = FF(uint256_t(1) << NUM_MICRO_LIMB_BITS); - static auto MICRO_LIMB_SHIFTx2 = MICRO_LIMB_SHIFT * MICRO_LIMB_SHIFT; - static auto MICRO_LIMB_SHIFTx3 = MICRO_LIMB_SHIFTx2 * MICRO_LIMB_SHIFT; - static auto MICRO_LIMB_SHIFTx4 = MICRO_LIMB_SHIFTx3 * MICRO_LIMB_SHIFT; - static auto MICRO_LIMB_SHIFTx5 = MICRO_LIMB_SHIFTx4 * MICRO_LIMB_SHIFT; - - // Shifts used to constrain ranges further - static auto SHIFT_12_TO_14 = - FF(4); // Shift used to range constrain the last microlimb of 68-bit limbs (standard limbs) - static auto SHIFT_10_TO_14 = - FF(16); // Shift used to range constrain the last microlimb of 52-bit limb (top quotient limb) - static auto SHIFT_8_TO_14 = FF(64); // Shift used to range constrain the last microlimb of 50-bit - // limbs (top limb of standard 254-bit value) - static auto SHIFT_4_TO_14 = - FF(1024); // Shift used to range constrain the last mircrolimb of 60-bit limbs from z scalars - - using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; - using View = typename Accumulator::View; - - auto p_x_low_limbs = View(in.p_x_low_limbs); - 
auto p_x_low_limbs_range_constraint_0 = View(in.p_x_low_limbs_range_constraint_0); - auto p_x_low_limbs_range_constraint_1 = View(in.p_x_low_limbs_range_constraint_1); - auto p_x_low_limbs_range_constraint_2 = View(in.p_x_low_limbs_range_constraint_2); - auto p_x_low_limbs_range_constraint_3 = View(in.p_x_low_limbs_range_constraint_3); - auto p_x_low_limbs_range_constraint_4 = View(in.p_x_low_limbs_range_constraint_4); - auto p_x_low_limbs_shift = View(in.p_x_low_limbs_shift); - auto p_x_low_limbs_range_constraint_0_shift = View(in.p_x_low_limbs_range_constraint_0_shift); - auto p_x_low_limbs_range_constraint_1_shift = View(in.p_x_low_limbs_range_constraint_1_shift); - auto p_x_low_limbs_range_constraint_2_shift = View(in.p_x_low_limbs_range_constraint_2_shift); - auto p_x_low_limbs_range_constraint_3_shift = View(in.p_x_low_limbs_range_constraint_3_shift); - auto p_x_low_limbs_range_constraint_4_shift = View(in.p_x_low_limbs_range_constraint_4_shift); - auto p_x_high_limbs = View(in.p_x_high_limbs); - auto p_x_high_limbs_range_constraint_0 = View(in.p_x_high_limbs_range_constraint_0); - auto p_x_high_limbs_range_constraint_1 = View(in.p_x_high_limbs_range_constraint_1); - auto p_x_high_limbs_range_constraint_2 = View(in.p_x_high_limbs_range_constraint_2); - auto p_x_high_limbs_range_constraint_3 = View(in.p_x_high_limbs_range_constraint_3); - auto p_x_high_limbs_range_constraint_4 = View(in.p_x_high_limbs_range_constraint_4); - auto p_x_high_limbs_shift = View(in.p_x_high_limbs_shift); - auto p_x_high_limbs_range_constraint_0_shift = View(in.p_x_high_limbs_range_constraint_0_shift); - auto p_x_high_limbs_range_constraint_1_shift = View(in.p_x_high_limbs_range_constraint_1_shift); - auto p_x_high_limbs_range_constraint_2_shift = View(in.p_x_high_limbs_range_constraint_2_shift); - auto p_x_high_limbs_range_constraint_3_shift = View(in.p_x_high_limbs_range_constraint_3_shift); - auto p_y_low_limbs = View(in.p_y_low_limbs); - auto p_y_low_limbs_range_constraint_0 = 
View(in.p_y_low_limbs_range_constraint_0); - auto p_y_low_limbs_range_constraint_1 = View(in.p_y_low_limbs_range_constraint_1); - auto p_y_low_limbs_range_constraint_2 = View(in.p_y_low_limbs_range_constraint_2); - auto p_y_low_limbs_range_constraint_3 = View(in.p_y_low_limbs_range_constraint_3); - auto p_y_low_limbs_range_constraint_4 = View(in.p_y_low_limbs_range_constraint_4); - auto p_y_low_limbs_shift = View(in.p_y_low_limbs_shift); - auto p_y_low_limbs_range_constraint_0_shift = View(in.p_y_low_limbs_range_constraint_0_shift); - auto p_y_low_limbs_range_constraint_1_shift = View(in.p_y_low_limbs_range_constraint_1_shift); - auto p_y_low_limbs_range_constraint_2_shift = View(in.p_y_low_limbs_range_constraint_2_shift); - auto p_y_low_limbs_range_constraint_3_shift = View(in.p_y_low_limbs_range_constraint_3_shift); - auto p_y_low_limbs_range_constraint_4_shift = View(in.p_y_low_limbs_range_constraint_4_shift); - auto p_y_high_limbs = View(in.p_y_high_limbs); - auto p_y_high_limbs_range_constraint_0 = View(in.p_y_high_limbs_range_constraint_0); - auto p_y_high_limbs_range_constraint_1 = View(in.p_y_high_limbs_range_constraint_1); - auto p_y_high_limbs_range_constraint_2 = View(in.p_y_high_limbs_range_constraint_2); - auto p_y_high_limbs_range_constraint_3 = View(in.p_y_high_limbs_range_constraint_3); - auto p_y_high_limbs_range_constraint_4 = View(in.p_y_high_limbs_range_constraint_4); - auto p_y_high_limbs_shift = View(in.p_y_high_limbs_shift); - auto p_y_high_limbs_range_constraint_0_shift = View(in.p_y_high_limbs_range_constraint_0_shift); - auto p_y_high_limbs_range_constraint_1_shift = View(in.p_y_high_limbs_range_constraint_1_shift); - auto p_y_high_limbs_range_constraint_2_shift = View(in.p_y_high_limbs_range_constraint_2_shift); - auto p_y_high_limbs_range_constraint_3_shift = View(in.p_y_high_limbs_range_constraint_3_shift); - auto z_low_limbs = View(in.z_low_limbs); - auto z_low_limbs_range_constraint_0 = View(in.z_low_limbs_range_constraint_0); - auto 
z_low_limbs_range_constraint_1 = View(in.z_low_limbs_range_constraint_1); - auto z_low_limbs_range_constraint_2 = View(in.z_low_limbs_range_constraint_2); - auto z_low_limbs_range_constraint_3 = View(in.z_low_limbs_range_constraint_3); - auto z_low_limbs_range_constraint_4 = View(in.z_low_limbs_range_constraint_4); - auto z_low_limbs_shift = View(in.z_low_limbs_shift); - auto z_low_limbs_range_constraint_0_shift = View(in.z_low_limbs_range_constraint_0_shift); - auto z_low_limbs_range_constraint_1_shift = View(in.z_low_limbs_range_constraint_1_shift); - auto z_low_limbs_range_constraint_2_shift = View(in.z_low_limbs_range_constraint_2_shift); - auto z_low_limbs_range_constraint_3_shift = View(in.z_low_limbs_range_constraint_3_shift); - auto z_low_limbs_range_constraint_4_shift = View(in.z_low_limbs_range_constraint_4_shift); - auto z_high_limbs = View(in.z_high_limbs); - auto z_high_limbs_range_constraint_0 = View(in.z_high_limbs_range_constraint_0); - auto z_high_limbs_range_constraint_1 = View(in.z_high_limbs_range_constraint_1); - auto z_high_limbs_range_constraint_2 = View(in.z_high_limbs_range_constraint_2); - auto z_high_limbs_range_constraint_3 = View(in.z_high_limbs_range_constraint_3); - auto z_high_limbs_range_constraint_4 = View(in.z_high_limbs_range_constraint_4); - auto z_high_limbs_shift = View(in.z_high_limbs_shift); - auto z_high_limbs_range_constraint_0_shift = View(in.z_high_limbs_range_constraint_0_shift); - auto z_high_limbs_range_constraint_1_shift = View(in.z_high_limbs_range_constraint_1_shift); - auto z_high_limbs_range_constraint_2_shift = View(in.z_high_limbs_range_constraint_2_shift); - auto z_high_limbs_range_constraint_3_shift = View(in.z_high_limbs_range_constraint_3_shift); - auto z_high_limbs_range_constraint_4_shift = View(in.z_high_limbs_range_constraint_4_shift); - auto accumulators_binary_limbs_0 = View(in.accumulators_binary_limbs_0); - auto accumulators_binary_limbs_1 = View(in.accumulators_binary_limbs_1); - auto 
accumulators_binary_limbs_2 = View(in.accumulators_binary_limbs_2); - auto accumulators_binary_limbs_3 = View(in.accumulators_binary_limbs_3); - auto accumulator_low_limbs_range_constraint_0 = View(in.accumulator_low_limbs_range_constraint_0); - auto accumulator_low_limbs_range_constraint_1 = View(in.accumulator_low_limbs_range_constraint_1); - auto accumulator_low_limbs_range_constraint_2 = View(in.accumulator_low_limbs_range_constraint_2); - auto accumulator_low_limbs_range_constraint_3 = View(in.accumulator_low_limbs_range_constraint_3); - auto accumulator_low_limbs_range_constraint_4 = View(in.accumulator_low_limbs_range_constraint_4); - auto accumulator_low_limbs_range_constraint_0_shift = View(in.accumulator_low_limbs_range_constraint_0_shift); - auto accumulator_low_limbs_range_constraint_1_shift = View(in.accumulator_low_limbs_range_constraint_1_shift); - auto accumulator_low_limbs_range_constraint_2_shift = View(in.accumulator_low_limbs_range_constraint_2_shift); - auto accumulator_low_limbs_range_constraint_3_shift = View(in.accumulator_low_limbs_range_constraint_3_shift); - auto accumulator_low_limbs_range_constraint_4_shift = View(in.accumulator_low_limbs_range_constraint_4_shift); - auto accumulator_high_limbs_range_constraint_0 = View(in.accumulator_high_limbs_range_constraint_0); - auto accumulator_high_limbs_range_constraint_1 = View(in.accumulator_high_limbs_range_constraint_1); - auto accumulator_high_limbs_range_constraint_2 = View(in.accumulator_high_limbs_range_constraint_2); - auto accumulator_high_limbs_range_constraint_3 = View(in.accumulator_high_limbs_range_constraint_3); - auto accumulator_high_limbs_range_constraint_4 = View(in.accumulator_high_limbs_range_constraint_4); - auto accumulator_high_limbs_range_constraint_0_shift = View(in.accumulator_high_limbs_range_constraint_0_shift); - auto accumulator_high_limbs_range_constraint_1_shift = View(in.accumulator_high_limbs_range_constraint_1_shift); - auto 
accumulator_high_limbs_range_constraint_2_shift = View(in.accumulator_high_limbs_range_constraint_2_shift); - auto accumulator_high_limbs_range_constraint_3_shift = View(in.accumulator_high_limbs_range_constraint_3_shift); - auto quotient_low_binary_limbs = View(in.quotient_low_binary_limbs); - auto quotient_low_limbs_range_constraint_0 = View(in.quotient_low_limbs_range_constraint_0); - auto quotient_low_limbs_range_constraint_1 = View(in.quotient_low_limbs_range_constraint_1); - auto quotient_low_limbs_range_constraint_2 = View(in.quotient_low_limbs_range_constraint_2); - auto quotient_low_limbs_range_constraint_3 = View(in.quotient_low_limbs_range_constraint_3); - auto quotient_low_limbs_range_constraint_4 = View(in.quotient_low_limbs_range_constraint_4); - auto quotient_low_binary_limbs_shift = View(in.quotient_low_binary_limbs_shift); - auto quotient_low_limbs_range_constraint_0_shift = View(in.quotient_low_limbs_range_constraint_0_shift); - auto quotient_low_limbs_range_constraint_1_shift = View(in.quotient_low_limbs_range_constraint_1_shift); - auto quotient_low_limbs_range_constraint_2_shift = View(in.quotient_low_limbs_range_constraint_2_shift); - auto quotient_low_limbs_range_constraint_3_shift = View(in.quotient_low_limbs_range_constraint_3_shift); - auto quotient_low_limbs_range_constraint_4_shift = View(in.quotient_low_limbs_range_constraint_4_shift); - auto quotient_high_binary_limbs = View(in.quotient_high_binary_limbs); - auto quotient_high_limbs_range_constraint_0 = View(in.quotient_high_limbs_range_constraint_0); - auto quotient_high_limbs_range_constraint_1 = View(in.quotient_high_limbs_range_constraint_1); - auto quotient_high_limbs_range_constraint_2 = View(in.quotient_high_limbs_range_constraint_2); - auto quotient_high_limbs_range_constraint_3 = View(in.quotient_high_limbs_range_constraint_3); - auto quotient_high_limbs_range_constraint_4 = View(in.quotient_high_limbs_range_constraint_4); - auto quotient_high_binary_limbs_shift = 
View(in.quotient_high_binary_limbs_shift); - auto quotient_high_limbs_range_constraint_0_shift = View(in.quotient_high_limbs_range_constraint_0_shift); - auto quotient_high_limbs_range_constraint_1_shift = View(in.quotient_high_limbs_range_constraint_1_shift); - auto quotient_high_limbs_range_constraint_2_shift = View(in.quotient_high_limbs_range_constraint_2_shift); - auto quotient_high_limbs_range_constraint_3_shift = View(in.quotient_high_limbs_range_constraint_3_shift); - auto relation_wide_limbs = View(in.relation_wide_limbs); - auto relation_wide_limbs_range_constraint_0 = View(in.relation_wide_limbs_range_constraint_0); - auto relation_wide_limbs_range_constraint_1 = View(in.relation_wide_limbs_range_constraint_1); - auto relation_wide_limbs_range_constraint_2 = View(in.relation_wide_limbs_range_constraint_2); - auto relation_wide_limbs_range_constraint_3 = View(in.relation_wide_limbs_range_constraint_3); - auto p_x_high_limbs_range_constraint_tail_shift = View(in.p_x_high_limbs_range_constraint_tail_shift); - auto accumulator_high_limbs_range_constraint_tail_shift = - View(in.accumulator_high_limbs_range_constraint_tail_shift); - auto relation_wide_limbs_shift = View(in.relation_wide_limbs_shift); - auto relation_wide_limbs_range_constraint_0_shift = View(in.relation_wide_limbs_range_constraint_0_shift); - auto relation_wide_limbs_range_constraint_1_shift = View(in.relation_wide_limbs_range_constraint_1_shift); - auto relation_wide_limbs_range_constraint_2_shift = View(in.relation_wide_limbs_range_constraint_2_shift); - auto relation_wide_limbs_range_constraint_3_shift = View(in.relation_wide_limbs_range_constraint_3_shift); - auto p_y_high_limbs_range_constraint_tail_shift = View(in.p_y_high_limbs_range_constraint_tail_shift); - auto quotient_high_limbs_range_constraint_tail_shift = View(in.quotient_high_limbs_range_constraint_tail_shift); - auto p_x_low_limbs_range_constraint_tail = View(in.p_x_low_limbs_range_constraint_tail); - auto 
p_x_low_limbs_range_constraint_tail_shift = View(in.p_x_low_limbs_range_constraint_tail_shift); - auto p_x_high_limbs_range_constraint_tail = View(in.p_x_high_limbs_range_constraint_tail); - auto p_x_high_limbs_range_constraint_4_shift = View(in.p_x_high_limbs_range_constraint_4_shift); - auto p_y_low_limbs_range_constraint_tail = View(in.p_y_low_limbs_range_constraint_tail); - auto p_y_low_limbs_range_constraint_tail_shift = View(in.p_y_low_limbs_range_constraint_tail_shift); - auto p_y_high_limbs_range_constraint_tail = View(in.p_y_high_limbs_range_constraint_tail); - auto p_y_high_limbs_range_constraint_4_shift = View(in.p_y_high_limbs_range_constraint_4_shift); - auto z_low_limbs_range_constraint_tail = View(in.z_low_limbs_range_constraint_tail); - auto z_low_limbs_range_constraint_tail_shift = View(in.z_low_limbs_range_constraint_tail_shift); - auto z_high_limbs_range_constraint_tail = View(in.z_high_limbs_range_constraint_tail); - auto z_high_limbs_range_constraint_tail_shift = View(in.z_high_limbs_range_constraint_tail_shift); - auto accumulator_low_limbs_range_constraint_tail = View(in.accumulator_low_limbs_range_constraint_tail); - auto accumulator_low_limbs_range_constraint_tail_shift = View(in.accumulator_low_limbs_range_constraint_tail_shift); - auto accumulator_high_limbs_range_constraint_tail = View(in.accumulator_high_limbs_range_constraint_tail); - auto accumulator_high_limbs_range_constraint_4_shift = View(in.accumulator_high_limbs_range_constraint_4_shift); - auto quotient_low_limbs_range_constraint_tail = View(in.quotient_low_limbs_range_constraint_tail); - auto quotient_low_limbs_range_constraint_tail_shift = View(in.quotient_low_limbs_range_constraint_tail_shift); - auto quotient_high_limbs_range_constraint_tail = View(in.quotient_high_limbs_range_constraint_tail); - auto quotient_high_limbs_range_constraint_4_shift = View(in.quotient_high_limbs_range_constraint_4_shift); - auto x_lo_y_hi = View(in.x_lo_y_hi); - auto x_hi_z_1 = 
View(in.x_hi_z_1); - auto y_lo_z_2 = View(in.y_lo_z_2); - auto x_lo_y_hi_shift = View(in.x_lo_y_hi_shift); - auto x_hi_z_1_shift = View(in.x_hi_z_1_shift); - auto y_lo_z_2_shift = View(in.y_lo_z_2_shift); - auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); - - // Contributions that decompose 50, 52, 68 or 84 bit limbs used for computation into range-constrained chunks - // Contribution 1 , P_x lowest limb decomposition - auto tmp_1 = ((p_x_low_limbs_range_constraint_0 + p_x_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_x_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_x_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_x_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_x_low_limbs); - tmp_1 *= lagrange_even_in_minicircuit; - tmp_1 *= scaling_factor; - std::get<0>(accumulators) += tmp_1; - - // Contribution 2 , P_x second lowest limb decomposition - auto tmp_2 = ((p_x_low_limbs_range_constraint_0_shift + p_x_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - p_x_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_x_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - p_x_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - p_x_low_limbs_shift); - tmp_2 *= lagrange_even_in_minicircuit; - tmp_2 *= scaling_factor; - std::get<1>(accumulators) += tmp_2; - - // Contribution 3 , P_x third limb decomposition - auto tmp_3 = ((p_x_high_limbs_range_constraint_0 + p_x_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_x_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_x_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_x_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_x_high_limbs); - tmp_3 *= lagrange_even_in_minicircuit; - tmp_3 *= scaling_factor; - std::get<2>(accumulators) += tmp_3; - - // Contribution 4 , P_x highest limb decomposition - auto tmp_4 = - ((p_x_high_limbs_range_constraint_0_shift + p_x_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - 
p_x_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_x_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - p_x_high_limbs_shift); - tmp_4 *= lagrange_even_in_minicircuit; - tmp_4 *= scaling_factor; - std::get<3>(accumulators) += tmp_4; - - // Contribution 5 , P_y lowest limb decomposition - auto tmp_5 = ((p_y_low_limbs_range_constraint_0 + p_y_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_y_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_y_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_y_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_y_low_limbs); - tmp_5 *= lagrange_even_in_minicircuit; - tmp_5 *= scaling_factor; - std::get<4>(accumulators) += tmp_5; - - // Contribution 6 , P_y second lowest limb decomposition - auto tmp_6 = ((p_y_low_limbs_range_constraint_0_shift + p_y_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - p_y_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_y_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - p_y_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - p_y_low_limbs_shift); - tmp_6 *= lagrange_even_in_minicircuit; - tmp_6 *= scaling_factor; - std::get<5>(accumulators) += tmp_6; - - // Contribution 7 , P_y third limb decomposition - auto tmp_7 = ((p_y_high_limbs_range_constraint_0 + p_y_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - p_y_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - p_y_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_y_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - p_y_high_limbs); - tmp_7 *= lagrange_even_in_minicircuit; - tmp_7 *= scaling_factor; - std::get<6>(accumulators) += tmp_7; - - // Contribution 8 , P_y highest limb decomposition - auto tmp_8 = - ((p_y_high_limbs_range_constraint_0_shift + p_y_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - p_y_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - p_y_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - 
p_y_high_limbs_shift); - tmp_8 *= lagrange_even_in_minicircuit; - tmp_8 *= scaling_factor; - std::get<7>(accumulators) += tmp_8; - - // Contribution 9 , z_1 low limb decomposition - auto tmp_9 = - ((z_low_limbs_range_constraint_0 + z_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - z_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + z_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - z_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - z_low_limbs); - tmp_9 *= lagrange_even_in_minicircuit; - tmp_9 *= scaling_factor; - std::get<8>(accumulators) += tmp_9; - - // Contribution 10 , z_2 low limb decomposition - auto tmp_10 = ((z_low_limbs_range_constraint_0_shift + z_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - z_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - z_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - z_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - z_low_limbs_shift); - tmp_10 *= lagrange_even_in_minicircuit; - tmp_10 *= scaling_factor; - std::get<9>(accumulators) += tmp_10; - - // Contribution 11 , z_1 high limb decomposition - auto tmp_11 = - ((z_high_limbs_range_constraint_0 + z_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - z_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + z_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - z_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - z_high_limbs); - tmp_11 *= lagrange_even_in_minicircuit; - tmp_11 *= scaling_factor; - std::get<10>(accumulators) += tmp_11; - - // Contribution 12 , z_2 high limb decomposition - auto tmp_12 = ((z_high_limbs_range_constraint_0_shift + z_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - z_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - z_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - z_high_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - z_high_limbs_shift); - tmp_12 *= lagrange_even_in_minicircuit; - tmp_12 *= scaling_factor; - std::get<11>(accumulators) += 
tmp_12; - - // Contribution 13 , accumulator lowest limb decomposition - auto tmp_13 = - ((accumulator_low_limbs_range_constraint_0 + accumulator_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - accumulator_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - accumulator_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - accumulator_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - accumulators_binary_limbs_0); - tmp_13 *= lagrange_even_in_minicircuit; - tmp_13 *= scaling_factor; - std::get<12>(accumulators) += tmp_13; - // Contribution 14 , accumulator second limb decomposition - auto tmp_14 = ((accumulator_low_limbs_range_constraint_0_shift + - accumulator_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - accumulator_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - accumulator_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - accumulator_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - accumulators_binary_limbs_1); - tmp_14 *= lagrange_even_in_minicircuit; - tmp_14 *= scaling_factor; - std::get<13>(accumulators) += tmp_14; - - // Contribution 15 , accumulator second highest limb decomposition - auto tmp_15 = - ((accumulator_high_limbs_range_constraint_0 + accumulator_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - accumulator_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - accumulator_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - accumulator_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - accumulators_binary_limbs_2); - tmp_15 *= lagrange_even_in_minicircuit; - tmp_15 *= scaling_factor; - std::get<14>(accumulators) += tmp_15; - // Contribution 16 , accumulator highest limb decomposition - auto tmp_16 = ((accumulator_high_limbs_range_constraint_0_shift + - accumulator_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - accumulator_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - accumulator_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - 
accumulators_binary_limbs_3); - tmp_16 *= lagrange_even_in_minicircuit; - tmp_16 *= scaling_factor; - std::get<15>(accumulators) += tmp_16; - - // Contribution 15 , quotient lowest limb decomposition - auto tmp_17 = ((quotient_low_limbs_range_constraint_0 + quotient_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - quotient_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - quotient_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - quotient_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - quotient_low_binary_limbs); - tmp_17 *= lagrange_even_in_minicircuit; - tmp_17 *= scaling_factor; - std::get<16>(accumulators) += tmp_17; - // Contribution 16 , quotient second lowest limb decomposition - auto tmp_18 = - ((quotient_low_limbs_range_constraint_0_shift + quotient_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - quotient_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - quotient_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - quotient_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - - quotient_low_binary_limbs_shift); - tmp_18 *= lagrange_even_in_minicircuit; - tmp_18 *= scaling_factor; - std::get<17>(accumulators) += tmp_18; - - // Contribution 19 , quotient second highest limb decomposition - auto tmp_19 = ((quotient_high_limbs_range_constraint_0 + quotient_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - quotient_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - quotient_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - quotient_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - - quotient_high_binary_limbs); - tmp_19 *= lagrange_even_in_minicircuit; - tmp_19 *= scaling_factor; - std::get<18>(accumulators) += tmp_19; - // Contribution 20 , quotient highest limb decomposition - auto tmp_20 = ((quotient_high_limbs_range_constraint_0_shift + - quotient_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - quotient_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - 
quotient_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - - quotient_high_binary_limbs_shift); - tmp_20 *= lagrange_even_in_minicircuit; - tmp_20 *= scaling_factor; - std::get<19>(accumulators) += tmp_20; - - // Contribution 21 , decomposition of the low wide relation limb used for the bigfield relation. - // N.B. top microlimbs of relation wide limbs are stored in microlimbs for range constraints of P_x, P_y, - // accumulator and quotient. This is to save space and because these microlimbs are not used by their namesakes, - // since top limbs in 254/6-bit values use one less microlimb for the top 50/52-bit limb - auto tmp_21 = ((relation_wide_limbs_range_constraint_0 + relation_wide_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + - relation_wide_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + - relation_wide_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + - p_x_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + - accumulator_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - - relation_wide_limbs); - tmp_21 *= lagrange_even_in_minicircuit; - tmp_21 *= scaling_factor; - std::get<20>(accumulators) += tmp_21; - - // Contribution 22 , decomposition of high relation limb - auto tmp_22 = ((relation_wide_limbs_range_constraint_0_shift + - relation_wide_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + - relation_wide_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + - relation_wide_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + - p_y_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + - quotient_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - - relation_wide_limbs_shift); - tmp_22 *= lagrange_even_in_minicircuit; - tmp_22 *= scaling_factor; - std::get<21>(accumulators) += tmp_22; - - // Contributions enfocing a reduced range constraint on high limbs (these relation force the last microlimb in - // each limb to be more severely range constrained) - - // Contribution 23, range constrain the highest 
microlimb of lowest P.x limb to be 12 bits (68 % 14 = 12) - auto tmp_23 = p_x_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_low_limbs_range_constraint_tail; - tmp_23 *= lagrange_even_in_minicircuit; - tmp_23 *= scaling_factor; - std::get<22>(accumulators) += tmp_23; - - // Contribution 24, range constrain the highest microlimb of second lowest P.x limb to be 12 bits - auto tmp_24 = p_x_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - p_x_low_limbs_range_constraint_tail_shift; - tmp_24 *= lagrange_even_in_minicircuit; - tmp_24 *= scaling_factor; - std::get<23>(accumulators) += tmp_24; - - // Contribution 25, range constrain the highest microlimb of second highest P.x limb to be 12 bits - auto tmp_25 = p_x_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_high_limbs_range_constraint_tail; - tmp_25 *= lagrange_even_in_minicircuit; - tmp_25 *= scaling_factor; - std::get<24>(accumulators) += tmp_25; - - // Contribution 26, range constrain the highest microilmb of highest P.x limb to be 8 bits (50 % 14 = 8) - auto tmp_26 = (p_x_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - p_x_high_limbs_range_constraint_4_shift); - - tmp_26 *= lagrange_even_in_minicircuit; - tmp_26 *= scaling_factor; - std::get<25>(accumulators) += tmp_26; - - // Contribution 27, range constrain the highest microlimb of lowest P.y limb to be 12 bits (68 % 14 = 12) - auto tmp_27 = p_y_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail; - tmp_27 *= lagrange_even_in_minicircuit; - tmp_27 *= scaling_factor; - std::get<26>(accumulators) += tmp_27; - - // Contribution 28, range constrain the highest microlimb of second lowest P.y limb to be 12 bits (68 % 14 = 12) - auto tmp_28 = p_y_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail_shift; - tmp_28 *= lagrange_even_in_minicircuit; - tmp_28 *= scaling_factor; - std::get<27>(accumulators) += tmp_28; - - // Contribution 29, range constrain the highest microlimb of 
second highest P.y limb to be 12 bits (68 % 14 = - // 12) - auto tmp_29 = p_y_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_high_limbs_range_constraint_tail; - tmp_29 *= lagrange_even_in_minicircuit; - tmp_29 *= scaling_factor; - std::get<28>(accumulators) += tmp_29; - - // Contribution 30, range constrain the highest microlimb of highest P.y limb to be 8 bits (50 % 14 = 8) - auto tmp_30 = (p_y_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - p_y_high_limbs_range_constraint_4_shift); - - tmp_30 *= lagrange_even_in_minicircuit; - tmp_30 *= scaling_factor; - std::get<29>(accumulators) += tmp_30; - - // Contribution 31, range constrain the highest microlimb of low z1 limb to be 12 bits (68 % 14 = 12) - auto tmp_31 = (z_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail); - tmp_31 *= lagrange_even_in_minicircuit; - tmp_31 *= scaling_factor; - std::get<30>(accumulators) += tmp_31; - - // Contribution 32, range constrain the highest microlimb of low z2 limb to be 12 bits (68 % 14 = 12) - auto tmp_32 = (z_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail_shift); - tmp_32 *= lagrange_even_in_minicircuit; - tmp_32 *= scaling_factor; - std::get<31>(accumulators) += tmp_32; - - // Contribution 33, range constrain the highest microlimb of high z1 limb to be 4 bits (60 % 14 = 12) - auto tmp_33 = (z_high_limbs_range_constraint_4 * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail); - tmp_33 *= lagrange_even_in_minicircuit; - tmp_33 *= scaling_factor; - std::get<32>(accumulators) += tmp_33; - - // Contribution 34, range constrain the highest microlimb of high z2 limb to be 4 bits (60 % 14 = 12) - auto tmp_34 = (z_high_limbs_range_constraint_4_shift * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail_shift); - tmp_34 *= lagrange_even_in_minicircuit; - tmp_34 *= scaling_factor; - std::get<33>(accumulators) += tmp_34; - - // Contribution 35, range constrain the highest microlimb of lowest current 
accumulator limb to be 12 bits (68 % - // 14 = 12) - auto tmp_35 = - (accumulator_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_low_limbs_range_constraint_tail); - tmp_35 *= lagrange_even_in_minicircuit; - tmp_35 *= scaling_factor; - std::get<34>(accumulators) += tmp_35; - - // Contribution 36, range constrain the highest microlimb of second lowest current accumulator limb to be 12 - // bits (68 % 14 = 12) - auto tmp_36 = (accumulator_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - - accumulator_low_limbs_range_constraint_tail_shift); - tmp_36 *= lagrange_even_in_minicircuit; - tmp_36 *= scaling_factor; - std::get<35>(accumulators) += tmp_36; - - // Contribution 37, range constrain the highest microlimb of second highest current accumulator limb to be 12 - // bits (68 % 14 = 12) - auto tmp_37 = - (accumulator_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_high_limbs_range_constraint_tail); - tmp_37 *= lagrange_even_in_minicircuit; - tmp_37 *= scaling_factor; - std::get<36>(accumulators) += tmp_37; - - // Contribution 38, range constrain the highest microlimb of highest current accumulator limb to be 8 bits (50 % - // 14 = 12) - auto tmp_38 = (accumulator_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - - accumulator_high_limbs_range_constraint_4_shift); - tmp_38 *= lagrange_even_in_minicircuit; - tmp_38 *= scaling_factor; - std::get<37>(accumulators) += tmp_38; - - // Contribution 39, range constrain the highest microlimb of lowest quotient limb to be 12 bits (68 % 14 = 12) - auto tmp_39 = (quotient_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_low_limbs_range_constraint_tail); - tmp_39 *= lagrange_even_in_minicircuit; - tmp_39 *= scaling_factor; - std::get<38>(accumulators) += tmp_39; - - // Contribution 40, range constrain the highest microlimb of second lowest quotient limb to be 12 bits (68 % 14 - // = 12) - auto tmp_40 = - (quotient_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - 
quotient_low_limbs_range_constraint_tail_shift); - tmp_40 *= lagrange_even_in_minicircuit; - tmp_40 *= scaling_factor; - std::get<39>(accumulators) += tmp_40; - - // Contribution 41, range constrain the highest microlimb of second highest quotient limb to be 12 bits (68 % 14 - // = 12) - auto tmp_41 = (quotient_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_high_limbs_range_constraint_tail); - tmp_41 *= lagrange_even_in_minicircuit; - tmp_41 *= scaling_factor; - std::get<40>(accumulators) += tmp_41; - - // Contribution 42, range constrain the highest microlimb of highest quotient limb to be 10 bits (52 % 14 = 12) - auto tmp_42 = - (quotient_high_limbs_range_constraint_3_shift * SHIFT_10_TO_14 - quotient_high_limbs_range_constraint_4_shift); - tmp_42 *= lagrange_even_in_minicircuit; - tmp_42 *= scaling_factor; - std::get<41>(accumulators) += tmp_42; - - // Contributions where we decompose initial EccOpQueue values into 68-bit limbs - - // Contribution 43, decompose x_lo - auto tmp_43 = (p_x_low_limbs + p_x_low_limbs_shift * LIMB_SHIFT) - x_lo_y_hi; - tmp_43 *= lagrange_even_in_minicircuit; - tmp_43 *= scaling_factor; - std::get<42>(accumulators) += tmp_43; - - // Contribution 44, decompose x_hi - auto tmp_44 = (p_x_high_limbs + p_x_high_limbs_shift * LIMB_SHIFT) - x_hi_z_1; - tmp_44 *= lagrange_even_in_minicircuit; - tmp_44 *= scaling_factor; - std::get<43>(accumulators) += tmp_44; - // Contribution 45, decompose y_lo - auto tmp_45 = (p_y_low_limbs + p_y_low_limbs_shift * LIMB_SHIFT) - y_lo_z_2; - tmp_45 *= lagrange_even_in_minicircuit; - tmp_45 *= scaling_factor; - std::get<44>(accumulators) += tmp_45; - - // Contribution 46, decompose y_hi - auto tmp_46 = (p_y_high_limbs + p_y_high_limbs_shift * LIMB_SHIFT) - x_lo_y_hi_shift; - tmp_46 *= lagrange_even_in_minicircuit; - tmp_46 *= scaling_factor; - std::get<45>(accumulators) += tmp_46; - - // Contribution 47, decompose z1 - auto tmp_47 = (z_low_limbs + z_high_limbs * LIMB_SHIFT) - x_hi_z_1_shift; - tmp_47 
*= lagrange_even_in_minicircuit; - tmp_47 *= scaling_factor; - std::get<46>(accumulators) += tmp_47; - - // Contribution 48, decompose z2 - auto tmp_48 = (z_low_limbs_shift + z_high_limbs_shift * LIMB_SHIFT) - y_lo_z_2_shift; - tmp_48 *= lagrange_even_in_minicircuit; - tmp_48 *= scaling_factor; - std::get<47>(accumulators) += tmp_48; + [&]() { + // Within the no-op range i.e. when the op polynomial is 0 at even index the 2 Translator trace rows are empty + // except for the accumulator binary limbs which get transferred across the no-op range + using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + using View = typename Accumulator::View; + + // Values to multiply an element by to perform an appropriate shift + static auto MICRO_LIMB_SHIFT = FF(uint256_t(1) << NUM_MICRO_LIMB_BITS); + static auto MICRO_LIMB_SHIFTx2 = MICRO_LIMB_SHIFT * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx3 = MICRO_LIMB_SHIFTx2 * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx4 = MICRO_LIMB_SHIFTx3 * MICRO_LIMB_SHIFT; + + auto accumulators_binary_limbs_0 = View(in.accumulators_binary_limbs_0); + auto accumulators_binary_limbs_1 = View(in.accumulators_binary_limbs_1); + auto accumulators_binary_limbs_2 = View(in.accumulators_binary_limbs_2); + auto accumulators_binary_limbs_3 = View(in.accumulators_binary_limbs_3); + auto accumulator_low_limbs_range_constraint_0 = View(in.accumulator_low_limbs_range_constraint_0); + auto accumulator_low_limbs_range_constraint_1 = View(in.accumulator_low_limbs_range_constraint_1); + auto accumulator_low_limbs_range_constraint_2 = View(in.accumulator_low_limbs_range_constraint_2); + auto accumulator_low_limbs_range_constraint_3 = View(in.accumulator_low_limbs_range_constraint_3); + auto accumulator_low_limbs_range_constraint_4 = View(in.accumulator_low_limbs_range_constraint_4); + auto accumulator_low_limbs_range_constraint_0_shift = View(in.accumulator_low_limbs_range_constraint_0_shift); + auto 
accumulator_low_limbs_range_constraint_1_shift = View(in.accumulator_low_limbs_range_constraint_1_shift); + auto accumulator_low_limbs_range_constraint_2_shift = View(in.accumulator_low_limbs_range_constraint_2_shift); + auto accumulator_low_limbs_range_constraint_3_shift = View(in.accumulator_low_limbs_range_constraint_3_shift); + auto accumulator_low_limbs_range_constraint_4_shift = View(in.accumulator_low_limbs_range_constraint_4_shift); + auto accumulator_high_limbs_range_constraint_0 = View(in.accumulator_high_limbs_range_constraint_0); + auto accumulator_high_limbs_range_constraint_1 = View(in.accumulator_high_limbs_range_constraint_1); + auto accumulator_high_limbs_range_constraint_2 = View(in.accumulator_high_limbs_range_constraint_2); + auto accumulator_high_limbs_range_constraint_3 = View(in.accumulator_high_limbs_range_constraint_3); + auto accumulator_high_limbs_range_constraint_4 = View(in.accumulator_high_limbs_range_constraint_4); + auto accumulator_high_limbs_range_constraint_0_shift = View(in.accumulator_high_limbs_range_constraint_0_shift); + auto accumulator_high_limbs_range_constraint_1_shift = View(in.accumulator_high_limbs_range_constraint_1_shift); + auto accumulator_high_limbs_range_constraint_2_shift = View(in.accumulator_high_limbs_range_constraint_2_shift); + auto accumulator_high_limbs_range_constraint_3_shift = View(in.accumulator_high_limbs_range_constraint_3_shift); + auto op = View(in.op); + auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); + auto not_even_or_no_op_scaled = lagrange_even_in_minicircuit * op * scaling_factor; + + // Contribution 1, accumulator lowest limb decomposition + auto tmp_1 = + ((accumulator_low_limbs_range_constraint_0 + accumulator_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + accumulator_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + accumulator_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + accumulator_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + 
accumulators_binary_limbs_0); + tmp_1 *= not_even_or_no_op_scaled; + std::get<0>(accumulators) += tmp_1; + + // Contribution 2, accumulator second limb decomposition + auto tmp_2 = ((accumulator_low_limbs_range_constraint_0_shift + + accumulator_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + accumulator_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + accumulator_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + accumulator_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + accumulators_binary_limbs_1); + tmp_2 *= not_even_or_no_op_scaled; + std::get<1>(accumulators) += tmp_2; + + // Contribution 3, accumulator second highest limb decomposition + auto tmp_3 = + ((accumulator_high_limbs_range_constraint_0 + accumulator_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + accumulator_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + accumulator_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + accumulator_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + accumulators_binary_limbs_2); + tmp_3 *= not_even_or_no_op_scaled; + std::get<2>(accumulators) += tmp_3; + + // Contribution 4, accumulator highest limb decomposition + auto tmp_4 = ((accumulator_high_limbs_range_constraint_0_shift + + accumulator_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + accumulator_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + accumulator_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + accumulators_binary_limbs_3); + tmp_4 *= not_even_or_no_op_scaled; + std::get<3>(accumulators) += tmp_4; + }(); + + [&]() { + using Accumulator = std::tuple_element_t<4, ContainerOverSubrelations>; + using View = typename Accumulator::View; + + // Value to multiply an element by to perform an appropriate shift + static auto LIMB_SHIFT = FF(uint256_t(1) << NUM_LIMB_BITS); + + // Values to multiply an element by to perform an appropriate shift + static auto MICRO_LIMB_SHIFT = FF(uint256_t(1) << NUM_MICRO_LIMB_BITS); 
+ static auto MICRO_LIMB_SHIFTx2 = MICRO_LIMB_SHIFT * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx3 = MICRO_LIMB_SHIFTx2 * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx4 = MICRO_LIMB_SHIFTx3 * MICRO_LIMB_SHIFT; + static auto MICRO_LIMB_SHIFTx5 = MICRO_LIMB_SHIFTx4 * MICRO_LIMB_SHIFT; + + auto accumulator_low_limbs_range_constraint_4 = View(in.accumulator_low_limbs_range_constraint_4); + auto accumulator_low_limbs_range_constraint_4_shift = View(in.accumulator_low_limbs_range_constraint_4_shift); + auto accumulator_high_limbs_range_constraint_4 = View(in.accumulator_high_limbs_range_constraint_4); + + // Shifts used to constrain ranges further + static auto SHIFT_12_TO_14 = + FF(4); // Shift used to range constrain the last microlimb of 68-bit limbs (standard limbs) + static auto SHIFT_10_TO_14 = + FF(16); // Shift used to range constrain the last microlimb of 52-bit limb (top quotient limb) + static auto SHIFT_8_TO_14 = FF(64); // Shift used to range constrain the last microlimb of 50-bit + // limbs (top limb of standard 254-bit value) + static auto SHIFT_4_TO_14 = + FF(1024); // Shift used to range constrain the last microlimb of 60-bit limbs from z scalars + + auto p_x_low_limbs = View(in.p_x_low_limbs); + auto p_x_low_limbs_range_constraint_0 = View(in.p_x_low_limbs_range_constraint_0); + auto p_x_low_limbs_range_constraint_1 = View(in.p_x_low_limbs_range_constraint_1); + auto p_x_low_limbs_range_constraint_2 = View(in.p_x_low_limbs_range_constraint_2); + auto p_x_low_limbs_range_constraint_3 = View(in.p_x_low_limbs_range_constraint_3); + auto p_x_low_limbs_range_constraint_4 = View(in.p_x_low_limbs_range_constraint_4); + auto p_x_low_limbs_shift = View(in.p_x_low_limbs_shift); + auto p_x_low_limbs_range_constraint_0_shift = View(in.p_x_low_limbs_range_constraint_0_shift); + auto p_x_low_limbs_range_constraint_1_shift = View(in.p_x_low_limbs_range_constraint_1_shift); + auto p_x_low_limbs_range_constraint_2_shift = 
View(in.p_x_low_limbs_range_constraint_2_shift); + auto p_x_low_limbs_range_constraint_3_shift = View(in.p_x_low_limbs_range_constraint_3_shift); + auto p_x_low_limbs_range_constraint_4_shift = View(in.p_x_low_limbs_range_constraint_4_shift); + auto p_x_high_limbs = View(in.p_x_high_limbs); + auto p_x_high_limbs_range_constraint_0 = View(in.p_x_high_limbs_range_constraint_0); + auto p_x_high_limbs_range_constraint_1 = View(in.p_x_high_limbs_range_constraint_1); + auto p_x_high_limbs_range_constraint_2 = View(in.p_x_high_limbs_range_constraint_2); + auto p_x_high_limbs_range_constraint_3 = View(in.p_x_high_limbs_range_constraint_3); + auto p_x_high_limbs_range_constraint_4 = View(in.p_x_high_limbs_range_constraint_4); + auto p_x_high_limbs_shift = View(in.p_x_high_limbs_shift); + auto p_x_high_limbs_range_constraint_0_shift = View(in.p_x_high_limbs_range_constraint_0_shift); + auto p_x_high_limbs_range_constraint_1_shift = View(in.p_x_high_limbs_range_constraint_1_shift); + auto p_x_high_limbs_range_constraint_2_shift = View(in.p_x_high_limbs_range_constraint_2_shift); + auto p_x_high_limbs_range_constraint_3_shift = View(in.p_x_high_limbs_range_constraint_3_shift); + auto p_y_low_limbs = View(in.p_y_low_limbs); + auto p_y_low_limbs_range_constraint_0 = View(in.p_y_low_limbs_range_constraint_0); + auto p_y_low_limbs_range_constraint_1 = View(in.p_y_low_limbs_range_constraint_1); + auto p_y_low_limbs_range_constraint_2 = View(in.p_y_low_limbs_range_constraint_2); + auto p_y_low_limbs_range_constraint_3 = View(in.p_y_low_limbs_range_constraint_3); + auto p_y_low_limbs_range_constraint_4 = View(in.p_y_low_limbs_range_constraint_4); + auto p_y_low_limbs_shift = View(in.p_y_low_limbs_shift); + auto p_y_low_limbs_range_constraint_0_shift = View(in.p_y_low_limbs_range_constraint_0_shift); + auto p_y_low_limbs_range_constraint_1_shift = View(in.p_y_low_limbs_range_constraint_1_shift); + auto p_y_low_limbs_range_constraint_2_shift = 
View(in.p_y_low_limbs_range_constraint_2_shift); + auto p_y_low_limbs_range_constraint_3_shift = View(in.p_y_low_limbs_range_constraint_3_shift); + auto p_y_low_limbs_range_constraint_4_shift = View(in.p_y_low_limbs_range_constraint_4_shift); + auto p_y_high_limbs = View(in.p_y_high_limbs); + auto p_y_high_limbs_range_constraint_0 = View(in.p_y_high_limbs_range_constraint_0); + auto p_y_high_limbs_range_constraint_1 = View(in.p_y_high_limbs_range_constraint_1); + auto p_y_high_limbs_range_constraint_2 = View(in.p_y_high_limbs_range_constraint_2); + auto p_y_high_limbs_range_constraint_3 = View(in.p_y_high_limbs_range_constraint_3); + auto p_y_high_limbs_range_constraint_4 = View(in.p_y_high_limbs_range_constraint_4); + auto p_y_high_limbs_shift = View(in.p_y_high_limbs_shift); + auto p_y_high_limbs_range_constraint_0_shift = View(in.p_y_high_limbs_range_constraint_0_shift); + auto p_y_high_limbs_range_constraint_1_shift = View(in.p_y_high_limbs_range_constraint_1_shift); + auto p_y_high_limbs_range_constraint_2_shift = View(in.p_y_high_limbs_range_constraint_2_shift); + auto p_y_high_limbs_range_constraint_3_shift = View(in.p_y_high_limbs_range_constraint_3_shift); + auto z_low_limbs = View(in.z_low_limbs); + auto z_low_limbs_range_constraint_0 = View(in.z_low_limbs_range_constraint_0); + auto z_low_limbs_range_constraint_1 = View(in.z_low_limbs_range_constraint_1); + auto z_low_limbs_range_constraint_2 = View(in.z_low_limbs_range_constraint_2); + auto z_low_limbs_range_constraint_3 = View(in.z_low_limbs_range_constraint_3); + auto z_low_limbs_range_constraint_4 = View(in.z_low_limbs_range_constraint_4); + auto z_low_limbs_shift = View(in.z_low_limbs_shift); + auto z_low_limbs_range_constraint_0_shift = View(in.z_low_limbs_range_constraint_0_shift); + auto z_low_limbs_range_constraint_1_shift = View(in.z_low_limbs_range_constraint_1_shift); + auto z_low_limbs_range_constraint_2_shift = View(in.z_low_limbs_range_constraint_2_shift); + auto 
z_low_limbs_range_constraint_3_shift = View(in.z_low_limbs_range_constraint_3_shift); + auto z_low_limbs_range_constraint_4_shift = View(in.z_low_limbs_range_constraint_4_shift); + auto z_high_limbs = View(in.z_high_limbs); + auto z_high_limbs_range_constraint_0 = View(in.z_high_limbs_range_constraint_0); + auto z_high_limbs_range_constraint_1 = View(in.z_high_limbs_range_constraint_1); + auto z_high_limbs_range_constraint_2 = View(in.z_high_limbs_range_constraint_2); + auto z_high_limbs_range_constraint_3 = View(in.z_high_limbs_range_constraint_3); + auto z_high_limbs_range_constraint_4 = View(in.z_high_limbs_range_constraint_4); + auto z_high_limbs_shift = View(in.z_high_limbs_shift); + auto z_high_limbs_range_constraint_0_shift = View(in.z_high_limbs_range_constraint_0_shift); + auto z_high_limbs_range_constraint_1_shift = View(in.z_high_limbs_range_constraint_1_shift); + auto z_high_limbs_range_constraint_2_shift = View(in.z_high_limbs_range_constraint_2_shift); + auto z_high_limbs_range_constraint_3_shift = View(in.z_high_limbs_range_constraint_3_shift); + auto z_high_limbs_range_constraint_4_shift = View(in.z_high_limbs_range_constraint_4_shift); + auto quotient_low_binary_limbs = View(in.quotient_low_binary_limbs); + auto quotient_low_limbs_range_constraint_0 = View(in.quotient_low_limbs_range_constraint_0); + auto quotient_low_limbs_range_constraint_1 = View(in.quotient_low_limbs_range_constraint_1); + auto quotient_low_limbs_range_constraint_2 = View(in.quotient_low_limbs_range_constraint_2); + auto quotient_low_limbs_range_constraint_3 = View(in.quotient_low_limbs_range_constraint_3); + auto quotient_low_limbs_range_constraint_4 = View(in.quotient_low_limbs_range_constraint_4); + auto quotient_low_binary_limbs_shift = View(in.quotient_low_binary_limbs_shift); + auto quotient_low_limbs_range_constraint_0_shift = View(in.quotient_low_limbs_range_constraint_0_shift); + auto quotient_low_limbs_range_constraint_1_shift = 
View(in.quotient_low_limbs_range_constraint_1_shift); + auto quotient_low_limbs_range_constraint_2_shift = View(in.quotient_low_limbs_range_constraint_2_shift); + auto quotient_low_limbs_range_constraint_3_shift = View(in.quotient_low_limbs_range_constraint_3_shift); + auto quotient_low_limbs_range_constraint_4_shift = View(in.quotient_low_limbs_range_constraint_4_shift); + auto quotient_high_binary_limbs = View(in.quotient_high_binary_limbs); + auto quotient_high_limbs_range_constraint_0 = View(in.quotient_high_limbs_range_constraint_0); + auto quotient_high_limbs_range_constraint_1 = View(in.quotient_high_limbs_range_constraint_1); + auto quotient_high_limbs_range_constraint_2 = View(in.quotient_high_limbs_range_constraint_2); + auto quotient_high_limbs_range_constraint_3 = View(in.quotient_high_limbs_range_constraint_3); + auto quotient_high_limbs_range_constraint_4 = View(in.quotient_high_limbs_range_constraint_4); + auto quotient_high_binary_limbs_shift = View(in.quotient_high_binary_limbs_shift); + auto quotient_high_limbs_range_constraint_0_shift = View(in.quotient_high_limbs_range_constraint_0_shift); + auto quotient_high_limbs_range_constraint_1_shift = View(in.quotient_high_limbs_range_constraint_1_shift); + auto quotient_high_limbs_range_constraint_2_shift = View(in.quotient_high_limbs_range_constraint_2_shift); + auto quotient_high_limbs_range_constraint_3_shift = View(in.quotient_high_limbs_range_constraint_3_shift); + auto relation_wide_limbs = View(in.relation_wide_limbs); + auto relation_wide_limbs_range_constraint_0 = View(in.relation_wide_limbs_range_constraint_0); + auto relation_wide_limbs_range_constraint_1 = View(in.relation_wide_limbs_range_constraint_1); + auto relation_wide_limbs_range_constraint_2 = View(in.relation_wide_limbs_range_constraint_2); + auto relation_wide_limbs_range_constraint_3 = View(in.relation_wide_limbs_range_constraint_3); + auto p_x_high_limbs_range_constraint_tail_shift = 
View(in.p_x_high_limbs_range_constraint_tail_shift); + auto accumulator_high_limbs_range_constraint_tail_shift = + View(in.accumulator_high_limbs_range_constraint_tail_shift); + auto relation_wide_limbs_shift = View(in.relation_wide_limbs_shift); + auto relation_wide_limbs_range_constraint_0_shift = View(in.relation_wide_limbs_range_constraint_0_shift); + auto relation_wide_limbs_range_constraint_1_shift = View(in.relation_wide_limbs_range_constraint_1_shift); + auto relation_wide_limbs_range_constraint_2_shift = View(in.relation_wide_limbs_range_constraint_2_shift); + auto relation_wide_limbs_range_constraint_3_shift = View(in.relation_wide_limbs_range_constraint_3_shift); + auto p_y_high_limbs_range_constraint_tail_shift = View(in.p_y_high_limbs_range_constraint_tail_shift); + auto quotient_high_limbs_range_constraint_tail_shift = View(in.quotient_high_limbs_range_constraint_tail_shift); + auto p_x_low_limbs_range_constraint_tail = View(in.p_x_low_limbs_range_constraint_tail); + auto p_x_low_limbs_range_constraint_tail_shift = View(in.p_x_low_limbs_range_constraint_tail_shift); + auto p_x_high_limbs_range_constraint_tail = View(in.p_x_high_limbs_range_constraint_tail); + auto p_x_high_limbs_range_constraint_4_shift = View(in.p_x_high_limbs_range_constraint_4_shift); + auto p_y_low_limbs_range_constraint_tail = View(in.p_y_low_limbs_range_constraint_tail); + auto p_y_low_limbs_range_constraint_tail_shift = View(in.p_y_low_limbs_range_constraint_tail_shift); + auto p_y_high_limbs_range_constraint_tail = View(in.p_y_high_limbs_range_constraint_tail); + auto p_y_high_limbs_range_constraint_4_shift = View(in.p_y_high_limbs_range_constraint_4_shift); + auto z_low_limbs_range_constraint_tail = View(in.z_low_limbs_range_constraint_tail); + auto z_low_limbs_range_constraint_tail_shift = View(in.z_low_limbs_range_constraint_tail_shift); + auto z_high_limbs_range_constraint_tail = View(in.z_high_limbs_range_constraint_tail); + auto z_high_limbs_range_constraint_tail_shift = 
View(in.z_high_limbs_range_constraint_tail_shift); + auto accumulator_low_limbs_range_constraint_tail = View(in.accumulator_low_limbs_range_constraint_tail); + auto accumulator_low_limbs_range_constraint_tail_shift = + View(in.accumulator_low_limbs_range_constraint_tail_shift); + auto accumulator_high_limbs_range_constraint_tail = View(in.accumulator_high_limbs_range_constraint_tail); + auto accumulator_high_limbs_range_constraint_3_shift = View(in.accumulator_high_limbs_range_constraint_3_shift); + auto accumulator_high_limbs_range_constraint_4_shift = View(in.accumulator_high_limbs_range_constraint_4_shift); + auto quotient_low_limbs_range_constraint_tail = View(in.quotient_low_limbs_range_constraint_tail); + auto quotient_low_limbs_range_constraint_tail_shift = View(in.quotient_low_limbs_range_constraint_tail_shift); + auto quotient_high_limbs_range_constraint_tail = View(in.quotient_high_limbs_range_constraint_tail); + auto quotient_high_limbs_range_constraint_4_shift = View(in.quotient_high_limbs_range_constraint_4_shift); + auto x_lo_y_hi = View(in.x_lo_y_hi); + auto x_hi_z_1 = View(in.x_hi_z_1); + auto y_lo_z_2 = View(in.y_lo_z_2); + auto x_lo_y_hi_shift = View(in.x_lo_y_hi_shift); + auto x_hi_z_1_shift = View(in.x_hi_z_1_shift); + auto y_lo_z_2_shift = View(in.y_lo_z_2_shift); + auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); + + // Contribution 5 , P_y lowest limb decomposition + auto tmp_5 = ((p_y_low_limbs_range_constraint_0 + p_y_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_y_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_y_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_y_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_y_low_limbs); + tmp_5 *= lagrange_even_in_minicircuit; + tmp_5 *= scaling_factor; + std::get<4>(accumulators) += tmp_5; + + // Contribution 6 , P_y second lowest limb decomposition + auto tmp_6 = + ((p_y_low_limbs_range_constraint_0_shift + 
p_y_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_y_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_y_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + p_y_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + p_y_low_limbs_shift); + tmp_6 *= lagrange_even_in_minicircuit; + tmp_6 *= scaling_factor; + std::get<5>(accumulators) += tmp_6; + + // Contribution 7 , P_y third limb decomposition + auto tmp_7 = ((p_y_high_limbs_range_constraint_0 + p_y_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_y_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_y_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_y_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_y_high_limbs); + tmp_7 *= lagrange_even_in_minicircuit; + tmp_7 *= scaling_factor; + std::get<6>(accumulators) += tmp_7; + + // Contribution 8 , P_y highest limb decomposition + auto tmp_8 = + ((p_y_high_limbs_range_constraint_0_shift + p_y_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_y_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_y_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + p_y_high_limbs_shift); + tmp_8 *= lagrange_even_in_minicircuit; + tmp_8 *= scaling_factor; + std::get<7>(accumulators) += tmp_8; + + // Contribution 9 , z_1 low limb decomposition + auto tmp_9 = ((z_low_limbs_range_constraint_0 + z_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + z_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + z_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + z_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + z_low_limbs); + tmp_9 *= lagrange_even_in_minicircuit; + tmp_9 *= scaling_factor; + std::get<8>(accumulators) += tmp_9; + + // Contribution 10 , z_2 low limb decomposition + auto tmp_10 = ((z_low_limbs_range_constraint_0_shift + z_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + z_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + z_low_limbs_range_constraint_3_shift * 
MICRO_LIMB_SHIFTx3 + + z_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + z_low_limbs_shift); + tmp_10 *= lagrange_even_in_minicircuit; + tmp_10 *= scaling_factor; + std::get<9>(accumulators) += tmp_10; + + // Contribution 11 , z_1 high limb decomposition + auto tmp_11 = ((z_high_limbs_range_constraint_0 + z_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + z_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + z_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + z_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + z_high_limbs); + tmp_11 *= lagrange_even_in_minicircuit; + tmp_11 *= scaling_factor; + std::get<10>(accumulators) += tmp_11; + + // Contribution 12 , z_2 high limb decomposition + auto tmp_12 = + ((z_high_limbs_range_constraint_0_shift + z_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + z_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + z_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + z_high_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + z_high_limbs_shift); + tmp_12 *= lagrange_even_in_minicircuit; + tmp_12 *= scaling_factor; + std::get<11>(accumulators) += tmp_12; + + // Contributions that decompose 50, 52, 68 or 84 bit limbs used for computation into range-constrained chunks + // Contribution 13, P_x lowest limb decomposition + auto tmp_13 = ((p_x_low_limbs_range_constraint_0 + p_x_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_x_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_x_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_x_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_x_low_limbs); + tmp_13 *= lagrange_even_in_minicircuit; + tmp_13 *= scaling_factor; + std::get<12>(accumulators) += tmp_13; + + // Contribution 14 , P_x second lowest limb decomposition + auto tmp_14 = + ((p_x_low_limbs_range_constraint_0_shift + p_x_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_x_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + 
p_x_low_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + p_x_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + p_x_low_limbs_shift); + tmp_14 *= lagrange_even_in_minicircuit; + tmp_14 *= scaling_factor; + std::get<13>(accumulators) += tmp_14; + + // Contribution 15 , P_x third limb decomposition + auto tmp_15 = ((p_x_high_limbs_range_constraint_0 + p_x_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + p_x_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + p_x_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_x_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + p_x_high_limbs); + tmp_15 *= lagrange_even_in_minicircuit; + tmp_15 *= scaling_factor; + std::get<14>(accumulators) += tmp_15; + + // Contribution 16 , P_x highest limb decomposition + auto tmp_16 = + ((p_x_high_limbs_range_constraint_0_shift + p_x_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + p_x_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + p_x_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + p_x_high_limbs_shift); + tmp_16 *= lagrange_even_in_minicircuit; + tmp_16 *= scaling_factor; + std::get<15>(accumulators) += tmp_16; + + // Contribution 17 , quotient lowest limb decomposition + auto tmp_17 = + ((quotient_low_limbs_range_constraint_0 + quotient_low_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + quotient_low_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + quotient_low_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + quotient_low_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + quotient_low_binary_limbs); + tmp_17 *= lagrange_even_in_minicircuit; + tmp_17 *= scaling_factor; + std::get<16>(accumulators) += tmp_17; + // Contribution 18 , quotient second lowest limb decomposition + auto tmp_18 = ((quotient_low_limbs_range_constraint_0_shift + + quotient_low_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + quotient_low_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + quotient_low_limbs_range_constraint_3_shift * 
MICRO_LIMB_SHIFTx3 + + quotient_low_limbs_range_constraint_4_shift * MICRO_LIMB_SHIFTx4) - + quotient_low_binary_limbs_shift); + tmp_18 *= lagrange_even_in_minicircuit; + tmp_18 *= scaling_factor; + std::get<17>(accumulators) += tmp_18; + + // Contribution 19 , quotient second highest limb decomposition + auto tmp_19 = + ((quotient_high_limbs_range_constraint_0 + quotient_high_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + quotient_high_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + quotient_high_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + quotient_high_limbs_range_constraint_4 * MICRO_LIMB_SHIFTx4) - + quotient_high_binary_limbs); + tmp_19 *= lagrange_even_in_minicircuit; + tmp_19 *= scaling_factor; + std::get<18>(accumulators) += tmp_19; + // Contribution 20 , quotient highest limb decomposition + auto tmp_20 = ((quotient_high_limbs_range_constraint_0_shift + + quotient_high_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + quotient_high_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + quotient_high_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3) - + quotient_high_binary_limbs_shift); + tmp_20 *= lagrange_even_in_minicircuit; + tmp_20 *= scaling_factor; + std::get<19>(accumulators) += tmp_20; + + // Contribution 21 , decomposition of the low wide relation limb used for the bigfield relation. + // N.B. top microlimbs of relation wide limbs are stored in microlimbs for range constraints of P_x, P_y, + // accumulator and quotient. 
This is to save space and because these microlimbs are not used by their namesakes, + since top limbs in 254/6-bit values use one less microlimb for the top 50/52-bit limb + auto tmp_21 = + ((relation_wide_limbs_range_constraint_0 + relation_wide_limbs_range_constraint_1 * MICRO_LIMB_SHIFT + + relation_wide_limbs_range_constraint_2 * MICRO_LIMB_SHIFTx2 + + relation_wide_limbs_range_constraint_3 * MICRO_LIMB_SHIFTx3 + + p_x_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + + accumulator_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - + relation_wide_limbs); + tmp_21 *= lagrange_even_in_minicircuit; + tmp_21 *= scaling_factor; + std::get<20>(accumulators) += tmp_21; + + // Contribution 22 , decomposition of high relation limb + auto tmp_22 = ((relation_wide_limbs_range_constraint_0_shift + + relation_wide_limbs_range_constraint_1_shift * MICRO_LIMB_SHIFT + + relation_wide_limbs_range_constraint_2_shift * MICRO_LIMB_SHIFTx2 + + relation_wide_limbs_range_constraint_3_shift * MICRO_LIMB_SHIFTx3 + + p_y_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx4 + + quotient_high_limbs_range_constraint_tail_shift * MICRO_LIMB_SHIFTx5) - + relation_wide_limbs_shift); + tmp_22 *= lagrange_even_in_minicircuit; + tmp_22 *= scaling_factor; + std::get<21>(accumulators) += tmp_22; + + // Contributions enforcing a reduced range constraint on high limbs (these relations force the last microlimb in + // each limb to be more severely range constrained) + + // Contribution 23, range constrain the highest microlimb of lowest P.x limb to be 12 bits (68 % 14 = 12) + auto tmp_23 = p_x_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_low_limbs_range_constraint_tail; + tmp_23 *= lagrange_even_in_minicircuit; + tmp_23 *= scaling_factor; + std::get<22>(accumulators) += tmp_23; + + // Contribution 24, range constrain the highest microlimb of second lowest P.x limb to be 12 bits + auto tmp_24 = + p_x_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - 
p_x_low_limbs_range_constraint_tail_shift; + tmp_24 *= lagrange_even_in_minicircuit; + tmp_24 *= scaling_factor; + std::get<23>(accumulators) += tmp_24; + + // Contribution 25, range constrain the highest microlimb of second highest P.x limb to be 12 bits + auto tmp_25 = p_x_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_x_high_limbs_range_constraint_tail; + tmp_25 *= lagrange_even_in_minicircuit; + tmp_25 *= scaling_factor; + std::get<24>(accumulators) += tmp_25; + + // Contribution 26, range constrain the highest microlimb of highest P.x limb to be 8 bits (50 % 14 = 8) + auto tmp_26 = + (p_x_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - p_x_high_limbs_range_constraint_4_shift); + + tmp_26 *= lagrange_even_in_minicircuit; + tmp_26 *= scaling_factor; + std::get<25>(accumulators) += tmp_26; + + // Contribution 27, range constrain the highest microlimb of lowest P.y limb to be 12 bits (68 % 14 = 12) + auto tmp_27 = p_y_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail; + tmp_27 *= lagrange_even_in_minicircuit; + tmp_27 *= scaling_factor; + std::get<26>(accumulators) += tmp_27; + + // Contribution 28, range constrain the highest microlimb of second lowest P.y limb to be 12 bits (68 % 14 = 12) + auto tmp_28 = + p_y_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - p_y_low_limbs_range_constraint_tail_shift; + tmp_28 *= lagrange_even_in_minicircuit; + tmp_28 *= scaling_factor; + std::get<27>(accumulators) += tmp_28; + + // Contribution 29, range constrain the highest microlimb of second highest P.y limb to be 12 bits (68 % 14 = + // 12) + auto tmp_29 = p_y_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - p_y_high_limbs_range_constraint_tail; + tmp_29 *= lagrange_even_in_minicircuit; + tmp_29 *= scaling_factor; + std::get<28>(accumulators) += tmp_29; + + // Contribution 30, range constrain the highest microlimb of highest P.y limb to be 8 bits (50 % 14 = 8) + auto tmp_30 = + (p_y_high_limbs_range_constraint_3_shift * 
SHIFT_8_TO_14 - p_y_high_limbs_range_constraint_4_shift); + + tmp_30 *= lagrange_even_in_minicircuit; + tmp_30 *= scaling_factor; + std::get<29>(accumulators) += tmp_30; + + // Contribution 31, range constrain the highest microlimb of low z1 limb to be 12 bits (68 % 14 = 12) + auto tmp_31 = (z_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail); + tmp_31 *= lagrange_even_in_minicircuit; + tmp_31 *= scaling_factor; + std::get<30>(accumulators) += tmp_31; + + // Contribution 32, range constrain the highest microlimb of low z2 limb to be 12 bits (68 % 14 = 12) + auto tmp_32 = (z_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - z_low_limbs_range_constraint_tail_shift); + tmp_32 *= lagrange_even_in_minicircuit; + tmp_32 *= scaling_factor; + std::get<31>(accumulators) += tmp_32; + + // Contribution 33, range constrain the highest microlimb of high z1 limb to be 4 bits (60 % 14 = 4) + auto tmp_33 = (z_high_limbs_range_constraint_4 * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail); + tmp_33 *= lagrange_even_in_minicircuit; + tmp_33 *= scaling_factor; + std::get<32>(accumulators) += tmp_33; + + // Contribution 34, range constrain the highest microlimb of high z2 limb to be 4 bits (60 % 14 = 4) + auto tmp_34 = + (z_high_limbs_range_constraint_4_shift * SHIFT_4_TO_14 - z_high_limbs_range_constraint_tail_shift); + tmp_34 *= lagrange_even_in_minicircuit; + tmp_34 *= scaling_factor; + std::get<33>(accumulators) += tmp_34; + + // Contribution 35, range constrain the highest microlimb of lowest current accumulator limb to be 12 bits (68 % + // 14 = 12) + auto tmp_35 = + (accumulator_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_low_limbs_range_constraint_tail); + tmp_35 *= lagrange_even_in_minicircuit; + tmp_35 *= scaling_factor; + std::get<34>(accumulators) += tmp_35; + + // Contribution 36, range constrain the highest microlimb of second lowest current accumulator limb to be 12 + // bits (68 % 14 = 12) + auto tmp_36 = 
(accumulator_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - + accumulator_low_limbs_range_constraint_tail_shift); + tmp_36 *= lagrange_even_in_minicircuit; + tmp_36 *= scaling_factor; + std::get<35>(accumulators) += tmp_36; + + // Contribution 37, range constrain the highest microlimb of second highest current accumulator limb to be 12 + // bits (68 % 14 = 12) + auto tmp_37 = + (accumulator_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - accumulator_high_limbs_range_constraint_tail); + tmp_37 *= lagrange_even_in_minicircuit; + tmp_37 *= scaling_factor; + std::get<36>(accumulators) += tmp_37; + + // Contribution 38, range constrain the highest microlimb of highest current accumulator limb to be 8 bits (50 % + // 14 = 8) + auto tmp_38 = (accumulator_high_limbs_range_constraint_3_shift * SHIFT_8_TO_14 - + accumulator_high_limbs_range_constraint_4_shift); + tmp_38 *= lagrange_even_in_minicircuit; + tmp_38 *= scaling_factor; + std::get<37>(accumulators) += tmp_38; + + // Contribution 39, range constrain the highest microlimb of lowest quotient limb to be 12 bits (68 % 14 = 12) + auto tmp_39 = + (quotient_low_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_low_limbs_range_constraint_tail); + tmp_39 *= lagrange_even_in_minicircuit; + tmp_39 *= scaling_factor; + std::get<38>(accumulators) += tmp_39; + + // Contribution 40, range constrain the highest microlimb of second lowest quotient limb to be 12 bits (68 % 14 + // = 12) + auto tmp_40 = (quotient_low_limbs_range_constraint_4_shift * SHIFT_12_TO_14 - + quotient_low_limbs_range_constraint_tail_shift); + tmp_40 *= lagrange_even_in_minicircuit; + tmp_40 *= scaling_factor; + std::get<39>(accumulators) += tmp_40; + + // Contribution 41, range constrain the highest microlimb of second highest quotient limb to be 12 bits (68 % 14 + // = 12) + auto tmp_41 = + (quotient_high_limbs_range_constraint_4 * SHIFT_12_TO_14 - quotient_high_limbs_range_constraint_tail); + tmp_41 *= lagrange_even_in_minicircuit; + tmp_41 *= 
scaling_factor; + std::get<40>(accumulators) += tmp_41; + + // Contribution 42, range constrain the highest microlimb of highest quotient limb to be 10 bits (52 % 14 = 10) + auto tmp_42 = (quotient_high_limbs_range_constraint_3_shift * SHIFT_10_TO_14 - + quotient_high_limbs_range_constraint_4_shift); + tmp_42 *= lagrange_even_in_minicircuit; + tmp_42 *= scaling_factor; + std::get<41>(accumulators) += tmp_42; + + // Contributions where we decompose initial EccOpQueue values into 68-bit limbs + + // Contribution 43, decompose x_lo + auto tmp_43 = (p_x_low_limbs + p_x_low_limbs_shift * LIMB_SHIFT) - x_lo_y_hi; + tmp_43 *= lagrange_even_in_minicircuit; + tmp_43 *= scaling_factor; + std::get<42>(accumulators) += tmp_43; + + // Contribution 44, decompose x_hi + auto tmp_44 = (p_x_high_limbs + p_x_high_limbs_shift * LIMB_SHIFT) - x_hi_z_1; + tmp_44 *= lagrange_even_in_minicircuit; + tmp_44 *= scaling_factor; + std::get<43>(accumulators) += tmp_44; + // Contribution 45, decompose y_lo + auto tmp_45 = (p_y_low_limbs + p_y_low_limbs_shift * LIMB_SHIFT) - y_lo_z_2; + tmp_45 *= lagrange_even_in_minicircuit; + tmp_45 *= scaling_factor; + std::get<44>(accumulators) += tmp_45; + + // Contribution 46, decompose y_hi + auto tmp_46 = (p_y_high_limbs + p_y_high_limbs_shift * LIMB_SHIFT) - x_lo_y_hi_shift; + tmp_46 *= lagrange_even_in_minicircuit; + tmp_46 *= scaling_factor; + std::get<45>(accumulators) += tmp_46; + + // Contribution 47, decompose z1 + auto tmp_47 = (z_low_limbs + z_high_limbs * LIMB_SHIFT) - x_hi_z_1_shift; + tmp_47 *= lagrange_even_in_minicircuit; + tmp_47 *= scaling_factor; + std::get<46>(accumulators) += tmp_47; + + // Contribution 48, decompose z2 + auto tmp_48 = (z_low_limbs_shift + z_high_limbs_shift * LIMB_SHIFT) - y_lo_z_2_shift; + tmp_48 *= lagrange_even_in_minicircuit; + tmp_48 *= scaling_factor; + std::get<47>(accumulators) += tmp_48; + }(); }; } // namespace bb diff --git 
a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp index 344988a16082..e3abe1497d28 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations.hpp @@ -14,16 +14,23 @@ template class TranslatorOpcodeConstraintRelationImpl { using FF = FF_; // 1 + polynomial degree of this relation - static constexpr size_t RELATION_LENGTH = 6; // degree((lagrange_masking - 1)⋅op ⋅(op - 3)⋅(op - 4)⋅(op - 8)) = 5 - static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 6 // opcode constraint relation + static constexpr size_t RELATION_LENGTH = 6; + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 6, // opcode constraint relation + 6, // opcode constraint relation + 6, // opcode constraint relation + 6, // opcode constraint relation + 6 // opcode constraint relation }; /** * @brief Returns true if the contribution from all subrelations for the provided inputs is identically zero * */ - template inline static bool skip(const AllEntities& in) { return in.op.is_zero(); } + template inline static bool skip(const AllEntities& in) + { + return (in.lagrange_even_in_minicircuit + in.lagrange_mini_masking).is_zero(); + } /** * @brief Expression for enforcing the value of the Opcode to be {0,3,4,8} * @details This relation enforces the opcode to be one of described values. 
Since we don't care about even diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp index 860946261690..e838fcedfd8b 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_extra_relations_impl.hpp @@ -45,6 +45,54 @@ void TranslatorOpcodeConstraintRelationImpl::accumulate(ContainerOverSubrela tmp_1 *= (lagrange_mini_masking + minus_one); tmp_1 *= scaling_factor; std::get<0>(accumulators) += tmp_1; + + auto lagrange_even_in_minicircuit = View(in.lagrange_even_in_minicircuit); + + auto accumulators_binary_limbs_0 = View(in.accumulators_binary_limbs_0); + auto accumulators_binary_limbs_1 = View(in.accumulators_binary_limbs_1); + auto accumulators_binary_limbs_2 = View(in.accumulators_binary_limbs_2); + auto accumulators_binary_limbs_3 = View(in.accumulators_binary_limbs_3); + auto accumulators_binary_limbs_0_shift = View(in.accumulators_binary_limbs_0_shift); + auto accumulators_binary_limbs_1_shift = View(in.accumulators_binary_limbs_1_shift); + auto accumulators_binary_limbs_2_shift = View(in.accumulators_binary_limbs_2_shift); + auto accumulators_binary_limbs_3_shift = View(in.accumulators_binary_limbs_3_shift); + + // Contribution (2) (2-5 ensure that the accumulator stays the same at even indices within the no-op range if + // one exists) + auto tmp_2 = (accumulators_binary_limbs_0 - accumulators_binary_limbs_0_shift); + tmp_2 *= (op + minus_three); + tmp_2 *= (op + minus_four); + tmp_2 *= (op + minus_eight); + tmp_2 *= lagrange_even_in_minicircuit; + tmp_2 *= scaling_factor; + std::get<1>(accumulators) += tmp_2; + + // Contribution (3) + auto tmp_3 = (accumulators_binary_limbs_1 - accumulators_binary_limbs_1_shift); + tmp_3 *= (op + minus_three); + tmp_3 *= (op + minus_four); + tmp_3 *= (op 
+ minus_eight); + tmp_3 *= lagrange_even_in_minicircuit; + tmp_3 *= scaling_factor; + std::get<2>(accumulators) += tmp_3; + + // Contribution (4) + auto tmp_4 = (accumulators_binary_limbs_2 - accumulators_binary_limbs_2_shift); + tmp_4 *= (op + minus_three); + tmp_4 *= (op + minus_four); + tmp_4 *= (op + minus_eight); + tmp_4 *= lagrange_even_in_minicircuit; + tmp_4 *= scaling_factor; + std::get<3>(accumulators) += tmp_4; + + // Contribution (5) + auto tmp_5 = (accumulators_binary_limbs_3 - accumulators_binary_limbs_3_shift); + tmp_5 *= (op + minus_three); + tmp_5 *= (op + minus_four); + tmp_5 *= (op + minus_eight); + tmp_5 *= lagrange_even_in_minicircuit; + tmp_5 *= scaling_factor; + std::get<4>(accumulators) += tmp_5; }; /** @@ -75,8 +123,8 @@ void TranslatorAccumulatorTransferRelationImpl::accumulate(ContainerOverSubr // Lagrange ensuring the accumulator result is validated at the correct row auto lagrange_result_row = View(in.lagrange_result_row); - // Lagrange at index (size of minicircuit - 1) is used to enforce that the accumulator is initialized to zero in the - // circuit + // Lagrange at index (size of minicircuit - 1) is used to enforce that the accumulator is initialized to zero in + // the circuit auto lagrange_last_in_minicircuit = View(in.lagrange_last_in_minicircuit); // Locations of randomness in the minicircuit diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp index 219a208a957e..b3b4839dccd9 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation.hpp @@ -16,9 +16,9 @@ template class TranslatorNonNativeFieldRelationImpl { // 1 + polynomial degree of this relation static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 3, // Lower 
wide limb subrelation (checks result is 0 mod 2¹³⁶) - 3, // Higher wide limb subrelation (checks result is 0 in higher mod 2¹³⁶), - 3 // Prime subrelation (checks result in native field) + 4, // Lower wide limb subrelation (checks result is 0 mod 2¹³⁶) + 4, // Higher wide limb subrelation (checks result is 0 in higher mod 2¹³⁶), + 4 // Prime subrelation (checks result in native field) }; /** diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp index 6e5ed427eee4..10d8da453e2e 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_non_native_field_relation_impl.hpp @@ -63,7 +63,9 @@ namespace bb { * which we need to calculate non-permutation relations). All other indices are set to zero. Each EccOpQueue entry * (operation) occupies 2 rows in bn254 transcripts. So the Translator VM has a 2-row cycle and we need to * switch the checks being performed depending on which row we are at right now. We have half a cycle of - * accumulation, where we perform this computation, and half a cycle where we just copy accumulator data. + * accumulation, where we perform this computation, and half a cycle where we just copy accumulator data. They also get + * multiplied by the op because the no-op range within the trace (if one exists) should imply the accumulator doesn't + * change (fully enforced by the AccumulatorTransferRelation and OpcodeRelation) * * @param evals transformed to `evals + C(in(X)...)*scaling_factor` * @param in an std::array containing the fully extended Univariate edges. 
@@ -183,7 +185,7 @@ void TranslatorNonNativeFieldRelationImpl::accumulate(ContainerOverSubrelati // clang-format on // subtract large value; vanishing shows the desired relation holds on low 136-bit limb tmp -= relation_wide_limbs * shiftx2; - tmp *= lagrange_even_in_minicircuit; + tmp *= lagrange_even_in_minicircuit * op; tmp *= scaling_factor; std::get<0>(accumulators) += tmp; @@ -236,7 +238,7 @@ void TranslatorNonNativeFieldRelationImpl::accumulate(ContainerOverSubrelati // clang-format on // subtract large value; vanishing shows the desired relation holds on high 136-bit limb tmp -= relation_wide_limbs_shift * shiftx2; - tmp *= lagrange_even_in_minicircuit; + tmp *= lagrange_even_in_minicircuit * op; tmp *= scaling_factor; std::get<1>(accumulators) += tmp; @@ -278,7 +280,7 @@ void TranslatorNonNativeFieldRelationImpl::accumulate(ContainerOverSubrelati + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator; // clang-format on - tmp *= lagrange_even_in_minicircuit; + tmp *= lagrange_even_in_minicircuit * op; tmp *= scaling_factor; std::get<2>(accumulators) += tmp; }; diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp index cf6baecc73b8..c60a2925ee38 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/translator_relation_consistency.test.cpp @@ -382,6 +382,7 @@ TEST_F(TranslatorRelationConsistency, DecompositionRelation) const auto& x_lo_y_hi_shift = input_elements.x_lo_y_hi_shift; const auto& x_hi_z_1_shift = input_elements.x_hi_z_1_shift; const auto& y_lo_z_2_shift = input_elements.y_lo_z_2_shift; + const auto& op = input_elements.op; const auto& lagrange_even_in_minicircuit = input_elements.lagrange_even_in_minicircuit; @@ -509,29 +510,29 @@ 
TEST_F(TranslatorRelationConsistency, DecompositionRelation) }; // Check decomposition 50-72 bit limbs into microlimbs - expected_values[0] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs); - expected_values[1] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0_shift, - p_x_low_limbs_range_constraint_1_shift, - p_x_low_limbs_range_constraint_2_shift, - p_x_low_limbs_range_constraint_3_shift, - p_x_low_limbs_range_constraint_4_shift, - p_x_low_limbs_shift); - expected_values[2] = check_standard_limb_decomposition(p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs); - expected_values[3] = check_standard_top_limb_decomposition(p_x_high_limbs_range_constraint_0_shift, - p_x_high_limbs_range_constraint_1_shift, - p_x_high_limbs_range_constraint_2_shift, - p_x_high_limbs_range_constraint_3_shift, - p_x_high_limbs_shift); + expected_values[0] = op * check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0, + accumulator_low_limbs_range_constraint_1, + accumulator_low_limbs_range_constraint_2, + accumulator_low_limbs_range_constraint_3, + accumulator_low_limbs_range_constraint_4, + accumulators_binary_limbs_0); + expected_values[1] = op * check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0_shift, + accumulator_low_limbs_range_constraint_1_shift, + accumulator_low_limbs_range_constraint_2_shift, + accumulator_low_limbs_range_constraint_3_shift, + accumulator_low_limbs_range_constraint_4_shift, + accumulators_binary_limbs_1); + expected_values[2] = op * check_standard_limb_decomposition(accumulator_high_limbs_range_constraint_0, + accumulator_high_limbs_range_constraint_1, + 
accumulator_high_limbs_range_constraint_2, + accumulator_high_limbs_range_constraint_3, + accumulator_high_limbs_range_constraint_4, + accumulators_binary_limbs_2); + expected_values[3] = op * check_standard_top_limb_decomposition(accumulator_high_limbs_range_constraint_0_shift, + accumulator_high_limbs_range_constraint_1_shift, + accumulator_high_limbs_range_constraint_2_shift, + accumulator_high_limbs_range_constraint_3_shift, + accumulators_binary_limbs_3); expected_values[4] = check_standard_limb_decomposition(p_y_low_limbs_range_constraint_0, p_y_low_limbs_range_constraint_1, @@ -580,29 +581,30 @@ TEST_F(TranslatorRelationConsistency, DecompositionRelation) z_high_limbs_range_constraint_3_shift, z_high_limbs_range_constraint_4_shift, z_high_limbs_shift); - expected_values[12] = check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulators_binary_limbs_0); - expected_values[13] = check_standard_limb_decomposition(accumulator_low_limbs_range_constraint_0_shift, - accumulator_low_limbs_range_constraint_1_shift, - accumulator_low_limbs_range_constraint_2_shift, - accumulator_low_limbs_range_constraint_3_shift, - accumulator_low_limbs_range_constraint_4_shift, - accumulators_binary_limbs_1); - expected_values[14] = check_standard_limb_decomposition(accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulators_binary_limbs_2); - expected_values[15] = check_standard_top_limb_decomposition(accumulator_high_limbs_range_constraint_0_shift, - accumulator_high_limbs_range_constraint_1_shift, - accumulator_high_limbs_range_constraint_2_shift, - accumulator_high_limbs_range_constraint_3_shift, - 
accumulators_binary_limbs_3); + expected_values[12] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0, + p_x_low_limbs_range_constraint_1, + p_x_low_limbs_range_constraint_2, + p_x_low_limbs_range_constraint_3, + p_x_low_limbs_range_constraint_4, + p_x_low_limbs); + expected_values[13] = check_standard_limb_decomposition(p_x_low_limbs_range_constraint_0_shift, + p_x_low_limbs_range_constraint_1_shift, + p_x_low_limbs_range_constraint_2_shift, + p_x_low_limbs_range_constraint_3_shift, + p_x_low_limbs_range_constraint_4_shift, + p_x_low_limbs_shift); + expected_values[14] = check_standard_limb_decomposition(p_x_high_limbs_range_constraint_0, + p_x_high_limbs_range_constraint_1, + p_x_high_limbs_range_constraint_2, + p_x_high_limbs_range_constraint_3, + p_x_high_limbs_range_constraint_4, + p_x_high_limbs); + expected_values[15] = check_standard_top_limb_decomposition(p_x_high_limbs_range_constraint_0_shift, + p_x_high_limbs_range_constraint_1_shift, + p_x_high_limbs_range_constraint_2_shift, + p_x_high_limbs_range_constraint_3_shift, + p_x_high_limbs_shift); + expected_values[16] = check_standard_limb_decomposition(quotient_low_limbs_range_constraint_0, quotient_low_limbs_range_constraint_1, quotient_low_limbs_range_constraint_2, @@ -735,15 +737,30 @@ TEST_F(TranslatorRelationConsistency, OpcodeConstraintRelation) const InputElements input_elements = random_inputs ? 
get_random_input() : get_special_input(); const auto& op = input_elements.op; + const auto& accumulators_binary_limbs_0 = input_elements.accumulators_binary_limbs_0; + const auto& accumulators_binary_limbs_1 = input_elements.accumulators_binary_limbs_1; + const auto& accumulators_binary_limbs_2 = input_elements.accumulators_binary_limbs_2; + const auto& accumulators_binary_limbs_3 = input_elements.accumulators_binary_limbs_3; + const auto& accumulators_binary_limbs_0_shift = input_elements.accumulators_binary_limbs_0_shift; + const auto& accumulators_binary_limbs_1_shift = input_elements.accumulators_binary_limbs_1_shift; + const auto& accumulators_binary_limbs_2_shift = input_elements.accumulators_binary_limbs_2_shift; + const auto& accumulators_binary_limbs_3_shift = input_elements.accumulators_binary_limbs_3_shift; + const auto& lagrange_mini_masking = input_elements.lagrange_mini_masking; + const auto& lagrange_even_in_minicircuit = input_elements.lagrange_even_in_minicircuit; RelationValues expected_values; const auto parameters = RelationParameters::get_random(); - // (Contribution 1) - auto contribution_1 = op * (op - FF(3)) * (op - FF(4)) * (op - FF(8)) * (lagrange_mini_masking - FF(1)); - expected_values[0] = contribution_1; + // Opcode constraints - ensure op is 0, 3, 4, or 8 + expected_values[0] = op * (op - FF(3)) * (op - FF(4)) * (op - FF(8)) * (lagrange_mini_masking - FF(1)); + + auto shared = (op - FF(3)) * (op - FF(4)) * (op - FF(8)) * lagrange_even_in_minicircuit; + expected_values[1] = shared * (accumulators_binary_limbs_0 - accumulators_binary_limbs_0_shift); + expected_values[2] = shared * (accumulators_binary_limbs_1 - accumulators_binary_limbs_1_shift); + expected_values[3] = shared * (accumulators_binary_limbs_2 - accumulators_binary_limbs_2_shift); + expected_values[4] = shared * (accumulators_binary_limbs_3 - accumulators_binary_limbs_3_shift); validate_relation_execution(expected_values, input_elements, parameters); }; @@ -1119,7 +1136,7 
@@ TEST_F(TranslatorRelationConsistency, NonNativeFieldRelation) quotient_low_binary_limbs_shift * NEGATIVE_MODULUS_LIMBS[0] - accumulators_binary_limbs_1) * shift - relation_wide_limbs * shiftx2) * - lagrange_even_in_minicircuit; + lagrange_even_in_minicircuit * op; // Higher wide limb subrelation expected_values[1] = @@ -1161,7 +1178,7 @@ TEST_F(TranslatorRelationConsistency, NonNativeFieldRelation) quotient_low_binary_limbs * NEGATIVE_MODULUS_LIMBS[3] - accumulators_binary_limbs_3) * shift - relation_wide_limbs_shift * shiftx2) * - lagrange_even_in_minicircuit; + lagrange_even_in_minicircuit * op; auto reconstructed_p_x = (p_x_low_limbs + p_x_low_limbs_shift * shift + p_x_high_limbs * shiftx2 + p_x_high_limbs_shift * shiftx3); auto reconstructed_p_y = @@ -1185,7 +1202,7 @@ TEST_F(TranslatorRelationConsistency, NonNativeFieldRelation) reconstructed_z1 * parameters.batching_challenge_v[2][4] + reconstructed_z2 * parameters.batching_challenge_v[3][4] + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) * - lagrange_even_in_minicircuit; + lagrange_even_in_minicircuit * op; validate_relation_execution(expected_values, input_elements, parameters); }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp index df3e90c22297..3560d971101e 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp @@ -211,9 +211,9 @@ bool_t ecdsa_verify_signature(const stdlib::byte_array& hashed // Logging if (is_signature_valid.get_value()) { - info("Signature verification succeeded."); + vinfo("Signature verification succeeded."); } else { - info("Signature verification failed"); + vinfo("Signature verification failed"); } return is_signature_valid; diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp index f150d9f86b17..80c180bd8d4d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp @@ -40,30 +40,19 @@ class GoblinRecursiveVerifierTests : public testing::Test { * * @return ProverOutput */ - static ProverOutput create_goblin_prover_output(Builder* outer_builder = nullptr, const size_t NUM_CIRCUITS = 3) + static ProverOutput create_goblin_prover_output(Builder* outer_builder = nullptr, const size_t num_circuits = 5) { Goblin goblin; - // Construct and accumulate multiple circuits - for (size_t idx = 0; idx < NUM_CIRCUITS - 1; ++idx) { - MegaCircuitBuilder builder{ goblin.op_queue }; - GoblinMockCircuits::construct_simple_circuit(builder); - goblin.prove_merge(); - } - - Goblin goblin_final; - goblin_final.op_queue = goblin.op_queue; - MegaCircuitBuilder builder{ goblin_final.op_queue }; - GoblinMockCircuits::construct_simple_circuit(builder, /*last_circuit=*/true); + GoblinMockCircuits::construct_and_merge_mock_circuits(goblin, num_circuits); // Merge the ecc ops from the newly constructed circuit - goblin_final.op_queue->merge(); - + auto goblin_proof = goblin.prove(MergeSettings::APPEND); // Subtable values and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; - auto t_current = goblin_final.op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = goblin_final.op_queue->construct_previous_ultra_ops_table_columns(); - CommitmentKey pcs_commitment_key(goblin_final.op_queue->get_ultra_ops_table_num_rows()); + auto t_current = goblin.op_queue->construct_current_ultra_ops_subtable_columns(); + auto T_prev = goblin.op_queue->construct_previous_ultra_ops_table_columns(); + 
CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = pcs_commitment_key.commit(T_prev[idx]); @@ -84,7 +73,7 @@ class GoblinRecursiveVerifierTests : public testing::Test { } // Output is a goblin proof plus ECCVM/Translator verification keys - return { goblin_final.prove(), + return { goblin_proof, { std::make_shared(), std::make_shared() }, merge_commitments, recursive_merge_commitments }; @@ -101,7 +90,7 @@ TEST_F(GoblinRecursiveVerifierTests, NativeVerification) std::shared_ptr verifier_transcript = std::make_shared(); - EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript)); + EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript, MergeSettings::APPEND)); } /** @@ -116,7 +105,7 @@ TEST_F(GoblinRecursiveVerifierTests, Basic) create_goblin_prover_output(&builder); GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments); + GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); output.points_accumulator.set_public(); info("Recursive Verifier: num gates = ", builder.num_gates); @@ -150,7 +139,8 @@ TEST_F(GoblinRecursiveVerifierTests, IndependentVKHash) create_goblin_prover_output(&builder, inner_size); GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - GoblinRecursiveVerifierOutput output = verifier.verify(proof, recursive_merge_commitments); + GoblinRecursiveVerifierOutput output = + verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); output.points_accumulator.set_public(); info("Recursive Verifier: num gates = ", builder.num_gates); @@ -164,11 +154,11 @@ TEST_F(GoblinRecursiveVerifierTests, IndependentVKHash) return { 
builder.blocks, outer_verification_key }; }; - auto [blocks_2, verification_key_2] = get_blocks(2); - auto [blocks_4, verification_key_4] = get_blocks(4); + auto [blocks_5, verification_key_5] = get_blocks(5); + auto [blocks_6, verification_key_6] = get_blocks(6); - compare_ultra_blocks_and_verification_keys({ blocks_2, blocks_4 }, - { verification_key_2, verification_key_4 }); + compare_ultra_blocks_and_verification_keys({ blocks_5, blocks_6 }, + { verification_key_5, verification_key_6 }); } /** @@ -237,7 +227,8 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorFailure) } GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(tampered_proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(tampered_proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } // Tamper with the Translator proof non-preamble values @@ -266,7 +257,8 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorFailure) } GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(tampered_proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(tampered_proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } } @@ -289,7 +281,8 @@ TEST_F(GoblinRecursiveVerifierTests, TranslationEvaluationsFailure) proof.eccvm_proof.pre_ipa_proof[op_limb_index] += 1; GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } @@ -314,7 +307,7 @@ TEST_F(GoblinRecursiveVerifierTests, 
TranslatorMergeConsistencyFailure) std::shared_ptr verifier_transcript = std::make_shared(); // Check natively that the proof is correct. - EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript)); + EXPECT_TRUE(Goblin::verify(proof, merge_commitments, verifier_transcript, MergeSettings::APPEND)); // TODO(https://github.com/AztecProtocol/barretenberg/issues/1298): // Better recursion testing - create more flexible proof tampering tests. @@ -341,7 +334,8 @@ TEST_F(GoblinRecursiveVerifierTests, TranslatorMergeConsistencyFailure) // Construct and check the Goblin Recursive Verifier circuit GoblinRecursiveVerifier verifier{ &builder, verifier_input }; - [[maybe_unused]] auto goblin_rec_verifier_output = verifier.verify(proof, recursive_merge_commitments); + [[maybe_unused]] auto goblin_rec_verifier_output = + verifier.verify(proof, recursive_merge_commitments, MergeSettings::APPEND); EXPECT_FALSE(CircuitChecker::check(builder)); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp index 6aadbcd98572..f36438aece26 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp @@ -44,29 +44,49 @@ class TranslatorRecursiveTests : public ::testing::Test { static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } - static std::shared_ptr create_op_queue(const size_t num_ops) + // Helper function to add no-ops + static void add_no_ops(std::shared_ptr& op_queue, size_t count = 1) + { + for (size_t i = 0; i < count; i++) { + op_queue->no_op_ultra_only(); + } + } + + // Helper function to create an MSM + static void add_mixed_ops(std::shared_ptr& op_queue, size_t count = 100) { auto P1 = 
InnerG1::random_element(); auto P2 = InnerG1::random_element(); auto z = InnerFF::random_element(); - - // Add the same operations to the ECC op queue; the native computation is performed under the hood. - auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - - for (size_t i = 0; i < num_ops; i++) { + for (size_t i = 0; i < count; i++) { op_queue->add_accumulate(P1); op_queue->mul_accumulate(P2, z); } + op_queue->eq_and_reset(); + } + + // Construct a test circuit based on some random operations + static InnerBuilder generate_test_circuit(const InnerBF& batching_challenge_v, + const InnerBF& evaluation_challenge_x, + const size_t circuit_size_parameter = 500) + { + + // Add the same operations to the ECC op queue; the native computation is performed under the hood. + auto op_queue = std::make_shared(); + add_no_ops(op_queue); + add_mixed_ops(op_queue, circuit_size_parameter / 2); op_queue->merge(); - return op_queue; + add_mixed_ops(op_queue, circuit_size_parameter / 2); + add_no_ops(op_queue, 2); + op_queue->merge(MergeSettings::APPEND, ECCOpQueue::OP_QUEUE_SIZE - op_queue->get_current_subtable_size()); + + return InnerBuilder{ batching_challenge_v, evaluation_challenge_x, op_queue }; } static void test_recursive_verification() { using NativeVerifierCommitmentKey = InnerFlavor::VerifierCommitmentKey; // Add the same operations to the ECC op queue; the native computation is performed under the hood. 
- auto op_queue = create_op_queue(500); auto prover_transcript = std::make_shared(); prover_transcript->send_to_verifier("init", InnerBF::random_element()); @@ -76,7 +96,7 @@ class TranslatorRecursiveTests : public ::testing::Test { InnerBF batching_challenge_v = InnerBF::random_element(); InnerBF evaluation_challenge_x = InnerBF::random_element(); - auto circuit_builder = InnerBuilder(batching_challenge_v, evaluation_challenge_x, op_queue); + InnerBuilder circuit_builder = generate_test_circuit(batching_challenge_v, evaluation_challenge_x); EXPECT_TRUE(TranslatorCircuitChecker::check(circuit_builder)); auto proving_key = std::make_shared(circuit_builder); InnerProver prover{ proving_key, prover_transcript }; @@ -142,8 +162,6 @@ class TranslatorRecursiveTests : public ::testing::Test { // Retrieves the trace blocks (each consisting of a specific gate) from the recursive verifier circuit auto get_blocks = [](size_t num_ops) -> std::tuple> { - auto op_queue = create_op_queue(num_ops); - auto prover_transcript = std::make_shared(); prover_transcript->send_to_verifier("init", InnerBF::random_element()); @@ -152,7 +170,7 @@ class TranslatorRecursiveTests : public ::testing::Test { InnerBF batching_challenge_v = InnerBF::random_element(); InnerBF evaluation_challenge_x = InnerBF::random_element(); - auto inner_circuit = InnerBuilder(batching_challenge_v, evaluation_challenge_x, op_queue); + InnerBuilder inner_circuit = generate_test_circuit(batching_challenge_v, evaluation_challenge_x, num_ops); // Generate a proof over the inner circuit auto inner_proving_key = std::make_shared(inner_circuit); diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp index 1b31cd0feafb..5e58f1a36677 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp @@ -61,9 +61,12 @@ template void MegaCircuitBuilder_::add_mega_gates_to_ensure_al read_idx = this->add_variable(raw_read_idx); read_return_data(read_idx); - // add dummy mul accum op and an equality op - this->queue_ecc_mul_accum(bb::g1::affine_element::one(), 2); - this->queue_ecc_eq(); + if (op_queue->get_current_subtable_size() == 0) { + // Add a mul dummy op in the subtable to avoid column polynomial being zero (it has to be a mul rather than an + // add to ensure all 4 column polynomials contain some data) + this->queue_ecc_mul_accum(bb::g1::affine_element::one(), 2); + this->queue_ecc_eq(); + } } /** diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp index 46fb3a8c30cb..63ebfc5ebbc3 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp @@ -93,11 +93,11 @@ TEST_F(TranslatorRelationCorrectnessTests, TranslatorExtraRelationsCorrectness) prover_polynomials.lagrange_even_in_minicircuit.at(i) = 1; prover_polynomials.lagrange_odd_in_minicircuit.at(i + 1) = 1; } - constexpr size_t NUMBER_OF_POSSIBLE_OPCODES = 4; - constexpr std::array possible_opcode_values = { 0, 3, 4, 8 }; + constexpr size_t NUMBER_OF_POSSIBLE_OPCODES = 3; + constexpr std::array possible_opcode_values = { 3, 4, 8 }; // Assign random opcode values - for (size_t i = 1; i < mini_circuit_size - 1; i += 2) { + for (size_t i = 2; i < mini_circuit_size; i += 2) { prover_polynomials.op.at(i) = possible_opcode_values[static_cast(engine.get_random_uint8() % NUMBER_OF_POSSIBLE_OPCODES)]; } diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp index 0420ca323b9b..337ac8219c0c 100644 --- 
a/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp @@ -10,11 +10,10 @@ #include using namespace bb; -namespace { using CircuitBuilder = TranslatorFlavor::CircuitBuilder; using Transcript = TranslatorFlavor::Transcript; using OpQueue = ECCOpQueue; -auto& engine = numeric::get_debug_randomness(); +static auto& engine = numeric::get_debug_randomness(); class TranslatorTests : public ::testing::Test { using G1 = g1::affine_element; @@ -24,29 +23,73 @@ class TranslatorTests : public ::testing::Test { protected: static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + // Helper function to add no-ops + static void add_no_ops(std::shared_ptr& op_queue, size_t count = 1) + { + for (size_t i = 0; i < count; i++) { + op_queue->no_op_ultra_only(); + } + } + + static void add_mixed_ops(std::shared_ptr& op_queue, size_t count = 100) + { + auto P1 = G1::random_element(); + auto P2 = G1::random_element(); + auto z = Fr::random_element(); + for (size_t i = 0; i < count; i++) { + op_queue->add_accumulate(P1); + op_queue->mul_accumulate(P2, z); + } + op_queue->eq_and_reset(); + } + // Construct a test circuit based on some random operations static CircuitBuilder generate_test_circuit(const Fq& batching_challenge_v, const Fq& evaluation_challenge_x, const size_t circuit_size_parameter = 500) { - auto P1 = G1::random_element(); - auto P2 = G1::random_element(); - auto z = Fr::random_element(); // Add the same operations to the ECC op queue; the native computation is performed under the hood. 
auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - - for (size_t i = 0; i < circuit_size_parameter; i++) { - op_queue->add_accumulate(P1); - op_queue->mul_accumulate(P2, z); - } + add_no_ops(op_queue); + add_mixed_ops(op_queue, circuit_size_parameter / 2); op_queue->merge(); + add_mixed_ops(op_queue, circuit_size_parameter / 2); + add_no_ops(op_queue, 2); + op_queue->merge(MergeSettings::APPEND, ECCOpQueue::OP_QUEUE_SIZE - op_queue->get_current_subtable_size()); return CircuitBuilder{ batching_challenge_v, evaluation_challenge_x, op_queue }; } + + static bool prove_and_verify(const CircuitBuilder& circuit_builder, + const Fq& evaluation_challenge_x, + const Fq& batching_challenge_v) + { + // Setup prover transcript + auto prover_transcript = std::make_shared(); + prover_transcript->send_to_verifier("init", Fq::random_element()); + auto initial_transcript = prover_transcript->export_proof(); + + // Setup verifier transcript + auto verifier_transcript = std::make_shared(); + verifier_transcript->load_proof(initial_transcript); + verifier_transcript->template receive_from_prover("init"); + + // Create proving key and prover + auto proving_key = std::make_shared(circuit_builder); + TranslatorProver prover{ proving_key, prover_transcript }; + + // Generate proof + auto proof = prover.construct_proof(); + + // Create verifier + auto verification_key = std::make_shared(proving_key->proving_key); + TranslatorVerifier verifier(verification_key, verifier_transcript); + + // Verify proof and return result + return verifier.verify_proof(proof, evaluation_challenge_x, batching_challenge_v); + } }; -} // namespace /** * @brief Check that size of a Translator proof matches the corresponding constant @@ -59,16 +102,22 @@ TEST_F(TranslatorTests, ProofLengthCheck) { using Fq = fq; - auto prover_transcript = std::make_shared(); Fq batching_challenge_v = Fq::random_element(); Fq evaluation_challenge_x = Fq::random_element(); // Generate a circuit and its 
verification key (computed at runtime from the proving key) CircuitBuilder circuit_builder = generate_test_circuit(batching_challenge_v, evaluation_challenge_x); + // Setup prover transcript + auto prover_transcript = std::make_shared(); + prover_transcript->send_to_verifier("init", Fq::random_element()); + prover_transcript->export_proof(); auto proving_key = std::make_shared(circuit_builder); TranslatorProver prover{ proving_key, prover_transcript }; + + // Generate proof auto proof = prover.construct_proof(); + EXPECT_EQ(proof.size(), TranslatorFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS); } @@ -80,26 +129,14 @@ TEST_F(TranslatorTests, Basic) { using Fq = fq; - auto prover_transcript = std::make_shared(); - prover_transcript->send_to_verifier("init", Fq::random_element()); - auto initial_transcript = prover_transcript->export_proof(); Fq batching_challenge_v = Fq::random_element(); Fq evaluation_challenge_x = Fq::random_element(); - // Generate a circuit and its verification key (computed at runtime from the proving key) + // Generate a circuit without no-ops CircuitBuilder circuit_builder = generate_test_circuit(batching_challenge_v, evaluation_challenge_x); EXPECT_TRUE(TranslatorCircuitChecker::check(circuit_builder)); - auto proving_key = std::make_shared(circuit_builder); - TranslatorProver prover{ proving_key, prover_transcript }; - auto proof = prover.construct_proof(); - - auto verifier_transcript = std::make_shared(); - verifier_transcript->load_proof(initial_transcript); - verifier_transcript->template receive_from_prover("init"); - auto verification_key = std::make_shared(proving_key->proving_key); - TranslatorVerifier verifier(verification_key, verifier_transcript); - bool verified = verifier.verify_proof(proof, evaluation_challenge_x, batching_challenge_v); + bool verified = prove_and_verify(circuit_builder, evaluation_challenge_x, batching_challenge_v); EXPECT_TRUE(verified); } diff --git 
a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp index efb75138f0e2..8e536a2c0633 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp @@ -528,12 +528,9 @@ void TranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit(const std::shared_ // add two zeros for consistency. // TODO(https://github.com/AztecProtocol/barretenberg/issues/1360): We'll also have to eventually process random // data in the merge protocol (added for zero knowledge)/ - populate_wires_from_ultra_op(ultra_ops[0]); for (auto& wire : wires) { - if (wire.empty()) { - wire.push_back(zero_idx); - wire.push_back(zero_idx); - } + wire.push_back(zero_idx); + wire.push_back(zero_idx); } num_gates += 2; @@ -541,6 +538,10 @@ void TranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit(const std::shared_ // from the later indices. 
We need to know the previous accumulator to create the gate for (size_t i = 1; i < ultra_ops.size(); i++) { const auto& ultra_op = ultra_ops[ultra_ops.size() - i]; + if (ultra_op.op_code.value() == 0) { + // Skip no-ops as they should not affect the computation of the accumulator + continue; + } current_accumulator *= evaluation_input_x; const auto [x_256, y_256] = ultra_op.get_base_point_standard_form(); current_accumulator += @@ -552,13 +553,38 @@ void TranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit(const std::shared_ accumulator_trace.push_back(current_accumulator); } - // We don't care about the last value since we'll recompute it during witness generation anyway + // Accumulator final value, recomputed during witness generation and expected at RESULT_ROW + Fq final_accumulator_state = accumulator_trace.back(); accumulator_trace.pop_back(); + std::array previous_accumulator_binary_limbs = split_fq_into_limbs(final_accumulator_state); // Generate witness values from all the UltraOps for (size_t i = 1; i < ultra_ops.size(); i++) { const auto& ultra_op = ultra_ops[i]; - Fq previous_accumulator = 0; + if (ultra_op.op_code.value() == 0) { + // Within the no-op range the translator trace is empty except for the accumulator binary limbs which get + // copied from the last row k where an op happened (i.e. the op wire at the even index has a non-zero value). + // Then, during proving we perform all the checks to establish wires at k are well formed and that we + // appropriately transferred the accumulator value at k across the entire no-op range, both in even and odd + // indices.
+ for (size_t j = 0; j < ACCUMULATORS_BINARY_LIMBS_0; j++) { + wires[j].push_back(zero_idx); + wires[j].push_back(zero_idx); + } + size_t idx = 0; + for (size_t j = ACCUMULATORS_BINARY_LIMBS_0; j < ACCUMULATORS_BINARY_LIMBS_3 + 1; j++) { + wires[j].push_back(add_variable(previous_accumulator_binary_limbs[idx])); + wires[j].push_back(add_variable(previous_accumulator_binary_limbs[idx])); + idx++; + } + for (size_t j = ACCUMULATORS_BINARY_LIMBS_3 + 1; j < TOTAL_COUNT; j++) { + wires[j].push_back(zero_idx); + wires[j].push_back(zero_idx); + } + num_gates += 2; + continue; + } + Fq previous_accumulator{ 0 }; // Pop the last value from accumulator trace and use it as previous accumulator if (!accumulator_trace.empty()) { previous_accumulator = accumulator_trace.back(); @@ -568,6 +594,8 @@ void TranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit(const std::shared_ AccumulationInput one_accumulation_step = generate_witness_values(ultra_op, previous_accumulator, batching_challenge_v, evaluation_input_x); + // Save the state of accumulator in case the next operation encountered is a no-op + previous_accumulator_binary_limbs = one_accumulation_step.previous_accumulator; // And put them into the wires create_accumulation_gate(one_accumulation_step); } diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp index 8b2d94866c24..fb1797f54f5e 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp @@ -314,7 +314,6 @@ class TranslatorCircuitBuilder : public CircuitBuilderBase { /** * @brief Construct a new Translator Circuit Builder object - * * @details Translator Circuit builder has to be initializaed with evaluation input and batching challenge * (they are used to compute witness and to store the value for the prover) * @@ -324,7 +323,10 
@@ class TranslatorCircuitBuilder : public CircuitBuilderBase { TranslatorCircuitBuilder(Fq batching_challenge_v_, Fq evaluation_input_x_) : CircuitBuilderBase(DEFAULT_TRANSLATOR_VM_LENGTH) , batching_challenge_v(batching_challenge_v_) - , evaluation_input_x(evaluation_input_x_) {}; + , evaluation_input_x(evaluation_input_x_) + { + this->zero_idx = add_variable(Fr::zero()); + }; /** * @brief Construct a new Translator Circuit Builder object and feed op_queue inside diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp index 996cb07102ba..eba893dccaea 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp @@ -87,8 +87,22 @@ TEST(TranslatorCircuitBuilder, SeveralOperationCorrectness) // Add the same operations to the ECC op queue; the native computation is performed under the hood. 
auto op_queue = std::make_shared(); op_queue->no_op_ultra_only(); + + op_queue->add_accumulate(P1); + op_queue->mul_accumulate(P2, z); + op_queue->eq_and_reset(); + op_queue->merge(); + + op_queue->add_accumulate(P1); + op_queue->mul_accumulate(P2, z); op_queue->add_accumulate(P1); op_queue->mul_accumulate(P2, z); + op_queue->eq_and_reset(); + // Placeholder for randomness + op_queue->no_op_ultra_only(); + op_queue->no_op_ultra_only(); + op_queue->merge(MergeSettings::APPEND, ECCOpQueue::OP_QUEUE_SIZE - op_queue->get_current_subtable_size()); + Fq op_accumulator = 0; Fq p_x_accumulator = 0; Fq p_y_accumulator = 0; @@ -96,28 +110,28 @@ TEST(TranslatorCircuitBuilder, SeveralOperationCorrectness) Fq z_2_accumulator = 0; Fq batching_challenge = fq::random_element(); - op_queue->eq_and_reset(); - op_queue->empty_row_for_testing(); - op_queue->merge(); - // Sample the evaluation input x Fq x = Fq::random_element(); + // Compute x_pow (power given by the degree of the polynomial) to be number of real ultra ops - 1 + Fq x_pow = Fq(1); // Get an inverse Fq x_inv = x.invert(); // Compute the batched evaluation of polynomials (multiplying by inverse to go from lower to higher) const auto& ultra_ops = op_queue->get_ultra_ops(); for (size_t i = 1; i < ultra_ops.size(); i++) { const auto& ecc_op = ultra_ops[i]; + if (ecc_op.op_code.value() == 0) { + continue; + } op_accumulator = op_accumulator * x_inv + ecc_op.op_code.value(); const auto [x_u256, y_u256] = ecc_op.get_base_point_standard_form(); p_x_accumulator = p_x_accumulator * x_inv + x_u256; p_y_accumulator = p_y_accumulator * x_inv + y_u256; z_1_accumulator = z_1_accumulator * x_inv + uint256_t(ecc_op.z_1); z_2_accumulator = z_2_accumulator * x_inv + uint256_t(ecc_op.z_2); + x_pow *= x; } - // The degree is ultra_ops.size() - 2 as we ignore the first no-op in computation - Fq x_pow = x.pow(ultra_ops.size() - 2); - + x_pow *= x_inv; // Multiply by an appropriate power of x to get rid of the inverses Fq result = 
((((z_2_accumulator * batching_challenge + z_1_accumulator) * batching_challenge + p_y_accumulator) * batching_challenge + diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp index 4ea2ed1245d9..a038d08539bf 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp @@ -62,7 +62,7 @@ class TranslatorFlavor { // The fixed log size of Translator circuit determining the size most polynomials (except the ones // involved in the interleaving subprotocol). It should be determined by the size of the EccOpQueue. - static constexpr size_t LOG_MINI_CIRCUIT_SIZE = 14; + static constexpr size_t LOG_MINI_CIRCUIT_SIZE = CONST_TRANSLATOR_MINI_CIRCUIT_LOG_SIZE; // Log of size of interleaved_* and ordered_* polynomials static constexpr size_t CONST_TRANSLATOR_LOG_N = LOG_MINI_CIRCUIT_SIZE + numeric::get_msb(INTERLEAVING_GROUP_SIZE); diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_proving_key.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_proving_key.hpp index ab4aa5ebe39d..8f1ca6074ae3 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_proving_key.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_proving_key.hpp @@ -49,7 +49,7 @@ class TranslatorProvingKey { BB_BENCH_NAME("TranslatorProvingKey(TranslatorCircuit&)"); // Check that the Translator Circuit does not exceed the fixed upper bound, the current value amounts to // a number of EccOps sufficient for 10 rounds of folding (so 20 circuits) - if (circuit.num_gates > Flavor::MINI_CIRCUIT_SIZE - NUM_DISABLED_ROWS_IN_SUMCHECK) { + if (circuit.num_gates > Flavor::MINI_CIRCUIT_SIZE) { throw_or_abort("The Translator circuit size has exceeded the fixed upper bound"); } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp 
b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp index e1f4c9095c1a..6c983225ca75 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp @@ -25,7 +25,14 @@ MergeProver::MergeProver(const std::shared_ptr& op_queue, { // Merge the current subtable (for which a merge proof is being constructed) prior to // procedeing with proving. - op_queue->merge(settings); + if (settings == MergeSettings::APPEND) { + size_t last_subtable_size = op_queue->get_current_subtable_size(); + op_queue->merge(settings, ECCOpQueue::OP_QUEUE_SIZE - last_subtable_size); + + } else { + op_queue->merge(settings); + } + pcs_commitment_key = commitment_key.initialized() ? commitment_key : CommitmentKey(op_queue->get_ultra_ops_table_num_rows()); }; From 4f70911842d921cef54ef92382ba7712ef4ee3ad Mon Sep 17 00:00:00 2001 From: Raju Krishnamoorthy Date: Mon, 8 Sep 2025 05:22:06 -0400 Subject: [PATCH 3/6] chore: audit wnaf relations in the ECCVM (#16573) wNAF relations audited, including a sketch of the argument of why the inductive logic works to constrain `round` (rather than range checking). (note that the bound of the degrees of the relations is 5 here, while range is in the range [0, 8), so this seems advantageous, especially as we need the multiset matching for other purposes.) 
--------- Co-authored-by: notnotraju --- .../src/barretenberg/eccvm/eccvm_flavor.hpp | 2 +- .../relations/ecc_vm/ecc_wnaf_relation.hpp | 25 ++-- .../ecc_vm/ecc_wnaf_relation_impl.hpp | 120 ++++++++++++------ 3 files changed, 93 insertions(+), 54 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp index e6cff19a6bd9..7aab509fc1bb 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp @@ -512,7 +512,7 @@ class ECCVMFlavor { * transcript_msm_count_at_transition_inverse: used to validate transcript_msm_count_zero_at_transition * precompute_pc: point counter for Straus precomputation columns * precompute_select: if 1, evaluate Straus precomputation algorithm at current row - * precompute_point_transition: 1 if current row operating on a different point to previous row + * precompute_point_transition: 1 if next row operating on a different point than current row. 
* precompute_round: round counter for Straus precomputation algorithm * precompute_scalar_sum: accumulating sum of Straus scalar slices * precompute_s1hi/lo: 2-bit hi/lo components of a Straus 4-bit scalar slice diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp index fba0dae261bb..ade42e0a1621 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation.hpp @@ -18,19 +18,22 @@ namespace bb { * | point_transition | round | slices | skew | scalar_sum | * | ---------------- | ----- | --------------- | ------ | ------------------------------- | * | 0 | 0 | s0,s1,s2,s3 | 0 | 0 | - * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{31 - i} | - * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{31 - i} | - * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{31 - i} | - * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{31 - i} | - * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{31 - i} | - * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{31 - i} | - * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{31 - i} | + * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{3 - i} | + * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{7 - i} | + * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{11 - i} | + * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{15 - i} | + * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{19 - i} | + * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{23 - i} | + * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{27 - i} | * * The value of the input scalar is equal to the following: * - * scalar = 2^16 * scalar_sum + 2^12 * s31 + 2^8 * s30 + 2^4 * s29 + s28 - s_skew - * We use a set equality check in `ecc_set_relation.hpp` to validate the above value 
maps to the correct input - * scalar for a given value of `pc`. + * scalar = 2^16 * scalar_sum + 2^12 * s28 + 2^8 * s29 + 2^4 * s30 + s31 - s_skew + * + * We use a multiset equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input + * scalar for a given value of `pc` (i.e., for a given non-trivial EC point). In other words, this constrains that the + * wNAF expansion is correct. Note that, from the perspective of the Precomputed table, we only add the tuple (pc, + * round, slice) to the multiset when point_transition == 1. + * + * The column `point_transition` is committed to by the Prover, we must constrain it is correctly computed (see + * `ecc_point_table_relation.cpp` for details) @@ -54,4 +57,4 @@ template class ECCVMWnafRelationImpl { template using ECCVMWnafRelation = Relation>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp index 81a931360ccb..226373d50a8b 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_wnaf_relation_impl.hpp @@ -18,22 +18,25 @@ namespace bb { * | point_transition | round | slices | skew | scalar_sum | * | ---------------- | ----- | --------------- | ------ | ------------------------------- | * | 0 | 0 | s0,s1,s2,s3 | 0 | 0 | - * | 0 | 1 | s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{31 - i} | - * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{31 - i} | - * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{31 - i} | - * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{31 - i} | - * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{31 - i} | - * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{31 - i} | - * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{31 - i} | + * | 0 | 1 | 
s4,s5,s6,s7 | 0 | \sum_{i=0}^4 16^i * s_{3 - i} | + * | 0 | 2 | s8,s9,s10,s11 | 0 | \sum_{i=0}^8 16^i * s_{7 - i} | + * | 0 | 3 | s12,s13,s14,s14 | 0 | \sum_{i=0}^12 16^i * s_{11 - i} | + * | 0 | 4 | s16,s17,s18,s19 | 0 | \sum_{i=0}^16 16^i * s_{15 - i} | + * | 0 | 5 | s20,s21,s22,s23 | 0 | \sum_{i=0}^20 16^i * s_{19 - i} | + * | 0 | 6 | s24,s25,s26,s27 | 0 | \sum_{i=0}^24 16^i * s_{23 - i} | + * | 1 | 7 | s28,s29,s30,s31 | s_skew | \sum_{i=0}^28 16^i * s_{27 - i} | * * The value of the input scalar is equal to the following: * - * scalar = 2^16 * scalar_sum + 2^12 * s31 + 2^8 * s30 + 2^4 * s29 + s28 - s_skew - * We use a set equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input - * scalar for a given value of `pc`. + * scalar = 2^16 * scalar_sum + 2^12 * s28 + 2^8 * s29 + 2^4 * s30 + s31 - s_skew * - * The column `point_transition` is committed to by the Prover, we must constrain it is correctly computed (see - * `ecc_point_table_relation.cpp` for details) + * We use a multiset equality check in `ecc_set_relation.hpp` to validate the above value maps to the correct input + * scalar for a given value of `pc` (i.e., for a given non-trivial EC point). In other words, this constrains that the + * wNAF expansion is correct. Note that, from the perspective of the Precomputed table, we only add the tuple (pc, + * round, slice) to the multiset when point_transition == 1. + * + * Furthermore, as the column `point_transition` is committed to by the Prover, we must constrain it is correctly + * computed (see also `ecc_point_table_relation.cpp` for a description of what the table looks like.) 
* * @tparam FF * @tparam AccumulatorTypes @@ -49,11 +52,11 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato using View = typename Accumulator::View; auto scalar_sum = View(in.precompute_scalar_sum); - auto scalar_sum_new = View(in.precompute_scalar_sum_shift); + auto scalar_sum_shift = View(in.precompute_scalar_sum_shift); auto q_transition = View(in.precompute_point_transition); auto round = View(in.precompute_round); auto round_shift = View(in.precompute_round_shift); - auto pc = View(in.precompute_pc); + auto pc = View(in.precompute_pc); // note that this is a _point-counter_. auto pc_shift = View(in.precompute_pc_shift); // precompute_select is a boolean column. We only evaluate the ecc_wnaf_relation and the ecc_point_table_relation if // `precompute_select=1` @@ -71,6 +74,9 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato acc += ((s - 1).sqr() - 1) * ((s - 2).sqr() - 1) * scaling_factor; }; + // given two 2-bit numbers `s0, `s1`, convert to a wNAF digit (in {-15, -13, ..., 13, 15}) via the formula: + // `2(4s0 + s1) - 15`. (Here, `4s0 + s1` represents the 4-bit number corresponding to the concatenation of `s0` and + // `s1`.) const auto convert_to_wnaf = [](const View& s0, const View& s1) { auto t = s0 + s0; t += t; @@ -80,7 +86,9 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato }; const auto scaled_transition = q_transition * scaling_factor; - const auto scaled_transition_is_zero = -scaled_transition + scaling_factor; + const auto scaled_transition_is_zero = + -scaled_transition + scaling_factor; // `scaling_factor * (1 - q_transition)`, i.e., is the scaling_factor if we + // are _not_ at a transition, else 0. /** * @brief Constrain each of our scalar slice chunks (s1, ..., s8) to be 2 bits. 
* Doing range checks this way vs permutation-based range check removes need to create sorted list + grand product @@ -125,9 +133,11 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato * i.e. next_scalar_sum - 2^{16} * current_scalar_sum - 2^12 * w_0 - 2^8 * w_1 - 2^4 * w_2 - w_3 = 0 * @note We only perform slice_consistency check when next row is processing the same scalar as the current row! * i.e. when q_transition = 0 - * TODO(@zac-williamson) Optimize WNAF use (#2224) + * Note(@zac-williamson): improve WNAF use (#2224) */ - auto row_slice = w0; + auto row_slice = w0; // row_slice will eventually contain the truncated scalar corresponding to the current row, + // which is 2^12 * w_0 + 2^8 * w_1 + 2^4 * w_2 + w_3. (If one just looks at the wNAF digits in + // this row, this is the resulting odd number. Note that it is not necessarily positive.) row_slice += row_slice; row_slice += row_slice; row_slice += row_slice; @@ -144,46 +154,70 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato row_slice += row_slice; row_slice += w3; auto sum_delta = scalar_sum * FF(1ULL << 16) + row_slice; - const auto check_sum = scalar_sum_new - sum_delta; + const auto check_sum = scalar_sum_shift - sum_delta; std::get<8>(accumulator) += precompute_select * check_sum * scaled_transition_is_zero; /** - * @brief Round transition logic. + * @brief Transition logic with `round` and `q_transition`. * Goal: `round` is an integer in [0, ... 7] that tracks how many slices we have processed for a given scalar. - * i.e. number of 4-bit WNAF slices processed = round * 4. - * We apply the following constraints: - * If q_transition = 0, round increments by 1 between rows. - * If q_transition = 1, round value at current row = 7 - * If q_transition = 1, round value at next row = 0 - * Question: is this sufficient? We don't actually range constrain `round` (expensive if we don't need to!). - * Let us analyze... - * 1. 
When `q_transition = 1`, we use a set membership check to map the tuple of (pc, scalar_sum) into a set. - * We compare this set with an equivalent set generated from the transcript columns. The sets must match. - * 2. Only case where, at row `i`, a Prover can set `round` to value > 7 is if `q_transition = 0` for all j > i. - * `precompute_pc` decrements by 1 when `q_transition` = 1 - * We can infer from 1, 2, that if `round > 7`, the resulting wnafs will map into a set at a value of `pc` that is - * greater than all valid msm pc values (assuming the set equivalence check on the scalar sums is satisfied). - * The resulting msm output of such a computation cannot be mapped into the set of msm outputs in - * the transcript columns (see relations in ecc_msm_relation.cpp). - * Conclusion: not applying a strict range-check on `round` does not affect soundness (TODO(@zac-williamson) - * validate this! #2225) + * i.e., the number of 4-bit WNAF slices processed = round * 4. + * We must ensure that `q_transition` is well-formed and that `round` is correctly constrained. Recall that `pc` + * stands for point-counter. + * + * For the former, we force the following: + * 1. When `q_transition == 1`, then `scalar_sum_shift == 0`, `round_shift == 0`, `round == 7`, and `pc_shift + * == pc - 1`. + * 2. When `q_transition == 0`, then `round_shift - round == 1` and `pc_shift == pc` + * + * For the latter: note that we don't actually range-constrain `round` (expensive if we don't need to!). We + * nonetheless can correctly constrain `round`, because of the multiset checks. There are two multiset equality + * checks that we perform that implicate the wNAF relation: + * 1. (pc, msm_round, wnaf_slice) + * 2. (pc, P.x, P.y, scalar-multiplier) + * The first is used to communicate with the MSM table, to validate that the slice * point values the MSM tables use + * are indeed what we have precomputed. 
The second facilitates communication with the Transcript table, to ensure + that the wNAF expansion of the scalar is indeed correct. Moreover, the second is only "sent" to the multiset when + `q_transition == 1`. (It is helpful to recall that `pc` is monotonic: one per each point involved in a + non-trivial scalar multiplication.) + * + * Here is the logic. We must ensure that `round` can never be set to a value > 7. If this were possible at row `i`, + * then `q_transition == 0` for all subsequent rows by the incrementing logic. There are (at least) two problems. + * + * 1. The implicit MSM round (accounted for in (1)) is between `4 * round` and `4 * round + 3` (in fact `4 * + * round + 4` iff we are at a skew). As the `round` must increment, this means that the `msm_round` will be + * larger than 32, which can't happen due to the internal constraints in the MSM table. In particular, the multiset + * equality check will fail, as the MSM tables can never send an entry with a round larger than 32. + * + * 2. This forces `precompute_pc` to be constant from here on out. This will violate the multiset equalities both + * of terms (1) _and_ (2). For the former, we will write too many entries with the given `pc`. (However, we've + * already shown how this multiset equality fails due to `round`.) More importantly, for the latter, we will _never_ + * "send" the tuple (pc, P.x, P.y, scalar-multiplier) to the multiset, for this value of `pc` and all potentially + * subsequent values. We explicate this latter failure. The transcript table will certainly fill _some_ values in + * for (pc, P.x, P.y, scalar-multiplier) (at least with correct pc and scalar-multiplier values), which will cause + * the multiset equality check to fail. + * + * As always, we are relying on the monotonicity of the `pc` in these arguments. 
+ * */ - // We combine checks 0, 1 into a single relation + + // We combine two checks into a single relation // q_transition * (round - 7) + (-q_transition + 1) * (round_shift - round - 1) // => q_transition * (round - 7 - round_shift + round + 1) + (round_shift - round - 1) // => q_transition * (2 * round - round_shift - 6) + (round_shift - round - 1) const auto round_check = round_shift - round - 1; - std::get<9>(accumulator) += precompute_select * scaled_transition * ((round - round_check - 7) + round_check); - std::get<10>(accumulator) += precompute_select * scaled_transition * round_shift; + std::get<9>(accumulator) += + precompute_select * (scaled_transition * (round - round_check - 7) + scaling_factor * round_check); + std::get<10>(accumulator) += + precompute_select * scaled_transition * round_shift; // at a transition, next round == 0 /** - * @brief Scalar transition checks. + * @brief Scalar transition/PC checks. * 1: if q_transition = 1, scalar_sum_new = 0 * 2: if q_transition = 0, pc at next row = pc at current row * 3: if q_transition = 1, pc at next row = pc at current row - 1 (decrements by 1) * (we combine 2 and 3 into a single relation) */ - std::get<11>(accumulator) += precompute_select * scalar_sum_new * scaled_transition; + std::get<11>(accumulator) += precompute_select * scaled_transition * scalar_sum_shift; // (2, 3 combined): q_transition * (pc - pc_shift - 1) + (-q_transition + 1) * (pc_shift - pc) // => q_transition * (-2 * (pc_shift - pc) - 1) + (pc_shift - pc) const auto pc_delta = pc_shift - pc; @@ -201,6 +235,8 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato */ std::get<13>(accumulator) += precompute_select * (precompute_skew * (precompute_skew - 7)) * scaling_factor; + // Set slices (a.k.a. compressed digits), pc, and round all to zero when `precompute_select == 0`. + // (this is for one of the multiset equality checks.) 
const auto precompute_select_zero = (-precompute_select + 1) * scaling_factor; std::get<14>(accumulator) += precompute_select_zero * (w0 + 15); std::get<15>(accumulator) += precompute_select_zero * (w1 + 15); @@ -210,7 +246,7 @@ void ECCVMWnafRelationImpl::accumulate(ContainerOverSubrelations& accumulato std::get<18>(accumulator) += precompute_select_zero * round; std::get<19>(accumulator) += precompute_select_zero * pc; - // TODO(@zac-williamson #2226) + // Note(@zac-williamson #2226) // if precompute_select = 0, validate pc, round, slice values are all zero // If we do this we can reduce the degree of the set equivalence relations // (currently when checking pc/round/wnaf tuples from WNAF columns match those from MSM columns, From 59d8f94ed43ea4f206f2a628fb8f5c1aecc86254 Mon Sep 17 00:00:00 2001 From: Jonathan Hao Date: Mon, 8 Sep 2025 12:51:33 +0100 Subject: [PATCH 4/6] feat: add option to choose between tmpfs and $HOME/tmp in docker_isolate (#16854) Default behavior uses tmpfs mounted at /tmp with configurable size. Set USE_HOME_TMP=1 to mount $HOME/tmp instead of tmpfs. This helps to test the low memory mode as it needs more storage to map memory to files. 
--- ci3/docker_isolate | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/ci3/docker_isolate b/ci3/docker_isolate index 1995f3fd533e..fc2d5b395aa2 100755 --- a/ci3/docker_isolate +++ b/ci3/docker_isolate @@ -7,6 +7,7 @@ cmd=$1 export CPUS=${CPUS:-2} export MEM=${MEM:-$((CPUS * 4))g} export TMPFS_SIZE=${TMPFS_SIZE:-1g} +export USE_HOME_TMP=${USE_HOME_TMP:-0} function cleanup { if [ -n "${cid:-}" ]; then @@ -41,6 +42,16 @@ done network_arg="--net=none" [ "${NET:-0}" -eq 1 ] && network_arg="" +# Set up tmp mount - default to tmpfs, optionally use $HOME/tmp +if [ "${USE_HOME_TMP}" -eq 1 ]; then + # Ensure $HOME/tmp exists + mkdir -p $HOME/tmp + tmp_mount="-v$HOME/tmp:/tmp:z" +else + # Default: use tmpfs with specified size + tmp_mount="--mount type=tmpfs,target=/tmp,tmpfs-size=$TMPFS_SIZE" +fi + # Launch the container in the background. # Don't launch in the foreground or you can't process SIGINT/SIGTERM. # Don't use & as we want to block, and be sure it starts before processing any signals. @@ -55,7 +66,7 @@ cid=$(docker run -d \ --pid=host \ --user $(id -u):$(id -g) \ -v$HOME:$HOME \ - --mount type=tmpfs,target=/tmp,tmpfs-size=$TMPFS_SIZE \ + $tmp_mount \ --workdir $PWD \ -e HOME \ -e VERBOSE \ @@ -64,6 +75,7 @@ cid=$(docker run -d \ -e CPUS \ -e MEM \ -e TMPFS_SIZE \ + -e USE_HOME_TMP \ "${arg_env_vars[@]}" \ aztecprotocol/build:3.0 \ /bin/bash -c "$cmd") From e37e9d7ff07d30ab2380448798c9d0eac919e04d Mon Sep 17 00:00:00 2001 From: ludamad Date: Mon, 8 Sep 2025 11:36:39 -0400 Subject: [PATCH 5/6] refactor(bb-prover): don't use --write_vk in yarn-project (#16834) This is a big current performance sink. 
Saves over a minute in prover/full Closes #15043 --- .../mock-protocol-circuits/bootstrap.sh | 2 +- .../noir-protocol-circuits/bootstrap.sh | 77 ++++++++++--------- yarn-project/bb-prover/src/bb/execute.ts | 10 ++- .../bb-prover/src/prover/server/bb_prover.ts | 1 + .../ivc-integration/src/prove_native.ts | 21 ++++- .../src/scripts/generate_declaration_files.ts | 8 -- yarn-project/ivc-integration/src/witgen.ts | 32 +++++--- .../src/artifacts/vks/client.ts | 26 +++---- .../src/artifacts/vks/server.ts | 54 ++++++------- .../generate_client_artifacts_helper.ts | 6 +- .../src/scripts/generate_declaration_files.ts | 8 -- .../generate_private_kernel_reset_data.ts | 6 +- .../src/scripts/generate_vk_hashes.ts | 22 +++--- .../src/utils/vk_json.ts | 22 +++--- yarn-project/stdlib/src/noir/index.ts | 9 +++ 15 files changed, 169 insertions(+), 135 deletions(-) diff --git a/noir-projects/mock-protocol-circuits/bootstrap.sh b/noir-projects/mock-protocol-circuits/bootstrap.sh index b15177ce1692..c1d0c1eb0904 100755 --- a/noir-projects/mock-protocol-circuits/bootstrap.sh +++ b/noir-projects/mock-protocol-circuits/bootstrap.sh @@ -1,4 +1,4 @@ #!/usr/bin/env bash source $(git rev-parse --show-toplevel)/ci3/source -../noir-protocol-circuits/bootstrap.sh "${1:-}" $(pwd) +NOIR_PROTOCOL_CIRCUITS_WORKING_DIR="$(pwd)" ../noir-protocol-circuits/bootstrap.sh "${1:-}" diff --git a/noir-projects/noir-protocol-circuits/bootstrap.sh b/noir-projects/noir-protocol-circuits/bootstrap.sh index 9c5f44f42a3e..d359670ece45 100755 --- a/noir-projects/noir-protocol-circuits/bootstrap.sh +++ b/noir-projects/noir-protocol-circuits/bootstrap.sh @@ -3,10 +3,9 @@ source $(git rev-parse --show-toplevel)/ci3/source_bootstrap cmd=${1:-} -working_dir=${2:-} # entrypoint for mock circuits -if [ -n "$working_dir" ]; then - cd "$working_dir" +if [ -n "${NOIR_PROTOCOL_CIRCUITS_WORKING_DIR:-}" ]; then + cd "$NOIR_PROTOCOL_CIRCUITS_WORKING_DIR" fi export RAYON_NUM_THREADS=${RAYON_NUM_THREADS:-16} @@ -52,14 +51,13 @@ 
function compile { local name=${dir//-/_} local filename="$name.json" local json_path="./target/$filename" - local program_hash hash bytecode_hash vk vk_fields # We get the monomorphized program hash from nargo. If this changes, we have to recompile. local program_hash_cmd="$NARGO check --package $name --silence-warnings --show-program-hash | cut -d' ' -f2" # echo_stderr $program_hash_cmd - program_hash=$(dump_fail "$program_hash_cmd") + local program_hash=$(dump_fail "$program_hash_cmd") echo_stderr "Hash preimage: $NOIR_HASH-$program_hash" - hash=$(hash_str "$NOIR_HASH-$program_hash" $(cache_content_hash "^noir-projects/noir-protocol-circuits/bootstrap.sh")) + local hash=$(hash_str "$NOIR_HASH-$program_hash" $(cache_content_hash "^noir-projects/noir-protocol-circuits/bootstrap.sh")) if ! cache_download circuit-$hash.tar.gz 1>&2; then SECONDS=0 @@ -79,48 +77,48 @@ function compile { cache_upload circuit-$hash.tar.gz $json_path &> /dev/null fi - if echo "$name" | grep -qE "${private_tail_regex}"; then - local proto="client_ivc_tail" - # We still need the standalone IVC vk. We also create the final IVC vk from the tail (specifically, the number of public inputs is used from it). 
- local write_vk_cmd="write_vk --scheme client_ivc --verifier_type standalone" - elif echo "$name" | grep -qE "${ivc_regex}"; then - local proto="client_ivc" - local write_vk_cmd="write_vk --scheme client_ivc --verifier_type standalone" - elif echo "$name" | grep -qE "${rollup_honk_regex}"; then - local proto="ultra_rollup_honk" - local write_vk_cmd="write_vk --scheme ultra_honk --ipa_accumulation" - elif echo "$name" | grep -qE "rollup_root"; then - local proto="ultra_keccak_honk" - # the root rollup does not need to inject a fake ipa claim - # and does not need to inject a default agg obj, so no -h flag - local write_vk_cmd="write_vk --scheme ultra_honk --oracle_hash keccak" - else - local proto="ultra_honk" - local write_vk_cmd="write_vk --scheme ultra_honk" - fi # No vks needed for simulated circuits. [[ "$name" == *"simulated"* ]] && return - # TODO: Change this to add verification_key to original json, like contracts does. - # Will require changing TS code downstream. - bytecode_hash=$(jq -r '.bytecode' $json_path | sha256sum | tr -d ' -') - hash=$(hash_str "$BB_HASH-$bytecode_hash-$proto-$(cache_content_hash "^noir-projects/noir-protocol-circuits/bootstrap.sh")") + # Add verification key to original json, similar to contracts. + # This adds keyAsBytes and keyAsFields to the JSON artifact. + local bytecode_hash=$(jq -r '.bytecode' $json_path | sha256sum | tr -d ' -') + local hash=$(hash_str "$BB_HASH-$bytecode_hash-$name-3") + local key_path="$key_dir/$name.vk.data.json" if ! cache_download vk-$hash.tar.gz 1>&2; then - local key_path="$key_dir/$name.vk.data.json" - echo_stderr "Generating vk for function: $name..." 
SECONDS=0 - outdir=$(mktemp -d) + local outdir=$(mktemp -d) trap "rm -rf $outdir" EXIT - local vk_cmd="jq -r '.bytecode' $json_path | base64 -d | gunzip | $BB $write_vk_cmd -b - -o $outdir" - echo_stderr $vk_cmd - dump_fail "$vk_cmd" + function write_vk { + if echo "$name" | grep -qE "${private_tail_regex}"; then + # We still need the standalone IVC vk. We also create the final IVC vk from the tail (specifically, the number of public inputs is used from it). + denoise "$BB write_vk --scheme client_ivc --verifier_type standalone -b - -o $outdir" + elif echo "$name" | grep -qE "${ivc_regex}"; then + denoise "$BB write_vk --scheme client_ivc --verifier_type standalone -b - -o $outdir" + elif echo "$name" | grep -qE "${rollup_honk_regex}"; then + denoise "$BB write_vk --scheme ultra_honk --ipa_accumulation -b - -o $outdir" + elif echo "$name" | grep -qE "rollup_root"; then + denoise "$BB write_vk --scheme ultra_honk --oracle_hash keccak -b - -o $outdir" + else + denoise "$BB write_vk --scheme ultra_honk -b - -o $outdir" + fi + } + echo_stderr "Generating vk for function: $name..." + jq -r '.bytecode' $json_path | base64 -d | gunzip | write_vk vk_bytes=$(cat $outdir/vk | xxd -p -c 0) # Split the hex-encoded vk bytes into fields boundaries (but still hex-encoded), first making 64-character lines and then encoding as JSON. # This used to be done by barretenberg itself, but with serialization now always being in field elements we can do it outside of bb. 
vk_fields=$(echo "$vk_bytes" | hex_to_fields_json) - # echo_stderr $vkf_cmd - jq -n --arg vk "$vk_bytes" --argjson vkf "$vk_fields" '{keyAsBytes: $vk, keyAsFields: $vkf}' > $key_path + if [ -f $outdir/vk_hash ]; then + # not created in civc + vk_hash=$(cat $outdir/vk_hash | xxd -p -c 0) + else + vk_hash="" + fi + jq -n --arg vk "$vk_bytes" --argjson vk_fields "$vk_fields" --arg vk_hash "$vk_hash" \ + '{verificationKey: {bytes: $vk, fields: $vk_fields, hash: $vk_hash}}' > $key_path echo_stderr "Key output at: $key_path (${SECONDS}s)" + if echo "$name" | grep -qE "rollup_root"; then # If we are a rollup root circuit, we also need to generate the solidity verifier. local verifier_path="$key_dir/${name}_verifier.sol" @@ -145,6 +143,11 @@ function compile { cache_upload vk-$hash.tar.gz $key_path &> /dev/null fi fi + # VK was downloaded from cache, update the JSON artifact with VK information + jq -s '.[0] * .[1]' "$json_path" "$key_path" > "${json_path}.tmp" + mv "${json_path}.tmp" "$json_path" + # remove temporary json file + rm $key_path } export -f hex_to_fields_json compile diff --git a/yarn-project/bb-prover/src/bb/execute.ts b/yarn-project/bb-prover/src/bb/execute.ts index 6e002785254a..06661d25c904 100644 --- a/yarn-project/bb-prover/src/bb/execute.ts +++ b/yarn-project/bb-prover/src/bb/execute.ts @@ -214,6 +214,7 @@ export async function generateProof( workingDirectory: string, circuitName: string, bytecode: Buffer, + verificationKey: Buffer, inputWitnessFile: string, flavor: UltraHonkFlavor, log: Logger, @@ -227,6 +228,7 @@ export async function generateProof( // The bytecode is written to e.g. /workingDirectory/BaseParityArtifact-bytecode const bytecodePath = `${workingDirectory}/${circuitName}-bytecode`; + const vkPath = `${workingDirectory}/${circuitName}-vk`; // The proof is written to e.g. 
/workingDirectory/ultra_honk/proof const outputPath = `${workingDirectory}`; @@ -240,16 +242,16 @@ export async function generateProof( } try { - // Write the bytecode to the working directory - await fs.writeFile(bytecodePath, bytecode); - // TODO(#15043): Avoid write_vk flag here. + // Write the bytecode and vk to the working directory + await Promise.all([fs.writeFile(bytecodePath, bytecode), fs.writeFile(vkPath, verificationKey)]); const args = getArgs(flavor).concat([ '--disable_zk', - '--write_vk', '-o', outputPath, '-b', bytecodePath, + '-k', + vkPath, '-w', inputWitnessFile, '-v', diff --git a/yarn-project/bb-prover/src/prover/server/bb_prover.ts b/yarn-project/bb-prover/src/prover/server/bb_prover.ts index 9ae9a401fbcd..ab18aecd1a81 100644 --- a/yarn-project/bb-prover/src/prover/server/bb_prover.ts +++ b/yarn-project/bb-prover/src/prover/server/bb_prover.ts @@ -467,6 +467,7 @@ export class BBNativeRollupProver implements ServerCircuitProver { workingDirectory, circuitType, Buffer.from(artifact.bytecode, 'base64'), + this.getVerificationKeyDataForCircuit(circuitType).keyAsBytes, outputWitnessFile, getUltraHonkFlavorForCircuit(circuitType), logger, diff --git a/yarn-project/ivc-integration/src/prove_native.ts b/yarn-project/ivc-integration/src/prove_native.ts index c61bb510095c..f48de7ab8acc 100644 --- a/yarn-project/ivc-integration/src/prove_native.ts +++ b/yarn-project/ivc-integration/src/prove_native.ts @@ -29,12 +29,27 @@ import { makeProofAndVerificationKey } from '@aztec/stdlib/interfaces/server'; import type { NoirCompiledCircuit } from '@aztec/stdlib/noir'; import type { ClientIvcProof, Proof } from '@aztec/stdlib/proofs'; import { enhanceProofWithPiValidationFlag } from '@aztec/stdlib/rollup'; -import { VerificationKeyAsFields, type VerificationKeyData } from '@aztec/stdlib/vks'; +import { VerificationKeyAsFields, VerificationKeyData } from '@aztec/stdlib/vks'; import * as fs from 'fs/promises'; import { Encoder } from 'msgpackr'; import * as path 
from 'path'; +/** + * Converts verification key bytes from a compiled circuit to VerificationKeyData format + * @param vkBytes - The verification key bytes from the circuit + * @returns The verification key data + */ +async function convertVkBytesToVkData(vkBytes: Buffer): Promise { + // Convert binary to field elements (32 bytes per field) + const numFields = vkBytes.length / Fr.SIZE_IN_BYTES; + const reader = BufferReader.asReader(vkBytes); + const fields = reader.readArray(numFields, Fr); + + const vkAsFields = await VerificationKeyAsFields.fromKey(fields); + return new VerificationKeyData(vkAsFields, vkBytes); +} + export async function proveClientIVC( bbBinaryPath: string, bbWorkingDirectory: string, @@ -120,11 +135,13 @@ async function proveRollupCircuit = { - PrivateKernelInitArtifact: keyJsonToVKData(PrivateKernelInitVkJson), - PrivateKernelInnerArtifact: keyJsonToVKData(PrivateKernelInnerVkJson), - PrivateKernelTailArtifact: keyJsonToVKData(PrivateKernelTailVkJson), - PrivateKernelTailToPublicArtifact: keyJsonToVKData(PrivateKernelTailToPublicVkJson), - HidingKernelToRollup: keyJsonToVKData(HidingKernelToRollupVkJson), - HidingKernelToPublic: keyJsonToVKData(HidingKernelToPublicVkJson), + PrivateKernelInitArtifact: abiToVKData(PrivateKernelInitJson), + PrivateKernelInnerArtifact: abiToVKData(PrivateKernelInnerJson), + PrivateKernelTailArtifact: abiToVKData(PrivateKernelTailJson), + PrivateKernelTailToPublicArtifact: abiToVKData(PrivateKernelTailToPublicJson), + HidingKernelToRollup: abiToVKData(HidingKernelToRollupJson), + HidingKernelToPublic: abiToVKData(HidingKernelToPublicJson), ...PrivateKernelResetVks, }; diff --git a/yarn-project/noir-protocol-circuits-types/src/artifacts/vks/server.ts b/yarn-project/noir-protocol-circuits-types/src/artifacts/vks/server.ts index c113d5fd46b5..bb7afbff723d 100644 --- a/yarn-project/noir-protocol-circuits-types/src/artifacts/vks/server.ts +++ b/yarn-project/noir-protocol-circuits-types/src/artifacts/vks/server.ts @@ 
-21,37 +21,37 @@ import { } from '@aztec/constants'; import { VerificationKeyData } from '@aztec/stdlib/vks'; -import BaseParityVkJson from '../../../artifacts/keys/parity_base.vk.data.json' with { type: 'json' }; -import RootParityVkJson from '../../../artifacts/keys/parity_root.vk.data.json' with { type: 'json' }; -import PrivateTubeVkJson from '../../../artifacts/keys/private_tube.vk.data.json' with { type: 'json' }; -import PublicTubeVkJson from '../../../artifacts/keys/public_tube.vk.data.json' with { type: 'json' }; -import PrivateBaseRollupVkJson from '../../../artifacts/keys/rollup_base_private.vk.data.json' with { type: 'json' }; -import PublicBaseRollupVkJson from '../../../artifacts/keys/rollup_base_public.vk.data.json' with { type: 'json' }; -import BlockMergeRollupVkJson from '../../../artifacts/keys/rollup_block_merge.vk.data.json' with { type: 'json' }; -import BlockRootRollupVkJson from '../../../artifacts/keys/rollup_block_root.vk.data.json' with { type: 'json' }; -import EmptyBlockRootRollupVkJson from '../../../artifacts/keys/rollup_block_root_empty.vk.data.json' with { type: 'json' }; -import PaddingBlockRootRollupVkJson from '../../../artifacts/keys/rollup_block_root_padding.vk.data.json' with { type: 'json' }; -import SingleTxBlockRootRollupVkJson from '../../../artifacts/keys/rollup_block_root_single_tx.vk.data.json' with { type: 'json' }; -import MergeRollupVkJson from '../../../artifacts/keys/rollup_merge.vk.data.json' with { type: 'json' }; -import RootRollupVkJson from '../../../artifacts/keys/rollup_root.vk.data.json' with { type: 'json' }; +import BaseParity from '../../../artifacts/parity_base.json' with { type: 'json' }; +import RootParity from '../../../artifacts/parity_root.json' with { type: 'json' }; +import PrivateTube from '../../../artifacts/private_tube.json' with { type: 'json' }; +import PublicTube from '../../../artifacts/public_tube.json' with { type: 'json' }; +import PrivateBaseRollup from 
'../../../artifacts/rollup_base_private.json' with { type: 'json' }; +import PublicBaseRollup from '../../../artifacts/rollup_base_public.json' with { type: 'json' }; +import BlockMergeRollup from '../../../artifacts/rollup_block_merge.json' with { type: 'json' }; +import BlockRootRollup from '../../../artifacts/rollup_block_root.json' with { type: 'json' }; +import EmptyBlockRootRollup from '../../../artifacts/rollup_block_root_empty.json' with { type: 'json' }; +import PaddingBlockRootRollup from '../../../artifacts/rollup_block_root_padding.json' with { type: 'json' }; +import SingleTxBlockRootRollup from '../../../artifacts/rollup_block_root_single_tx.json' with { type: 'json' }; +import MergeRollup from '../../../artifacts/rollup_merge.json' with { type: 'json' }; +import RootRollup from '../../../artifacts/rollup_root.json' with { type: 'json' }; import { PrivateKernelResetVkIndexes } from '../../private_kernel_reset_vks.js'; -import { keyJsonToVKData } from '../../utils/vk_json.js'; +import { abiToVKData } from '../../utils/vk_json.js'; import type { ProtocolCircuitName, ServerProtocolCircuitName } from '../types.js'; export const ServerCircuitVks: Record = { - BaseParityArtifact: keyJsonToVKData(BaseParityVkJson), - RootParityArtifact: keyJsonToVKData(RootParityVkJson), - PrivateBaseRollupArtifact: keyJsonToVKData(PrivateBaseRollupVkJson), - PublicBaseRollupArtifact: keyJsonToVKData(PublicBaseRollupVkJson), - MergeRollupArtifact: keyJsonToVKData(MergeRollupVkJson), - BlockRootRollupArtifact: keyJsonToVKData(BlockRootRollupVkJson), - SingleTxBlockRootRollupArtifact: keyJsonToVKData(SingleTxBlockRootRollupVkJson), - EmptyBlockRootRollupArtifact: keyJsonToVKData(EmptyBlockRootRollupVkJson), - PaddingBlockRootRollupArtifact: keyJsonToVKData(PaddingBlockRootRollupVkJson), - BlockMergeRollupArtifact: keyJsonToVKData(BlockMergeRollupVkJson), - RootRollupArtifact: keyJsonToVKData(RootRollupVkJson), - PrivateTube: keyJsonToVKData(PrivateTubeVkJson), - PublicTube: 
keyJsonToVKData(PublicTubeVkJson), + BaseParityArtifact: abiToVKData(BaseParity), + RootParityArtifact: abiToVKData(RootParity), + PrivateBaseRollupArtifact: abiToVKData(PrivateBaseRollup), + PublicBaseRollupArtifact: abiToVKData(PublicBaseRollup), + MergeRollupArtifact: abiToVKData(MergeRollup), + BlockRootRollupArtifact: abiToVKData(BlockRootRollup), + SingleTxBlockRootRollupArtifact: abiToVKData(SingleTxBlockRootRollup), + EmptyBlockRootRollupArtifact: abiToVKData(EmptyBlockRootRollup), + PaddingBlockRootRollupArtifact: abiToVKData(PaddingBlockRootRollup), + BlockMergeRollupArtifact: abiToVKData(BlockMergeRollup), + RootRollupArtifact: abiToVKData(RootRollup), + PrivateTube: abiToVKData(PrivateTube), + PublicTube: abiToVKData(PublicTube), }; export const ProtocolCircuitVkIndexes: Record = { diff --git a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_client_artifacts_helper.ts b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_client_artifacts_helper.ts index f4e5e5ed6720..44a13761bfc8 100644 --- a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_client_artifacts_helper.ts +++ b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_client_artifacts_helper.ts @@ -26,7 +26,7 @@ function generateImports() { import type { NoirCompiledCircuit, NoirCompiledCircuitWithName } from '@aztec/stdlib/noir'; import type { ClientProtocolArtifact } from './artifacts/types.js'; import { VerificationKeyData } from '@aztec/stdlib/vks'; - import { keyJsonToVKData } from './utils/vk_json.js'; + import { abiToVKData } from './utils/vk_json.js'; `; } @@ -88,8 +88,8 @@ function generateVkImportFunction() { // https://caniuse.com/mdn-javascript_statements_import_import_attributes_type_json // In the meantime, this lazy import is INCOMPATIBLE WITH NODEJS return `case '${artifactName}': { - const { default: keyData } = await import("../artifacts/keys/${artifactName}.vk.data.json"); - return keyJsonToVKData(keyData); + const { default: 
keyData } = await import("../artifacts/${artifactName}.json"); + return abiToVKData(keyData); }`; }); diff --git a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_declaration_files.ts b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_declaration_files.ts index 2534fab8e71b..3d57a06b03fb 100644 --- a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_declaration_files.ts +++ b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_declaration_files.ts @@ -9,11 +9,6 @@ const circuit: NoirCompiledCircuit; export = circuit; `; -const vk = `\ -const vk: { keyAsBytes: string; keyAsFields: string[], vkHash: string }; -export = vk; -`; - async function generateDeclarationFor(target: string, content: string) { const files = await readdir(target); for (const file of files) { @@ -28,6 +23,3 @@ async function generateDeclarationFor(target: string, content: string) { // Generate declaration files for contracts await generateDeclarationFor(fileURLToPath(new URL('../../artifacts', import.meta.url).href), contract); - -// Generate declaration files for vks -await generateDeclarationFor(fileURLToPath(new URL('../../artifacts/keys', import.meta.url).href), vk); diff --git a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_private_kernel_reset_data.ts b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_private_kernel_reset_data.ts index bf3230187824..59f9a3d0b47f 100644 --- a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_private_kernel_reset_data.ts +++ b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_private_kernel_reset_data.ts @@ -41,7 +41,7 @@ function generateTypeFileImports() { function generateVkFileImports() { return ` import type { VerificationKeyData } from '@aztec/stdlib/vks'; - import { keyJsonToVKData } from './utils/vk_json.js'; + import { abiToVKData } from './utils/vk_json.js'; import type { PrivateResetArtifact } from './private_kernel_reset_types.js'; `; @@ 
-89,7 +89,7 @@ function generateVksImports(importTags: string[]) { return importTags .map( tag => - `import PrivateKernelResetVkJson${tag} from '../artifacts/keys/private_kernel_reset${tag}.vk.data.json' with { type: 'json' };`, + `import PrivateKernelResetJson${tag} from '../artifacts/private_kernel_reset${tag}.json' with { type: 'json' };`, ) .join('\n'); } @@ -127,7 +127,7 @@ function generateSimulatedArtifacts(resetVariantTags: string[], importTags: stri function generateVks(resetVariantTags: string[], importTags: string[]) { const artifacts = resetVariantTags.map( - (tag, i) => `${getArtifactName(tag)}: keyJsonToVKData(PrivateKernelResetVkJson${importTags[i]}),`, + (tag, i) => `${getArtifactName(tag)}: abiToVKData(PrivateKernelResetJson${importTags[i]}),`, ); return ` export const PrivateKernelResetVks: Record = { diff --git a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_vk_hashes.ts b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_vk_hashes.ts index e420197187bb..53be9f9d3df7 100644 --- a/yarn-project/noir-protocol-circuits-types/src/scripts/generate_vk_hashes.ts +++ b/yarn-project/noir-protocol-circuits-types/src/scripts/generate_vk_hashes.ts @@ -15,12 +15,15 @@ function resolveRelativePath(relativePath: string) { async function generateFakeTubeVK(name: string) { const tubeVK = VerificationKeyData.makeFakeRollupHonk(); - const tubeVKPath = resolveRelativePath(`../../artifacts/keys/${name}.vk.data.json`); + const tubeVKPath = resolveRelativePath(`../../artifacts/${name}.json`); await fs.writeFile( tubeVKPath, JSON.stringify({ - keyAsBytes: tubeVK.keyAsBytes.toString('hex'), - keyAsFields: tubeVK.keyAsFields.key.map((field: Fr) => field.toString()), + verificationKey: { + bytes: tubeVK.keyAsBytes.toString('hex'), + fields: tubeVK.keyAsFields.key.map((field: Fr) => field.toString()), + hash: tubeVK.keyAsFields.hash.toString(), + }, }), ); } @@ -31,15 +34,16 @@ const main = async () => { await generateFakeTubeVK('private_tube'); 
await generateFakeTubeVK('public_tube'); - const files = await fs.readdir(resolveRelativePath('../../artifacts/keys')); + const files = await fs.readdir(resolveRelativePath('../../artifacts')); for (const fileName of files) { - if (fileName.endsWith('.vk.data.json')) { - const keyPath = join(resolveRelativePath(`../../artifacts/keys`), fileName); + if (fileName.endsWith('.json')) { + const keyPath = join(resolveRelativePath(`../../artifacts`), fileName); const content = JSON.parse(await fs.readFile(keyPath, 'utf-8')); - if (!content.vkHash) { - const { keyAsFields } = content; + // Check if this has verificationKey field (from noir-protocol-circuits) + if (content.verificationKey && !content.verificationKey.hash) { + const { fields } = content.verificationKey; - content.vkHash = (await hashVK(keyAsFields.map((str: string) => Fr.fromHexString(str)))).toString(); + content.verificationKey.hash = (await hashVK(fields.map((str: string) => Fr.fromHexString(str)))).toString(); await fs.writeFile(keyPath, JSON.stringify(content, null, 2)); } } diff --git a/yarn-project/noir-protocol-circuits-types/src/utils/vk_json.ts b/yarn-project/noir-protocol-circuits-types/src/utils/vk_json.ts index bafb0aa27bd6..16f58fd9b60e 100644 --- a/yarn-project/noir-protocol-circuits-types/src/utils/vk_json.ts +++ b/yarn-project/noir-protocol-circuits-types/src/utils/vk_json.ts @@ -1,19 +1,23 @@ import { Fr } from '@aztec/foundation/fields'; +import type { NoirCompiledCircuit } from '@aztec/stdlib/noir'; import { VerificationKeyAsFields, VerificationKeyData } from '@aztec/stdlib/vks'; -interface VkJson { - keyAsBytes: string; - keyAsFields: string[]; - vkHash: string; +// Type for VK-only JSON files +interface VkOnlyJson { + verificationKey: { + bytes: string; + fields: string[]; + hash: string; + }; } -export function keyJsonToVKData(json: VkJson): VerificationKeyData { - const { keyAsBytes, keyAsFields, vkHash } = json; +export function abiToVKData(json: NoirCompiledCircuit | VkOnlyJson): 
VerificationKeyData { + const { verificationKey } = json; return new VerificationKeyData( new VerificationKeyAsFields( - keyAsFields.map((str: string) => Fr.fromHexString(str)), - Fr.fromHexString(vkHash), + verificationKey.fields.map((str: string) => Fr.fromHexString(str)), + Fr.fromHexString(verificationKey.hash), ), - Buffer.from(keyAsBytes, 'hex'), + Buffer.from(verificationKey.bytes, 'hex'), ); } diff --git a/yarn-project/stdlib/src/noir/index.ts b/yarn-project/stdlib/src/noir/index.ts index 0787e17c5b2e..2fd8467f26be 100644 --- a/yarn-project/stdlib/src/noir/index.ts +++ b/yarn-project/stdlib/src/noir/index.ts @@ -87,6 +87,15 @@ export interface NoirCompiledCircuit { abi: NoirFunctionAbi; /** The bytecode of the circuit in base64. */ bytecode: string; + /** The verification key of the circuit. */ + verificationKey: { + /** hex-encoded binary */ + bytes: string; + /** list of hex-encoded fields */ + fields: string[]; + /** The hex-encoded hash. */ + hash: string; + }; /** The debug information, compressed and base64 encoded. */ debug_symbols: string; /** The map of file ID to the source code and path of the file. */ From e9cb0945f0df09d42408305dc04a9084ea4aa47e Mon Sep 17 00:00:00 2001 From: ledwards2225 <98505400+ledwards2225@users.noreply.github.com> Date: Mon, 8 Sep 2025 10:30:58 -0700 Subject: [PATCH 6/6] chore: cycle group cleanup (#16830) More cycle_group cleanup: - Simplify type aliasing based on the fact that cycle_group is only used to represent grumpkin points - Introduce shared method for nearly identical `unconditional_add`/`subtract` (fixes small performance bug) - Add `const` and eliminate use of `auto` in a number of places - Temporary ASSERT protecting against use of mixed-sized scalars in `batch_mul` which is ostensibly supported but broken, plus a test. 
(Will likely remove the option for mixed sized scalars in a follow on since its not working and not currently used) - Remove unnecessary duplicate computation of `scalar_slices` in `_variable_base_batch_mul_internal` (Note: this removes ~12 gates per scalar and is the reason for the VK change). --- ...test_civc_standalone_vks_havent_changed.sh | 2 +- .../eccvm_recursive_verifier.test.cpp | 6 + .../stdlib/primitives/group/cycle_group.cpp | 201 +++++++----------- .../stdlib/primitives/group/cycle_group.hpp | 37 ++-- .../primitives/group/cycle_group.test.cpp | 30 +++ 5 files changed, 129 insertions(+), 147 deletions(-) diff --git a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh index 00c3da733e5c..2221aa607057 100755 --- a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh @@ -13,7 +13,7 @@ cd .. # - Generate a hash for versioning: sha256sum bb-civc-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-civc-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-civc-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... 
Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 -pinned_short_hash="d6f612e1" +pinned_short_hash="16b530e4" pinned_civc_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-civc-inputs-${pinned_short_hash}.tar.gz" function compress_and_upload { diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp index ad59e61ea5c9..f4a1ddd09418 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp @@ -137,6 +137,12 @@ class ECCVMRecursiveTests : public ::testing::Test { ASSERT_TRUE(verified); } + + // Check that the size of the recursive verifier is consistent with historical expectation + uint32_t NUM_GATES_EXPECTED = 213923; + ASSERT_EQ(static_cast(outer_circuit.get_num_finalized_gates()), NUM_GATES_EXPECTED) + << "Ultra-arithmetized ECCVM Recursive verifier gate count changed! Update this value if you are sure this " + "is expected."; } static void test_recursive_verification_failure() diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 6cb545dd9880..f5d5941bf000 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -85,7 +85,7 @@ cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity) * @param is_infinity */ template -cycle_group::cycle_group(const FF& _x, const FF& _y, bool is_infinity) +cycle_group::cycle_group(const bb::fr& _x, const bb::fr& _y, bool is_infinity) : x(is_infinity ? 0 : _x) , y(is_infinity ? 
0 : _y) , _is_infinity(is_infinity) @@ -151,8 +151,8 @@ cycle_group cycle_group::from_witness(Builder* _context, const // Since we are not using these coordinates anyway // We can set them both to be zero if (_in.is_point_at_infinity()) { - result.x = field_t(witness_t(_context, FF::zero())); - result.y = field_t(witness_t(_context, FF::zero())); + result.x = field_t(witness_t(_context, bb::fr::zero())); + result.y = field_t(witness_t(_context, bb::fr::zero())); } else { result.x = field_t(witness_t(_context, _in.x)); result.y = field_t(witness_t(_context, _in.y)); @@ -186,8 +186,8 @@ cycle_group cycle_group::from_constant_witness(Builder* _conte // Since we are not using these coordinates anyway // We can set them both to be zero if (_in.is_point_at_infinity()) { - result.x = FF::zero(); - result.y = FF::zero(); + result.x = bb::fr::zero(); + result.y = bb::fr::zero(); result._is_constant = true; } else { result.x = field_t(witness_t(_context, _in.x)); @@ -362,8 +362,8 @@ cycle_group cycle_group::dbl(const std::optional cycle_group::dbl(const std::optional cycle_group::dbl(const std::optionalcreate_ecc_dbl_gate(bb::ecc_dbl_gate_{ + context->create_ecc_dbl_gate(bb::ecc_dbl_gate_{ .x1 = x.get_witness_index(), .y1 = modified_y.get_witness_index(), .x3 = result.x.get_witness_index(), @@ -411,68 +411,68 @@ cycle_group cycle_group::dbl(const std::optional */ template -cycle_group cycle_group::unconditional_add(const cycle_group& other, - const std::optional hint) const - requires IsUltraArithmetic +cycle_group cycle_group::_unconditional_add_or_subtract(const cycle_group& other, + bool is_addition, + const std::optional hint) const { auto context = get_context(other); + // if one or the other point is constant, construct a corresponding fixed witness in order to utilize the custom + // ecc_add gate const bool lhs_constant = is_constant(); const bool rhs_constant = other.is_constant(); if (lhs_constant && !rhs_constant) { auto lhs = 
cycle_group::from_constant_witness(context, get_value()); // We need to manually propagate the origin tag lhs.set_origin_tag(get_origin_tag()); - return lhs.unconditional_add(other, hint); + return lhs._unconditional_add_or_subtract(other, is_addition, hint); } if (!lhs_constant && rhs_constant) { auto rhs = cycle_group::from_constant_witness(context, other.get_value()); // We need to manually propagate the origin tag rhs.set_origin_tag(other.get_origin_tag()); - return unconditional_add(rhs, hint); + return _unconditional_add_or_subtract(rhs, is_addition, hint); } cycle_group result; if (hint.has_value()) { - auto x3 = hint.value().x; - auto y3 = hint.value().y; + const bb::fr x3 = hint.value().x; + const bb::fr y3 = hint.value().y; if (lhs_constant && rhs_constant) { return cycle_group(x3, y3, /*is_infinity=*/false); } result = cycle_group(witness_t(context, x3), witness_t(context, y3), /*is_infinity=*/false); } else { - const auto p1 = get_value(); - const auto p2 = other.get_value(); - AffineElement p3(Element(p1) + Element(p2)); + const AffineElement p1 = get_value(); + const AffineElement p2 = other.get_value(); + AffineElement p3 = is_addition ? (Element(p1) + Element(p2)) : (Element(p1) - Element(p2)); if (lhs_constant && rhs_constant) { auto result = cycle_group(p3); // We need to manually propagate the origin tag result.set_origin_tag(OriginTag(get_origin_tag(), other.get_origin_tag())); return result; } - field_t r_x(witness_t(context, p3.x)); - field_t r_y(witness_t(context, p3.y)); - result = cycle_group(r_x, r_y, /*is_infinity=*/false); + result = cycle_group(witness_t(context, p3.x), witness_t(context, p3.y), /*is_infinity=*/false); } - bb::ecc_add_gate_ add_gate{ + bb::ecc_add_gate_ add_gate{ .x1 = x.get_witness_index(), .y1 = y.get_witness_index(), .x2 = other.x.get_witness_index(), .y2 = other.y.get_witness_index(), .x3 = result.x.get_witness_index(), .y3 = result.y.get_witness_index(), - .sign_coefficient = 1, + .sign_coefficient = is_addition ? 
1 : -1, }; context->create_ecc_add_gate(add_gate); @@ -481,78 +481,18 @@ cycle_group cycle_group::unconditional_add(const cycle_group& return result; } -/** - * @brief will evaluate ECC point subtraction over `*this` and `other`. - * Incomplete addition formula edge cases are *NOT* checked! - * Only use this method if you know the x-coordinates of the operands cannot collide - * and none of the operands is a point at infinity - * - * @tparam Builder - * @param other - * @param hint : value of output point witness, if known ahead of time (used to avoid modular inversions during witgen) - * @return cycle_group - */ +template +cycle_group cycle_group::unconditional_add(const cycle_group& other, + const std::optional hint) const +{ + return _unconditional_add_or_subtract(other, /*is_addition=*/true, hint); +} + template cycle_group cycle_group::unconditional_subtract(const cycle_group& other, const std::optional hint) const { - if constexpr (!IS_ULTRA) { - return unconditional_add(-other, hint); - } else { - auto context = get_context(other); - - const bool lhs_constant = is_constant(); - const bool rhs_constant = other.is_constant(); - - if (lhs_constant && !rhs_constant) { - auto lhs = cycle_group::from_constant_witness(context, get_value()); - // We need to manually propagate the origin tag - lhs.set_origin_tag(get_origin_tag()); - return lhs.unconditional_subtract(other, hint); - } - if (!lhs_constant && rhs_constant) { - auto rhs = cycle_group::from_constant_witness(context, other.get_value()); - // We need to manually propagate the origin tag - rhs.set_origin_tag(other.get_origin_tag()); - return unconditional_subtract(rhs); - } - cycle_group result; - if (hint.has_value()) { - auto x3 = hint.value().x; - auto y3 = hint.value().y; - if (lhs_constant && rhs_constant) { - return cycle_group(x3, y3, /*is_infinity=*/false); - } - result = cycle_group(witness_t(context, x3), witness_t(context, y3), /*is_infinity=*/false); - } else { - auto p1 = get_value(); - auto p2 
= other.get_value(); - AffineElement p3(Element(p1) - Element(p2)); - if (lhs_constant && rhs_constant) { - auto result = cycle_group(p3); - // We need to manually propagate the origin tag - result.set_origin_tag(OriginTag(get_origin_tag(), other.get_origin_tag())); - return result; - } - field_t r_x(witness_t(context, p3.x)); - field_t r_y(witness_t(context, p3.y)); - result = cycle_group(r_x, r_y, /*is_infinity=*/false); - } - bb::ecc_add_gate_ add_gate{ - .x1 = x.get_witness_index(), - .y1 = y.get_witness_index(), - .x2 = other.x.get_witness_index(), - .y2 = other.y.get_witness_index(), - .x3 = result.x.get_witness_index(), - .y3 = result.y.get_witness_index(), - .sign_coefficient = -1, - }; - context->create_ecc_add_gate(add_gate); - - // We need to manually propagate the origin tag (merging the tag of two inputs) - result.set_origin_tag(OriginTag(get_origin_tag(), other.get_origin_tag())); - return result; - } + return _unconditional_add_or_subtract(other, /*is_addition=*/false, hint); } /** @@ -572,7 +512,7 @@ template cycle_group cycle_group::checked_unconditional_add(const cycle_group& other, const std::optional hint) const { - field_t x_delta = this->x - other.x; + const field_t x_delta = this->x - other.x; if (x_delta.is_constant()) { ASSERT(x_delta.get_value() != 0); } else { @@ -598,7 +538,7 @@ template cycle_group cycle_group::checked_unconditional_subtract(const cycle_group& other, const std::optional hint) const { - field_t x_delta = this->x - other.x; + const field_t x_delta = this->x - other.x; if (x_delta.is_constant()) { ASSERT(x_delta.get_value() != 0); } else { @@ -631,13 +571,13 @@ template cycle_group cycle_group::operator+ const bool_t double_predicate = (x_coordinates_match && y_coordinates_match); const bool_t infinity_predicate = (x_coordinates_match && !y_coordinates_match); - auto x1 = x; - auto y1 = y; - auto x2 = other.x; - auto y2 = other.y; + const field_t x1 = x; + const field_t y1 = y; + const field_t x2 = other.x; + const field_t 
y2 = other.y; // if x_coordinates match, lambda triggers a divide by zero error. // Adding in `x_coordinates_match` ensures that lambda will always be well-formed - auto x_diff = x2.add_two(-x1, x_coordinates_match); + const field_t x_diff = x2.add_two(-x1, x_coordinates_match); // Computes lambda = (y2-y1)/x_diff, using the fact that x_diff is never 0 field_t lambda; if ((y1.is_constant() && y2.is_constant()) || x_diff.is_constant()) { @@ -650,11 +590,11 @@ template cycle_group cycle_group::operator+ field_t::evaluate_polynomial_identity(x_diff, lambda, -y2, y1); } - auto x3 = lambda.madd(lambda, -(x2 + x1)); - auto y3 = lambda.madd(x1 - x3, -y1); + const field_t x3 = lambda.madd(lambda, -(x2 + x1)); + const field_t y3 = lambda.madd(x1 - x3, -y1); cycle_group add_result(x3, y3, x_coordinates_match); - auto dbl_result = dbl(); + const cycle_group dbl_result = dbl(); // dbl if x_match, y_match // infinity if x_match, !y_match @@ -706,11 +646,11 @@ template cycle_group cycle_group::operator- if (!infinity_predicate.is_constant()) { infinity_predicate.get_context()->update_used_witnesses(infinity_predicate.get_normalized_witness_index()); } - auto x1 = x; - auto y1 = y; - auto x2 = other.x; - auto y2 = other.y; - auto x_diff = x2.add_two(-x1, x_coordinates_match); + const field_t x1 = x; + const field_t y1 = y; + const field_t x2 = other.x; + const field_t y2 = other.y; + const field_t x_diff = x2.add_two(-x1, x_coordinates_match); // Computes lambda = (-y2-y1)/x_diff, using the fact that x_diff is never 0 field_t lambda; if ((y1.is_constant() && y2.is_constant()) || x_diff.is_constant()) { @@ -723,16 +663,16 @@ template cycle_group cycle_group::operator- field_t::evaluate_polynomial_identity(x_diff, lambda, y2, y1); } - auto x3 = lambda.madd(lambda, -(x2 + x1)); - auto y3 = lambda.madd(x1 - x3, -y1); + const field_t x3 = lambda.madd(lambda, -(x2 + x1)); + const field_t y3 = lambda.madd(x1 - x3, -y1); cycle_group add_result(x3, y3, x_coordinates_match); - auto 
dbl_result = dbl(); + const cycle_group dbl_result = dbl(); // dbl if x_match, !y_match // infinity if x_match, y_match - auto result_x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); - auto result_y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); + field_t result_x = field_t::conditional_assign(double_predicate, dbl_result.x, add_result.x); + field_t result_y = field_t::conditional_assign(double_predicate, dbl_result.y, add_result.y); if constexpr (IsUltraBuilder) { if (result_x.get_context()) { @@ -850,6 +790,13 @@ typename cycle_group::batch_mul_internal_output cycle_group::_ const size_t num_points = scalars.size(); std::vector scalar_slices; + scalar_slices.reserve(num_points); + for (size_t i = 0; i < num_points; ++i) { + scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], TABLE_BITS)); + // AUDITTODO: temporary safety check. See test MixedLengthScalarsIsNotSupported + BB_ASSERT_EQ( + scalar_slices[i].slices_native.size() == num_rounds, true, "Scalars of different sizes not supported!"); + } /** * Compute the witness values of the batch_mul algorithm natively, as Element types with a Z-coordinate. 
@@ -871,8 +818,6 @@ typename cycle_group::batch_mul_internal_output cycle_group::_ native_straus_tables.emplace_back(native_straus_table); } for (size_t i = 0; i < num_points; ++i) { - scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], TABLE_BITS)); - auto table_transcript = straus_lookup_table::compute_straus_lookup_table_hints( base_points[i].get_value(), offset_generators[i + 1], TABLE_BITS); std::copy(table_transcript.begin() + 1, table_transcript.end(), std::back_inserter(operation_transcript)); @@ -889,7 +834,6 @@ typename cycle_group::batch_mul_internal_output cycle_group::_ } } for (size_t j = 0; j < num_points; ++j) { - const Element point = native_straus_tables[j][static_cast(scalar_slices[j].slices_native[num_rounds - i - 1])]; @@ -917,7 +861,6 @@ typename cycle_group::batch_mul_internal_output cycle_group::_ std::span table_hints(&operation_hints[i * hints_per_table], hints_per_table); // Merge tags tag = OriginTag(tag, scalars[i].get_origin_tag(), base_points[i].get_origin_tag()); - scalar_slices.emplace_back(straus_scalar_slice(context, scalars[i], TABLE_BITS)); point_tables.emplace_back(straus_lookup_table(context, base_points[i], offset_generators[i + 1], TABLE_BITS)); } @@ -1122,7 +1065,7 @@ typename cycle_group::batch_mul_internal_output cycle_group::_ template cycle_group cycle_group::batch_mul(const std::vector& base_points, const std::vector& scalars, - const GeneratorContext context) + const GeneratorContext& context) { BB_ASSERT_EQ(scalars.size(), base_points.size()); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 53d360c52920..98a9211efb12 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -38,30 +38,29 @@ template class cycle_group { using field_t = stdlib::field_t; using bool_t = 
stdlib::bool_t; using witness_t = stdlib::witness_t; - using FF = typename Builder::FF; - using Curve = typename Builder::EmbeddedCurve; - using Group = typename Curve::Group; - using Element = typename Curve::Element; - using AffineElement = typename Curve::AffineElement; + + using Curve = bb::curve::Grumpkin; + using Group = bb::grumpkin::g1; + using Element = bb::grumpkin::g1::element; + using AffineElement = bb::grumpkin::g1::affine_element; using GeneratorContext = crypto::GeneratorContext; - using ScalarField = typename Curve::ScalarField; - using BigScalarField = stdlib::bigfield; + + using BigScalarField = stdlib::bigfield; + using cycle_scalar = ::bb::stdlib::cycle_scalar; + using straus_lookup_table = ::bb::stdlib::straus_lookup_table; + using straus_scalar_slice = ::bb::stdlib::straus_scalar_slice; static constexpr size_t STANDARD_NUM_TABLE_BITS = 1; static constexpr size_t ULTRA_NUM_TABLE_BITS = 4; static constexpr bool IS_ULTRA = Builder::CIRCUIT_TYPE == CircuitType::ULTRA; static constexpr size_t TABLE_BITS = IS_ULTRA ? ULTRA_NUM_TABLE_BITS : STANDARD_NUM_TABLE_BITS; - static constexpr size_t NUM_BITS = ScalarField::modulus.get_msb() + 1; + static constexpr size_t NUM_BITS = bb::fq::modulus.get_msb() + 1; static constexpr size_t NUM_ROUNDS = (NUM_BITS + TABLE_BITS - 1) / TABLE_BITS; - inline static constexpr std::string_view OFFSET_GENERATOR_DOMAIN_SEPARATOR = "cycle_group_offset_generator"; + static constexpr std::string_view OFFSET_GENERATOR_DOMAIN_SEPARATOR = "cycle_group_offset_generator"; // Since the cycle_group base field is the circuit's native field, it can be stored using two public inputs. 
static constexpr size_t PUBLIC_INPUTS_SIZE = 2; - using cycle_scalar = ::bb::stdlib::cycle_scalar; - using straus_lookup_table = ::bb::stdlib::straus_lookup_table; - using straus_scalar_slice = ::bb::stdlib::straus_scalar_slice; - private: /** * @brief Stores temporary variables produced by internal multiplication algorithms @@ -75,7 +74,7 @@ template class cycle_group { public: cycle_group(Builder* _context = nullptr); cycle_group(field_t _x, field_t _y, bool_t _is_infinity); - cycle_group(const FF& _x, const FF& _y, bool _is_infinity); + cycle_group(const bb::fr& _x, const bb::fr& _y, bool _is_infinity); cycle_group(const AffineElement& _in); static cycle_group one(Builder* _context); static cycle_group from_witness(Builder* _context, const AffineElement& _in); @@ -93,8 +92,7 @@ template class cycle_group { cycle_group dbl(const std::optional hint = std::nullopt) const requires IsUltraArithmetic; cycle_group unconditional_add(const cycle_group& other, - const std::optional hint = std::nullopt) const - requires IsUltraArithmetic; + const std::optional hint = std::nullopt) const; cycle_group unconditional_subtract(const cycle_group& other, const std::optional hint = std::nullopt) const; cycle_group checked_unconditional_add(const cycle_group& other, @@ -118,7 +116,7 @@ template class cycle_group { } static cycle_group batch_mul(const std::vector& base_points, const std::vector& scalars, - GeneratorContext context = {}); + const GeneratorContext& context = {}); cycle_group operator*(const cycle_scalar& scalar) const; cycle_group& operator*=(const cycle_scalar& scalar); cycle_group operator*(const BigScalarField& scalar) const; @@ -232,6 +230,11 @@ template class cycle_group { std::span base_points, std::span offset_generators) requires IsUltraArithmetic; + + // Internal implementation for unconditional_add and unconditional_subtract + cycle_group _unconditional_add_or_subtract(const cycle_group& other, + bool is_addition, + const std::optional hint) const; }; 
template inline std::ostream& operator<<(std::ostream& os, cycle_group const& v) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 9999e14162f8..c4bcbf68a84f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -1091,4 +1091,34 @@ TYPED_TEST(CycleGroupTest, TestBatchMulIsConsistent) run_test(/*construct_witnesses=*/true); run_test(/*construct_witnesses=*/false); } + +/** + * @brief Temporary debugging test demonstrating that batch_mul with scalars of different bit lengths is not supported + * + */ +TYPED_TEST(CycleGroupTest, MixedLengthScalarsIsNotSupported) +{ + STDLIB_TYPE_ALIASES + Builder builder; + + // Create two points + std::vector points; + points.push_back(cycle_group_ct::from_witness(&builder, TestFixture::generators[0])); + points.push_back(cycle_group_ct::from_witness(&builder, TestFixture::generators[1])); + + // Create two scalars with DIFFERENT bit lengths + std::vector scalars; + + // First scalar: 256 bits + uint256_t scalar1_value = uint256_t(123456789); + scalars.push_back(cycle_group_ct::cycle_scalar::from_witness(&builder, typename Curve::ScalarField(scalar1_value))); + + // Second scalar: 128 bits + uint256_t scalar2_value = uint256_t(987654321); + scalars.push_back(cycle_group_ct::cycle_scalar::from_witness_bitstring(&builder, scalar2_value, 128)); + + // The different sized scalars results in different sized scalar slices arrays which is not handled in batch_mul + EXPECT_THROW_OR_ABORT(cycle_group_ct::batch_mul(points, scalars), + "Assertion failed: (scalar_slices[j].slices_native.size() == num_rounds == true)"); +} #pragma GCC diagnostic pop