Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions barretenberg/cpp/CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,27 @@ Key constants to watch:

If C++ static_asserts fail after your changes, update both the assert values AND the corresponding Noir constants, then run `yarn remake-constants`.

## Prover.toml Fixtures

Proof-length-affecting changes (e.g. `CHONK_PROOF_LENGTH` bumps from MegaFlavor entity additions) make the committed `Prover.toml` fixtures stale. `nargo execute --program-dir <crate>` then fails with `Type Array { length: N, typ: Field } is expected to have length N but value Vec(...)`.

Regenerate via the e2e prover full test with fake proofs:

```bash
cd yarn-project
AZTEC_GENERATE_TEST_DATA=1 FAKE_PROOFS=1 yarn workspace @aztec/end-to-end test full.test
```

`FAKE_PROOFS=1` skips real proving, so the test completes in ~2 minutes (it runs only the orchestrator and witness generation). It writes 12 `Prover.toml` files under `noir-projects/noir-protocol-circuits/crates/<circuit>/Prover.toml`.

For circuits not exercised by `full.test.ts` (`rollup-tx-merge`, `rollup-block-root`, `rollup-block-root-single-tx`, `rollup-block-merge`, `rollup-checkpoint-root`, `rollup-block-root-first-empty-tx`), additionally run:

```bash
AZTEC_GENERATE_TEST_DATA=1 yarn workspace @aztec/prover-client test orchestrator_single_checkpoint
```

Verify with `nargo execute --program-dir noir-projects/noir-protocol-circuits/crates/<crate>` for each previously-failing crate; a successful run prints `Circuit witness successfully solved`.

## Verification Keys

**IMPORTANT**: When making barretenberg changes that could affect verification keys, you must verify that VKs haven't changed unexpectedly, or
Expand Down
18 changes: 13 additions & 5 deletions barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -308,15 +308,23 @@ void add_scaled_batch(Polynomial<Fr>& dst,
parallel_for([&](const ThreadChunk& chunk) {
BB_BENCH_TRACY_NAME("add_scaled_batch/chunk");
auto chunk_indices = chunk.range(union_size, min_start);
if (chunk_indices.empty()) {
return;
}
auto chunk_start = chunk_indices.front();
auto chunk_end = chunk_indices.back();

for (size_t k = 0; k < sources.size(); ++k) {
const auto& src = sources[k];
const Fr c = scalars[k];
const Fr& c = scalars[k];
const size_t src_start = src.start_index;
const size_t src_end = src.end_index();
for (size_t i : chunk_indices) {
if (i >= src_start && i < src_end) {
dst.at(i) += c * src[i];
}

const size_t idx_start = std::max(chunk_start, src_start);
const size_t idx_end = std::min(chunk_end + 1, src_end);

for (size_t i = idx_start; i < idx_end; ++i) {
dst.at(i) += c * src[i];
}
}
});
Expand Down
7 changes: 4 additions & 3 deletions barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -186,9 +186,10 @@ template <typename Flavor> void OinkProver<Flavor>::commit_to_z_perm()
template <typename Flavor> void OinkProver<Flavor>::commit_to_masking_poly()
{
if constexpr (flavor_has_gemini_masking<Flavor>()) {
// Gemini masking poly only needs to cover the actual polynomial extent, not full dyadic size
const size_t polynomial_size = prover_instance->polynomials.max_end_index();
prover_instance->polynomials.gemini_masking_poly = Polynomial<FF>::random(polynomial_size);
// virtual_size = dyadic_size matches every other witness poly, so sumcheck's pairwise read
// past end_index lands in the virtual-zero region.
prover_instance->polynomials.gemini_masking_poly = Polynomial<FF>::random(
prover_instance->polynomials.max_end_index(), prover_instance->dyadic_size(), /*start_index=*/0);

// Commit to the masking polynomial and send to transcript
auto masking_commitment = commitment_key.commit(prover_instance->polynomials.gemini_masking_poly);
Expand Down
81 changes: 46 additions & 35 deletions barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -216,27 +216,49 @@ void AvmProver::execute_pcs_rounds()
return static_cast<size_t>(std::distance(polys.begin(), it));
};

auto add_scaled_batched =
[](Polynomial& dst, const std::span<Polynomial>& sources, const std::span<FF>& scalars, const size_t skip_idx) {
const size_t num_slots = bb::get_num_cpus();
std::vector<Polynomial> batched_polys(num_slots);
for (auto& poly : batched_polys) {
poly = Polynomial(dst.size(), dst.virtual_size(), dst.start_index());
}

// Chunks are consumed dynamically via an atomic counter: faster threads naturally pick up
// more chunks while the slot they write to stays fixed for the life of their outer task.
std::atomic<size_t> next_poly(0);

// Accumulate polynomials: each thread picks up the next available polynomial
parallel_for(num_slots, [&](size_t slot_id) {
while (true) {
const size_t poly_id = next_poly.fetch_add(1, std::memory_order_relaxed);
if (poly_id >= sources.size()) {
break;
}
if (poly_id == skip_idx) {
continue;
}

const size_t start_idx = sources[poly_id].start_index();
const size_t end_idx = sources[poly_id].end_index();
for (size_t idx = start_idx; idx < end_idx; idx++) {
batched_polys[slot_id].at(idx) += scalars[poly_id] * sources[poly_id][idx];
}
}
});

for (const auto& poly : batched_polys) {
dst += poly;
}
};

// Batch to be shifted polys in their to_be_shifted form
// Search for poly with largest end index to avoid allocating a zero polynomial of circuit size
size_t max_idx = index_of_max_end_index(shifted_polys);

Polynomial batched_shifted = std::move(shifted_polys[max_idx]);
batched_shifted *= shifted_challenges[max_idx];
{
// Fuse the remaining add_scaled dispatches into a single parallel_for to amortise startup cost.
std::vector<PolynomialSpan<const FF>> sources;
std::vector<FF> scalars;
sources.reserve(shifted_polys.size());
scalars.reserve(shifted_polys.size());
for (size_t idx = 0; idx < shifted_polys.size(); ++idx) {
if (idx != max_idx) {
sources.emplace_back(shifted_polys[idx]);
scalars.push_back(shifted_challenges[idx]);
}
}
add_scaled_batch(
batched_shifted, std::span<const PolynomialSpan<const FF>>(sources), std::span<const FF>(scalars));
}
add_scaled_batched(batched_shifted, shifted_polys, shifted_challenges, max_idx);

// Batch unshifted polys (to avoid allocating a zero polynomial of circuit size, we initialize the batched
// polynomial with the polynomial of the largest size)
Expand All @@ -245,25 +267,15 @@ void AvmProver::execute_pcs_rounds()
Polynomial batched_unshifted = std::move(unshifted_polys[max_idx]);
batched_unshifted *= unshifted_challenges[max_idx];
batched_unshifted += batched_shifted;
{
// Only operate in the range of not to be shifted polys, as the contribution for those has already been added.
std::vector<PolynomialSpan<const FF>> sources;
std::vector<FF> scalars;
sources.reserve(unshifted_polys.size());
scalars.reserve(unshifted_polys.size());
for (size_t idx = 0; idx < unshifted_polys.size(); ++idx) {
if (idx >= WIRES_TO_BE_SHIFTED_START_IDX && idx < WIRES_TO_BE_SHIFTED_END_IDX) {
continue;
}
if (idx == max_idx) {
continue;
}
sources.emplace_back(unshifted_polys[idx]);
scalars.push_back(unshifted_challenges[idx]);
}
add_scaled_batch(
batched_unshifted, std::span<const PolynomialSpan<const FF>>(sources), std::span<const FF>(scalars));
}
add_scaled_batched(batched_unshifted,
unshifted_polys.subspan(0, WIRES_TO_BE_SHIFTED_START_IDX),
unshifted_challenges.subspan(0, WIRES_TO_BE_SHIFTED_START_IDX),
max_idx);
add_scaled_batched(batched_unshifted,
unshifted_polys.subspan(WIRES_TO_BE_SHIFTED_END_IDX),
unshifted_challenges.subspan(WIRES_TO_BE_SHIFTED_END_IDX),
max_idx > WIRES_TO_BE_SHIFTED_END_IDX ? max_idx - WIRES_TO_BE_SHIFTED_END_IDX
: unshifted_polys.size());

const size_t circuit_dyadic_size = numeric::round_up_power_2(batched_unshifted.end_index());

Expand Down Expand Up @@ -308,5 +320,4 @@ HonkProof AvmProver::construct_proof()

return export_proof();
}

} // namespace bb::avm2
Loading