diff --git a/.claude/skills/release-network-docs/SKILL.md b/.claude/skills/release-network-docs/SKILL.md index 29eceb3ca4ba..0307e1c2f4d6 100644 --- a/.claude/skills/release-network-docs/SKILL.md +++ b/.claude/skills/release-network-docs/SKILL.md @@ -21,12 +21,14 @@ self-identify its release type, ask the user to confirm. **This skill does NOT:** -- Generate API docs (aztec-nr or TypeScript) +- Generate developer API docs (aztec-nr or TypeScript) - Generate CLI reference docs - Update developer version config or cut developer versioned docs - Update migration notes -- Require aztec CLI or nargo (yarn-project build is only needed for node API - reference regeneration in Step 5b, which can be skipped) +- Require aztec CLI, nargo, or yarn-project build + +**This skill DOES** regenerate the Node JSON-RPC API reference for the +versioned docs (see Step 5a). ## Usage @@ -82,18 +84,6 @@ git tag -l "v" - If the tag exists but is not checked out: `git checkout v` - **Abort if the tag doesn't exist** — the release hasn't been tagged yet. -#### Pre-release workflow - -If the user provides a target version that differs from the `nodeVersion` -returned by the RPC (e.g. the network is still running `4.1.3` but the user -wants to prepare docs for `4.2.0`), this is a **pre-release** docs preparation. -Ask the user to confirm the target version, then use that version instead of -`nodeVersion` throughout the remaining steps. The git tag for the target version -must still exist. Contract addresses from the RPC reflect the *current* network -state (the old version); they are still valid if the upgrade reuses the same -contracts, but ask the user to confirm whether any addresses will change at -upgrade time. - ### Step 3: Identify and Resolve Missing Contract Addresses The `networks.md` L1 table includes contracts that are **not** returned by @@ -105,12 +95,8 @@ The Rollup and Registry addresses are already known from the RPC response. Use them to query additional addresses on L1. 
Determine the L1 RPC from the chain ID: `1` → Ethereum mainnet, `11155111` → Sepolia. -First check whether the RPC response already includes `gseAddress` in -`l1ContractAddresses` — newer node versions return it directly. If present, -use it and skip the on-chain query for GSE. - ```bash -# GSE (Governance Staking Escrow) — from Rollup (skip if already in RPC response) +# GSE (Governance Staking Escrow) — from Rollup cast call "getGSE()(address)" --rpc-url # Slasher — from Rollup @@ -182,27 +168,29 @@ Ask the user if any content changes are needed in `docs/docs-operate/`: - Operator changelog updates (if not handled by `/updating-changelog`) If the user has content changes, apply them to the source files in -`docs/docs-operate/`. If no content changes are needed, skip to Step 5b. - -### Step 5b: Regenerate Node API Reference Docs +`docs/docs-operate/`. If no content changes are needed, skip to Step 5a. -Regenerate the Node JSON-RPC API reference documentation. This script parses the -TypeScript interface definitions and Zod schemas in `yarn-project/stdlib/src/interfaces/` -to produce a complete markdown reference for the `node_` and `nodeAdmin_` RPC methods. +### Step 5a: Regenerate Node API Reference -**Prerequisite:** `yarn-project` must be built (`cd yarn-project && yarn && yarn build`). -This is the only heavy build dependency for this skill and is only needed for this -step. If the node API has not changed since the last release, you can skip this step. +The Node JSON-RPC API reference is auto-generated from TypeScript source. It +must be regenerated from the release tag's source files to ensure the versioned +docs reflect the actual API at that release. ```bash cd docs yarn generate:node-api-reference ``` -This updates `docs/docs-operate/operators/reference/node-api-reference.md`. +This writes to `docs/docs-operate/operators/reference/node-api-reference.md` +using the source files from the currently checked-out tag. 
The generator parses +`yarn-project/stdlib/src/interfaces/aztec-node.ts` and +`yarn-project/stdlib/src/interfaces/aztec-node-admin.ts` directly (no +yarn-project build needed, but `yarn-project/node_modules/` must be installed +so `npx tsx` can resolve `typescript` — run `yarn install` from `yarn-project` +if needed). -The file is auto-generated — do not hand-edit it. When cutting network versioned -docs (Step 7), the generated content is included in the snapshot automatically. +Verify the output lists the expected number of methods and has no ungrouped +methods warnings. ### Step 6: Build and Validate @@ -318,11 +306,13 @@ Check for stash conflicts. Then report to the user: - **Some addresses are not in the RPC**: Contracts like Staking Registry, Reward Booster, Tally Slashing Proposer, and others must be queried on-chain, obtained from deployment output, or confirmed unchanged by the user. -- **Mostly lightweight**: This skill does not require aztec CLI or nargo. The - only heavy dependency is `yarn-project` build, needed only for regenerating - the node API reference (Step 5b) — skip that step if the node API hasn't - changed. Otherwise only `yarn` (for the docs build), `curl`/`jq` (for the - RPC query), and `cast` (for on-chain address queries) are needed. +- **No heavy prerequisites**: This skill does not require aztec CLI, nargo, or + a yarn-project build. Only `yarn` (for the docs build), `curl`/`jq` (for + the RPC query), and `cast` (for on-chain address queries) are needed. +- **Node API reference is auto-generated**: Run `yarn generate:node-api-reference` + (Step 5a) before building. The generator parses TypeScript source directly, so + no yarn-project build is required — but `yarn-project/node_modules/` must exist + (run `yarn install` from `yarn-project` if missing). - **Build must pass**: Do not cut versioned docs until `yarn build` succeeds. - **COMMIT_TAG needs `v` prefix**: The preprocessor uses COMMIT_TAG for GitHub URLs and git tag references. 
Omitting the `v` will break links in versioned diff --git a/.github/ci3_labels_to_env.sh b/.github/ci3_labels_to_env.sh index ac602e6a3767..476a4f201f05 100755 --- a/.github/ci3_labels_to_env.sh +++ b/.github/ci3_labels_to_env.sh @@ -37,6 +37,11 @@ function main { echo "NO_FAIL_FAST=1" >> $GITHUB_ENV fi + # Handle skip-compat-e2e label (escape hatch for backwards compat test failures on release PRs) + if has_label "ci-skip-compat-e2e"; then + echo "SKIP_COMPAT_E2E=1" >> $GITHUB_ENV + fi + # Determine CI mode based on event, labels, and target branch local ci_mode if [ "${GITHUB_EVENT_NAME:-}" == "merge_group" ] || has_label "ci-merge-queue"; then diff --git a/.github/workflows/aztec-cli-acceptance-test.yml b/.github/workflows/aztec-cli-acceptance-test.yml new file mode 100644 index 000000000000..85c607c888c5 --- /dev/null +++ b/.github/workflows/aztec-cli-acceptance-test.yml @@ -0,0 +1,66 @@ +# Validates that a published Aztec CLI release can be installed and used end-to-end. +# Runs after CI3 completes a tag release (via workflow_run), or manually via workflow_dispatch. +name: Aztec CLI Acceptance Test + +on: + workflow_dispatch: + inputs: + version: + description: "Version to install (e.g. 
latest, nightly, 4.3.0, 4.3.0-nightly.20260420)" + required: true + type: string + workflow_run: + workflows: ["CI3"] + types: + - completed + branches: + - "v*" + +jobs: + release-acceptance: + runs-on: ubuntu-latest + if: >- + github.event_name == 'workflow_dispatch' || + (github.event_name == 'workflow_run' + && github.event.workflow_run.conclusion == 'success' + && !contains(github.event.workflow_run.head_branch, '-commit.')) + timeout-minutes: 30 + env: + VERSION: ${{ github.event.inputs.version || github.event.workflow_run.head_branch }} + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.event.workflow_run.head_sha || github.sha }} + + # Node is only used to run the .ts harness in run-test.sh, which needs >=22.18 for TS + # type-stripping. The aztec CLI installer manages its own node version independently. + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Run Aztec CLI acceptance test + run: ./aztec-up/test/aztec-cli-acceptance-test/run-test.sh + + - name: Notify Slack on success + if: success() && github.event_name != 'workflow_dispatch' + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: | + export CI=1 + ./ci3/slack_notify "#team-fairies" \ + "Aztec CLI Acceptance Test passed for version ${VERSION} :white_check_mark:" + + - name: Notify Slack and dispatch ClaudeBox on failure + if: failure() && github.event_name != 'workflow_dispatch' + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + run: | + RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" + export CI=1 + ./ci3/slack_notify_with_claudebox_kickoff "#team-fairies" \ + "Aztec CLI Acceptance Test FAILED (version ${VERSION}): <${RUN_URL}|View Run>" \ + "Aztec CLI acceptance test failed for version ${VERSION}. CI run: ${RUN_URL}. Investigate the failure and explain the root cause." 
\ + --link "$RUN_URL" diff --git a/.github/workflows/ci3.yml b/.github/workflows/ci3.yml index 56d5c7004824..e924fcdd97a0 100644 --- a/.github/workflows/ci3.yml +++ b/.github/workflows/ci3.yml @@ -445,3 +445,108 @@ jobs: AWS_SHUTDOWN_TIME: 180 run: | ./.github/ci3.sh network-tests-kind + + # Backwards compatibility e2e tests. + # Runs e2e tests with contract artifacts from every prior stable release to validate + # that new client code works with old contract artifacts ("new pxe / old contracts"). + # Blocking for stable/RC releases: ci-release-publish requires this job to pass before + # publishing. Observational for nightlies: runs, but continue-on-error keeps the workflow + # green and ci-release-publish's condition publishes nightlies regardless of the result. + # Escape hatch: ci-skip-compat-e2e label makes failures non-blocking on release PRs. + ci-compat-e2e: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + needs: [ci] + if: | + always() + && (needs.ci.result == 'success' || needs.ci.result == 'skipped') + && github.event.pull_request.head.repo.fork != true + && github.event.pull_request.draft == false + && ( + (startsWith(github.ref, 'refs/tags/v') && !contains(github.ref_name, '-commit.')) + || contains(github.event.pull_request.labels.*.name, 'ci-compat-e2e') + || contains(github.event.pull_request.labels.*.name, 'ci-release-pr') + ) + # Non-blocking for nightlies and when ci-skip-compat-e2e escape hatch is applied. 
+ continue-on-error: ${{ contains(github.ref_name, '-nightly.') || contains(github.event.pull_request.labels.*.name, 'ci-skip-compat-e2e') }} + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + + - name: Configure AWS credentials (OIDC) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_OIDC_ROLE_ARN }} + aws-region: us-east-2 + role-session-name: ci3-compat-e2e-${{ github.run_id }} + role-duration-seconds: 21600 # 6h – covers AWS_SHUTDOWN_TIME (300 min) + 60 min buffer + + - name: Run Backwards Compatibility E2E Tests + timeout-minutes: 330 + env: + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + BUILD_INSTANCE_SSH_KEY: ${{ secrets.BUILD_INSTANCE_SSH_KEY }} + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + CI3_INSTANCE_PROFILE_NAME: ${{ secrets.CI3_INSTANCE_PROFILE_NAME }} + CI3_SECURITY_GROUP_ID: ${{ secrets.CI3_SECURITY_GROUP_ID }} + RUN_ID: ${{ github.run_id }} + AWS_SHUTDOWN_TIME: 300 + run: ./.github/ci3.sh compat-e2e + + # Publishes the release (npm, Docker, GitHub release, aztec-up scripts, etc.). + # Gated on ci-compat-e2e: a compat regression blocks stable/RC publishing. Nightlies + # publish regardless — compat-e2e runs there observationally. Dev `-commit.` tags from + # the ci-release-pr flow never reach this job (they are not real releases). 
+ ci-release-publish: + runs-on: ubuntu-latest + environment: master + permissions: + id-token: write + contents: read + needs: [ci, ci-compat-e2e] + if: | + startsWith(github.ref, 'refs/tags/v') + && !contains(github.ref_name, '-commit.') + && needs.ci.result == 'success' + && ( + contains(github.ref_name, '-nightly.') + || needs.ci-compat-e2e.result == 'success' + ) + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ github.sha }} + + - name: Configure AWS credentials (OIDC) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_OIDC_ROLE_ARN }} + aws-region: us-east-2 + role-session-name: ci3-release-publish-${{ github.run_id }} + role-duration-seconds: 21600 + + - name: Run Release Publish + env: + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + BUILD_INSTANCE_SSH_KEY: ${{ secrets.BUILD_INSTANCE_SSH_KEY }} + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + NPM_TOKEN: ${{ secrets.NPM_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} + NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} + NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + CI3_INSTANCE_PROFILE_NAME: ${{ secrets.CI3_INSTANCE_PROFILE_NAME }} + CI3_SECURITY_GROUP_ID: ${{ secrets.CI3_SECURITY_GROUP_ID }} + RUN_ID: ${{ github.run_id }} + run: ./.github/ci3.sh release-publish diff --git a/.github/workflows/docs-typesense.yml b/.github/workflows/docs-typesense.yml index 85fdf3c382e2..8cc3726256d8 100644 --- a/.github/workflows/docs-typesense.yml +++ b/.github/workflows/docs-typesense.yml @@ -27,11 +27,50 @@ jobs: fetch-depth: 0 - name: Reindex with Typesense docsearch-scraper + env: + # Fail the run if the scraper indexes fewer than this many records. 
+ # The docsearch-scraper container exits 0 even when its config is broken + # and the index ends up nearly empty, so this guard turns a silent + # regression (which happened with #22861 dropping the index from + # ~12k to 48 records) into a loud CI failure. + MIN_HITS: "5000" + TYPESENSE_API_KEY: ${{ secrets.TYPESENSE_API_KEY }} + TYPESENSE_HOST: ${{ secrets.TYPESENSE_HOST }} run: | + set -euo pipefail + docker run \ - -e "TYPESENSE_API_KEY=${{ secrets.TYPESENSE_API_KEY }}" \ - -e "TYPESENSE_HOST=${{ secrets.TYPESENSE_HOST }}" \ + -e "TYPESENSE_API_KEY=$TYPESENSE_API_KEY" \ + -e "TYPESENSE_HOST=$TYPESENSE_HOST" \ -e "TYPESENSE_PORT=443" \ -e "TYPESENSE_PROTOCOL=https" \ - -e "CONFIG=$(cat docs/typesense.config.json | jq -r tostring)" \ - typesense/docsearch-scraper:0.11.0 + -e "CONFIG=$(cat docs/typesense.config.json)" \ + typesense/docsearch-scraper:0.11.0 2>&1 | tee scraper.log + + nb_hits=$(grep -oE 'Nb hits: *[0-9]+' scraper.log | tail -1 | grep -oE '[0-9]+' || true) + if [ -z "$nb_hits" ]; then + echo "::error::Could not parse 'Nb hits' from scraper output, assuming index is broken." + exit 1 + fi + echo "Indexed $nb_hits records (threshold: $MIN_HITS)" + if [ "$nb_hits" -lt "$MIN_HITS" ]; then + echo "::error::Indexed only $nb_hits records (expected at least $MIN_HITS). Search index is likely broken." + exit 1 + fi + + # Log how many api-nr records are visible in the live index. The + # docusaurus theme always prepends `default` to its contextual + # docusaurus_tag filter, and no docusaurus page is stamped with + # `default` (each carries its plugin-context tag instead), so this + # facet count is effectively the count of indexed api-nr records. + # Informational only: the count varies with aztec-nr content size. 
+ api_hits=$(curl -fsS \ + "https://$TYPESENSE_HOST/collections/aztec-docs/documents/search" \ + -H "X-TYPESENSE-API-KEY: $TYPESENSE_API_KEY" \ + -G \ + --data-urlencode "q=*" \ + --data-urlencode "query_by=hierarchy.lvl0" \ + --data-urlencode "filter_by=docusaurus_tag:=[default]&&language:=en" \ + --data-urlencode "per_page=1" \ + | jq -r '.found') + echo "api-nr records visible under docusaurus_tag:=[default]: $api_hits" diff --git a/.github/workflows/full-dev-path.yml b/.github/workflows/full-dev-path.yml deleted file mode 100644 index 5eee9fa514a0..000000000000 --- a/.github/workflows/full-dev-path.yml +++ /dev/null @@ -1,49 +0,0 @@ -# Tests that the installed Aztec toolchain works end-to-end. -# Exercises the full developer onboarding path: aztec init -> compile -> test -> start -> codegen -> TS end-to-end test. -name: Full Dev Path Test - -on: - workflow_dispatch: - inputs: - version: - description: "Version to install (e.g. latest, nightly, 4.3.0, 4.3.0-nightly.20260420)" - required: true - type: string - push: - tags: - - "v*" - -jobs: - full-dev-path: - runs-on: ubuntu-latest - timeout-minutes: 30 - env: - VERSION: ${{ github.event.inputs.version || github.ref_name }} - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - - name: Run full dev path test - run: ./aztec-up/test/full-dev-path/run-test.sh - - - name: Notify Slack on success - if: success() && github.event_name != 'workflow_dispatch' - env: - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: | - export CI=1 - ./ci3/slack_notify "#team-fairies" \ - "Full Dev Path Test passed for version ${VERSION} :white_check_mark:" - - - name: Notify Slack and dispatch ClaudeBox on failure - if: failure() && github.event_name != 'workflow_dispatch' - env: - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} - run: | - RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" - export 
CI=1 - ./ci3/slack_notify_with_claudebox_kickoff "#team-fairies" \ - "Full Dev Path Test FAILED (version ${VERSION}): <${RUN_URL}|View Run>" \ - "Full dev path test failed for version ${VERSION}. CI run: ${RUN_URL}. Investigate the failure and explain the root cause." \ - --link "$RUN_URL" diff --git a/.github/workflows/metrics-deploy.yml b/.github/workflows/metrics-deploy.yml index ae99d66aa728..b8774a1a7e96 100644 --- a/.github/workflows/metrics-deploy.yml +++ b/.github/workflows/metrics-deploy.yml @@ -37,6 +37,11 @@ on: required: true type: string default: "grafana-dashboard-password" + slack_alert_mention_user_ids: + description: Optional Terraform list of Slack user IDs to mention on Grafana alert notifications + required: false + type: string + default: '["U0AHB6VR8N5"]' secrets: GCP_SA_KEY: required: true @@ -70,6 +75,10 @@ on: description: The name of the secret which holds the Grafana dashboard password required: true default: "grafana-dashboard-password" + slack_alert_mention_user_ids: + description: Optional Terraform list of Slack user IDs to mention on Grafana alert notifications + required: false + default: '["U0AHB6VR8N5"]' jobs: metrics_deployment: @@ -96,6 +105,7 @@ jobs: SLACK_WEBHOOK_NEXT_NET_SECRET_NAME: slack-webhook-next-net-url SLACK_WEBHOOK_TESTNET_SECRET_NAME: slack-webhook-testnet-url SLACK_WEBHOOK_MAINNET_SECRET_NAME: slack-webhook-mainnet-url + TF_VAR_SLACK_ALERT_MENTION_USER_IDS: ${{ inputs.slack_alert_mention_user_ids }} steps: - name: Checkout code @@ -129,10 +139,6 @@ jobs: echo "Terraform state bucket already exists" fi - - name: Import Dashboard - working-directory: ./spartan/metrics - run: ./copy-dashboard.sh - - name: Setup Terraform uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 with: diff --git a/.github/workflows/pull-noir.yml b/.github/workflows/pull-noir.yml index 2209aff23489..9a7f811b1e7b 100644 --- a/.github/workflows/pull-noir.yml +++ b/.github/workflows/pull-noir.yml @@ -68,7 +68,13 @@ jobs: cd 
../.. # Update Cargo.lock if needed, but don't fail if transpiler no longer builds cargo check --manifest-path avm-transpiler/Cargo.toml || true - git add noir/noir-repo avm-transpiler/Cargo.lock + # Update yarn.lock to pick up any changes to noir's JS packages (versions + # or the file: hash of noir_js). --mode=update-lockfile skips linking and + # build scripts. Don't fail the workflow if yarn errors out -- a partial + # update is still useful for the resulting PR. + corepack enable + (cd yarn-project && yarn install --mode=update-lockfile) || true + git add noir/noir-repo avm-transpiler/Cargo.lock yarn-project/yarn.lock - name: Check for existing PR if: steps.noir_versions.outputs.update_needed == 'true' diff --git a/.test_patterns.yml b/.test_patterns.yml index 5a334cd586c6..6b1cc561f658 100644 --- a/.test_patterns.yml +++ b/.test_patterns.yml @@ -60,9 +60,6 @@ tests: error_regex: "Aborted.*core dumped" owners: - *adam - - regex: "barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/chonk build-wasm-threads/bin/chonk_bench" - owners: - - *luke # noir # Something to do with how I run the tests now. Think these are fine in nextest. @@ -146,6 +143,11 @@ tests: - *palla # yarn-project tests + # Attempt to catch all kv-store browser test failures (consider them quarantined for now) + - regex: "yarn-project/kv-store" + error_regex: "vitest" + owners: + - *martin - regex: "yarn-project/kv-store" error_regex: "Could not import your test module" owners: @@ -153,7 +155,7 @@ tests: - regex: "yarn-project/kv-store" error_regex: "timeout: sending signal TERM to command" owners: - - *alex + - *martin - regex: "yarn-project/kv-store" error_regex: "Failed to fetch dynamically imported module" owners: @@ -183,6 +185,14 @@ tests: - *phil - *palla + # http://ci.aztec-labs.com/64a972aafaa40dd0 + # ProvingBroker › Retries › does not retry if job is stale — kv-store closes + # before the broker's final reportProvingJobError write lands. 
+ - regex: "prover-client/src/proving_broker/proving_broker.test.ts" + error_regex: "does not retry if job is stale|Store is closed" + owners: + - *alex + # Nightly GKE tests - regex: "spartan/bootstrap.sh" owners: @@ -284,11 +294,6 @@ tests: owners: - *palla - - regex: "bb-micro-bench/wasm/chonk build-wasm-threads/bin/chonk_bench" - error_regex: "core dumped" - owners: - - *adam - - regex: "src/e2e_token_bridge_tutorial.test.ts" error_regex: "Error: Unable to find low leaf for block" owners: diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 000000000000..7c665258297b --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,54 @@ +This repo uses CLAUDE.md files. When working in a module, look for the CLAUDE.md. *Immediately read the root ./CLAUDE.md here*. + +## Specialist subagents + +Domain-expert references under `.claude/agents/`: + +- `analyze-logs` — `.claude/agents/analyze-logs.md` — Deep-read test logs and extract relevant info; runs in a separate context to avoid polluting the main conversation. +- `aztec-wallet` — `.claude/agents/aztec-wallet.md` — Execute cli-wallet commands against live Aztec networks (account setup, contract deployment, function calls, fee juice bridging). +- `identify-ci-failures` — `.claude/agents/identify-ci-failures.md` — Identify CI failures from a PR number, CI URL, or log hash; returns a structured list of failures with downloaded log paths. +- `network-logs` — `.claude/agents/network-logs.md` — Query GCP Cloud Logging for live Aztec network deployments (block production, proving status, errors). + +## Skills + +Task-specific workflows. When the user asks for one of these, read the matching `SKILL.md` (or `*.md` under `commands/`) and follow its instructions. + +Repo-wide (`.claude/skills//SKILL.md`): + +- `acir-formal-proofs` — Build/run ACIR formal proof tests with SMT verification; updates the README results table. 
+- `adding-benchmarks` — Add new benchmarks to the CI pipeline (JSON files, `bootstrap.sh` wiring, `ci3.yml` upload). +- `aztec-wallet` — Run `cli-wallet` against a live Aztec network: deploy contracts, send transactions, query state, bridge funds, manage accounts. +- `backport` — Backport a merged PR to a release branch (e.g. `v4`, `v4-devnet-2`), resolving conflicts if needed. +- `ci-logs` — Analyze CI logs from `ci.aztec-labs.com`. Use instead of `WebFetch` for CI URLs. +- `cycle` — Show Linear issues for the current cycle, grouped by status. +- `fix` — Analyze Linear issues, validate them against the codebase, and open draft fix PRs. +- `merge-train-infra` — Reference for merge-train automation internals (workflows, scripts, CI integration). +- `merge-trains` — Guide for working with merge-train branches: PR creation, base branch choice, labels, failure handling. +- `network-logs` — Query and analyze logs from live Aztec network deployments on GCP Cloud Logging. +- `noir-sync-update` — Follow-on updates after bumping the `noir/noir-repo` submodule (`Cargo.lock`, `yarn.lock`, etc.). +- `release-docs` — Build and update the developer documentation site for a new release. +- `release-network-docs` — Update network/operator documentation for a mainnet/testnet release without touching developer docs. +- `update-doc-references` — Update documentation when source files it references change in a PR. +- `updating-changelog` — Update changelog/migration notes for contract developers and node operators based on branch changes. + +Barretenberg-scoped (`barretenberg/.claude/skills//SKILL.md`): + +- `benchmark-chonk` — Run realistic Chonk (client IVC) benchmarks; native + WASM, per-circuit breakdowns, `BB_BENCH` instrumentation. +- `profile-chonk` — Run Chonk prover on the remote EC2 and collect Perfetto-compatible traces with a one-click UI link. +- `remote-bench` — Run benchmarks on the dedicated remote EC2 benchmarking machine (noise-free, single-run). 
+- `stdlib-point-at-infinity` — Guidelines for handling point-at-infinity in stdlib circuit types (serialization, public inputs, `cycle_group`/`biggroup`). +- `sumcheck` — Comprehensive reference for the Sumcheck protocol implementation in barretenberg (prover/verifier, relations, ZK sumcheck, ECCVM committed sumcheck). + +Yarn-project-scoped (`yarn-project/.claude/skills//SKILL.md`): + +- `debug-e2e` — Interactive ping-pong debugging for failed e2e tests; delegates log reading to subagents. +- `fix-pr` — Autonomous workflow that fixes a failing PR by analyzing CI logs, rebasing, fixing, and pushing. +- `read-gist` — Fetch and display a GitHub gist. +- `readme-writer` — Guidelines for writing module READMEs that explain how a module works. +- `rebase-pr` — Rebase a PR on its base branch, fix conflicts, and verify the build. +- `unit-test-implementation` — Best practices for unit tests in the TS monorepo (mocking, organization, helpers, assertions). +- `worktree-spawn` — Spawn an independent Claude instance in a git worktree to work on a task in parallel. + +Docs slash-commands (`docs/.claude/commands/.md`): + +- `review-docs` — Review documentation for correctness, accuracy, and adherence to conventions. diff --git a/SECURITY.md b/SECURITY.md index 5dc0da97c908..1a10d07e7ece 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -6,9 +6,11 @@ We welcome external submissions in the meantime. To submit a vulnerability, plea ## Reporting Security Vulnerabilities -- **Do not** open public GitHub issues or pull requests for suspected security vulnerabilities. +Please use [Aztec Network Bug Bounty](https://cantina.xyz/bounties/80e74370-10d8-4e52-8e4b-7294deb7c9ee) to submit vulnerabilities. If the vulnerability is not in scope of the bug bounty program, please use the following procedure. -Instead, please use the [Private Vulnerability Reporting](https://github.com/AztecProtocol/aztec-packages/security/advisories/new) process on GitHub. 
+**Do not** open public GitHub issues or pull requests for suspected security vulnerabilities. + +Instead, please use the [Private Vulnerability Reporting](https://github.com/AztecProtocol/aztec-packages/security/advisories/new) process on GitHub. - Navigate to the "Security" tab of this repository. - Click "Report a vulnerability" on the left sidebar. diff --git a/avm-transpiler/Cargo.lock b/avm-transpiler/Cargo.lock index 0766dd7b08a0..a99f3f100e89 100644 --- a/avm-transpiler/Cargo.lock +++ b/avm-transpiler/Cargo.lock @@ -790,9 +790,9 @@ checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "ecdsa" -version = "0.17.0-rc.16" +version = "0.17.0-rc.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91bbdd377139884fafcad8dc43a760a3e1e681aa26db910257fa6535b70e1829" +checksum = "54fb064faabbee66e1fc8e5c5a9458d4269dc2d8b638fe86a425adb2510d1a96" dependencies = [ "der", "digest 0.11.2", @@ -823,9 +823,9 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "elliptic-curve" -version = "0.14.0-rc.29" +version = "0.14.0-rc.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e84043d573efd4ac9d2d125817979a379204bf7e328b25a4a30487e8d100e618" +checksum = "cda94f31325c4275e9706adecbb6f0650dee2f904c915a98e3d81adaaaa757aa" dependencies = [ "base16ct", "crypto-bigint", @@ -1177,9 +1177,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.14.0-rc.8" +version = "0.14.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2c6c227649d5ec80eaae541f1736232641a0bcdb3062a52b34edb42054158" +checksum = "1b382cbfd43caf55991a93850ce538aa1aa67bb264af367d22dfe7937c4e997d" dependencies = [ "cpubits", "ecdsa", @@ -1435,9 +1435,9 @@ checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "p256" -version = "0.14.0-rc.8" +version = "0.14.0-rc.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f0a10fe314869359cb2901342b045f4e5a962ef9febc006f03d2a8c848fe4c" +checksum = "8b97e3bf0465157ae90975ff52dbeb1362ba618924878c9f74c25baa27a65f9a" dependencies = [ "ecdsa", "elliptic-curve", @@ -1481,9 +1481,9 @@ checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pkcs8" -version = "0.11.0-rc.11" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12922b6296c06eb741b02d7b5161e3aaa22864af38dfa025a1a3ba3f68c84577" +checksum = "451913da69c775a56034ea8d9003d27ee8948e12443eae7c038ba100a4f21cb7" dependencies = [ "der", "spki", @@ -1531,9 +1531,9 @@ dependencies = [ [[package]] name = "primefield" -version = "0.14.0-rc.8" +version = "0.14.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6543f5eec854fbf74ba5ef651fbdc9408919b47c3e1526623687135c16d12e9" +checksum = "1b52e6ee42db392378a95622b463c9740631171d1efce43fa445a569c1600cb6" dependencies = [ "crypto-bigint", "crypto-common 0.2.1", @@ -1545,9 +1545,9 @@ dependencies = [ [[package]] name = "primeorder" -version = "0.14.0-rc.8" +version = "0.14.0-rc.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "569d9ad6ef822bb0322c7e7d84e5e286244050bd5246cac4c013535ae91c2c90" +checksum = "0556580e42c19833f5d232aca11a7687a503ee41f937b54f5ae1d50fc2a6a36a" dependencies = [ "elliptic-curve", ] @@ -1744,9 +1744,9 @@ checksum = "781442f29170c5c93b7185ad559492601acdc71d5bb0706f5868094f45cfcd08" [[package]] name = "rustcrypto-ff" -version = "0.14.0-rc.0" +version = "0.14.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5db129183b2c139d7d87d08be57cba626c715789db17aec65c8866bfd767d1f" +checksum = "fd2a8adb347447693cd2ba0d218c4b66c62da9b0a5672b17b981e4291ec65ff6" dependencies = [ "rand_core 0.10.0", "subtle", @@ -1754,9 +1754,9 @@ dependencies = [ [[package]] name = "rustcrypto-group" 
-version = "0.14.0-rc.0" +version = "0.14.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c4b1463f274a3ff6fb2f44da43e576cb9424367bd96f185ead87b52fe00523" +checksum = "369f9b61aa45933c062c9f6b5c3c50ab710687eca83dd3802653b140b43f85ed" dependencies = [ "rand_core 0.10.0", "rustcrypto-ff", @@ -1978,9 +1978,9 @@ dependencies = [ [[package]] name = "spki" -version = "0.8.0-rc.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8baeff88f34ed0691978ec34440140e1572b68c7dd4a495fd14a3dc1944daa80" +checksum = "1d9efca8738c78ee9484207732f728b1ef517bbb1833d6fc0879ca898a522f6f" dependencies = [ "base64ct", "der", diff --git a/aztec-up/README.md b/aztec-up/README.md index e2f3ec2edbd8..5548356922b5 100644 --- a/aztec-up/README.md +++ b/aztec-up/README.md @@ -9,9 +9,14 @@ That is all. This will install into `~/.aztec/bin` a collection of scripts to help with running aztec containers, and will update the user's `PATH` variable in their shell startup script so they can be found. -- `aztec` - a collection of tools to compile and test contracts, to launch subsystems and interact with the aztec network." -- `aztec-up` - a tool to install and manage aztec toolchain versions." -- `aztec-wallet` - our minimalistic CLI wallet" +- `aztec` - compiles and tests contracts, launches infrastructure subsystems, interacts with the network. +- `aztec-up` - a version manager for the Aztec toolchain. +- `aztec-wallet` - a tool for interacting with the Aztec network. +- `aztec-bb` - the Barretenberg proving backend. +- `aztec-nargo` - the Noir compiler and simulator. +- `aztec-forge`, `aztec-cast`, `aztec-anvil`, `aztec-chisel` - the bundled Foundry tools. + +Foundry, Noir, and Barretenberg are bundled at the versions `aztec` needs. Your own `forge` / `nargo` / `bb` installs still work under their bare names. After installed, you can use `aztec-up` to install specific versions. 
diff --git a/aztec-up/bin/0.0.1/aztec-install b/aztec-up/bin/0.0.1/aztec-install index 6d0321d49b71..1a5faaa4355e 100755 --- a/aztec-up/bin/0.0.1/aztec-install +++ b/aztec-up/bin/0.0.1/aztec-install @@ -94,12 +94,14 @@ function title { echo -e "Installing version: ${bold}${o}$VERSION${r}" echo echo -e "This script installs the following and updates your PATH if necessary:" - echo -e " ${bold}${g}nargo${r} - the noir programming language compiler and simulator." - echo -e " ${bold}${g}noir-profiler${r} - a profiler for analyzing and visualizing noir programs." - echo -e " ${bold}${g}bb${r} - the barretenberg proving backend." echo -e " ${bold}${g}aztec${r} - compiles and tests contracts, interacts with the network." echo -e " ${bold}${g}aztec-up${r} - installs and manages aztec toolchain versions." echo -e " ${bold}${g}aztec-wallet${r} - a minimalistic wallet cli." + echo -e " ${bold}${g}aztec-bb${r} - the barretenberg proving backend." + echo -e " ${bold}${g}aztec-nargo${r} - the noir compiler and simulator." + echo -e " ${bold}${g}aztec-forge${r}, ${bold}${g}aztec-cast${r}, ${bold}${g}aztec-anvil${r}, ${bold}${g}aztec-chisel${r} - the bundled foundry tools." + echo + echo -e "Foundry, Noir, and Barretenberg are bundled at the versions ${bold}${g}aztec${r} needs. Your own ${bold}${g}forge${r} / ${bold}${g}nargo${r} / ${bold}${g}bb${r} installs still work under their bare names." echo read -p "Do you wish to continue? (Y/n) " -n 1 -r echo diff --git a/aztec-up/bin/0.0.1/install b/aztec-up/bin/0.0.1/install index 9d5b3f500871..6f3dfadfedf9 100755 --- a/aztec-up/bin/0.0.1/install +++ b/aztec-up/bin/0.0.1/install @@ -37,6 +37,12 @@ export FORCE_COLOR=1 # Version-specific paths version_path="$AZTEC_HOME/versions/$VERSION" version_bin_path="$version_path/bin" +# Native binaries (forge/nargo/anvil/...) live here and are NOT on the user's PATH. 
+# The aztec wrapper prepends this directory to its child PATH so subprocesses +# spawned from the aztec workflow (which call sibling tools by basename) resolve +# to the bundled binaries. User-facing access is only via the aztec-* symlinks +# in version_bin_path. +version_internal_bin_path="$version_path/internal-bin" os="$(uname -s)" @@ -137,7 +143,15 @@ function install_noir { set -euo pipefail if [[ -n ${NARGO:-} ]]; then - cp "$NARGO" "$version_bin_path/nargo" + cp "$NARGO" "$version_internal_bin_path/nargo" + # noir's release build co-locates `noir-profiler` next to `nargo` + # (see noir/bootstrap.sh::build_native). Copy it too so the + # `aztec-noir-profiler` symlink in install_native_symlinks resolves. + local nargo_dir + nargo_dir=$(dirname "$NARGO") + if [ -f "$nargo_dir/noir-profiler" ]; then + cp "$nargo_dir/noir-profiler" "$version_internal_bin_path/noir-profiler" + fi else local noir_version noir_version=$(get_version "noir") @@ -150,13 +164,12 @@ function install_noir { # Always install/update noirup curl -Ls https://raw.githubusercontent.com/noir-lang/noirup/refs/heads/main/install | bash - # Install noir to temp location and move to version directory + # Install noir to temp location and move into internal-bin (off PATH). 
NARGO_HOME="$temp_nargo_home" "$HOME/.nargo/bin/noirup" -v "$noir_version" - # Move the nargo binary to our version bin directory - mv "$temp_nargo_home/bin/nargo" "$version_bin_path/nargo" + mv "$temp_nargo_home/bin/nargo" "$version_internal_bin_path/nargo" # Also move noir-profiler, needed by `aztec profile flamegraph` - mv "$temp_nargo_home/bin/noir-profiler" "$version_bin_path/noir-profiler" + mv "$temp_nargo_home/bin/noir-profiler" "$version_internal_bin_path/noir-profiler" # Cleanup temp directory rm -rf "$temp_nargo_home" @@ -173,16 +186,20 @@ function install_foundry { temp_foundry_dir=$(mktemp -d) || { echo "Error: Failed to create temp directory" >&2; exit 1; } mkdir -p "$temp_foundry_dir/bin" - # Always install/update foundryup - curl --max-time 30 -L https://foundry.paradigm.xyz | bash + # The foundry installer defaults to $XDG_CONFIG_HOME/.foundry when that var is set, which + # puts foundryup somewhere we don't expect. We'll pin FOUNDRY_DIR so we control the install + # location regardless of the caller's environment. + local foundry_home="$HOME/.foundry" + FOUNDRY_DIR="$foundry_home" curl --max-time 30 -L https://foundry.paradigm.xyz | FOUNDRY_DIR="$foundry_home" bash # Install foundry to temp location and move to version directory - FOUNDRY_DIR="$temp_foundry_dir" timeout 30 "$HOME/.foundry/bin/foundryup" -i "$foundry_version" + FOUNDRY_DIR="$temp_foundry_dir" timeout 30 "$foundry_home/bin/foundryup" -i "$foundry_version" - # Move the foundry binaries to our version bin directory + # Move the foundry binaries into internal-bin (off PATH); aztec-forge etc. + # symlinks in version_bin_path expose them under aztec-prefixed names. 
for binary in forge cast anvil chisel; do if [ -f "$temp_foundry_dir/bin/$binary" ]; then - mv "$temp_foundry_dir/bin/$binary" "$version_bin_path/$binary" + mv "$temp_foundry_dir/bin/$binary" "$version_internal_bin_path/$binary" fi done @@ -197,32 +214,88 @@ function install_aztec_packages { npm install @aztec/aztec@$VERSION @aztec/cli-wallet@$VERSION @aztec/bb.js@$VERSION --prefix "$version_path" } -function symlink_aztec_bins { +# Expose every native tool in internal-bin under an aztec- symlink in bin. +# Bare `forge`/`nargo`/etc. are intentionally NOT on PATH so they cannot shadow +# user-installed tools. +function install_native_symlinks { + set -euo pipefail + local tool + for tool in forge cast anvil chisel nargo noir-profiler; do + if [ ! -e "$version_internal_bin_path/$tool" ]; then + echo "Error: expected bundled binary '$tool' missing from $version_internal_bin_path" >&2 + exit 1 + fi + ln -sfn "../internal-bin/$tool" "$version_bin_path/aztec-$tool" + done +} + +# Npm bins are NOT mirrored into internal-bin. The contract for any TS tool +# that needs to spawn a sibling binary is to resolve an absolute bundled +# path itself (e.g. via @aztec/bb.js findBbBinary(), or a BB-style env +# var) -- never by basename + PATH lookup. If you find yourself wanting to +# add an npm bin to internal-bin so a child process can find it on PATH, +# fix the caller instead of broadening this list. +function install_npm_bin_symlinks { set -euo pipefail - # Populate version_bin_path with symlinks to @aztec-owned bins only. Adding - # node_modules/.bin wholesale to PATH would shadow user-installed tools - # (jest, tsc, semver, ...) with Aztec's transitive dependencies. 
local npm_bin_dir="$version_path/node_modules/.bin" [ -d "$npm_bin_dir" ] || return 0 - local bin_link bin_name target + local bin_link bin_name target link_name for bin_link in "$npm_bin_dir"/*; do [ -L "$bin_link" ] || continue target=$(readlink "$bin_link") # npm writes relative symlinks like ../@aztec/aztec/... for scoped packages. [[ "$target" == ../@aztec/* ]] || continue bin_name=$(basename "$bin_link") - if [ -e "$version_bin_path/$bin_name" ] && [ ! -L "$version_bin_path/$bin_name" ]; then - echo_yellow "refusing to overwrite non-symlink $bin_name; @aztec package bin collides with a hand-installed toolchain binary" >&2 + + # `aztec` is a wrapper, written by install_aztec_wrapper. + [[ "$bin_name" == "aztec" ]] && continue + + # Already prefixed (e.g. aztec-wallet) keeps its name; everything else gets + # renamed to aztec- so it cannot shadow a user-installed binary. + if [[ "$bin_name" == aztec-* ]]; then + link_name="$bin_name" + else + link_name="aztec-$bin_name" + fi + + if [ -e "$version_bin_path/$link_name" ] && [ ! -L "$version_bin_path/$link_name" ]; then + echo_yellow "refusing to overwrite non-symlink $link_name; @aztec package bin collides with a hand-installed toolchain binary" >&2 exit 1 fi - ln -sfn "../node_modules/.bin/$bin_name" "$version_bin_path/$bin_name" + ln -sfn "../node_modules/.bin/$bin_name" "$version_bin_path/$link_name" done } +# Wrapper for `aztec`: prepends internal-bin to PATH so the aztec workflow +# resolves bundled native tools by basename. yarn-project/aztec/scripts/aztec.sh +# (the npm bin entry of @aztec/aztec) calls bare `nargo` (line 36, in `aztec +# test`) and bare `anvil` (line 55-56, in `aztec start --local-network`); the +# TS commands it spawns also fall back to bare `forge` / `nargo` / +# `noir-profiler` when their respective env vars are unset. The wrapper is +# kept minimal -- it is the only place we maintain a PATH-based execution +# context, by design (see install_npm_bin_symlinks for the rationale). 
+# BASH_SOURCE[0] (not $0) so PATH-name invocation still resolves internal-bin +# from the wrapper's actual location. +function install_aztec_wrapper { + set -euo pipefail + local wrapper="$version_bin_path/aztec" + cat > "$wrapper" <<'EOF' +#!/usr/bin/env bash +# Auto-generated by aztec-up. Do not edit. +set -euo pipefail +self_dir="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +internal_bin="$self_dir/../internal-bin" +export PATH="$internal_bin:$PATH" +exec "$self_dir/../node_modules/.bin/aztec" "$@" +EOF + chmod +x "$wrapper" +} + function main { - # Create version directory + # Create version directories. bin/ is exposed on PATH; internal-bin/ is not. mkdir -p "$version_bin_path" + mkdir -p "$version_internal_bin_path" # Download versions manifest echo -n "Installing version manifest... " @@ -247,14 +320,23 @@ function main { dump_fail retry install_foundry echo_green "done." + # Expose native tools as aztec-forge / aztec-nargo / etc. + echo -n "Linking aztec native tools... " + dump_fail install_native_symlinks + echo_green "done." + # Install aztec npm packages - echo -n "Installing aztec packages... " + echo -n "Installing aztec npm packages... " dump_fail retry install_aztec_packages echo_green "done." - # Expose only @aztec-owned bins on PATH (drops transitive npm deps). + # Expose @aztec-owned npm bins under aztec-prefixed symlinks. Bare names + # (bb, pxe, txe, ...) are intentionally NOT exposed as they would shadow + # user-installed tools. echo -n "Making aztec commands available... " - dump_fail retry symlink_aztec_bins + dump_fail install_npm_bin_symlinks + # Wrap `aztec` so its subprocesses see internal-bin on PATH. + dump_fail install_aztec_wrapper echo_green "done." 
} diff --git a/aztec-up/bootstrap.sh b/aztec-up/bootstrap.sh index c9bb4139611b..dad8802868d8 100755 --- a/aztec-up/bootstrap.sh +++ b/aztec-up/bootstrap.sh @@ -106,7 +106,7 @@ EOF } function test_cmds { - for test in amm_flow bridge_and_claim basic_install counter_contract default_scaffold; do + for test in amm_flow bridge_and_claim basic_install counter_contract default_scaffold no_shadow_user_bins; do echo "$hash:TIMEOUT=15m aztec-up/scripts/run_test.sh $test" done } @@ -236,8 +236,8 @@ function install_on_mac_vm { . "\$HOME/.nvm/nvm.sh" # Verify installation. - nargo --version - bb --version + aztec-nargo --version + aztec-bb --version aztec --version REMOTE_EOF } diff --git a/aztec-up/scripts/run_isolated_test.sh b/aztec-up/scripts/run_isolated_test.sh index 970724ec1871..b6615a7aac43 100755 --- a/aztec-up/scripts/run_isolated_test.sh +++ b/aztec-up/scripts/run_isolated_test.sh @@ -98,9 +98,9 @@ EOF echo "Version information:" bash -i -c -e ' - forge --version + aztec-forge --version echo - nargo --version + aztec-nargo --version echo echo -n "aztec version: " aztec --version diff --git a/aztec-up/test/full-dev-path/README.md b/aztec-up/test/aztec-cli-acceptance-test/README.md similarity index 68% rename from aztec-up/test/full-dev-path/README.md rename to aztec-up/test/aztec-cli-acceptance-test/README.md index fa3ef2896770..f37c307d6418 100644 --- a/aztec-up/test/full-dev-path/README.md +++ b/aztec-up/test/aztec-cli-acceptance-test/README.md @@ -1,6 +1,6 @@ -# Full Dev Path Test +# Aztec CLI Acceptance Test -Tests that the installed Aztec toolchain works end-to-end. Exercises the complete developer onboarding path: +Tests that the installed Aztec CLI toolchain works end-to-end. Exercises the complete developer onboarding path: 1. `aztec init` - scaffold a new workspace with a Counter contract and test crate 2. `aztec compile` - compile the scaffolded contract @@ -30,6 +30,6 @@ VERSION=4.3.0 ./run-test.sh ## Architecture -- **`run-test.sh`** - Bash launcher. 
Runs the aztec installer (unless skipped), sets up PATH, then `exec node full-dev-path.ts`. -- **`full-dev-path.ts`** - Orchestrator. Runs each CLI step against the installed toolchain and, after codegen, copies `counter.test.ts` into the scaffolded workspace and spawns `node --test` on it. Each phase is wrapped in `step(name, fn)` so failures clearly identify which step broke. Always emits a machine-readable result line for CI/Slack integration: `TEST_RESULT=pass version=...` on success, or `TEST_RESULT=fail step=... version=... error="..."` on failure (with a full banner printed above it). +- **`run-test.sh`** - Bash launcher. Runs the aztec installer (unless skipped), sets up PATH, then `exec node aztec-cli-acceptance-test.ts`. +- **`aztec-cli-acceptance-test.ts`** - Orchestrator. Runs each CLI step against the installed toolchain and, after codegen, copies `counter.test.ts` into the scaffolded workspace and spawns `node --test` on it. Each phase is wrapped in `step(name, fn)` so failures clearly identify which step broke. Always emits a machine-readable result line for CI/Slack integration: `TEST_RESULT=pass version=...` on success, or `TEST_RESULT=fail step=... version=... error="..."` on failure (with a full banner printed above it). - **`counter.test.ts`** - The `node:test` suite that drives the deployed Counter end-to-end through the codegen'd bindings. Lives here as a template; copied into the workspace at test time so it can statically `import { CounterContract } from './artifacts/Counter.js'` with real codegen types and resolve `@aztec/*` via the workspace's `node_modules` symlink to the install. 
diff --git a/aztec-up/test/full-dev-path/full-dev-path.ts b/aztec-up/test/aztec-cli-acceptance-test/aztec-cli-acceptance-test.ts similarity index 60% rename from aztec-up/test/full-dev-path/full-dev-path.ts rename to aztec-up/test/aztec-cli-acceptance-test/aztec-cli-acceptance-test.ts index 93c7eaa8fb0c..0b59db12d893 100644 --- a/aztec-up/test/full-dev-path/full-dev-path.ts +++ b/aztec-up/test/aztec-cli-acceptance-test/aztec-cli-acceptance-test.ts @@ -13,7 +13,7 @@ // Every phase is wrapped in step(name, ...). The script always emits a `TEST_RESULT=pass|fail ...` line for CI // parsing; on failure it also prints a banner identifying the step that failed. -import { execFileSync, spawn } from 'node:child_process'; +import { execFileSync, spawn } from "node:child_process"; import { closeSync, copyFileSync, @@ -25,35 +25,39 @@ import { readFileSync, rmSync, symlinkSync, -} from 'node:fs'; -import { tmpdir } from 'node:os'; -import { dirname, join, resolve } from 'node:path'; -import { fileURLToPath } from 'node:url'; -import { setTimeout as delay } from 'node:timers/promises'; +} from "node:fs"; +import { tmpdir } from "node:os"; +import { dirname, join, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { setTimeout as delay } from "node:timers/promises"; const NODE_PORT = 8080; const LOCAL_NETWORK_READY_TIMEOUT_MS = 600_000; // 10 minutes const POLL_INTERVAL_MS = 2000; // 2 seconds const SCRIPT_DIR = dirname(fileURLToPath(import.meta.url)); -const COUNTER_TEST_TEMPLATE = join(SCRIPT_DIR, 'counter.test.ts'); +const COUNTER_TEST_TEMPLATE = join(SCRIPT_DIR, "counter.test.ts"); // Defaults to ~/.aztec/current (the symlink aztec-up maintains); fails if no package.json is found there. -const AZTEC_INSTALL_DIR = process.env.AZTEC_INSTALL_DIR ?? join(process.env.HOME ?? 
'', '.aztec/current'); -if (!existsSync(join(AZTEC_INSTALL_DIR, 'package.json'))) { - console.error(`FATAL: AZTEC_INSTALL_DIR does not point at an installed aztec: ${AZTEC_INSTALL_DIR}`); +const AZTEC_INSTALL_DIR = + process.env.AZTEC_INSTALL_DIR ?? + join(process.env.HOME ?? "", ".aztec/current"); +if (!existsSync(join(AZTEC_INSTALL_DIR, "package.json"))) { + console.error( + `FATAL: AZTEC_INSTALL_DIR does not point at an installed aztec: ${AZTEC_INSTALL_DIR}`, + ); process.exit(2); } -const TMP_DIR = mkdtempSync(join(tmpdir(), 'aztec-full-dev-path-')); -const WORKSPACE_DIR = join(TMP_DIR, 'my_workspace'); +const TMP_DIR = mkdtempSync(join(tmpdir(), "aztec-cli-acceptance-test-")); +const WORKSPACE_DIR = join(TMP_DIR, "my_workspace"); // Exit codes follow the Unix 128+signal convention for signal terminations. -process.on('SIGINT', () => { +process.on("SIGINT", () => { leaveTmpDirForInspection(); process.exit(130); }); -process.on('SIGTERM', () => { +process.on("SIGTERM", () => { leaveTmpDirForInspection(); process.exit(143); }); @@ -76,8 +80,8 @@ if (result.ok) { async function main(): Promise { log(`Working in ${TMP_DIR}`); - let stepName = ''; - let aztecVersion = 'unknown'; + let stepName = ""; + let aztecVersion = "unknown"; async function step(name: string, fn: () => T | Promise): Promise { stepName = name; @@ -89,21 +93,36 @@ async function main(): Promise { } try { - aztecVersion = await step('Checking installed tool versions', logVersions); - await step('Scaffolding new workspace (aztec init)', scaffoldWorkspace); - await step('Verifying scaffold structure', assertScaffold); - await step('Compiling contract (aztec compile)', () => run('aztec', ['compile'], WORKSPACE_DIR)); - - const artifactPath = await step('Locating compiled artifact', locateArtifact); + aztecVersion = await step("Checking installed tool versions", logVersions); + await step("Scaffolding new workspace (aztec init)", scaffoldWorkspace); + await step("Verifying scaffold structure", 
assertScaffold); + await step("Compiling contract (aztec compile)", () => + run("aztec", ["compile"], WORKSPACE_DIR), + ); + + const artifactPath = await step( + "Locating compiled artifact", + locateArtifact, + ); log(` artifact at ${artifactPath}`); - await step('Running TXE tests (aztec test)', () => run('aztec', ['test'], WORKSPACE_DIR)); + await step("Running TXE tests (aztec test)", () => + run("aztec", ["test"], WORKSPACE_DIR), + ); - await step('Starting local sandbox (aztec start --local-network)', startLocalNetwork); + await step( + "Starting local sandbox (aztec start --local-network)", + startLocalNetwork, + ); - await step('Generating TypeScript bindings (aztec codegen)', () => codegen(artifactPath)); + await step("Generating TypeScript bindings (aztec codegen)", () => + codegen(artifactPath), + ); - await step('Running TypeScript end-to-end test (node --test)', runTsEndToEndTest); + await step( + "Running TypeScript end-to-end test (node --test)", + runTsEndToEndTest, + ); return { ok: true, aztecVersion }; } catch (error) { return { ok: false, stepName, aztecVersion, error }; @@ -113,16 +132,18 @@ async function main(): Promise { function scaffoldWorkspace() { // aztec init scaffolds in pwd and uses the directory name as the package name; create the dir first. 
mkdirSync(WORKSPACE_DIR, { recursive: true }); - run('aztec', ['init'], WORKSPACE_DIR); + run("aztec", ["init"], WORKSPACE_DIR); } function logVersions(): string { - log('Tool versions:'); - let aztecVersion = 'unknown'; - for (const cmd of ['aztec', 'nargo', 'bb', 'aztec-wallet']) { - const version = execFileSync(cmd, ['--version'], { encoding: 'utf8' }).trim().split('\n')[0]; + log("Tool versions:"); + let aztecVersion = "unknown"; + for (const cmd of ["aztec", "aztec-nargo", "aztec-bb", "aztec-wallet"]) { + const version = execFileSync(cmd, ["--version"], { encoding: "utf8" }) + .trim() + .split("\n")[0]; console.log(` ${cmd}: ${version}`); - if (cmd === 'aztec') { + if (cmd === "aztec") { aztecVersion = version; } } @@ -131,9 +152,9 @@ function logVersions(): string { function assertScaffold() { // aztec init scaffolds a workspace with `_contract/` (Counter) and `_test/` crates. - const packageName = 'my_workspace'; + const packageName = "my_workspace"; const required = [ - 'Nargo.toml', + "Nargo.toml", `${packageName}_contract/Nargo.toml`, `${packageName}_contract/src/main.nr`, `${packageName}_test/Nargo.toml`, @@ -148,32 +169,34 @@ function assertScaffold() { } function locateArtifact(): string { - const matches = globSync('**/target/*-Counter.json', { cwd: WORKSPACE_DIR }); + const matches = globSync("**/target/*-Counter.json", { cwd: WORKSPACE_DIR }); if (matches.length === 0) { - fail('compiled Counter artifact not found under target/'); + fail("compiled Counter artifact not found under target/"); } if (matches.length > 1) { - fail(`expected one Counter artifact, found ${matches.length}: ${matches.join(', ')}`); + fail( + `expected one Counter artifact, found ${matches.length}: ${matches.join(", ")}`, + ); } return resolve(WORKSPACE_DIR, matches[0]); } async function startLocalNetwork(): Promise { - const logPath = join(TMP_DIR, 'local_network.log'); - const logFd = openSync(logPath, 'a'); - const proc = spawn('aztec', ['start', '--local-network'], { + 
const logPath = join(TMP_DIR, "local_network.log"); + const logFd = openSync(logPath, "a"); + const proc = spawn("aztec", ["start", "--local-network"], { cwd: TMP_DIR, - stdio: ['ignore', logFd, logFd], - env: { ...process.env, LOG_LEVEL: 'silent', PXE_PROVER: 'none' }, + stdio: ["ignore", logFd, logFd], + env: { ...process.env, LOG_LEVEL: "silent", PXE_PROVER: "none" }, }); closeSync(logFd); log(` local-network pid=${proc.pid}, log=${logPath}`); // Kill the network on process exit (including SIGINT/SIGTERM via the signal handlers). - process.on('exit', () => { + process.on("exit", () => { if (proc.exitCode === null) { try { - proc.kill('SIGTERM'); + proc.kill("SIGTERM"); } catch {} } }); @@ -182,16 +205,20 @@ async function startLocalNetwork(): Promise { while (true) { if (proc.exitCode !== null) { dumpTail(logPath); - fail(`local-network exited early with code ${proc.exitCode} (see ${logPath})`); + fail( + `local-network exited early with code ${proc.exitCode} (see ${logPath})`, + ); } if (Date.now() > deadline) { dumpTail(logPath); - fail(`timed out after ${msToSecs(LOCAL_NETWORK_READY_TIMEOUT_MS)}s waiting for local-network /status (see ${logPath})`); + fail( + `timed out after ${msToSecs(LOCAL_NETWORK_READY_TIMEOUT_MS)}s waiting for local-network /status (see ${logPath})`, + ); } try { const res = await fetch(`http://localhost:${NODE_PORT}/status`); if (res.ok) { - log(' local-network ready'); + log(" local-network ready"); return; } } catch { @@ -202,11 +229,11 @@ async function startLocalNetwork(): Promise { } function codegen(artifactPath: string) { - const artifactsOutDir = join(WORKSPACE_DIR, 'artifacts'); + const artifactsOutDir = join(WORKSPACE_DIR, "artifacts"); mkdirSync(artifactsOutDir, { recursive: true }); - const targetDir = resolve(artifactPath, '..'); - run('aztec', ['codegen', targetDir, '-o', artifactsOutDir], WORKSPACE_DIR); - const codegenTs = join(artifactsOutDir, 'Counter.ts'); + const targetDir = resolve(artifactPath, ".."); + 
run("aztec", ["codegen", targetDir, "-o", artifactsOutDir], WORKSPACE_DIR); + const codegenTs = join(artifactsOutDir, "Counter.ts"); if (!existsSync(codegenTs)) { fail(`codegen did not emit Counter.ts (wrote to ${artifactsOutDir})`); } @@ -215,23 +242,26 @@ function codegen(artifactPath: string) { function runTsEndToEndTest() { // Point the workspace at the installed node_modules so @aztec/* imports (and transitive deps // of the codegen'd Counter.ts) resolve to the same bundle a real user would have. - const modulesLink = join(WORKSPACE_DIR, 'node_modules'); + const modulesLink = join(WORKSPACE_DIR, "node_modules"); if (!existsSync(modulesLink)) { - symlinkSync(join(AZTEC_INSTALL_DIR, 'node_modules'), modulesLink, 'dir'); + symlinkSync(join(AZTEC_INSTALL_DIR, "node_modules"), modulesLink, "dir"); } - const testDest = join(WORKSPACE_DIR, 'counter.test.ts'); + const testDest = join(WORKSPACE_DIR, "counter.test.ts"); copyFileSync(COUNTER_TEST_TEMPLATE, testDest); - run('node', ['--no-warnings', '--test', testDest], WORKSPACE_DIR); + run("node", ["--no-warnings", "--test", testDest], WORKSPACE_DIR); } function reportFailure(stepName: string, aztecVersion: string, err: unknown) { const message = err instanceof Error ? err.message : String(err); - const childExit = typeof (err as { status?: unknown })?.status === 'number' ? (err as { status: number }).status : undefined; - const banner = '='.repeat(72); + const childExit = + typeof (err as { status?: unknown })?.status === "number" + ? 
(err as { status: number }).status + : undefined; + const banner = "=".repeat(72); console.error(`\n${banner}`); - console.error('FULL DEV PATH TEST FAILED'); + console.error("AZTEC CLI ACCEPTANCE TEST FAILED"); console.error(banner); console.error(`Step: ${stepName}`); console.error(`Version: ${aztecVersion}`); @@ -241,13 +271,15 @@ function reportFailure(stepName: string, aztecVersion: string, err: unknown) { console.error(`Tmp dir: ${TMP_DIR}`); console.error(`Error: ${message}`); if (err instanceof Error && err.stack) { - console.error(''); + console.error(""); console.error(err.stack); } console.error(banner); - const safeStep = stepName.replace(/\s+/g, '_'); - const safeError = message.replace(/[\r\n]+/g, ' ').slice(0, 240); - console.log(`TEST_RESULT=fail step=${safeStep} version=${aztecVersion} error="${safeError}"`); + const safeStep = stepName.replace(/\s+/g, "_"); + const safeError = message.replace(/[\r\n]+/g, " ").slice(0, 240); + console.log( + `TEST_RESULT=fail step=${safeStep} version=${aztecVersion} error="${safeError}"`, + ); } function msToSecs(ms: number): string { @@ -255,7 +287,7 @@ function msToSecs(ms: number): string { } function run(cmd: string, args: string[], cwd: string) { - execFileSync(cmd, args, { cwd, stdio: 'inherit' }); + execFileSync(cmd, args, { cwd, stdio: "inherit" }); } function log(msg: string) { @@ -276,7 +308,9 @@ function dumpTail(path: string, lines = 100) { } console.error(`--- last ${lines} lines of ${path} ---`); try { - console.error(readFileSync(path, 'utf8').split('\n').slice(-lines).join('\n')); + console.error( + readFileSync(path, "utf8").split("\n").slice(-lines).join("\n"), + ); } catch { console.error(`(failed to read ${path})`); } diff --git a/aztec-up/test/full-dev-path/counter.test.ts b/aztec-up/test/aztec-cli-acceptance-test/counter.test.ts similarity index 100% rename from aztec-up/test/full-dev-path/counter.test.ts rename to aztec-up/test/aztec-cli-acceptance-test/counter.test.ts diff --git 
a/aztec-up/test/full-dev-path/run-test.sh b/aztec-up/test/aztec-cli-acceptance-test/run-test.sh similarity index 87% rename from aztec-up/test/full-dev-path/run-test.sh rename to aztec-up/test/aztec-cli-acceptance-test/run-test.sh index 52e79d56a2a9..2ea5413c43fd 100755 --- a/aztec-up/test/full-dev-path/run-test.sh +++ b/aztec-up/test/aztec-cli-acceptance-test/run-test.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash -# Launcher for the full-dev-path test. +# Launcher for the Aztec CLI acceptance test. # # Steps: # 1. Install Node via NVM if not present (skipped with SKIP_INSTALL=1) # 2. Install the Aztec toolchain via the public installer (skipped with SKIP_INSTALL=1) -# 3. Run full-dev-path.ts which exercises the installed toolchain end-to-end +# 3. Run aztec-cli-acceptance-test.ts which exercises the installed toolchain end-to-end # # Env vars: # SKIP_INSTALL=1 Skip steps 1-2 and use the already-installed toolchain (dev-box inner loop). @@ -37,4 +37,4 @@ export PATH="$HOME/.aztec/current/bin:$HOME/.aztec/bin:$PATH" export AZTEC_INSTALL_DIR="${AZTEC_INSTALL_DIR:-$HOME/.aztec/current}" echo ">>> Running test" -exec node --no-warnings "${script_dir}/full-dev-path.ts" +exec node --no-warnings "${script_dir}/aztec-cli-acceptance-test.ts" diff --git a/aztec-up/test/basic_install.sh b/aztec-up/test/basic_install.sh index b7973431f612..497aeda2b873 100755 --- a/aztec-up/test/basic_install.sh +++ b/aztec-up/test/basic_install.sh @@ -2,8 +2,8 @@ set -euo pipefail echo -echo "nargo version: $(nargo --version | head -1 | cut -d' ' -f4)" -echo "bb version: $(bb --version)" +echo "nargo version: $(aztec-nargo --version | head -1 | cut -d' ' -f4)" +echo "bb version: $(aztec-bb --version)" echo "aztec version: $(aztec --version)" echo "aztec-wallet version: $(aztec-wallet --version)" echo diff --git a/aztec-up/test/no_shadow_user_bins.sh b/aztec-up/test/no_shadow_user_bins.sh new file mode 100755 index 000000000000..cb0bfec5273c --- /dev/null +++ b/aztec-up/test/no_shadow_user_bins.sh 
@@ -0,0 +1,82 @@ +#!/usr/bin/env bash +# Regression test: nothing the aztec installer ships is exposed under a +# bare name on PATH. Every entry in $HOME/.aztec/current/bin is `aztec` or +# `aztec-*`. Native binaries live in current/internal-bin (off PATH) and are +# reachable only via the `aztec-*` symlinks or via the `aztec` wrapper, which +# prepends internal-bin to PATH for its subprocesses. +set -euo pipefail + +aztec_bin="$HOME/.aztec/current/bin" + +# Names this test exercises. Pre-create user-installed sentinel shims that +# print known strings, then assert bare names resolve to them after install +# (i.e. NOT shadowed by aztec). The shim directory comes AFTER the aztec bin +# on PATH, mirroring a realistic user setup where the shell PATH update was +# the last step. Names cover every bare bin the installer used to expose. +shadow_targets=( + forge cast anvil chisel + nargo noir-profiler noir-codegen + bb bb-cli + pxe txe validator-client blob-client +) +user_bin="$HOME/.local/bin" +mkdir -p "$user_bin" +for tool in "${shadow_targets[@]}"; do + printf '#!/usr/bin/env bash\necho "user-%s"\n' "$tool" > "$user_bin/$tool" + chmod +x "$user_bin/$tool" +done +export PATH="$PATH:$user_bin" + +# Bins known to support --version cleanly under the new (aztec-prefixed) layout. 
+runs_version=(aztec aztec-forge aztec-cast aztec-anvil aztec-chisel aztec-nargo aztec-noir-profiler) + +function assert_no_shadowing { + local tool resolved + for tool in "${shadow_targets[@]}"; do + resolved=$(command -v "$tool") + if [[ "$resolved" == "$HOME/.aztec/"* ]]; then + echo "FAIL: bare '$tool' shadowed by aztec at $resolved" + exit 1 + fi + if [[ "$("$tool")" != "user-$tool" ]]; then + echo "FAIL: bare '$tool' did not invoke user shim" + exit 1 + fi + done +} + +function assert_bin_only_aztec_prefixed { + local entry name + for entry in "$aztec_bin"/*; do + [ -e "$entry" ] || continue + name=$(basename "$entry") + if [[ "$name" != "aztec" && "$name" != aztec-* ]]; then + echo "FAIL: bare-named entry in $aztec_bin: $name" + exit 1 + fi + done +} + +function assert_aztec_bins_run { + local name resolved + for name in "${runs_version[@]}"; do + if ! resolved=$(command -v "$name"); then + echo "FAIL: $name not on PATH" + exit 1 + fi + if [[ "$resolved" != "$HOME/.aztec/"* ]]; then + echo "FAIL: $name resolves outside aztec ($resolved)" + exit 1 + fi + if ! "$name" --version >/dev/null 2>&1; then + echo "FAIL: $name --version exited non-zero" + exit 1 + fi + done +} + +assert_no_shadowing +assert_bin_only_aztec_prefixed +assert_aztec_bins_run + +echo "PASS: aztec installer does not shadow user binaries" diff --git a/barretenberg/.claude/skills/benchmark-chonk/SKILL.md b/barretenberg/.claude/skills/benchmark-chonk/SKILL.md index ec480bcf79a0..07afa48c2bbe 100644 --- a/barretenberg/.claude/skills/benchmark-chonk/SKILL.md +++ b/barretenberg/.claude/skills/benchmark-chonk/SKILL.md @@ -6,16 +6,9 @@ argument-hint: e.g. "run", "compare", "wasm", "instrument ", "per # Benchmark Chonk -Run realistic Chonk IVC benchmarks using **pinned protocol inputs** (real transaction flows captured from end-to-end tests), not the synthetic `chonk_bench` target. 
The synthetic benchmark (`chonk_bench`) uses trivially small mock circuits — it is useful for quick regression checks but does NOT reflect production proving performance. Users invoking `/benchmark-chonk` want the real thing. +Run realistic Chonk IVC benchmarks using **pinned protocol inputs** (real transaction flows captured from end-to-end tests). -## What makes this different from `chonk_bench` - -| | `chonk_bench` (synthetic) | This skill (realistic) | -|---|---|---| -| Input data | Mock circuits via `test_bench_shared.hpp` | Pinned msgpack from real Aztec transactions | -| Circuit count | 2 or 5 tiny circuits | Full transaction flows (10+ circuits) | -| Circuit variety | All identical | Mixed: app, kernel, tail, public | -| BB command | `./chonk_bench --benchmark_filter=...` | `bb prove --scheme chonk --ivc_inputs_path ...` | +**Chonk has no synthetic micro-benchmark.** Past attempts (`chonk_bench`) used trivially small mock circuits and produced misleading numbers — the target was deleted to prevent regression of that mistake. Always benchmark Chonk via `bb prove --scheme chonk` against pinned `ivc-inputs.msgpack` for real transaction flows. If a Chonk proving question seems to call for a micro-benchmark, the answer is still `bb prove` on a real flow. ## Step 1: Get pinned IVC inputs @@ -158,7 +151,7 @@ The macros create `BenchReporter` RAII objects that: ### Google Benchmark integration -For `chonk_bench` and other `.bench.cpp` targets: +For `.bench.cpp` targets that integrate BB_BENCH into Google Benchmark counters: ```cpp #include "barretenberg/common/google_bb_bench.hpp" @@ -275,15 +268,15 @@ python3 barretenberg/cpp/scripts/extract_component_benchmarks.py e.g. "bb", "chonk_bench", "ultra_honk_bench", "wasm bb" +argument-hint: e.g. "bb", "ultra_honk_bench", "wasm bb" --- # Remote Bench @@ -95,7 +95,7 @@ The standard flow used by `scripts/benchmark_remote.sh`: ```bash cd barretenberg/cpp -BENCHMARK="bb" # or chonk_bench, ultra_honk_bench, etc. 
+BENCHMARK="bb" # or ultra_honk_bench, etc. (Chonk: use bb with --scheme chonk on real example flows — there is no synthetic chonk benchmark) PRESET="clang20-no-avm" # or clang20 BUILD_DIR="build-no-avm" # matches preset @@ -204,14 +204,14 @@ Compare current branch vs baseline (builds and runs both on remote): ```bash # Native -./scripts/compare_chonk_bench.sh # ChonkBench/Full/6 ./scripts/compare_branch_vs_baseline_remote.sh '' # WASM -./scripts/compare_chonk_bench_wasm.sh # ChonkBench/Full/6 ./scripts/compare_branch_vs_baseline_remote_wasm.sh '' ``` +For Chonk A/B, do not use a synthetic benchmark — measure `bb prove --scheme chonk` against pinned `ivc-inputs.msgpack` for both branches and compare manually. + These use Google Benchmark's `compare.py` for statistical analysis. Note: comparison scripts check out the baseline branch locally, so your working tree must be clean. ## Scripts reference @@ -220,10 +220,7 @@ These use Google Benchmark's `compare.py` for statistical analysis. Note: compar |--------|---------| | `scripts/benchmark_remote.sh` | Generic: build locally, scp, run remotely | | `scripts/benchmark_wasm_remote.sh` | Same for WASM (wasmtime on remote) | -| `scripts/benchmark_example_ivc_flow_remote.sh` | Chonk with pinned inputs on remote | -| `scripts/benchmark_chonk.sh` | Synthetic chonk_bench on remote | -| `scripts/compare_chonk_bench.sh` | A/B native comparison | -| `scripts/compare_chonk_bench_wasm.sh` | A/B WASM comparison | +| `scripts/benchmark_example_ivc_flow_remote.sh` | Chonk with pinned inputs on remote (the only realistic Chonk bench) | | `scripts/compare_branch_vs_baseline_remote.sh` | Generic A/B native | | `scripts/compare_branch_vs_baseline_remote_wasm.sh` | Generic A/B WASM | | `scripts/_benchmark_remote_lock.sh` | Lock mechanism (source it, don't run it) | diff --git a/barretenberg/AGENTS.md b/barretenberg/AGENTS.md new file mode 120000 index 000000000000..681311eb9cf4 --- /dev/null +++ b/barretenberg/AGENTS.md @@ -0,0 +1 @@ 
+CLAUDE.md \ No newline at end of file diff --git a/barretenberg/bbup/bbup b/barretenberg/bbup/bbup index e50950c8e734..037f80eb2545 100755 --- a/barretenberg/bbup/bbup +++ b/barretenberg/bbup/bbup @@ -50,8 +50,8 @@ get_bb_version_for_noir() { local lookup_url="https://raw.githubusercontent.com/AztecProtocol/aztec-packages/next/barretenberg/bbup/bb-versions.json" - # Extract BB version from install script - local bb_version=$(curl --fail -s "$lookup_url" | jq -r --arg version "$resolved_version" '.[$version]') + # bb-versions.json is a flat "": "" map with one entry per line. + local bb_version=$(curl --fail -s "$lookup_url" | grep -F "\"$resolved_version\":" | head -1 | cut -d'"' -f4) echo "$bb_version" } diff --git a/barretenberg/cpp/AGENTS.md b/barretenberg/cpp/AGENTS.md new file mode 120000 index 000000000000..681311eb9cf4 --- /dev/null +++ b/barretenberg/cpp/AGENTS.md @@ -0,0 +1 @@ +CLAUDE.md \ No newline at end of file diff --git a/barretenberg/cpp/CLAUDE.md b/barretenberg/cpp/CLAUDE.md index b2b1dfeb8f8c..b2fb9b3aadac 100644 --- a/barretenberg/cpp/CLAUDE.md +++ b/barretenberg/cpp/CLAUDE.md @@ -108,6 +108,27 @@ Key constants to watch: If C++ static_asserts fail after your changes, update both the assert values AND the corresponding Noir constants, then run `yarn remake-constants`. +## Prover.toml Fixtures + +Proof-length-affecting changes (e.g. `CHONK_PROOF_LENGTH` bumps from MegaFlavor entity additions) make the committed `Prover.toml` fixtures stale. `nargo execute --program-dir ` then fails with `Type Array { length: N, typ: Field } is expected to have length N but value Vec(...)`. + +Regenerate via the e2e prover full test with fake proofs: + +```bash +cd yarn-project +AZTEC_GENERATE_TEST_DATA=1 FAKE_PROOFS=1 yarn workspace @aztec/end-to-end test full.test +``` + +`FAKE_PROOFS=1` skips real proving — runs in ~2 min (orchestrator + witness generation only). Writes 12 `Prover.toml` files under `noir-projects/noir-protocol-circuits/crates//Prover.toml`. 
+ +For circuits not exercised by `full.test.ts` (`rollup-tx-merge`, `rollup-block-root`, `rollup-block-root-single-tx`, `rollup-block-merge`, `rollup-checkpoint-root`, `rollup-block-root-first-empty-tx`), additionally run: + +```bash +AZTEC_GENERATE_TEST_DATA=1 yarn workspace @aztec/prover-client test orchestrator_single_checkpoint +``` + +Verify with `nargo execute --program-dir noir-projects/noir-protocol-circuits/crates/` for any previously-failing crate; should print `Circuit witness successfully solved`. + ## Verification Keys **IMPORTANT**: When making barretenberg changes that could affect verification keys, you must verify that VKs haven't changed unexpectedly, or diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index 8b67388d33c3..3d5b17a58d32 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -788,7 +788,6 @@ "barretenberg-debug.wasm", "ecc_tests", "ultra_honk_bench", - "chonk_bench", "bb" ] }, diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index b9c2f351d656..b8ccc9b2a486 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -87,6 +87,15 @@ function preset_cache_paths { find $build_dir/bin $build_dir/lib \ -maxdepth 1 \( -name "$t" -o -name "$t.exe" -o -name "$t.node" -o -name "lib${t}.a" \) \ 2>/dev/null + # Emscripten emits a .js loader and a .worker.mjs pthread worker as + # side-outputs of any .wasm executable target; cache them next to the + # .wasm so consumers like bb.js see a complete artifact set on cache hit. 
+ if [[ "$t" == *.wasm ]]; then + local stem="${t%.wasm}" + find $build_dir/bin -maxdepth 1 \ + \( -name "$stem.js" -o -name "$stem.worker.mjs" \) \ + 2>/dev/null + fi done fi } @@ -265,7 +274,11 @@ function test_cmds_native { done || (echo "Failed to list tests in $bin" && exit 1) done - echo "$hash barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh" + # The pinned IVC inputs / VKs live in the public repo; the private fork + # carries divergent circuits so the check is expected to fail there. + if [[ "${GITHUB_REPOSITORY,,}" != "aztecprotocol/aztec-packages-private" ]]; then + echo "$hash barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh" + fi } function test_cmds_wasm_threads { @@ -320,12 +333,8 @@ function bench_cmds { prefix="$hash:CPUS=8" echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/ultra_honk $native_build_dir/bin/ultra_honk_bench construct_proof_ultrahonk_power_of_2/20$" echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/ultra_honk_zk $native_build_dir/bin/ultra_honk_bench construct_proof_ultrahonk_zk_power_of_2/20$" - echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/chonk $native_build_dir/bin/chonk_bench ChonkBench/Full/5$" echo "$prefix barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/ultra_honk build-wasm-threads/bin/ultra_honk_bench construct_proof_ultrahonk_power_of_2/20$" echo "$prefix barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/ultra_honk_zk build-wasm-threads/bin/ultra_honk_bench construct_proof_ultrahonk_zk_power_of_2/20$" - echo "$prefix barretenberg/cpp/scripts/run_bench.sh wasm bb-micro-bench/wasm/chonk build-wasm-threads/bin/chonk_bench ChonkBench/Full/5$" - prefix="$hash:CPUS=1" - echo "$prefix barretenberg/cpp/scripts/run_bench.sh native bb-micro-bench/native/chonk_verify $native_build_dir/bin/chonk_bench VerificationOnly$" } # Runs benchmarks sharded over machine cores. 
diff --git a/barretenberg/cpp/pil/vm2/AGENTS.md b/barretenberg/cpp/pil/vm2/AGENTS.md new file mode 120000 index 000000000000..681311eb9cf4 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/AGENTS.md @@ -0,0 +1 @@ +CLAUDE.md \ No newline at end of file diff --git a/barretenberg/cpp/pil/vm2/precomputed.pil b/barretenberg/cpp/pil/vm2/precomputed.pil index a8611d6826da..734d6b761e46 100644 --- a/barretenberg/cpp/pil/vm2/precomputed.pil +++ b/barretenberg/cpp/pil/vm2/precomputed.pil @@ -209,7 +209,7 @@ pol constant sel_mem_tag_out_of_range; // - instruction size (in bytes): instr_size // - Selector on whether the instruction has a tag: sel_has_tag // - Selector on whether operand op2 is a tag: sel_tag_is_op2 -// +// // Used by the instruction fetching subtrace to decode raw bytecode. // Selectors for operands decomposition into bytes (required by instr_fetching.pil) // This table is populated by a map generated by a cpp test defined in op_decomposition.test.cpp. @@ -361,22 +361,23 @@ pol constant addressing_gas; // properties: which tree operations are allowed, public-input offsets, revert behavior. // Used by the transaction trace to enforce phase-based constraints. 
// -// Example trace (idx encodes the TransactionPhase enum; abbreviated column names): -// idx | sel_phase | is_call | is_teardown | is_collect_fee | is_tree_padding | is_cleanup | is_revertible | nr_null | nr_note | nr_l2l1 | r_null | r_note | r_l2l1 | next_on_revert -// ----+-----------+---------+-------------+----------------+-----------------+------------+---------------+---------+---------+---------+--------+--------+--------+--------------- -// 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 (NR_NULLIFIER_INSERTION) -// 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 (NR_NOTE_INSERTION) -// 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 (NR_L2_TO_L1_MESSAGE) -// 3 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (SETUP) -// 4 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 8 (R_NULLIFIER_INSERTION) -// 5 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 8 (R_NOTE_INSERTION) -// 6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 8 (R_L2_TO_L1_MESSAGE) -// 7 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 8 (APP_LOGIC) -// 8 | 1 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 9 (TEARDOWN) -// 9 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (COLLECT_GAS_FEES) -// 10 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (TREE_PADDING) -// 11 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (CLEANUP) -// 12 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (beyond phase range) +// Example trace (idx encodes the TransactionPhase enum; abbreviated column names). +// Whether an append is revertible or not is determined by `is_revertible`. 
+// idx | sel_phase | is_call | is_teardown | is_collect_fee | is_tree_padding | is_cleanup | is_revertible | append_null | append_note | append_l2l1 | next_on_revert +// ----+-----------+---------+-------------+----------------+-----------------+------------+---------------+-------------+-------------+-------------+--------------- +// 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 (NR_NULLIFIER_INSERTION) +// 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 (NR_NOTE_INSERTION) +// 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 (NR_L2_TO_L1_MESSAGE) +// 3 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (SETUP) +// 4 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 8 (R_NULLIFIER_INSERTION) +// 5 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 8 (R_NOTE_INSERTION) +// 6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 8 (R_L2_TO_L1_MESSAGE) +// 7 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 8 (APP_LOGIC) +// 8 | 1 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 9 (TEARDOWN) +// 9 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (COLLECT_GAS_FEES) +// 10 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 (TREE_PADDING) +// 11 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 (CLEANUP) +// 12 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (beyond phase range) // Phase Table for Tx Trace pol constant sel_phase; // Selector for phase table @@ -389,12 +390,9 @@ pol constant is_cleanup; pol constant is_revertible; pol constant read_pi_start_offset; pol constant read_pi_length_offset; -pol constant sel_non_revertible_append_note_hash; -pol constant sel_non_revertible_append_nullifier; -pol constant sel_non_revertible_append_l2_l1_msg; -pol constant sel_revertible_append_note_hash; -pol constant sel_revertible_append_nullifier; -pol constant sel_revertible_append_l2_l1_msg; +pol constant sel_append_note_hash; +pol constant sel_append_nullifier; +pol constant sel_append_l2_l1_msg; pol constant next_phase_on_revert; // ===== Section 12: Keccak round constants ===== diff --git a/barretenberg/cpp/pil/vm2/tx.pil 
b/barretenberg/cpp/pil/vm2/tx.pil index 0b4b62757ecd..fefc07a34d31 100644 --- a/barretenberg/cpp/pil/vm2/tx.pil +++ b/barretenberg/cpp/pil/vm2/tx.pil @@ -209,12 +209,9 @@ namespace tx; is_revertible, read_pi_start_offset, read_pi_length_offset, - sel_non_revertible_append_note_hash, - sel_non_revertible_append_nullifier, - sel_non_revertible_append_l2_l1_msg, - sel_revertible_append_note_hash, - sel_revertible_append_nullifier, - sel_revertible_append_l2_l1_msg, + sel_append_note_hash, + sel_append_nullifier, + sel_append_l2_l1_msg, next_phase_on_revert } in @@ -228,12 +225,9 @@ namespace tx; precomputed.is_revertible, precomputed.read_pi_start_offset, precomputed.read_pi_length_offset, - precomputed.sel_non_revertible_append_note_hash, - precomputed.sel_non_revertible_append_nullifier, - precomputed.sel_non_revertible_append_l2_l1_msg, - precomputed.sel_revertible_append_note_hash, - precomputed.sel_revertible_append_nullifier, - precomputed.sel_revertible_append_l2_l1_msg, + precomputed.sel_append_note_hash, + precomputed.sel_append_nullifier, + precomputed.sel_append_l2_l1_msg, precomputed.next_phase_on_revert }; @@ -465,18 +459,16 @@ namespace tx; /*************************************************************************** * Private Side Effect Insertions **************************************************************************/ - pol commit sel_revertible_append_note_hash; // @boolean - pol commit sel_non_revertible_append_note_hash; // @boolean - pol commit sel_revertible_append_nullifier; // @boolean - pol commit sel_non_revertible_append_nullifier; // @boolean - pol commit sel_revertible_append_l2_l1_msg; // @boolean - pol commit sel_non_revertible_append_l2_l1_msg; // @boolean - // The 6 above selectors are booleans thanks to #[READ_PHASE_SPEC] on active rows. 
+ pol commit sel_append_note_hash; // @boolean + pol commit sel_append_nullifier; // @boolean + pol commit sel_append_l2_l1_msg; // @boolean + // The 3 above selectors are booleans thanks to #[READ_PHASE_SPEC] on active rows. // Furthermore, phase spec guarantees that they are mutually exclusive. + // Whether such an append is revertible or not is determined by `is_revertible`. // A tree selector means we need to get the tree value - pol commit is_tree_insert_phase; // @boolean on active rows (follows from mutual exclusivity of the 6 above selectors) - is_tree_insert_phase = sel_revertible_append_note_hash + sel_non_revertible_append_note_hash + sel_revertible_append_nullifier + sel_non_revertible_append_nullifier; + pol commit is_tree_insert_phase; // @boolean on active rows (follows from mutual exclusivity of the above selectors) + is_tree_insert_phase = sel_append_note_hash + sel_append_nullifier; pol commit leaf_value; // Shared column to track the inverse of the remaining side effects for note hashes, nullifiers, and L2 to L1 messages pol commit remaining_side_effects_inv; @@ -491,7 +483,7 @@ namespace tx; // ===== NOTE HASHES ===== pol commit sel_try_note_hash_append; // @boolean (follows from definition) - sel_try_note_hash_append = (sel - is_padded) * (sel_revertible_append_note_hash + sel_non_revertible_append_note_hash); + sel_try_note_hash_append = (sel - is_padded) * sel_append_note_hash; // If we are at the maximum emitted note hashes, we must revert pol REMAINING_NOTE_HASH_WRITES = constants.MAX_NOTE_HASHES_PER_TX - prev_num_note_hashes_emitted; @@ -509,7 +501,9 @@ namespace tx; prev_note_hash_tree_size, prev_note_hash_tree_root, precomputed.zero, // Already siloed. (No need to pass address.) - sel_revertible_append_note_hash, // Not unique for revertible note hashes. + is_revertible, // Used as `sel_unique`: revertible note hashes need to be made unique with a nonce. 
+ // On rows where sel_note_hash_append == 1 (NR_NOTE_INSERTION or R_NOTE_INSERTION), + // is_revertible is exactly the discriminator we want. prev_num_note_hashes_emitted, discard, // from tx_discard.pil virtual trace next_note_hash_tree_root @@ -532,7 +526,7 @@ namespace tx; // ===== NULLIFIERS ===== pol commit sel_try_nullifier_append; // @boolean (follows from definition) - sel_try_nullifier_append = (sel - is_padded) * (sel_revertible_append_nullifier + sel_non_revertible_append_nullifier); + sel_try_nullifier_append = (sel - is_padded) * sel_append_nullifier; pol commit nullifier_limit_error; // @boolean nullifier_limit_error * (1 - nullifier_limit_error) = 0; @@ -594,7 +588,7 @@ namespace tx; // ===== L2 - L1 Messages ===== pol commit sel_try_l2_l1_msg_append; // @boolean (follows from definition) - sel_try_l2_l1_msg_append = (sel - is_padded) * (sel_revertible_append_l2_l1_msg + sel_non_revertible_append_l2_l1_msg); + sel_try_l2_l1_msg_append = (sel - is_padded) * sel_append_l2_l1_msg; pol commit l2_l1_msg_contract_address; pol commit l2_l1_msg_recipient; diff --git a/barretenberg/cpp/pil/vm2/tx_context.pil b/barretenberg/cpp/pil/vm2/tx_context.pil index d17074b09cd7..832773e0f5d8 100644 --- a/barretenberg/cpp/pil/vm2/tx_context.pil +++ b/barretenberg/cpp/pil/vm2/tx_context.pil @@ -330,12 +330,12 @@ namespace tx; NOT_LAST_ROW * (1 - is_teardown') * (da_gas_limit - da_gas_limit') = 0; // Selectors to allow prev => next state changes in the different phases - pol SEL_CAN_EMIT_NOTE_HASH = is_public_call_request + sel_non_revertible_append_note_hash + sel_revertible_append_note_hash; - pol SEL_CAN_EMIT_NULLIFIER = is_public_call_request + sel_non_revertible_append_nullifier + sel_revertible_append_nullifier; + pol SEL_CAN_EMIT_NOTE_HASH = is_public_call_request + sel_append_note_hash; + pol SEL_CAN_EMIT_NULLIFIER = is_public_call_request + sel_append_nullifier; pol SEL_CAN_WRITE_PUBLIC_DATA = is_public_call_request + is_collect_fee; pol 
SEL_CAN_WRITE_WRITTEN_PUBLIC_DATA_SLOTS = is_public_call_request; pol SEL_CAN_EMIT_PUBLIC_LOG = is_public_call_request; - pol SEL_CAN_EMIT_L2_L1_MSG = is_public_call_request + sel_non_revertible_append_l2_l1_msg + sel_revertible_append_l2_l1_msg; + pol SEL_CAN_EMIT_L2_L1_MSG = is_public_call_request + sel_append_l2_l1_msg; // The 6 above selectors are booleans on active rows due to #[READ_PHASE_SPEC] and // mutual exclusivity of the selectors on the right-hand side. diff --git a/barretenberg/cpp/scripts/README.md b/barretenberg/cpp/scripts/README.md index ee686390d982..378c8562da47 100644 --- a/barretenberg/cpp/scripts/README.md +++ b/barretenberg/cpp/scripts/README.md @@ -28,7 +28,8 @@ There are scripts that: 4. If `ssh` worked, the setup is complete. ## How -- `./scripts/benchmark_chonk.sh` lets you run `chonk_bench` remotely and analyze the results. - `./scripts/benchmark_example_ivc_flow_remote.sh` copies the example flow input you'd like to run to the remote machine, runs `bb prove`, and analyze the results. - For the script to work you need to have the example flows downloaded locally, by `AZTEC_CACHE_COMMIT=origin/next~3 FORCE_CACHE_DOWNLOAD=1 yarn-project/end-to-end/bootstrap.sh build_bench` - If you have other special needs, look inside the above scripts and see what parameters you can give, or use `./scripts/benchmark_remote.sh`. + +Chonk proving must always be measured on real example app flows via `benchmark_example_ivc_flow_remote.sh` — there is no synthetic chonk benchmark, and there should not be one. Running synthetic Chonk benchmarks gives misleading numbers because the mock circuits do not reflect production proving costs. 
diff --git a/barretenberg/cpp/scripts/benchmark_chonk.sh b/barretenberg/cpp/scripts/benchmark_chonk.sh deleted file mode 100755 index bd0abef06bb1..000000000000 --- a/barretenberg/cpp/scripts/benchmark_chonk.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -eu - -TARGET=${1:-"chonk_bench"} -BENCHMARK="ChonkBench/Full/5" -BUILD_DIR="build" -FILTER="${BENCHMARK}$" # '$' to ensure only specified bench is run - -# Move above script dir. -cd $(dirname $0)/.. - -# Measure the benchmarks with ops time counting -./scripts/benchmark_remote.sh "$TARGET"\ - "BB_BENCH=1 ./$TARGET --benchmark_filter=$FILTER\ - --benchmark_out=$TARGET.json\ - --benchmark_out_format=json"\ - clang20\ - "$BUILD_DIR" - -# Retrieve output from benching instance -cd $BUILD_DIR -scp $BB_SSH_KEY $BB_SSH_INSTANCE:$BB_SSH_CPP_PATH/build/$TARGET.json . diff --git a/barretenberg/cpp/scripts/benchmark_remote.sh b/barretenberg/cpp/scripts/benchmark_remote.sh index a85d5c202d0f..eca222bd6860 100755 --- a/barretenberg/cpp/scripts/benchmark_remote.sh +++ b/barretenberg/cpp/scripts/benchmark_remote.sh @@ -7,7 +7,7 @@ # - BB_SSH_CPP_PATH: Path to barretenberg/cpp in a cloned repository on the EC2 instance set -eu -BENCHMARK=${1:-chonk_bench} +BENCHMARK=${1:?usage: benchmark_remote.sh [command] [preset] [build_dir]} COMMAND=${2:-./$BENCHMARK} PRESET=${3:-clang20-no-avm} BUILD_DIR=${4:-build} diff --git a/barretenberg/cpp/scripts/benchmark_wasm.sh b/barretenberg/cpp/scripts/benchmark_wasm.sh index f7bbd7edc9f2..7097b914209f 100755 --- a/barretenberg/cpp/scripts/benchmark_wasm.sh +++ b/barretenberg/cpp/scripts/benchmark_wasm.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash set -eu -BENCHMARK=${1:-chonk_bench} -COMMAND=${2:-./bin/$BENCHMARK --benchmark_filter=ChonkBench/Full/6} +BENCHMARK=${1:?usage: benchmark_wasm.sh [command]} +COMMAND=${2:-./bin/$BENCHMARK} HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} # Move above script dir. 
diff --git a/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh b/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh index f7af1e2f438d..e1552f8c18bb 100755 --- a/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh +++ b/barretenberg/cpp/scripts/compare_branch_vs_baseline.sh @@ -10,7 +10,7 @@ # it is up to date with local master, and run the script. # Specify the benchmark suite and the "baseline" branch against which to compare -BENCHMARK=${1:-chonk_bench} +BENCHMARK=${1:?usage: compare_branch_vs_baseline.sh [filter] [preset] [build_dir]} FILTER=${2:-""} PRESET=${3:-clang20} BUILD_DIR=${4:-build} diff --git a/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh b/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh index 748bb978fef4..c29f29defef4 100755 --- a/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh +++ b/barretenberg/cpp/scripts/compare_branch_vs_baseline_remote.sh @@ -10,7 +10,7 @@ # it is up to date with local master, and run the script. 
# Specify the benchmark suite and the "baseline" branch against which to compare -BENCHMARK=${1:-chonk_bench} +BENCHMARK=${1:?usage: compare_branch_vs_baseline_remote.sh [filter] [preset] [build_dir]} FILTER=${2:-"*."} PRESET=${3:-clang20} BUILD_DIR=${4:-build} diff --git a/barretenberg/cpp/scripts/compare_chonk_bench.sh b/barretenberg/cpp/scripts/compare_chonk_bench.sh deleted file mode 100755 index 2d2e0a66d75e..000000000000 --- a/barretenberg/cpp/scripts/compare_chonk_bench.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -set -eu - -./scripts/compare_branch_vs_baseline_remote.sh chonk_bench 'Full/6$' \ No newline at end of file diff --git a/barretenberg/cpp/scripts/compare_chonk_bench_wasm.sh b/barretenberg/cpp/scripts/compare_chonk_bench_wasm.sh deleted file mode 100755 index 77e7d43204c8..000000000000 --- a/barretenberg/cpp/scripts/compare_chonk_bench_wasm.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -set -eu - -./scripts/compare_branch_vs_baseline_remote_wasm.sh chonk_bench 'Full/6$' \ No newline at end of file diff --git a/barretenberg/cpp/scripts/profile_tracy_capture_mainframe_view_local.sh b/barretenberg/cpp/scripts/profile_tracy_capture_mainframe_view_local.sh index 45032d6bbef2..89046fed0d9d 100755 --- a/barretenberg/cpp/scripts/profile_tracy_capture_mainframe_view_local.sh +++ b/barretenberg/cpp/scripts/profile_tracy_capture_mainframe_view_local.sh @@ -11,8 +11,8 @@ set -eux USER=${1:-$USER} BOX=$USER-box -TARGET=${2:-chonk_bench} -COMMAND=${3:-./bin/$TARGET --benchmark_filter=ChonkBench/Full/6"\$"} +TARGET=${2:?usage: profile_tracy_capture_mainframe_view_local.sh [command]} +COMMAND=${3:-./bin/$TARGET} HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} # Can also set PRESET=tracy-gates env variable PRESET=${PRESET:-tracy-memory} diff --git a/barretenberg/cpp/scripts/profile_wasm_samply.sh b/barretenberg/cpp/scripts/profile_wasm_samply.sh index 3bfc7a80dd4b..218ed2b8f89c 100755 --- a/barretenberg/cpp/scripts/profile_wasm_samply.sh +++ 
b/barretenberg/cpp/scripts/profile_wasm_samply.sh @@ -2,8 +2,8 @@ #!/usr/bin/env bash set -eu -BENCHMARK=${1:-chonk_bench} -COMMAND=${2:-./bin/$BENCHMARK --benchmark_filter=ChonkBench/Full/6} +BENCHMARK=${1:?usage: profile_wasm_samply.sh [command]} +COMMAND=${2:-./bin/$BENCHMARK} HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} # Move above script dir. diff --git a/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh index bf61dd398a7f..a11ea58edcf0 100755 --- a/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh @@ -21,7 +21,7 @@ script_path="$root/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_cha # - Generate a hash for versioning: sha256sum bb-chonk-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-chonk-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-chonk-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 -pinned_short_hash="ae9c2d06" +pinned_short_hash="20c388cc" pinned_chonk_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-chonk-inputs-${pinned_short_hash}.tar.gz" function update_pinned_hash_in_script { diff --git a/barretenberg/cpp/src/barretenberg/api/api_avm.hpp b/barretenberg/cpp/src/barretenberg/api/api_avm.hpp index e21e383c203e..fa49a1b041e0 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_avm.hpp +++ b/barretenberg/cpp/src/barretenberg/api/api_avm.hpp @@ -1,8 +1,38 @@ #pragma once #include +#include + +#include "barretenberg/ecc/curves/bn254/fr.hpp" namespace bb { +/** + * @brief Result of in-memory AVM proving. + */ +struct AvmProveResult { + std::vector proof; +}; + +/** + * @brief Prove an AVM transaction from serialized inputs (msgpack bytes). 
+ * Callers that need to verify the proof should call avm_verify_from_bytes separately. + */ +AvmProveResult avm_prove_from_bytes(std::vector inputs); + +/** + * @brief Verify an AVM proof from serialized data. + * @param proof The proof as a vector of field elements. + * @param public_inputs Serialized public inputs (msgpack bytes). + * @return true if verification succeeds. + */ +bool avm_verify_from_bytes(std::vector proof, std::vector public_inputs); + +/** + * @brief Check the AVM circuit from serialized inputs (msgpack bytes). + * @return true if the circuit check passes. + */ +bool avm_check_circuit_from_bytes(std::vector inputs); + // Global flag indicating AVM support is available extern const bool avm_enabled; diff --git a/barretenberg/cpp/src/barretenberg/avm_fuzzer/harness/ecc.fuzzer.cpp b/barretenberg/cpp/src/barretenberg/avm_fuzzer/harness/ecc.fuzzer.cpp index 9987593ec2a4..19e79a8078ed 100644 --- a/barretenberg/cpp/src/barretenberg/avm_fuzzer/harness/ecc.fuzzer.cpp +++ b/barretenberg/cpp/src/barretenberg/avm_fuzzer/harness/ecc.fuzzer.cpp @@ -114,11 +114,25 @@ struct EccFuzzerInput { static EccFuzzerInput from_buffer(const uint8_t* buffer) { + // Note: we cannot use AffinePoint::serialize_from_buffer() because this now throws if the point is not on the + // curve. 
We want to test such points so have to deserialize manually: + auto read_point = [](const uint8_t* src) -> AffinePoint { + bool is_point_at_infinity = + std::all_of(src, src + (sizeof(Fq) * 2), [](uint8_t val) { return val == 255; }); + if (is_point_at_infinity) { + return AffinePoint::infinity(); + } + AffinePoint result; + read(src, result.x); + read(src, result.y); + return result; + }; + EccFuzzerInput input; size_t offset = 0; - input.p = AffinePoint::serialize_from_buffer(buffer + offset); + input.p = read_point(buffer + offset); offset += sizeof(AffinePoint); - input.q = AffinePoint::serialize_from_buffer(buffer + offset); + input.q = read_point(buffer + offset); offset += sizeof(AffinePoint); input.scalar = Fq::serialize_from_buffer(buffer + offset); offset += sizeof(Fq); diff --git a/barretenberg/cpp/src/barretenberg/avm_fuzzer/run_fuzzer.sh b/barretenberg/cpp/src/barretenberg/avm_fuzzer/run_fuzzer.sh index 0c0f6cd4dc11..38b56e0f172b 100755 --- a/barretenberg/cpp/src/barretenberg/avm_fuzzer/run_fuzzer.sh +++ b/barretenberg/cpp/src/barretenberg/avm_fuzzer/run_fuzzer.sh @@ -145,9 +145,9 @@ fi # Set build directory based on command if [ "$COMMAND" = "coverage" ]; then - BUILD_DIR="$CPP_DIR/build-coverage" - BUILD_PRESET="clang20-coverage" - BUILD_CMAKE_FLAGS="-DCOVERAGE_AVM=ON -DFUZZING=ON -DFUZZING_AVM=ON" + BUILD_DIR="$CPP_DIR/build-fuzzing-avm-cov" + BUILD_PRESET="fuzzing-avm" + BUILD_CMAKE_FLAGS="-DCOVERAGE=ON -DCOVERAGE_AVM=ON" else BUILD_DIR="$CPP_DIR/build-fuzzing-avm" BUILD_PRESET="fuzzing-avm" @@ -170,9 +170,9 @@ build_fuzzer() { if [ ! -f "$BUILD_DIR/CMakeCache.txt" ]; then echo "Configuring cmake..." 
if [ -n "$BUILD_CMAKE_FLAGS" ]; then - cmake --preset "$BUILD_PRESET" $BUILD_CMAKE_FLAGS + cmake --preset "$BUILD_PRESET" -B "$BUILD_DIR" $BUILD_CMAKE_FLAGS else - cmake --preset "$BUILD_PRESET" + cmake --preset "$BUILD_PRESET" -B "$BUILD_DIR" fi fi @@ -314,13 +314,14 @@ if [ "$COMMAND" = "coverage" ] && [ -f "$LLVM_PROFILE_FILE" ]; then echo "==========================================" COVERAGE_DATA="$COVERAGE_OUTPUT_DIR/bb_avm.profdata" - # Merge all profraw and profdata files in coverage directory - COVERAGE_FILES=("$COVERAGE_OUTPUT_DIR"/*.profraw "$COVERAGE_OUTPUT_DIR"/*.profdata) + # Merge all profraw and profdata files in coverage directory (skip globs that match nothing) + COVERAGE_FILES=() + for f in "$COVERAGE_OUTPUT_DIR"/*.profraw "$COVERAGE_OUTPUT_DIR"/*.profdata; do + [ -f "$f" ] && COVERAGE_FILES+=("$f") + done echo "Merging coverage files:" for f in "${COVERAGE_FILES[@]}"; do - if [ -f "$f" ]; then - echo " $f" - fi + echo " $f" done echo "" llvm-profdata-20 merge -sparse "${COVERAGE_FILES[@]}" -o "$COVERAGE_DATA" diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_avm.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_avm.cpp new file mode 100644 index 000000000000..945b8018930e --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_avm.cpp @@ -0,0 +1,52 @@ +#include "barretenberg/bbapi/bbapi_avm.hpp" +#include "barretenberg/api/api_avm.hpp" +#include "barretenberg/vm2/tooling/stats.hpp" + +namespace bb::bbapi { + +namespace { + +// Reset the AVM per-stage timings registry so the snapshot we return reflects only this call. +void reset_avm_stats() +{ + ::bb::avm2::Stats::get().reset(); +} + +// Take a snapshot of the AVM per-stage timings registry and convert it to the wire-format struct. 
+std::vector snapshot_avm_stats() +{ + auto snapshot = ::bb::avm2::Stats::get().snapshot(); + std::vector result; + result.reserve(snapshot.size()); + for (auto& [name, value] : snapshot) { + result.push_back(AvmStat{ .name = std::move(name), .value_ms = value }); + } + return result; +} + +} // namespace + +AvmProve::Response AvmProve::execute(const BBApiRequest& /*request*/) && +{ + reset_avm_stats(); + auto result = avm_prove_from_bytes(std::move(inputs)); + return Response{ + .proof = std::move(result.proof), + .stats = snapshot_avm_stats(), + }; +} + +AvmVerify::Response AvmVerify::execute(const BBApiRequest& /*request*/) && +{ + bool verified = avm_verify_from_bytes(std::move(proof), std::move(public_inputs)); + return Response{ .verified = verified }; +} + +AvmCheckCircuit::Response AvmCheckCircuit::execute(const BBApiRequest& /*request*/) && +{ + reset_avm_stats(); + bool passed = avm_check_circuit_from_bytes(std::move(inputs)); + return Response{ .passed = passed, .stats = snapshot_avm_stats() }; +} + +} // namespace bb::bbapi diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_avm.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_avm.hpp new file mode 100644 index 000000000000..2457b5e1fa8e --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_avm.hpp @@ -0,0 +1,102 @@ +#pragma once +/** + * @file bbapi_avm.hpp + * @brief AVM-specific command definitions for the Barretenberg RPC API. + * + * This file contains command structures for AVM operations including proving, + * verification, and circuit checking. When built with bb (non-AVM), these + * commands return an error response. When built with bb-avm, they work normally. 
+ */ +#include "barretenberg/bbapi/bbapi_shared.hpp" +#include "barretenberg/common/named_union.hpp" +#include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/serialize/msgpack.hpp" +#include +#include +#include + +namespace bb::bbapi { + +/** + * @struct AvmStat + * @brief A single AVM per-stage timing entry. `value_ms` is wall-clock milliseconds captured by + * bb::avm2::Stats during a prove or check-circuit call. + */ +struct AvmStat { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmStat"; + + std::string name; + uint64_t value_ms; + SERIALIZATION_FIELDS(name, value_ms); + bool operator==(const AvmStat&) const = default; +}; + +/** + * @struct AvmProve + * @brief Prove an AVM transaction from serialized inputs. + * The inputs are opaque msgpack bytes of AvmProvingInputs. Callers should call AvmVerify + * separately if they need to verify the resulting proof. + */ +struct AvmProve { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmProve"; + + struct Response { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmProveResponse"; + + std::vector proof; + std::vector stats; + SERIALIZATION_FIELDS(proof, stats); + bool operator==(const Response&) const = default; + }; + + std::vector inputs; + SERIALIZATION_FIELDS(inputs); + Response execute(const BBApiRequest& request = {}) &&; + bool operator==(const AvmProve&) const = default; +}; + +/** + * @struct AvmVerify + * @brief Verify an AVM proof against serialized public inputs. 
+ */ +struct AvmVerify { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmVerify"; + + struct Response { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmVerifyResponse"; + + bool verified; + SERIALIZATION_FIELDS(verified); + bool operator==(const Response&) const = default; + }; + + std::vector proof; + std::vector public_inputs; + SERIALIZATION_FIELDS(proof, public_inputs); + Response execute(const BBApiRequest& request = {}) &&; + bool operator==(const AvmVerify&) const = default; +}; + +/** + * @struct AvmCheckCircuit + * @brief Check the AVM circuit from serialized inputs. + */ +struct AvmCheckCircuit { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmCheckCircuit"; + + struct Response { + static constexpr const char MSGPACK_SCHEMA_NAME[] = "AvmCheckCircuitResponse"; + + bool passed; + std::vector stats; + SERIALIZATION_FIELDS(passed, stats); + bool operator==(const Response&) const = default; + }; + + std::vector inputs; + SERIALIZATION_FIELDS(inputs); + Response execute(const BBApiRequest& request = {}) &&; + bool operator==(const AvmCheckCircuit&) const = default; +}; + +} // namespace bb::bbapi diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ecc.cpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ecc.cpp index 533f541db738..7bcc3fc2edfc 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ecc.cpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_ecc.cpp @@ -99,6 +99,12 @@ Bn254G2Mul::Response Bn254G2Mul::execute(BBApiRequest& request) && if (!point.on_curve()) { BBAPI_ERROR(request, "Input point must be on the curve"); } + // BN254 G2 has cofactor h2 ≈ 2^254. An on-curve point may lie in a cofactor subgroup of order + // dividing h2 rather than the prime-order subgroup; we do not want to allow such points + // as inputs to bbapi. 
+ if (!point.is_in_prime_subgroup()) { + BBAPI_ERROR(request, "Input point must lie in the prime-order subgroup"); + } auto result = point * scalar; if (!result.on_curve()) { BBAPI_ERROR(request, "Output point must be on the curve"); diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp index e9031a9363ec..ebfce39ddb7b 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_execute.hpp @@ -1,5 +1,6 @@ #pragma once +#include "barretenberg/bbapi/bbapi_avm.hpp" #include "barretenberg/bbapi/bbapi_chonk.hpp" #include "barretenberg/bbapi/bbapi_crypto.hpp" #include "barretenberg/bbapi/bbapi_ecc.hpp" @@ -13,7 +14,10 @@ namespace bb::bbapi { -using Command = NamedUnion; using CommandResponse = NamedUnion(g2_point.data()); + if (!g2_point_elem.is_in_prime_subgroup()) { + throw_or_abort("SrsInitSrs: g2_point is not in the BN254 G2 prime-order subgroup"); + } // Initialize BN254 SRS bb::srs::init_bn254_mem_crs_factory(g1_points, g2_point_elem); diff --git a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt index 12d08f15e49a..32eb72345a8c 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt @@ -3,7 +3,6 @@ add_subdirectory(decrypt_bench) add_subdirectory(goblin_bench) add_subdirectory(ipa_bench) add_subdirectory(ipc_bench) -add_subdirectory(chonk_bench) add_subdirectory(pippenger_bench) add_subdirectory(relations_bench) add_subdirectory(poseidon2_bench) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/chonk_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/chonk_bench/CMakeLists.txt deleted file mode 100644 index 9f1df1bd7f32..000000000000 --- a/barretenberg/cpp/src/barretenberg/benchmark/chonk_bench/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ 
-barretenberg_module(chonk_bench vm2_stub chonk stdlib_honk_verifier stdlib_sha256 stdlib_primitives) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/chonk_bench/chonk.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/chonk_bench/chonk.bench.cpp deleted file mode 100644 index 025fbeb4ab8b..000000000000 --- a/barretenberg/cpp/src/barretenberg/benchmark/chonk_bench/chonk.bench.cpp +++ /dev/null @@ -1,154 +0,0 @@ -/** - * @warning These benchmarks use functions that are tested elsewhere to guard against regressions in the benchmark. - * Please do not anything that is untested. - */ - -#include -#include - -#include "barretenberg/api/file_io.hpp" -#include "barretenberg/chonk/chonk_verifier.hpp" -#include "barretenberg/chonk/proof_compression.hpp" -#include "barretenberg/chonk/test_bench_shared.hpp" -#include "barretenberg/common/google_bb_bench.hpp" -#include "barretenberg/common/thread.hpp" -#include "barretenberg/honk/proof_length.hpp" -#include "barretenberg/srs/global_crs.hpp" - -using namespace benchmark; -using namespace bb; - -namespace { - -/** - * @brief Benchmark suite for the aztec Chonk scheme - */ -class ChonkBench : public benchmark::Fixture { - public: - // Number of function circuits to accumulate (based on Zac's target numbers) - static constexpr size_t NUM_ITERATIONS_MEDIUM_COMPLEXITY = 5; - - void SetUp([[maybe_unused]] const ::benchmark::State& state) override - { - bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - } -}; - -/** - * @brief Benchmark only the verification work for the IVC protocol - */ -BENCHMARK_DEFINE_F(ChonkBench, VerificationOnly)(benchmark::State& state) -{ - size_t NUM_APP_CIRCUITS = 1; - auto precomputed_vks = precompute_vks(NUM_APP_CIRCUITS); - auto [proof, vk_and_hash] = accumulate_and_prove_with_precomputed_vks(NUM_APP_CIRCUITS, precomputed_vks); - - for (auto _ : state) { - GOOGLE_BB_BENCH_REPORTER(state); - ChonkNativeVerifier verifier(vk_and_hash); - 
benchmark::DoNotOptimize(verifier.verify(proof)); - } -} - -/** - * @brief Benchmark the prover work for the full IVC protocol - */ -BENCHMARK_DEFINE_F(ChonkBench, Full)(benchmark::State& state) -{ - size_t NUM_APP_CIRCUITS = static_cast(state.range(0)); - auto precomputed_vks = precompute_vks(NUM_APP_CIRCUITS); - - for (auto _ : state) { - GOOGLE_BB_BENCH_REPORTER(state); - accumulate_and_prove_with_precomputed_vks(NUM_APP_CIRCUITS, precomputed_vks); - } -} - -/** - * @brief Benchmark proof compression (prover-side cost) - */ -BENCHMARK_DEFINE_F(ChonkBench, ProofCompress)(benchmark::State& state) -{ - size_t NUM_APP_CIRCUITS = 1; - auto precomputed_vks = precompute_vks(NUM_APP_CIRCUITS); - auto [proof, vk_and_hash] = accumulate_and_prove_with_precomputed_vks(NUM_APP_CIRCUITS, precomputed_vks); - - for (auto _ : state) { - benchmark::DoNotOptimize(ProofCompressor::compress_chonk_proof(proof)); - } -} - -/** - * @brief Benchmark proof decompression (verifier-side cost) - */ -BENCHMARK_DEFINE_F(ChonkBench, ProofDecompress)(benchmark::State& state) -{ - size_t NUM_APP_CIRCUITS = 1; - auto precomputed_vks = precompute_vks(NUM_APP_CIRCUITS); - auto [proof, vk_and_hash] = accumulate_and_prove_with_precomputed_vks(NUM_APP_CIRCUITS, precomputed_vks); - - auto compressed = ProofCompressor::compress_chonk_proof(proof); - size_t mega_num_pub_inputs = - proof.hiding_oink_proof.size() - ProofLength::Oink::LENGTH_WITHOUT_PUB_INPUTS; - - for (auto _ : state) { - benchmark::DoNotOptimize(ProofCompressor::decompress_chonk_proof(compressed, mega_num_pub_inputs)); - } -} - -/** - * @brief Benchmark N individual Chonk verifications (sequential). Baseline for batch comparison. 
- */ -BENCHMARK_DEFINE_F(ChonkBench, VerifyIndividual)(benchmark::State& state) -{ - const size_t num_proofs = static_cast(state.range(0)); - auto precomputed_vks = precompute_vks(1); - - // Generate a single proof and reuse it N times - auto [proof, vk_and_hash] = accumulate_and_prove_with_precomputed_vks(1, precomputed_vks); - - for (auto _ : state) { - for (size_t i = 0; i < num_proofs; i++) { - ChonkNativeVerifier verifier(vk_and_hash); - benchmark::DoNotOptimize(verifier.verify(proof)); - } - } -} - -#define ARGS Arg(ChonkBench::NUM_ITERATIONS_MEDIUM_COMPLEXITY)->Arg(2) - -BENCHMARK_REGISTER_F(ChonkBench, Full)->Unit(benchmark::kMillisecond)->ARGS; -BENCHMARK_REGISTER_F(ChonkBench, VerificationOnly)->Unit(benchmark::kMillisecond); -BENCHMARK_REGISTER_F(ChonkBench, ProofCompress)->Unit(benchmark::kMillisecond); -BENCHMARK_REGISTER_F(ChonkBench, ProofDecompress)->Unit(benchmark::kMillisecond); -BENCHMARK_REGISTER_F(ChonkBench, VerifyIndividual)->Unit(benchmark::kMillisecond)->Arg(1)->Arg(2)->Arg(4)->Arg(8); - -/** - * @brief Benchmark BN254 G1 point decompression (used by SRS compressed download) - */ -void bn254_point_decompression(benchmark::State& state) -{ - constexpr size_t NUM_POINTS = 1 << 17; // 131072 — typical circuit size - - // Read compressed points from disk (32 bytes each, big-endian uint256_t) - auto compressed_buf = read_file(bb::srs::bb_crs_path() / "bn254_g1_compressed.dat", NUM_POINTS * sizeof(uint256_t)); - std::vector compressed(NUM_POINTS); - for (size_t i = 0; i < NUM_POINTS; ++i) { - compressed[i] = from_buffer(compressed_buf, i * sizeof(uint256_t)); - } - - for (auto _ : state) { - std::vector points(NUM_POINTS); - parallel_for([&](ThreadChunk chunk) { - for (auto i : chunk.range(NUM_POINTS)) { - points[i] = g1::affine_element::from_compressed(compressed[i]); - } - }); - benchmark::DoNotOptimize(points); - } -} -BENCHMARK(bn254_point_decompression)->Unit(benchmark::kMillisecond); - -} // namespace - -BENCHMARK_MAIN(); diff --git 
a/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/bn254_decompress.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/bn254_decompress.bench.cpp new file mode 100644 index 000000000000..f9c74bc60ea5 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/bn254_decompress.bench.cpp @@ -0,0 +1,41 @@ +#include + +#include "barretenberg/api/file_io.hpp" +#include "barretenberg/common/thread.hpp" +#include "barretenberg/ecc/curves/bn254/g1.hpp" +#include "barretenberg/srs/global_crs.hpp" + +using namespace benchmark; +using namespace bb; + +namespace { + +/** + * @brief Benchmark BN254 G1 point decompression (used by SRS compressed download) + */ +void bn254_point_decompression(benchmark::State& state) +{ + constexpr size_t NUM_POINTS = 1 << 17; // 131072 — typical circuit size + + // Read compressed points from disk (32 bytes each, big-endian uint256_t) + auto compressed_buf = read_file(bb::srs::bb_crs_path() / "bn254_g1_compressed.dat", NUM_POINTS * sizeof(uint256_t)); + std::vector compressed(NUM_POINTS); + for (size_t i = 0; i < NUM_POINTS; ++i) { + compressed[i] = from_buffer(compressed_buf, i * sizeof(uint256_t)); + } + + for (auto _ : state) { + std::vector points(NUM_POINTS); + parallel_for([&](ThreadChunk chunk) { + for (auto i : chunk.range(NUM_POINTS)) { + points[i] = g1::affine_element::from_compressed(compressed[i]); + } + }); + benchmark::DoNotOptimize(points); + } +} +BENCHMARK(bn254_point_decompression)->Unit(benchmark::kMillisecond); + +} // namespace + +BENCHMARK_MAIN(); diff --git a/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/thread_scaling.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/thread_scaling.bench.cpp new file mode 100644 index 000000000000..c7e4c15cb324 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/benchmark/pippenger_bench/thread_scaling.bench.cpp @@ -0,0 +1,139 @@ +/** + * @brief Pippenger thread-scaling benchmark for heterogeneous 
scalar distributions. + * + * MSM::batch_multi_scalar_mul partitions work across threads by cumulative per-scalar + * weight (see get_work_units in scalar_multiplication.cpp), where each scalar's weight + * is ceil(bit_length / bits_per_slice) -- i.e. the number of nonzero c-bit slices it + * contributes to bucket accumulation. Small scalars weigh less because their high-order + * slices are zero and get filtered by the zero-bucket pre-sort. This benchmark exercises + * pathological and typical bit-size distributions to verify thread scaling stays uniform. + * + * Distributions contrasted here: + * - Clustered: first half small (32-bit), second half full random -- stresses the + * weighted split; count-based partitioning would give half the threads + * ~all of the heavy work. + * - UniformMixed: small/full randomly interleaved -- isolates heterogeneity alone. + * - AllFull: all full random (z_perm-like baseline). + * + * Expected: all three scale comparably under the weighted partition. 
+ */ +#include "barretenberg/common/thread.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp" +#include "barretenberg/numeric/random/engine.hpp" +#include "barretenberg/srs/global_crs.hpp" + +#include + +#include "barretenberg/common/google_bb_bench.hpp" + +using namespace benchmark; + +using Curve = bb::curve::BN254; +using Fr = Curve::ScalarField; +using G1 = Curve::AffineElement; + +namespace { + +constexpr size_t MSM_SIZE = 1 << 20; + +enum class Distribution { Clustered, UniformMixed, AllFull }; + +class ThreadScalingBench : public benchmark::Fixture { + public: + std::shared_ptr> srs; + bb::numeric::RNG& engine = bb::numeric::get_debug_randomness(); + + void SetUp([[maybe_unused]] const ::benchmark::State& state) override + { + if (srs) { + return; + } + bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); + srs = bb::srs::get_crs_factory()->get_crs(MSM_SIZE); + } + + // 32-bit "small" value -- mimics witness indices, booleans, limbs. + // On BN254 (254-bit field) with ~14 bits per Pippenger slice, only the lowest + // ~2-3 rounds produce nonzero slices for these scalars; the rest get filtered. + Fr small_scalar() { return Fr(static_cast(engine.get_random_uint32())); } + Fr full_scalar() { return Fr::random_element(&engine); } + + std::vector build_scalars(Distribution dist) + { + std::vector scalars(MSM_SIZE); + switch (dist) { + case Distribution::Clustered: + for (size_t i = 0; i < MSM_SIZE / 2; ++i) { + scalars[i] = small_scalar(); + } + for (size_t i = MSM_SIZE / 2; i < MSM_SIZE; ++i) { + scalars[i] = full_scalar(); + } + break; + case Distribution::UniformMixed: + for (size_t i = 0; i < MSM_SIZE; ++i) { + scalars[i] = (engine.get_random_uint32() & 1U) ? 
small_scalar() : full_scalar(); + } + break; + case Distribution::AllFull: + for (size_t i = 0; i < MSM_SIZE; ++i) { + scalars[i] = full_scalar(); + } + break; + } + return scalars; + } +}; + +static void run_msm(ThreadScalingBench& fx, benchmark::State& state, Distribution dist) +{ + const size_t num_threads = static_cast(state.range(0)); + + // Rebuild per-invocation of the bench is fine: scalars get mutated (Montgomery + // round-trip) inside batch_multi_scalar_mul, and we want consistent input across iterations. + std::vector scalars = fx.build_scalars(dist); + + std::vector> scalar_spans; + std::vector> point_spans; + scalar_spans.emplace_back(scalars); + point_spans.emplace_back(fx.srs->get_monomial_points().subspan(0, MSM_SIZE)); + + const size_t original_concurrency = bb::get_num_cpus(); + bb::set_parallel_for_concurrency(num_threads); + + for (auto _ : state) { + GOOGLE_BB_BENCH_REPORTER(state); + bb::scalar_multiplication::MSM::batch_multi_scalar_mul(point_spans, scalar_spans, false); + } + + bb::set_parallel_for_concurrency(original_concurrency); +} + +BENCHMARK_DEFINE_F(ThreadScalingBench, Clustered)(benchmark::State& state) +{ + run_msm(*this, state, Distribution::Clustered); +} +BENCHMARK_DEFINE_F(ThreadScalingBench, UniformMixed)(benchmark::State& state) +{ + run_msm(*this, state, Distribution::UniformMixed); +} +BENCHMARK_DEFINE_F(ThreadScalingBench, AllFull)(benchmark::State& state) +{ + run_msm(*this, state, Distribution::AllFull); +} + +static void ThreadSweep(benchmark::internal::Benchmark* b) +{ + for (int64_t t : { 1, 2, 4, 8 }) { + b->Arg(t); + } +} + +BENCHMARK_REGISTER_F(ThreadScalingBench, Clustered)->Unit(benchmark::kMillisecond)->Apply(ThreadSweep); +BENCHMARK_REGISTER_F(ThreadScalingBench, UniformMixed)->Unit(benchmark::kMillisecond)->Apply(ThreadSweep); +BENCHMARK_REGISTER_F(ThreadScalingBench, AllFull)->Unit(benchmark::kMillisecond)->Apply(ThreadSweep); + +} // namespace + +BENCHMARK_MAIN(); diff --git 
a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp index 1a2acbf93f2e..58dce9fa322a 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp @@ -61,7 +61,9 @@ BENCHMARK(execute_relation_for_univariates>); BENCHMARK(execute_relation_for_univariates>); BENCHMARK(execute_relation_for_univariates>); -BENCHMARK(execute_relation_for_univariates>); +BENCHMARK(execute_relation_for_univariates>); +BENCHMARK(execute_relation_for_univariates>); +BENCHMARK(execute_relation_for_univariates>); // Ultra relations (verifier work) BENCHMARK(execute_relation_for_values>); @@ -76,7 +78,9 @@ BENCHMARK(execute_relation_for_values> BENCHMARK(execute_relation_for_values>); BENCHMARK(execute_relation_for_values>); BENCHMARK(execute_relation_for_values>); -BENCHMARK(execute_relation_for_values>); +BENCHMARK(execute_relation_for_values>); +BENCHMARK(execute_relation_for_values>); +BENCHMARK(execute_relation_for_values>); // Translator VM BENCHMARK(execute_relation_for_values>); diff --git a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mega_honk.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mega_honk.bench.cpp index 6c79987705f3..81246ee0f1b8 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mega_honk.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/mega_honk.bench.cpp @@ -1,6 +1,7 @@ #include #include "barretenberg/benchmark/ultra_bench/mock_circuits.hpp" +#include "barretenberg/common/bb_bench.hpp" #include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" using namespace benchmark; @@ -41,7 +42,37 @@ static void get_row_power_of_2(State& state) noexcept } } +/** + * @brief Benchmark: Mega Honk proof of a single poseidon2 hash over a vector of state.range(0) elements. 
+ */ +static void construct_proof_megahonk_poseidon2_hash(State& state) noexcept +{ + const auto num_inputs = static_cast(state.range(0)); + + MegaCircuitBuilder builder; + bb::generate_poseidon2_hash_test_circuit(builder, num_inputs); + auto instance = std::make_shared>(builder); + info("construct_proof_megahonk_poseidon2_hash: num_inputs=", + num_inputs, + ", actual_gates=", + builder.num_gates(), + ", dyadic_size=", + instance->dyadic_size()); + + bb::mock_circuits::construct_proof_with_specified_num_iterations( + state, &bb::generate_poseidon2_hash_test_circuit, num_inputs); +} + // Define benchmarks +// Sweep input sizes so dyadic domain ranges 2^15..2^19 (Mega: ~12 gates/input). +BENCHMARK(construct_proof_megahonk_poseidon2_hash) + ->Arg(1500) + ->Arg(3000) + ->Arg(6000) + ->Arg(12000) + ->Arg(24000) + ->Arg(50000) + ->Unit(kMillisecond); // This exists due to an issue where get_row was blowing up in time BENCHMARK_CAPTURE(construct_proof_megahonk, sha256, &generate_sha256_test_circuit) @@ -61,4 +92,18 @@ BENCHMARK(construct_proof_megahonk_power_of_2) ->DenseRange(15, 20) ->Unit(kMillisecond); -BENCHMARK_MAIN(); +int main(int argc, char** argv) +{ + bb::detail::use_bb_bench = true; + + ::benchmark::Initialize(&argc, argv); + if (::benchmark::ReportUnrecognizedArguments(argc, argv)) + return 1; + ::benchmark::RunSpecifiedBenchmarks(); + ::benchmark::Shutdown(); + + std::cout << "\n=== Detailed BB_BENCH Profiling Stats ===\n"; + bb::detail::GLOBAL_BENCH_STATS.print_aggregate_counts_hierarchical(std::cout); + + return 0; +} diff --git a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk.bench.cpp index 29eadf766572..b2bb734571e8 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk.bench.cpp @@ -88,7 +88,38 @@ static void 
construct_proof_ultrahonk_1M_gates_dyadic_2_21(State& state) noexcep state, &bb::mock_circuits::generate_basic_arithmetic_circuit_with_target_gates, num_gates); } +/** + * @brief Benchmark: Ultra Honk proof of a single poseidon2 hash over a vector of state.range(0) elements. + */ +static void construct_proof_ultrahonk_poseidon2_hash(State& state) noexcept +{ + const auto num_inputs = static_cast(state.range(0)); + + UltraCircuitBuilder builder; + bb::generate_poseidon2_hash_test_circuit(builder, num_inputs); + auto instance = std::make_shared>(builder); + info("construct_proof_ultrahonk_poseidon2_hash: num_inputs=", + num_inputs, + ", actual_gates=", + builder.num_gates(), + ", dyadic_size=", + instance->dyadic_size()); + + bb::mock_circuits::construct_proof_with_specified_num_iterations( + state, &bb::generate_poseidon2_hash_test_circuit, num_inputs); +} + // Define benchmarks +// Sweep input sizes so dyadic domain ranges 2^15..2^19 (Ultra: ~25 gates/input). +BENCHMARK(construct_proof_ultrahonk_poseidon2_hash) + ->Arg(750) + ->Arg(1500) + ->Arg(3000) + ->Arg(6000) + ->Arg(12000) + ->Arg(50000) + ->Unit(kMillisecond); + BENCHMARK_CAPTURE(construct_proof_ultrahonk, sha256, &generate_sha256_test_circuit) ->Unit(kMillisecond); BENCHMARK_CAPTURE(construct_proof_ultrahonk, diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.hpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.hpp index 256e545c8ccd..ec6d51e28e69 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.hpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.hpp @@ -322,6 +322,81 @@ inline const GatePattern POSEIDON2_EXTERNAL = { .name = "poseidon2_external", { Wire::W_4_SHIFT, [](const Selectors&) { return true; } }, } }; +// ============================================================================ +// Poseidon2 Initial External Pattern (from poseidon2_initial_external_relation.hpp) 
+// +// All 4 current wires and all 4 shifted wires are constrained +// +// gate_selector = q_poseidon2_external_initial +// ============================================================================ + +inline const GatePattern POSEIDON2_INITIAL_EXTERNAL = { .name = "poseidon2_initial_external", + .wires = { + { Wire::W_L, [](const Selectors&) { return true; } }, + { Wire::W_R, [](const Selectors&) { return true; } }, + { Wire::W_O, [](const Selectors&) { return true; } }, + { Wire::W_4, [](const Selectors&) { return true; } }, + { Wire::W_L_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_R_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_O_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_4_SHIFT, [](const Selectors&) { return true; } }, + } }; + +// ============================================================================ +// Poseidon2 Quad-Internal Pattern (from poseidon2_quad_internal_relation.hpp) +// +// gate_selector = q_poseidon2_quad_internal +// ============================================================================ + +inline const GatePattern POSEIDON2_QUAD_INTERNAL = { .name = "poseidon2_quad_internal", + .wires = { + { Wire::W_L, [](const Selectors&) { return true; } }, + { Wire::W_R, [](const Selectors&) { return true; } }, + { Wire::W_O, [](const Selectors&) { return true; } }, + { Wire::W_4, [](const Selectors&) { return true; } }, + { Wire::W_L_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_R_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_O_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_4_SHIFT, [](const Selectors&) { return true; } }, + } }; + +// ============================================================================ +// Poseidon2 Quad-Internal Terminal Pattern +// (from poseidon2_quad_internal_terminal_relation.hpp) +// +// gate_selector = q_poseidon2_quad_internal_terminal +// ============================================================================ + 
+inline const GatePattern + POSEIDON2_QUAD_INTERNAL_TERMINAL = { .name = "poseidon2_quad_internal_terminal", + .wires = { + { Wire::W_L, [](const Selectors&) { return true; } }, + { Wire::W_R, [](const Selectors&) { return true; } }, + { Wire::W_O, [](const Selectors&) { return true; } }, + { Wire::W_4, [](const Selectors&) { return true; } }, + { Wire::W_L_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_R_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_O_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_4_SHIFT, [](const Selectors&) { return true; } }, + } }; + +// ============================================================================ +// Poseidon2 Transition Entry Pattern (from poseidon2_transition_entry_relation.hpp) +// +// gate_selector = q_poseidon2_transition_entry +// ============================================================================ + +inline const GatePattern POSEIDON2_TRANSITION_ENTRY = { .name = "poseidon2_transition_entry", + .wires = { + { Wire::W_L, [](const Selectors&) { return true; } }, + { Wire::W_R, [](const Selectors&) { return true; } }, + { Wire::W_O, [](const Selectors&) { return true; } }, + { Wire::W_4, [](const Selectors&) { return true; } }, + { Wire::W_R_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_O_SHIFT, [](const Selectors&) { return true; } }, + { Wire::W_4_SHIFT, [](const Selectors&) { return true; } }, + } }; + // ============================================================================ // Databus Pattern (from databus_lookup_relation.hpp) // diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.test.cpp index 7337f59aa433..5b25ee622591 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/gate_patterns.test.cpp @@ -11,6 +11,7 @@ #include 
"gate_patterns.hpp" #include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/flavor/ultra_flavor.hpp" #include "barretenberg/relations/databus_lookup_relation.hpp" #include "barretenberg/relations/delta_range_constraint_relation.hpp" #include "barretenberg/relations/elliptic_relation.hpp" @@ -18,6 +19,7 @@ #include "barretenberg/relations/memory_relation.hpp" #include "barretenberg/relations/non_native_field_relation.hpp" #include "barretenberg/relations/poseidon2_external_relation.hpp" +#include "barretenberg/relations/poseidon2_initial_external_relation.hpp" #include "barretenberg/relations/poseidon2_internal_relation.hpp" #include "barretenberg/relations/relation_parameters.hpp" #include "barretenberg/relations/ultra_arithmetic_relation.hpp" @@ -30,16 +32,16 @@ using namespace bb::gate_patterns; using FF = fr; using Entities = MegaFlavor::AllValues; -Entities get_random_entities() +template E get_random_entities() { - Entities entities; + E entities; for (auto& field : entities.get_all()) { field = FF::random_element(); } return entities; } -FF& get_wire(Entities& entities, Wire wire) +template FF& get_wire(E& entities, Wire wire) { switch (wire) { case Wire::W_L: @@ -62,7 +64,7 @@ FF& get_wire(Entities& entities, Wire wire) __builtin_unreachable(); } -Selectors make_selectors(const Entities& entities, int64_t gate_selector_value) +template Selectors make_selectors(const E& entities, int64_t gate_selector_value) { return Selectors{ .gate_selector = gate_selector_value, @@ -94,8 +96,8 @@ std::set get_pattern_wires(const GatePattern& pattern, const Selectors& se * * This is the ground truth: perturb each wire and see if the output changes. 
*/ -template -std::set get_actually_constrained_wires(const Entities& entities, const auto& parameters) +template +std::set get_actually_constrained_wires(const E& entities, const auto& parameters) { std::set constrained; @@ -112,7 +114,7 @@ std::set get_actually_constrained_wires(const Entities& entities, const au Wire::W_R_SHIFT, Wire::W_O_SHIFT, Wire::W_4_SHIFT }) { - Entities perturbed = entities; + E perturbed = entities; get_wire(perturbed, wire) += FF::random_element(); typename Relation::SumcheckArrayOfValuesOverSubrelations perturbed_result{}; @@ -131,9 +133,10 @@ std::set get_actually_constrained_wires(const Entities& entities, const au * * @param configure_selectors Lambda that configures entity selectors and returns the gate selector field value */ -template void verify_pattern(const GatePattern& pattern, auto configure_selectors) +template +void verify_pattern(const GatePattern& pattern, auto configure_selectors) { - Entities entities = get_random_entities(); + E entities = get_random_entities(); FF gate_selector = configure_selectors(entities); int64_t gate_selector_value = static_cast(uint64_t(gate_selector)); @@ -141,7 +144,7 @@ template void verify_pattern(const GatePattern& pattern, aut auto pattern_claims = get_pattern_wires(pattern, selectors); auto parameters = RelationParameters::get_random(); - auto actually_constrained = get_actually_constrained_wires(entities, parameters); + auto actually_constrained = get_actually_constrained_wires(entities, parameters); EXPECT_EQ(actually_constrained, pattern_claims); } @@ -288,8 +291,11 @@ TEST(PatternTest, MemoryRamConsistency) TEST(PatternTest, Poseidon2Internal) { - verify_pattern>(POSEIDON2_INTERNAL, - [](Entities& e) { return e.q_poseidon2_internal = FF(1); }); + // q_poseidon2_internal lives on UltraFlavor only; MegaFlavor covers all internal rounds via the + // compressed quad-internal block. 
+ using UltraEntities = UltraFlavor::AllValues; + verify_pattern, UltraEntities>( + POSEIDON2_INTERNAL, [](UltraEntities& e) { return e.q_poseidon2_internal = FF(1); }); } TEST(PatternTest, Poseidon2External) @@ -298,6 +304,12 @@ TEST(PatternTest, Poseidon2External) [](Entities& e) { return e.q_poseidon2_external = FF(1); }); } +TEST(PatternTest, Poseidon2InitialExternal) +{ + verify_pattern>( + POSEIDON2_INITIAL_EXTERNAL, [](Entities& e) { return e.q_poseidon2_external_initial = FF(1); }); +} + TEST(PatternTest, LookupBasic) { verify_pattern>(LOOKUP, [](Entities& e) { @@ -363,7 +375,7 @@ TEST(PatternTest, DetectOverConstrained) } }; // q_arith=3 disables mul term, q_2=0 means w_r has no linear term, so w_r is unconstrained - Entities entities = get_random_entities(); + Entities entities = get_random_entities(); entities.q_arith = FF(3); entities.q_m = FF(1); entities.q_l = FF(1); @@ -373,7 +385,7 @@ TEST(PatternTest, DetectOverConstrained) auto pattern_claims = get_pattern_wires(OVERCONSTRAINED_PATTERN, selectors); auto correct_claims = get_pattern_wires(ARITHMETIC, selectors); auto parameters = RelationParameters::get_random(); - auto actually_constrained = get_actually_constrained_wires>(entities, parameters); + auto actually_constrained = get_actually_constrained_wires, Entities>(entities, parameters); EXPECT_TRUE(pattern_claims.contains(Wire::W_R)) << "Over-constrained pattern claims W_R"; EXPECT_FALSE(actually_constrained.contains(Wire::W_R)) << "Relation does not constrain W_R in this config"; @@ -402,7 +414,7 @@ TEST(PatternTest, DetectUnderConstrained) } }; // RAM consistency check: q_3 != 0 - Entities entities = get_random_entities(); + Entities entities = get_random_entities(); entities.q_memory = FF(1); entities.q_o = FF(1); // q_3 @@ -410,7 +422,7 @@ TEST(PatternTest, DetectUnderConstrained) auto pattern_claims = get_pattern_wires(UNDERCONSTRAINED_PATTERN, selectors); auto correct_claims = get_pattern_wires(MEMORY, selectors); auto parameters = 
RelationParameters::get_random(); - auto actually_constrained = get_actually_constrained_wires>(entities, parameters); + auto actually_constrained = get_actually_constrained_wires, Entities>(entities, parameters); EXPECT_FALSE(pattern_claims.contains(Wire::W_L)) << "Under-constrained pattern missing W_L"; EXPECT_FALSE(pattern_claims.contains(Wire::W_R)) << "Under-constrained pattern missing W_R"; diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp index da01c5e79321..e225957330a0 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp @@ -272,8 +272,15 @@ template void StaticAnalyzer_) { + try_pattern(POSEIDON2_QUAD_INTERNAL, blk.q_poseidon2_quad_internal()); + try_pattern(POSEIDON2_QUAD_INTERNAL_TERMINAL, blk.q_poseidon2_quad_internal_terminal()); + try_pattern(POSEIDON2_TRANSITION_ENTRY, blk.q_poseidon2_transition_entry()); + try_pattern(POSEIDON2_INITIAL_EXTERNAL, blk.q_poseidon2_external_initial()); + } else { + try_pattern(POSEIDON2_INTERNAL, blk.q_poseidon2_internal()); + } try_pattern(NON_NATIVE_FIELD, blk.q_nnf()); try_pattern(MEMORY, blk.q_memory()); // consistency gates only; access gates via ROM/RAM transcripts try_pattern(DELTA_RANGE, blk.q_delta_range()); @@ -1244,11 +1251,22 @@ void StaticAnalyzer_::print_delta_range_gate_info(size_t gat template void StaticAnalyzer_::print_poseidon2s_gate_info(size_t gate_index, auto& block) { - auto internal_selector = block.q_poseidon2_internal()[gate_index]; auto external_selector = block.q_poseidon2_external()[gate_index]; - if (!internal_selector.is_zero() || !external_selector.is_zero()) { - info("q_poseidon2_internal == ", internal_selector); + bool nonzero = !external_selector.is_zero(); + if constexpr (IsMegaBuilder) { + nonzero = nonzero || !block.q_poseidon2_external_initial()[gate_index].is_zero() || + 
!block.q_poseidon2_quad_internal()[gate_index].is_zero(); + } else { + nonzero = nonzero || !block.q_poseidon2_internal()[gate_index].is_zero(); + } + if (nonzero) { info("q_poseidon2_external == ", external_selector); + if constexpr (IsMegaBuilder) { + info("q_poseidon2_external_initial == ", block.q_poseidon2_external_initial()[gate_index]); + info("q_poseidon2_quad_internal == ", block.q_poseidon2_quad_internal()[gate_index]); + } else { + info("q_poseidon2_internal == ", block.q_poseidon2_internal()[gate_index]); + } info("w_1 == ", block.w_l()[gate_index]); info("w_2 == ", block.w_r()[gate_index]); info("w_3 == ", block.w_o()[gate_index]); diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp index 77cae1d02c09..c24a9d649c4f 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin.test.cpp @@ -45,14 +45,16 @@ class BoomerangGoblinRecursiveVerifierTests : public testing::Test { { Goblin goblin; GoblinMockCircuits::construct_and_merge_mock_circuits(goblin, 5); + goblin.op_queue->construct_zk_columns(); // Merge the ecc ops from the newly constructed circuit auto goblin_proof = goblin.prove(); // Subtable values and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; auto t_current = goblin.op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = goblin.op_queue->construct_previous_ultra_ops_table_columns(); - CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); + auto T_prev = goblin.op_queue->construct_table_columns_up_to_tail(); + CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows() + + UltraEccOpsTable::ZK_ULTRA_OPS); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; 
idx++) { merge_commitments.t_commitments[idx] = pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = pcs_commitment_key.commit(T_prev[idx]); @@ -86,7 +88,7 @@ TEST_F(BoomerangGoblinRecursiveVerifierTests, graph_description_basic) auto transcript = std::make_shared(); GoblinStdlibProof stdlib_proof(builder, proof); - GoblinRecursiveVerifier verifier{ transcript, stdlib_proof, recursive_merge_commitments, MergeSettings::APPEND }; + GoblinRecursiveVerifier verifier{ transcript, stdlib_proof, recursive_merge_commitments }; GoblinRecursiveVerifier::ReductionResult output = verifier.reduce_to_pairing_check_and_ipa_opening(); // Aggregate merge + translator pairing points diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin_avm.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin_avm.test.cpp index 2475005c0de3..945f7f82cfcb 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin_avm.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_goblin_avm.test.cpp @@ -54,17 +54,14 @@ class BoomerangGoblinAvmRecursiveVerifierTests : public testing::Test { GoblinAvm goblin(inner_builder); MockCircuits::construct_arithmetic_circuit(inner_builder); - // Build a MegaAvm prover instance to get ecc_op_wire commitments matching the real flow. - auto mega_avm_instance = std::make_shared>(inner_builder); - CommitmentKey pcs_commitment_key(mega_avm_instance->dyadic_size()); - auto goblin_proof = goblin.prove(); - // Commit to ecc_op_wire polynomials from the MegaAvm prover instance + // Commit to op_queue columns. 
TableCommitments table_commitments; - size_t idx = 0; - for (auto& wire : mega_avm_instance->polynomials.get_ecc_op_wires()) { - table_commitments[idx++] = pcs_commitment_key.commit(wire); + auto ultra_ops_table_columns = goblin.op_queue->construct_ultra_ops_table_columns(/*include_zk_ops=*/false); + CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); + for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { + table_commitments[idx] = pcs_commitment_key.commit(ultra_ops_table_columns[idx]); } RecursiveTableCommitments recursive_table_commitments; diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp index f2565ca29c7b..fd1229684835 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph_description_merge_recursive_verifier.test.cpp @@ -47,22 +47,37 @@ template class BoomerangRecursiveMergeVerifierTest : pu EXPECT_EQ(result.second.size(), 0); } - static void prove_and_verify_merge(const std::shared_ptr& op_queue, - const MergeSettings settings = MergeSettings::PREPEND, - const bool run_analyzer = false) + static std::shared_ptr construct_final_merge_op_queue(const size_t num_subtables_up_to_tail) + { + auto op_queue = std::make_shared(); + + for (size_t idx = 0; idx < num_subtables_up_to_tail; ++idx) { + InnerBuilder circuit{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit); + op_queue->merge(); + } + + op_queue->construct_zk_columns(); + + InnerBuilder hiding_circuit{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(hiding_circuit); + return op_queue; + } + + static void prove_and_verify_merge(const std::shared_ptr& op_queue, const bool run_analyzer = false) { RecursiveBuilder 
outer_circuit; auto prover_transcript = std::make_shared(); - MergeProver merge_prover{ op_queue, prover_transcript, settings }; + MergeProver merge_prover{ op_queue, prover_transcript }; auto merge_proof = merge_prover.construct_proof(); // Subtable values and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; RecursiveMergeCommitments recursive_merge_commitments; auto t_current = op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = op_queue->construct_previous_ultra_ops_table_columns(); + auto T_prev = op_queue->construct_table_columns_up_to_tail(); for (size_t idx = 0; idx < InnerFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = merge_prover.pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = merge_prover.pcs_commitment_key.commit(T_prev[idx]); @@ -78,7 +93,7 @@ template class BoomerangRecursiveMergeVerifierTest : pu // Create a recursive merge verification circuit for the merge proof auto merge_transcript = std::make_shared>(); - RecursiveMergeVerifier verifier{ settings, merge_transcript }; + RecursiveMergeVerifier verifier{ merge_transcript }; const stdlib::Proof stdlib_merge_proof(outer_circuit, merge_proof); auto [pairing_points, merged_commitments, reduction_succeeded] = verifier.reduce_to_pairing_check(stdlib_merge_proof, recursive_merge_commitments); @@ -96,38 +111,10 @@ template class BoomerangRecursiveMergeVerifierTest : pu } } - static void test_recursive_merge_verification_prepend() - { - auto op_queue = std::make_shared(); - - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); - prove_and_verify_merge(op_queue); - - InnerBuilder circuit2{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit2); - prove_and_verify_merge(op_queue); - - InnerBuilder circuit3{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit3); - prove_and_verify_merge(op_queue, MergeSettings::PREPEND, true); - 
} - - static void test_recursive_merge_verification_append() + static void test_recursive_merge_verification() { - auto op_queue = std::make_shared(); - - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); - prove_and_verify_merge(op_queue); - - InnerBuilder circuit2{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit2); - prove_and_verify_merge(op_queue); - - InnerBuilder circuit3{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit3); - prove_and_verify_merge(op_queue, MergeSettings::APPEND, true); + auto op_queue = construct_final_merge_op_queue(/*num_subtables_up_to_tail=*/3); + prove_and_verify_merge(op_queue, /*run_analyzer=*/true); } }; @@ -135,14 +122,9 @@ using Builder = testing::Types; TYPED_TEST_SUITE(BoomerangRecursiveMergeVerifierTest, Builder); -TYPED_TEST(BoomerangRecursiveMergeVerifierTest, RecursiveVerificationPrepend) -{ - TestFixture::test_recursive_merge_verification_prepend(); -}; - -TYPED_TEST(BoomerangRecursiveMergeVerifierTest, RecursiveVerificationAppend) +TYPED_TEST(BoomerangRecursiveMergeVerifierTest, RecursiveMergeVerification) { - TestFixture::test_recursive_merge_verification_append(); + TestFixture::test_recursive_merge_verification(); }; } // namespace bb::stdlib::recursion::goblin diff --git a/barretenberg/cpp/src/barretenberg/chonk/README.md b/barretenberg/cpp/src/barretenberg/chonk/README.md index d3963fa4adc4..8f6193b1c517 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/README.md +++ b/barretenberg/cpp/src/barretenberg/chonk/README.md @@ -111,8 +111,8 @@ MegaZK Oink → Merge → ECCVM → Translator Oink + Joint Sumcheck + Joint PCS Concretely (`ChonkVerifier::reduce_to_ipa_claim` / `ChonkVerifier::verify`): -1. **MegaZK Oink verification**: `BatchedHonkTranslatorVerifier::verify_mega_zk_oink` processes the hiding kernel's pre-sumcheck proof and extracts `HidingKernelIO` (pairing points, calldata commitment, ECC op wire commitments) -2. 
**Databus consistency check**: Asserts the hiding kernel's calldata commitment equals its `kernel_return_data` commitment +1. **MegaZK Oink verification**: `BatchedHonkTranslatorVerifier::verify_mega_zk_oink` processes the hiding kernel's pre-sumcheck proof and extracts `HidingKernelIO` (pairing points, kernel return data commitment, ECC op wire commitments) +2. **Databus consistency check**: Asserts the hiding kernel's kernel calldata commitment equals the `kernel_return_data` commitment contained in its public inputs 3. **Merge verification**: Verifies the hiding kernel's APPEND-mode merge proof using the ECC op wire commitments from step 1 and `ecc_op_tables` from `HidingKernelIO` 4. **ECCVM verification**: Reduces to an IPA opening claim; extracts translator input parameters (`v`, `x`, `accumulated_result`) 5. **Joint verification**: `BatchedHonkTranslatorVerifier::verify` processes the translator Oink, runs the 17-round joint sumcheck, and performs the joint Shplemini/KZG PCS reduction @@ -269,13 +269,13 @@ The databus solves this by using **commitments** instead of raw data. Rather tha | Column | Purpose | |--------|---------| -| `calldata` | Input from previous kernel's return data ($C_i$) | -| `secondary_calldata` | Input from previous app's return data ($C'_i$) | +| `kernel_calldata` | Input from previous kernel's return data ($C_i$) | +| `app_calldata[0..2]` | Inputs from up to three apps' return data ($C'_{i,j}$) | | `return_data` | Output to be consumed by next circuit ($R_i$) | -App circuits only produce `return_data` (no calldata). Kernel circuits receive both: -- `calldata` from the previous kernel's return data -- `secondary_calldata` from the corresponding app's return data +App circuits only produce `return_data` (no calldata). 
Kernel circuits receive: +- `kernel_calldata` from the previous kernel's return data +- `app_calldata[0..2]` from the corresponding apps' return data #### Lookup Relations @@ -301,7 +301,7 @@ $$\sum_{i=0}^{n-1} a_i \cdot I_i \cdot (w_{1,i} + w_{2,i}\beta + \gamma) - q_{bu Inverse correctness is enforced by two separate gating subrelations: $(I \cdot L \cdot T - 1) \cdot \text{is\_read} = 0$ on read rows, and $(I \cdot L \cdot T - 1) \cdot \text{count} = 0$ on write rows. At inactive rows (where both gates are zero), $I$ is unconstrained but the lookup identity contribution is also zero, so the prover gets no free degrees of freedom. -**Multiple columns**: Each bus column (calldata, secondary_calldata, return_data) has separate subrelations, distinguished by column-specific selectors $q_j$. +**Multiple columns**: Each bus column (kernel calldata, three app calldata columns, return data) has separate subrelations, distinguished by column-specific selectors $q_j$. #### Population @@ -315,14 +315,15 @@ The databus columns are populated from ACIR constraints generated by the Noir co ``` App₀ ──return_data [R'₀]──┐ ↓ - Kernel₀ ←─calldata─── (empty for first kernel) + Kernel₀ ←─kernel_calldata─── (empty for first kernel) + ←─app_calldata[0] [C'₀,0]─── App₀.return_data │ return_data [R₀] ↓ App₁ ──return_data [R'₁]──┐ ↓ - Kernel₁ ←─calldata [C₁]─── Kernel₀.return_data - ←─secondary_calldata [C'₁]─── App₁.return_data + Kernel₁ ←─kernel_calldata [C₁]─── Kernel₀.return_data + ←─app_calldata[0] [C'₁,0]─── App₁.return_data │ return_data [R₁] ↓ @@ -400,13 +401,12 @@ A Chonk proof must reveal nothing about the private execution. ZK is achieved th The op queue contains EC operations from all circuits and must be hidden: -1. **`hide_op_queue_accumulation_result`**: Hides the final accumulator point -2. **`hide_op_queue_content_in_tail`**: Protects tail kernel op queue data -3. **`hide_op_queue_content_in_hiding`**: Final ZK protection in Hiding kernel +1. 
**Batch merge ZK prefix**: `BatchMergeProver` constructs the initial ZK rows used to hide the op queue up to the tail. +2. **`hide_op_queue_content_in_hiding`**: Adds the final random non-ops in the hiding kernel. ### Constant Merged Table Size for ZK -**Problem**: The final merge step uses APPEND mode. If the merged table size varied with transaction complexity, an observer could infer information about the transaction from the proof structure. +**Problem**: The final merge step appends the hiding kernel table to the accumulated table. If the merged table size varied with transaction complexity, an observer could infer information about the transaction from the proof structure. **Solution**: We always merge to a **uniform total size** = `OP_QUEUE_SIZE`. In the code, `shift_size` is set to `(OP_QUEUE_SIZE - |hiding_ops|) × NUM_ROWS_PER_OP`, which represents the total degree of the prepended table and places the hiding kernel's ops at fixed positions at the end of the table, regardless of how many ops the actual transaction used. 
@@ -686,8 +686,8 @@ Kernel circuits output a structured public input block that carries cross-circui struct KernelIO { PairingInputs pairing_inputs; // Accumulated {P0, P1} for deferred pairing check G1 kernel_return_data; // Commitment to this kernel's return data - G1 app_return_data; // Commitment to the app's return data - TableCommitments ecc_op_tables; // [M_1]...[M_4] merged op queue tables from Merge + std::array app_return_data; // App return data commitments + FF ecc_op_hash; // Running hash over ECC op column commitments FF output_hn_accum_hash; // Hash of the HyperNova accumulator state }; @@ -723,10 +723,10 @@ The [Databus](#databus) section explains how circuits pass data via commitment e ```cpp // Kernel's calldata must match previous kernel's return_data -kernel_input.kernel_return_data.incomplete_assert_equal(witness_commitments.calldata); +kernel_input.kernel_return_data.incomplete_assert_equal(witness_commitments.kernel_calldata); -// Kernel's secondary_calldata must match previous app's return_data -kernel_input.app_return_data.incomplete_assert_equal(witness_commitments.secondary_calldata); +// Each app calldata column must match the corresponding app's return_data +kernel_input.app_return_data[idx].incomplete_assert_equal(*app_calldata_commitments[idx]); ``` The `incomplete_assert_equal` (for non-native G1 points) adds in-circuit constraints that the commitments are equal. Combined with the HyperNova binding of public inputs to proofs, tampering with databus content invalidates the proof. 
@@ -791,7 +791,7 @@ The type is assigned to the circuit being accumulated based on its position: |-----------------|------------|-------------| | Circuit 0 | `OINK` | First app - no prior accumulator, just Oink verification | | Circuits 1..n-4 | `HN` | Apps, inner kernels, reset kernels - standard HyperNova folding | -| Circuit n-3 | `HN_TAIL` | Pre-tail kernel - adds ZK masking at op queue start | +| Circuit n-3 | `HN_TAIL` | Pre-tail kernel | | Circuit n-2 | `HN_FINAL` | Tail kernel - final folding + decider verification | | Circuit n-1 | `MEGA` | Hiding kernel - MegaZK proof, no folding | @@ -802,8 +802,8 @@ The type indicates which proof is being verified BY the current kernel: |---------------------|-------------------|---------| | `OINK` | Init kernel (circuit 1) | Verify first app's Oink proof | | `HN` | Inner/reset kernel | Verify standard HN folding proof | -| `HN_TAIL` | **Tail kernel** (circuit n-2) | Verify pre-tail kernel's proof, add ZK ops | -| `HN_FINAL` | **Hiding kernel** (circuit n-1) | Verify tail kernel's proof + decider | +| `HN_TAIL` | **Tail kernel** (circuit n-2) | Verify pre-tail kernel's proof | +| `HN_FINAL` | **Hiding kernel** (circuit n-1) | Verify tail kernel's proof + verify batch merge + decider | **Key Point**: `HN_TAIL` is the proof FROM circuit n-3, verified BY the tail kernel (n-2). Similarly, `HN_FINAL` is the proof FROM the tail kernel (n-2), verified BY the hiding kernel (n-1). 
@@ -891,7 +891,7 @@ kernel_io.reconstruct_from_public(oink_result.public_inputs); - Extracts `ecc_op_tables` = $[M_{tail}]$ from `HidingKernelIO` public inputs - MegaZK verification (completed by the joint sumcheck+PCS) ensures `ecc_op_tables` is bound to the hiding kernel's proof -**Step 6: Final merge (APPEND mode) - merges hiding kernel's ops with constant shift size** +**Step 6: Final merge - merges hiding kernel's ops with constant shift size** ### Key Verifier Guarantees diff --git a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator.test.cpp b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator.test.cpp index 37901dc78b48..fc147b7964f8 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator.test.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator.test.cpp @@ -68,13 +68,13 @@ class BatchedHonkTranslatorTests : public ::testing::Test { size_t circuit_size_param = 500) { auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - add_random_ops(op_queue, TranslatorCircuitBuilder::NUM_RANDOM_OPS_START); - add_mixed_ops(op_queue, circuit_size_param / 2); - op_queue->merge(); + // Construct zk_columns + op_queue->construct_zk_columns(); + // Table with correct final structure for translator add_mixed_ops(op_queue, circuit_size_param / 2); add_random_ops(op_queue, TranslatorCircuitBuilder::NUM_RANDOM_OPS_END); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + // Merge with fixed append + op_queue->merge_fixed_append(op_queue->get_append_offset()); TranslatorCircuitBuilder circuit(batching_challenge_v, evaluation_input_x, op_queue); return std::make_shared(circuit); @@ -140,10 +140,14 @@ class BatchedHonkTranslatorTests : public ::testing::Test { m.add_entry(round, "ECC_OP_WIRE_" + std::to_string(i), G); } // DataBus entities: - for (const auto& label : { "CALLDATA", - 
"CALLDATA_READ_COUNTS", - "SECONDARY_CALLDATA", - "SECONDARY_CALLDATA_READ_COUNTS", + for (const auto& label : { "KERNEL_CALLDATA", + "KERNEL_CALLDATA_READ_COUNTS", + "FIRST_APP_CALLDATA", + "FIRST_APP_CALLDATA_READ_COUNTS", + "SECOND_APP_CALLDATA", + "SECOND_APP_CALLDATA_READ_COUNTS", + "THIRD_APP_CALLDATA", + "THIRD_APP_CALLDATA_READ_COUNTS", "RETURN_DATA", "RETURN_DATA_READ_COUNTS" }) { m.add_entry(round, label, G); @@ -160,8 +164,10 @@ class BatchedHonkTranslatorTests : public ::testing::Test { // ── Round 2: MegaZK logderiv inverses + Z_PERM + translator Oink ───────── m.add_entry(round, "LOOKUP_INVERSES", G); - m.add_entry(round, "CALLDATA_INVERSES", G); - m.add_entry(round, "SECONDARY_CALLDATA_INVERSES", G); + m.add_entry(round, "KERNEL_CALLDATA_INVERSES", G); + m.add_entry(round, "FIRST_APP_CALLDATA_INVERSES", G); + m.add_entry(round, "SECOND_APP_CALLDATA_INVERSES", G); + m.add_entry(round, "THIRD_APP_CALLDATA_INVERSES", G); m.add_entry(round, "RETURN_DATA_INVERSES", G); m.add_entry(round, "Z_PERM", G); // Translator Oink: vk_hash, masking commitment, 10 wire commitments diff --git a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp index cc727a5d0aae..23fb5bd7bbe4 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.cpp @@ -55,7 +55,7 @@ typename BatchedHonkTranslatorVerifier_::OinkResult BatchedHonkTranslator return OinkResult{ .public_inputs = mega_zk_verifier_instance->public_inputs, - .calldata_commitment = mega_zk_verifier_instance->witness_commitments.calldata, + .kernel_calldata_commitment = mega_zk_verifier_instance->witness_commitments.kernel_calldata, .ecc_op_wires = 
mega_zk_verifier_instance->witness_commitments.get_ecc_op_wires().get_copy(), }; } diff --git a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.hpp b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.hpp index 51dde12dd1ab..07d2d68172df 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/chonk/batched_honk_translator/batched_honk_translator_verifier.hpp @@ -99,7 +99,7 @@ template class BatchedHonkTranslatorVerifier_ { */ struct OinkResult { std::vector public_inputs; - Commitment calldata_commitment; + Commitment kernel_calldata_commitment; std::array ecc_op_wires; }; @@ -114,7 +114,7 @@ template class BatchedHonkTranslatorVerifier_ { /** * @brief Phase 1: Verify the MegaZK Oink phase on the shared transcript. * @details Loads mega_zk_proof into the transcript, runs OinkVerifier, stores verifier instance. - * @return OinkResult with public inputs, calldata commitment, and ECC op wires. + * @return OinkResult with public inputs, kernel calldata commitment, and ECC op wires. 
*/ OinkResult verify_mega_zk_oink(const Proof& mega_zk_proof); diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp index b05ac4daebab..38cc184773af 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp @@ -19,6 +19,7 @@ #include "barretenberg/translator_vm/translator_proving_key.hpp" #include "barretenberg/ultra_honk/oink_prover.hpp" #include "barretenberg/ultra_honk/oink_verifier.hpp" +#include namespace bb { @@ -132,7 +133,7 @@ Chonk::FoldingResult Chonk::verify_folding( /** * @brief Process public inputs from a verified circuit and perform databus consistency checks * @details For kernel circuits: reconstructs KernelIO from public inputs, verifies that databus return data commitments - * match witness commitments, checks accumulator hash consistency, and returns the kernel's ECC op table commitments. + * match witness commitments, checks accumulator hash consistency, and returns the kernel's ECC op running hash. * For app circuits: reconstructs AppIO from public inputs and extracts pairing points. * In both cases, updates the bus depot with the appropriate return data commitment. 
* @@ -164,21 +165,25 @@ Chonk::PublicInputsResult Chonk::process_public_inputs_and_consistency_checks( // Kernel return data bool kernel_return_data_match = - kernel_input.kernel_return_data.get_value() == witness_commitments.calldata.get_value(); + kernel_input.kernel_return_data.get_value() == witness_commitments.kernel_calldata.get_value(); BB_ASSERT_DEBUG(kernel_return_data_match, - "kernel_return_data mismatch: proof contains " << kernel_input.kernel_return_data.get_value() - << " but calldata commitment is " - << witness_commitments.calldata.get_value()); - kernel_input.kernel_return_data.incomplete_assert_equal(witness_commitments.calldata); - - // App return data - bool app_return_data_match = - kernel_input.app_return_data.get_value() == witness_commitments.secondary_calldata.get_value(); - BB_ASSERT_DEBUG(app_return_data_match, - "app_return_data mismatch: proof contains " - << kernel_input.app_return_data.get_value() << " but secondary_calldata commitment is " - << witness_commitments.secondary_calldata.get_value()); - kernel_input.app_return_data.incomplete_assert_equal(witness_commitments.secondary_calldata); + "kernel_return_data mismatch: proof contains " + << kernel_input.kernel_return_data.get_value() << " but kernel_calldata commitment is " + << witness_commitments.kernel_calldata.get_value()); + kernel_input.kernel_return_data.incomplete_assert_equal(witness_commitments.kernel_calldata); + + const std::array app_calldata_commitments{ &witness_commitments.first_app_calldata, + &witness_commitments.second_app_calldata, + &witness_commitments.third_app_calldata }; + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + bool app_return_data_match = + kernel_input.app_return_data[idx].get_value() == app_calldata_commitments[idx]->get_value(); + BB_ASSERT_DEBUG(app_return_data_match, + "app_return_data mismatch: proof contains " + << kernel_input.app_return_data[idx].get_value() << " but app calldata commitment " + << idx << " is " << 
app_calldata_commitments[idx]->get_value()); + kernel_input.app_return_data[idx].incomplete_assert_equal(*app_calldata_commitments[idx]); + } // ============= Perform accumulator hash consistency check ========================= @@ -195,14 +200,14 @@ Chonk::PublicInputsResult Chonk::process_public_inputs_and_consistency_checks( bus_depot.set_kernel_return_data_commitment(witness_commitments.return_data); - return { std::move(kernel_input.pairing_inputs), std::move(kernel_input.ecc_op_tables) }; + return { std::move(kernel_input.pairing_inputs), std::move(kernel_input.ecc_op_hash) }; } // App circuit path AppIO app_input; // pairing points app_input.reconstruct_from_public(public_inputs); - // Set the app return data commitment to be propagated via the public inputs + // Set the app return data commitment to be propagated via the public inputs. The depot owns slot allocation. bus_depot.set_app_return_data_commitment(witness_commitments.return_data); return { std::move(app_input.pairing_inputs), std::nullopt }; @@ -217,23 +222,19 @@ Chonk::PublicInputsResult Chonk::process_public_inputs_and_consistency_checks( * @param circuit * @param verifier_inputs {proof, vkey, type (Oink/HN)} A set of inputs for recursive verification * @param input_verifier_accumulator The accumulator from the previous step of recursive verification - * @param T_prev_commitments The ECC-op table from the previous step of recursive verification (the concatenation of all - * ECC-op subtables up to the previous folding step) + * @param running_hash Running hash of ECC-op column commitments from prior steps in this kernel. 
* @param accumulation_recursive_transcript Transcript shared across recursive verification of the folding of * K_{i-1} (kernel), A_{i,1} (app), .., A_{i, n} (app) */ -std::tuple, - std::vector, - Chonk::TableCommitments> +std::tuple, std::vector, Chonk::StdlibFF> Chonk::recursive_verification_and_consistency_checks( ClientCircuit& circuit, const StdlibVerifierInputs& verifier_inputs, const std::optional& input_verifier_accumulator, - const TableCommitments& T_prev_commitments, + const std::optional& running_hash, const std::shared_ptr& accumulation_recursive_transcript) { BB_BENCH_NAME("Chonk::recursive_verification_and_consistency_checks"); - using MergeCommitments = Goblin::MergeRecursiveVerifier::InputCommitments; auto verifier_instance = std::make_shared(verifier_inputs.honk_vk_and_hash); @@ -256,35 +257,28 @@ Chonk::recursive_verification_and_consistency_checks( std::vector public_inputs = std::move(verifier_instance->public_inputs); // Step 2: Process public inputs and perform databus consistency checks - auto [io_pairing_points, T_prev_override] = process_public_inputs_and_consistency_checks( + auto [io_pairing_points, previous_ecc_op_hash] = process_public_inputs_and_consistency_checks( verifier_inputs, public_inputs, witness_commitments, prev_accum_hash); - // Determine T_prev for merge verification - MergeCommitments merge_commitments; - if (verifier_inputs.type == QUEUE_TYPE::OINK) { - // T_prev = 0 in the first recursive verification - merge_commitments.T_prev_commitments = stdlib::recursion::honk::empty_ecc_op_tables(circuit); - } else if (T_prev_override) { - // T_prev_override is set only when the current circuit being folded is a kernel - // in which case it is equal to the ECC-op tables reconstructed from the public inputs - BB_ASSERT_EQ(verifier_inputs.is_kernel, true, "T_prev_override should only be set for kernels"); - merge_commitments.T_prev_commitments = std::move(*T_prev_override); - } else { - merge_commitments.T_prev_commitments = 
T_prev_commitments; + std::optional updated_hash = running_hash; + if (previous_ecc_op_hash.has_value()) { + BB_ASSERT_EQ(verifier_inputs.is_kernel, true, "previous_ecc_op_hash should only be set for kernels"); + BB_ASSERT(!running_hash.has_value(), "Running hash should not be set when recursively verifying a kernel"); + updated_hash = previous_ecc_op_hash.value(); } - // Step 3: Recursively verify the merge proof - merge_commitments.t_commitments = witness_commitments.get_ecc_op_wires().get_copy(); - auto [merge_pairing_points, merged_table_commitments] = - goblin.recursively_verify_merge(circuit, merge_commitments, accumulation_recursive_transcript); + // Step 3: Update the running ECC op hash with this circuit's ECC op column commitments. + auto ecc_op_col_commitments = witness_commitments.get_ecc_op_wires().get_copy(); + const std::vector ecc_op_col_commitments_vec(ecc_op_col_commitments.begin(), + ecc_op_col_commitments.end()); + updated_hash = Goblin::BatchMergeRecursiveVerifier::ecc_op_hash_step(ecc_op_col_commitments_vec, updated_hash); // Combine all pairing points std::vector all_points; all_points.insert(all_points.end(), folding_points.begin(), folding_points.end()); all_points.emplace_back(std::move(io_pairing_points)); - all_points.emplace_back(merge_pairing_points); - return { std::move(output_accumulator), std::move(all_points), merged_table_commitments }; + return { std::move(output_accumulator), std::move(all_points), updated_hash.value() }; } /** @@ -294,7 +288,7 @@ Chonk::recursive_verification_and_consistency_checks( * proofs for each circuit, this method adds recursive verification constraints to kernel circuits. * * The method performs the following steps: - * 1. SETUP: Initialize transcript, determine kernel type, add ZK masking for tail kernel + * 1. SETUP: Initialize transcript and determine kernel type * 2. VERIFICATION LOOP: Process each entry in stdlib_verification_queue (folding + merge + databus) * 3. 
OUTPUT: Set public inputs (KernelIO or HidingKernelIO) for propagation to next kernel * @@ -308,8 +302,8 @@ void Chonk::complete_kernel_circuit_logic(ClientCircuit& circuit) // Transcript is shared across recursive verification of the folding of K_{i-1} (kernel) and A_{i} (app) auto accumulation_recursive_transcript = std::make_shared(); - // T_prev: commitment to previous merged table, propagated via public inputs - TableCommitments T_prev_commitments; + // Running Poseidon2 hash over ECC op column commitments, propagated through kernel public inputs. + std::optional running_hash = std::nullopt; // Convert native verification queue to circuit witnesses if (stdlib_verification_queue.empty()) { @@ -317,34 +311,20 @@ void Chonk::complete_kernel_circuit_logic(ClientCircuit& circuit) } // Determine kernel type from queue contents - bool is_init_kernel = - stdlib_verification_queue.size() == 1 && (stdlib_verification_queue.front().type == QUEUE_TYPE::OINK); - - bool is_tail_kernel = - stdlib_verification_queue.size() == 1 && (stdlib_verification_queue.front().type == QUEUE_TYPE::HN_TAIL); + bool is_init_kernel = stdlib_verification_queue.front().type == QUEUE_TYPE::OINK; bool is_hiding_kernel = stdlib_verification_queue.size() == 1 && (stdlib_verification_queue.front().type == QUEUE_TYPE::HN_FINAL); // The ECC-op subtable for a kernel begins with an eq-and-reset to ensure that the preceding circuit's subtable // cannot affect the ECC-op accumulator for the kernel. 
- if (is_tail_kernel) { - BB_ASSERT_EQ(circuit.op_queue->get_current_subtable_size(), - 0U, - "tail kernel ecc ops table should be empty at this point"); - // Add a no-op to make the op queue wires in Translator shiftable - circuit.queue_ecc_no_op(); - // Add randomness at the beginning of the tail kernel (whose ecc ops fall at the beginning of the op queue - // table) to ensure the CHONK proof doesn't leak information about the actual content of the op queue - hide_op_queue_content_in_tail(circuit); - - // Add the hiding op with random (non-curve) Px, Py values for statistical hiding of accumulated_result. - hide_op_queue_accumulation_result(circuit); - } circuit.queue_ecc_eq(); // Step 2: VERIFICATION LOOP - Recursively verify each proof in the queue + BB_ASSERT(bus_depot.app_return_data_slots_are_empty(), + "DataBusDepot has stale app return-data slots at kernel-completion boundary"); + std::vector points_accumulator; std::optional current_stdlib_verifier_accumulator; if (!is_init_kernel) { @@ -354,15 +334,15 @@ void Chonk::complete_kernel_circuit_logic(ClientCircuit& circuit) while (!stdlib_verification_queue.empty()) { const StdlibVerifierInputs& verifier_input = stdlib_verification_queue.front(); - auto [output_stdlib_verifier_accumulator, pairing_points, merged_table_commitments] = + auto [output_stdlib_verifier_accumulator, pairing_points, updated_hash] = recursive_verification_and_consistency_checks(circuit, verifier_input, current_stdlib_verifier_accumulator, - T_prev_commitments, + running_hash, accumulation_recursive_transcript); points_accumulator.insert(points_accumulator.end(), pairing_points.begin(), pairing_points.end()); - // Update commitment to the status of the op_queue - T_prev_commitments = merged_table_commitments; + running_hash = updated_hash; + // Update the output verifier accumulator current_stdlib_verifier_accumulator = output_stdlib_verifier_accumulator; @@ -370,31 +350,44 @@ void Chonk::complete_kernel_circuit_logic(ClientCircuit& 
circuit) } // Step 3: OUTPUT - Set public inputs for propagation to next kernel - - PairingPoints pairing_points_aggregator = PairingPoints::aggregate_multiple(points_accumulator); + BB_ASSERT_EQ(running_hash.has_value(), true, "Running hash should be set for public input propagation"); // Output differs based on kernel type: HidingKernelIO (no accum hash) vs KernelIO (with accum hash) if (is_hiding_kernel) { BB_ASSERT_EQ(current_stdlib_verifier_accumulator.has_value(), false); + // Perform batch merge verification + auto [batch_pairing_points, batch_merged_table_commitments] = + goblin.recursively_verify_batch_merge(circuit, running_hash.value()); + + // Append batch merge pairing points to the list of pairing points + points_accumulator.emplace_back(batch_pairing_points); + + // Compute aggregated pairing points for output + PairingPoints pairing_points_aggregator = PairingPoints::aggregate_multiple(points_accumulator); + // Add randomness at the end of the hiding kernel (whose ecc ops fall right at the end of the op queue table) to // ensure the Chonk proof doesn't leak information about the actual content of the op queue hide_op_queue_content_in_hiding(circuit); - // Propagate public inputs HidingKernelIO hiding_output{ pairing_points_aggregator, bus_depot.get_kernel_return_data_commitment(circuit), - T_prev_commitments }; + std::move(batch_merged_table_commitments) }; hiding_output.set_public(); } else { BB_ASSERT_NEQ(current_stdlib_verifier_accumulator.has_value(), false); + // Compute aggregated pairing points for output + PairingPoints pairing_points_aggregator = PairingPoints::aggregate_multiple(points_accumulator); + // Extract native verifier accumulator from the stdlib accum to use it in the next round recursive_verifier_native_accum = current_stdlib_verifier_accumulator->get_value(); - // Get databus commitments auto kernel_return_data_commitment = bus_depot.get_kernel_return_data_commitment(circuit); - auto app_return_data_commitment = 
bus_depot.get_app_return_data_commitment(circuit); + KernelIO::AppReturnDataCommitments app_return_data_commitments; + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + app_return_data_commitments[idx] = bus_depot.get_app_return_data_commitment(circuit, idx); + } // Compute hash of output accumulator RecursiveTranscript hash_transcript; @@ -410,8 +403,8 @@ void Chonk::complete_kernel_circuit_logic(ClientCircuit& circuit) // Propagate public inputs KernelIO kernel_output{ pairing_points_aggregator, kernel_return_data_commitment, - app_return_data_commitment, - T_prev_commitments, + app_return_data_commitments, + running_hash.value(), current_verifier_accum_hash }; kernel_output.set_public(); } @@ -457,10 +450,20 @@ void Chonk::accumulate_hiding_kernel(ClientCircuit& circuit, const std::shared_p for (auto& block : circuit.blocks.get()) { block.free_data(); } - hiding_vk = std::make_shared(hiding_prover_inst->get_precomputed()); + // MegaZKFlavor inherits VerificationKey from MegaFlavor unchanged, so MegaZKVerificationKey + // and MegaVerificationKey are the same type. Reuse the caller-supplied precomputed VK when + // present to skip the 31 sequential commitments in the NativeVerificationKey_ ctor. + static_assert( + std::is_same_v, + "hiding-kernel precomputed VK reuse relies on MegaZKFlavor inheriting VerificationKey from MegaFlavor"); + if (precomputed_vk) { + hiding_vk = precomputed_vk; + } else { + hiding_vk = std::make_shared(hiding_prover_inst->get_precomputed()); + } // Push VK to queue so get_hiding_kernel_vk_and_hash() can find it. 
- VerifierInputs queue_entry{ {}, precomputed_vk, QUEUE_TYPE::MEGA, /*is_kernel=*/true }; + VerifierInputs queue_entry{ {}, hiding_vk, QUEUE_TYPE::MEGA, /*is_kernel=*/true }; verification_queue.push_back(queue_entry); num_circuits_accumulated++; } @@ -552,7 +555,8 @@ void Chonk::accumulate_and_fold(ClientCircuit& circuit, #ifndef NDEBUG update_native_verifier_accumulator(queue_entry, verifier_transcript); #endif - goblin.prove_merge(prover_accumulation_transcript); + // Keep one subtable per folded circuit and prove the batched merge after the tail kernel. + goblin.op_queue->merge(); num_circuits_accumulated++; } @@ -586,34 +590,13 @@ void Chonk::accumulate(ClientCircuit& circuit, const std::shared_ptr T_prev_commitments; // set only for kernels + std::optional ecc_op_hash; // set only for kernels }; /** @@ -101,7 +101,7 @@ class Chonk : public IVCBase { * State machine transitions based on `num_circuits_accumulated`: * - OINK: First app (circuit 0) - no prior accumulator, just Oink verification * - HN: Apps 1..n-3, inner kernels, and reset kernels - full HyperNova folding verification - * - HN_TAIL: Circuit n-3 (last kernel before tail) - adds ZK masking at op queue start + * - HN_TAIL: Circuit n-3 (last kernel before tail) * - HN_FINAL: Circuit n-2 (tail kernel) - final folding + decider verification * - MEGA: Circuit n-1 (hiding kernel) - MegaZK proof, no folding * @@ -196,12 +196,12 @@ class Chonk : public IVCBase { const std::vector>& input_keys = {}); [[nodiscard("Pairing points should be accumulated")]] std:: - tuple, std::vector, TableCommitments> + tuple, std::vector, StdlibFF> recursive_verification_and_consistency_checks( ClientCircuit& circuit, const StdlibVerifierInputs& verifier_inputs, const std::optional& input_verifier_accumulator, - const TableCommitments& T_prev_commitments, + const std::optional& running_hash, const std::shared_ptr& accumulation_recursive_transcript); // Complete the logic of a kernel circuit (e.g. 
HN/merge recursive verification, databus consistency checks) @@ -219,8 +219,6 @@ class Chonk : public IVCBase { ChonkProof prove(); - static void hide_op_queue_accumulation_result(ClientCircuit& circuit); - static void hide_op_queue_content_in_tail(ClientCircuit& circuit); static void hide_op_queue_content_in_hiding(ClientCircuit& circuit); /** diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp index f45c55269170..8bb61abc00ef 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -22,7 +23,22 @@ using namespace bb; -static constexpr size_t SMALL_LOG_2_NUM_GATES = 5; +namespace { + +constexpr size_t SMALL_LOG_2_NUM_GATES = 5; + +/** + * @brief Enum for specifying which KernelIO field to tamper with in tests. + */ +enum class KernelIOField : uint8_t { + PAIRING_INPUTS, + ACCUMULATOR_HASH, + KERNEL_RETURN_DATA, + APP_RETURN_DATA, + ECC_OP_HASH +}; + +} // namespace class ChonkTests : public ::testing::Test { protected: @@ -40,6 +56,11 @@ class ChonkTests : public ::testing::Test { using ChonkVerifier = ChonkNativeVerifier; public: + /** + * @brief Hook fired after each accumulate() inside run_ivc. 
+ */ + using AccumulateHook = std::function; + /** * @brief Tamper with a proof * @details The first value in the proof after the public inputs is the commitment to the wire w.l (see @@ -58,17 +79,30 @@ class ChonkTests : public ::testing::Test { } } - static std::pair> accumulate_and_prove_ivc( - size_t num_app_circuits, TestSettings settings = {}, bool check_circuit_sizes = false) + static std::pair> run_ivc( + size_t num_app_circuits, + TestSettings settings = {}, + const AccumulateHook& post_hook = nullptr, + bool check_circuit_sizes = false) { CircuitProducer circuit_producer(num_app_circuits); - const size_t num_circuits = circuit_producer.total_num_circuits; - Chonk ivc{ num_circuits }; + return run_ivc_impl(circuit_producer, settings, post_hook, check_circuit_sizes); + }; - for (size_t j = 0; j < num_circuits; ++j) { - circuit_producer.construct_and_accumulate_next_circuit(ivc, settings, check_circuit_sizes); - } - return { ivc.prove(), ivc.get_hiding_kernel_vk_and_hash() }; + static std::pair> run_ivc( + std::vector leading_is_kernel_flags, + TestSettings settings = {}, + const AccumulateHook& post_hook = nullptr, + bool check_circuit_sizes = false) + { + CircuitProducer circuit_producer(std::move(leading_is_kernel_flags), /*large_first_app=*/false); + return run_ivc_impl(circuit_producer, settings, post_hook, check_circuit_sizes); + }; + + static std::pair> accumulate_and_prove_ivc( + size_t num_app_circuits, TestSettings settings = {}, bool check_circuit_sizes = false) + { + return run_ivc(num_app_circuits, settings, /*post_hook=*/nullptr, check_circuit_sizes); }; static bool verify_chonk(const ChonkProof& proof, const std::shared_ptr& vk_and_hash) @@ -77,11 +111,6 @@ class ChonkTests : public ::testing::Test { return verifier.verify(proof); } - /** - * @brief Enum for specifying which KernelIO field to tamper with in tests - */ - enum class KernelIOField { PAIRING_INPUTS, ACCUMULATOR_HASH, KERNEL_RETURN_DATA, APP_RETURN_DATA, ECC_OP_TABLES }; - /** * 
@brief Helper function to test tampering with AppIO pairing inputs * @details Accumulates circuits, doubles the app pairing points (creating valid but different points), @@ -91,17 +120,8 @@ class ChonkTests : public ::testing::Test { { BB_DISABLE_ASSERTS(); - const size_t NUM_APP_CIRCUITS = 2; - CircuitProducer circuit_producer(NUM_APP_CIRCUITS); - const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; - Chonk ivc{ NUM_CIRCUITS }; TestSettings settings{ .log2_num_gates = SMALL_LOG_2_NUM_GATES }; - - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - auto [circuit, vk] = circuit_producer.create_next_circuit_and_vk(ivc, settings); - ivc.accumulate(circuit, vk); - - // After accumulating 3 circuits (app, kernel, app), we have 2 proofs in the queue + auto [proof, vk] = run_ivc(/*num_app_circuits=*/2, settings, [](Chonk& ivc, size_t idx) { if (idx == 2) { EXPECT_EQ(ivc.verification_queue.size(), 2); @@ -120,10 +140,8 @@ class ChonkTests : public ::testing::Test { app_io.to_proof(app_entry.proof, num_public_inputs); } - } - - auto proof = ivc.prove(); - EXPECT_FALSE(verify_chonk(proof, ivc.get_hiding_kernel_vk_and_hash())); + }); + EXPECT_FALSE(verify_chonk(proof, vk)); } /** @@ -135,17 +153,8 @@ class ChonkTests : public ::testing::Test { { BB_DISABLE_ASSERTS(); - const size_t NUM_APP_CIRCUITS = 2; - CircuitProducer circuit_producer(NUM_APP_CIRCUITS); - const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; - Chonk ivc{ NUM_CIRCUITS }; TestSettings settings{ .log2_num_gates = SMALL_LOG_2_NUM_GATES }; - - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - auto [circuit, vk] = circuit_producer.create_next_circuit_and_vk(ivc, settings); - ivc.accumulate(circuit, vk); - - // After accumulating 3 circuits (app, kernel, app), we have 2 proofs in the queue + auto [proof, vk] = run_ivc(/*num_app_circuits=*/2, settings, [field_to_tamper](Chonk& ivc, size_t idx) { if (idx == 2) { EXPECT_EQ(ivc.verification_queue.size(), 2); @@ -159,9 +168,12 @@ class 
ChonkTests : public ::testing::Test { // Tamper with the specified field switch (field_to_tamper) { case KernelIOField::PAIRING_INPUTS: { - // Replace with a different valid pairing: P0 = G1, P1 = -G1 satisfies e(G1,[1])·e(-G1,[x]) != 1 - // so instead use P0 + random offset to break binding without breaking the pairing trivially - kernel_io.pairing_inputs.P0() = kernel_io.pairing_inputs.P0() + Commitment::one(); + // Set P0 to [x]₁ (the first SRS point after [1]) and P1 to [1]₁ + kernel_io.pairing_inputs.P0() = + srs::get_crs_factory()->get_crs(2)->get_monomial_points()[1]; + kernel_io.pairing_inputs.P1() = -Commitment::one(); + + EXPECT_TRUE(kernel_io.pairing_inputs.check()); break; } case KernelIOField::ACCUMULATOR_HASH: @@ -171,19 +183,17 @@ class ChonkTests : public ::testing::Test { kernel_io.kernel_return_data = kernel_io.kernel_return_data + Commitment::one(); break; case KernelIOField::APP_RETURN_DATA: - kernel_io.app_return_data = kernel_io.app_return_data + Commitment::one(); + kernel_io.app_return_data[0] = kernel_io.app_return_data[0] + Commitment::one(); break; - case KernelIOField::ECC_OP_TABLES: - kernel_io.ecc_op_tables[0] = kernel_io.ecc_op_tables[0] + Commitment::one(); + case KernelIOField::ECC_OP_HASH: + kernel_io.ecc_op_hash += FF(1); break; } kernel_io.to_proof(kernel_entry.proof, num_public_inputs); } - } - - auto proof = ivc.prove(); - EXPECT_FALSE(verify_chonk(proof, ivc.get_hiding_kernel_vk_and_hash())); + }); + EXPECT_FALSE(verify_chonk(proof, vk)); } /** @@ -202,33 +212,27 @@ class ChonkTests : public ::testing::Test { using KernelIOSerde = bb::stdlib::recursion::honk::KernelIOSerde; const size_t NUM_APP_CIRCUITS = 2; - CircuitProducer circuit_producer(NUM_APP_CIRCUITS); - const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; - Chonk ivc{ NUM_CIRCUITS }; + const size_t NUM_TOTAL_CIRCUITS = NUM_APP_CIRCUITS * 2 + /*num_trailing_kernels*/ 3; TestSettings settings{ .log2_num_gates = SMALL_LOG_2_NUM_GATES }; - // Extract tail 
kernel IO before the last accumulation consumes the verification queue. - // The tail kernel (HN_FINAL) uses KernelIO format; the hiding kernel uses HidingKernelIO. + // Extract tail kernel IO before the hiding kernel consumes the verification queue. KernelIOSerde tail_io; - for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { - if (idx == NUM_CIRCUITS - 1) { - for (auto& it : std::ranges::reverse_view(ivc.verification_queue)) { - if (it.is_kernel) { - size_t num_public_inputs = it.honk_vk->num_public_inputs; - ASSERT_EQ(num_public_inputs, KernelIOSerde::PUBLIC_INPUTS_SIZE) - << "Tail kernel should use KernelIO format"; - ASSERT_GT(it.proof.size(), num_public_inputs) << "Tail kernel proof too small"; - tail_io = KernelIOSerde::from_proof(it.proof, num_public_inputs); - break; + auto [proof, vk_and_hash] = + run_ivc(/*num_app_circuits=*/NUM_APP_CIRCUITS, settings, [&tail_io](Chonk& ivc, size_t idx) { + // With 2 apps the layout is [app, kernel, app, kernel, reset, tail, hiding]. + if (idx == NUM_TOTAL_CIRCUITS - 2) { + for (auto& it : std::ranges::reverse_view(ivc.verification_queue)) { + if (it.is_kernel) { + size_t num_public_inputs = it.honk_vk->num_public_inputs; + ASSERT_EQ(num_public_inputs, KernelIOSerde::PUBLIC_INPUTS_SIZE) + << "Tail kernel should use KernelIO format"; + ASSERT_GT(it.proof.size(), num_public_inputs) << "Tail kernel proof too small"; + tail_io = KernelIOSerde::from_proof(it.proof, num_public_inputs); + break; + } } } - } - auto [circuit, vk] = circuit_producer.create_next_circuit_and_vk(ivc, settings); - ivc.accumulate(circuit, vk); - } - - auto proof = ivc.prove(); - auto vk_and_hash = ivc.get_hiding_kernel_vk_and_hash(); + }); size_t hiding_kernel_pub_inputs = vk_and_hash->vk->num_public_inputs; ASSERT_EQ(hiding_kernel_pub_inputs, HidingKernelIOSerde::PUBLIC_INPUTS_SIZE) @@ -240,13 +244,32 @@ class ChonkTests : public ::testing::Test { << "kernel_return_data mismatch: Tail has " << tail_io.kernel_return_data << " but HidingKernel has " << 
hiding_io.kernel_return_data; } + + private: + static std::pair> run_ivc_impl( + CircuitProducer& circuit_producer, + TestSettings settings, + const AccumulateHook& post_hook, + bool check_circuit_sizes) + { + const size_t num_circuits = circuit_producer.total_num_circuits; + Chonk ivc{ num_circuits }; + + for (size_t idx = 0; idx < num_circuits; ++idx) { + circuit_producer.construct_and_accumulate_next_circuit(ivc, settings, check_circuit_sizes); + if (post_hook) { + post_hook(ivc, idx); + } + } + return { ivc.prove(), ivc.get_hiding_kernel_vk_and_hash() }; + } }; /** * @brief Test sizes of the circuits generated by MockCircuitProducer * * @details The sizes of the circuits depends on the TestSettings: - * - No settings: first app is 2^19, all other apps are 2^17, all the kernels are 2^18 + * - No settings: first app is 2^19, all other apps are 2^17, init kernel is 2^17, inner kernels are 2^18 * - Settings: apps are 2^(log2_num_gates + 2), all kernels are smaller than 2^19 */ TEST_F(ChonkTests, TestCircuitSizes) @@ -405,7 +428,7 @@ HEAVY_TEST(ChonkKernelCapacity, MaxCapacityPassing) { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); - const size_t NUM_APP_CIRCUITS = 21; + const size_t NUM_APP_CIRCUITS = (CHONK_MAX_NUM_CIRCUITS - /*trailing kernels*/ 3) / 2; auto [proof, vk] = ChonkTests::accumulate_and_prove_ivc(NUM_APP_CIRCUITS); bool verified = ChonkTests::verify_chonk(proof, vk); @@ -455,15 +478,7 @@ TEST_F(ChonkTests, MsgpackProofFromFileOrBuffer) } }; -/** - * @brief Test that tampering with kernel pairing inputs causes verification to fail - * @details Pairing points (P0, P1) accumulate across the IVC chain through aggregation. - * Even if we replace them with pairing points satisfying pairing check, the public input binding should must catch it. 
- */ -TEST_F(ChonkTests, KernelPairingInputsTamperingFailure) -{ - ChonkTests::test_kernel_io_tampering(KernelIOField::PAIRING_INPUTS); -} +class KernelIOTamperingTests : public ChonkTests, public testing::WithParamInterface {}; /** * @brief Test that tampering with app pairing inputs causes verification to fail @@ -475,46 +490,33 @@ TEST_F(ChonkTests, AppPairingInputsTamperingFailure) ChonkTests::test_app_io_tampering(); } -/** - * @brief Verify that tampering with the accumulator hash in public inputs causes IVC verification failure - * @details Each kernel outputs `output_hn_accum_hash` as a public input. The next kernel computes the hash of its - * input accumulator and compares it with the hash from the previous kernel's public inputs via assert_equal. - * This test tampers with the hash to verify the binding. - */ -TEST_F(ChonkTests, AccumulatorHashTamperingFailure) -{ - ChonkTests::test_kernel_io_tampering(KernelIOField::ACCUMULATOR_HASH); -} - -/** - * @brief Test that tampering with kernel_return_data causes verification to fail - * @details kernel_return_data is the commitment to the kernel's return data which must match - * the calldata commitment of the next circuit. Tampering should cause databus consistency check to fail. - */ -TEST_F(ChonkTests, KernelReturnDataTamperingFailure) +TEST_P(KernelIOTamperingTests, CausesVerificationFailure) { - ChonkTests::test_kernel_io_tampering(KernelIOField::KERNEL_RETURN_DATA); + test_kernel_io_tampering(GetParam()); } -/** - * @brief Test that tampering with app_return_data causes verification to fail - * @details app_return_data is the commitment to the app's return data which must match - * the secondary_calldata commitment of the next circuit. 
- */ -TEST_F(ChonkTests, AppReturnDataTamperingFailure) -{ - ChonkTests::test_kernel_io_tampering(KernelIOField::APP_RETURN_DATA); -} - -/** - * @brief Test that tampering with ecc_op_tables causes verification to fail - * @details ecc_op_tables contains commitments to merged ECC operation tables (T_prev). - * Tampering causes the recursive merge verification to fail. - */ -TEST_F(ChonkTests, EccOpTablesTamperingFailure) -{ - ChonkTests::test_kernel_io_tampering(KernelIOField::ECC_OP_TABLES); -} +INSTANTIATE_TEST_SUITE_P(All, + KernelIOTamperingTests, + testing::Values(KernelIOField::PAIRING_INPUTS, + KernelIOField::ACCUMULATOR_HASH, + KernelIOField::KERNEL_RETURN_DATA, + KernelIOField::APP_RETURN_DATA, + KernelIOField::ECC_OP_HASH), + [](const testing::TestParamInfo& info) { + switch (info.param) { + case KernelIOField::PAIRING_INPUTS: + return "PairingInputs"; + case KernelIOField::ACCUMULATOR_HASH: + return "AccumulatorHash"; + case KernelIOField::KERNEL_RETURN_DATA: + return "KernelReturnData"; + case KernelIOField::APP_RETURN_DATA: + return "AppReturnData"; + case KernelIOField::ECC_OP_HASH: + return "EccOpHash"; + } + return "Unknown"; + }); /** * @brief Test that kernel_return_data is consistently propagated from Tail kernel to HidingKernel proof diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp index 5a5b50c4aaa3..d4e07c6def0f 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp @@ -62,22 +62,29 @@ class ChonkTranscriptInvariantTests : public ::testing::Test { * * Per-circuit transcript breakdown (from complete_kernel_circuit_logic): * - App circuits (0, 2): 0 transcripts - use native HN folding prover - * - Non-hiding kernels (1, 3, 4, 5): 3 transcripts each: - * 1. 
accumulation_recursive_transcript - shared across HN/Merge recursive verification + * - Init kernel (1): 2 transcripts: + * 1. accumulation_recursive_transcript + * 2. hash_transcript - for computing accumulator hash to propagate in public inputs + * - Intermediate kernel (3): 3 transcripts: + * 1. accumulation_recursive_transcript - shared across recursive verification * 2. PairingPoints::aggregate_multiple - for batching pairing points with Fiat-Shamir separator * 3. hash_transcript - for computing accumulator hash to propagate in public inputs - * - Hiding kernel (6): 2 transcripts (no hash_transcript since it doesn't propagate an accumulator): + * - Reset and tail kernels (4, 5): 2 transcripts each: + * 1. accumulation_recursive_transcript + * 2. hash_transcript - for computing accumulator hash to propagate in public inputs + * - Hiding kernel (6): 3 transcripts: * 1. accumulation_recursive_transcript - * 2. PairingPoints::aggregate_multiple + * 2. batch_merge_transcript - for final batch merge verification + * 3. 
PairingPoints::aggregate_multiple * - * Total: 0 + 3 + 0 + 3 + 3 + 3 + 2 = 14 transcripts + * Total: 0 + 2 + 0 + 3 + 2 + 2 + 3 = 12 transcripts */ TEST_F(ChonkTranscriptInvariantTests, AccumulationTranscriptCount) { // Pinned expected transcript count for 2 app circuits - constexpr size_t EXPECTED_TOTAL_TRANSCRIPTS = 14; + constexpr size_t EXPECTED_TOTAL_TRANSCRIPTS = 12; constexpr size_t EXPECTED_NUM_CIRCUITS = 7; - constexpr std::array EXPECTED_CIRCUIT_TRANSCRIPTS = { 0, 3, 0, 3, 3, 3, 2 }; + constexpr std::array EXPECTED_CIRCUIT_TRANSCRIPTS = { 0, 2, 0, 3, 2, 2, 3 }; // Record transcript index before IVC size_t index_before_ivc = bb::unique_transcript_index.load(); diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk_verifier.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk_verifier.cpp index abc9d380747f..4a0664087ca1 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk_verifier.cpp @@ -42,9 +42,9 @@ template <> ChonkVerifier::IPAReductionResult ChonkVerifier::reduc } // Step 2: Databus consistency check - const Commitment calldata_commitment = oink_result.calldata_commitment; + const Commitment kernel_calldata_commitment = oink_result.kernel_calldata_commitment; const Commitment return_data_commitment = kernel_io.kernel_return_data; - bool databus_consistency_verified = (calldata_commitment == return_data_commitment); + bool databus_consistency_verified = (kernel_calldata_commitment == return_data_commitment); vinfo("ChonkVerifier: databus consistency verified: ", databus_consistency_verified); if (!databus_consistency_verified) { info("ChonkVerifier: verification failed at databus consistency check"); @@ -54,7 +54,7 @@ template <> ChonkVerifier::IPAReductionResult ChonkVerifier::reduc // Step 3: Merge verification MergeCommitments merge_commitments{ .t_commitments = oink_result.ecc_op_wires, .T_prev_commitments = kernel_io.ecc_op_tables }; - GoblinVerifier::MergeVerifier 
merge_verifier{ MergeSettings::APPEND, transcript }; + GoblinVerifier::MergeVerifier merge_verifier{ transcript }; auto merge_result = merge_verifier.reduce_to_pairing_check(proof.merge_proof, merge_commitments); vinfo("ChonkVerifier: Merge reduced to pairing check: ", merge_result.reduction_succeeded ? "true" : "false"); @@ -146,16 +146,16 @@ template <> ChonkVerifier::Output ChonkVerifier::verify(const Proof& kernel_io.reconstruct_from_public(oink_result.public_inputs); // Step 2: Databus consistency check (in-circuit) - const Commitment calldata_commitment = oink_result.calldata_commitment; - if (kernel_io.kernel_return_data.get_value() != calldata_commitment.get_value()) { + const Commitment kernel_calldata_commitment = oink_result.kernel_calldata_commitment; + if (kernel_io.kernel_return_data.get_value() != kernel_calldata_commitment.get_value()) { info("ChonkRecursiveVerifier: Databus Consistency check failure"); } - kernel_io.kernel_return_data.incomplete_assert_equal(calldata_commitment); + kernel_io.kernel_return_data.incomplete_assert_equal(kernel_calldata_commitment); // Step 3: Merge verification MergeCommitments merge_commitments{ .t_commitments = oink_result.ecc_op_wires, .T_prev_commitments = kernel_io.ecc_op_tables }; - typename GoblinVerifier::MergeVerifier merge_verifier{ MergeSettings::APPEND, transcript }; + typename GoblinVerifier::MergeVerifier merge_verifier{ transcript }; auto merge_result = merge_verifier.reduce_to_pairing_check(proof.merge_proof, merge_commitments); vinfo("ChonkRecursiveVerifier: Merge reduced to pairing check: ", merge_result.reduction_succeeded ? 
"true" : "false"); diff --git a/barretenberg/cpp/src/barretenberg/chonk/mock_circuit_producer.hpp b/barretenberg/cpp/src/barretenberg/chonk/mock_circuit_producer.hpp index ca14ddb4ff0e..08f4a2d3cb6d 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/mock_circuit_producer.hpp +++ b/barretenberg/cpp/src/barretenberg/chonk/mock_circuit_producer.hpp @@ -13,8 +13,8 @@ namespace { /** * @brief Test utility for coordinating passing of databus data between mocked private function execution circuits * @details Facilitates testing of the databus consistency checks that establish the correct passing of databus data - * between circuits. Generates arbitrary return data for each app/kernel. Sets the kernel calldata and - * secondary_calldata based respectively on the previous kernel return data and app return data. + * between circuits. Generates arbitrary return data for each app/kernel. Sets the kernel calldata and app calldata + * columns based respectively on the previous kernel return data and each app return data. */ class MockDatabusProducer { private: @@ -24,7 +24,7 @@ class MockDatabusProducer { using BusDataArray = std::vector; static constexpr size_t BUS_ARRAY_SIZE = 3; // arbitrary length of mock bus inputs - BusDataArray app_return_data; + std::array app_return_data; BusDataArray kernel_return_data; FF dummy_return_val = 1; // use simple return val for easier test debugging @@ -41,31 +41,38 @@ class MockDatabusProducer { public: /** - * @brief Update the app return data and populate it in the app circuit + * @brief Update the next app return data and populate it in the app circuit. App slots are processed in order. 
*/ void populate_app_databus(ClientCircuit& circuit) { - app_return_data = generate_random_bus_array(); - for (auto& val : app_return_data) { - circuit.add_public_return_data(circuit.add_variable(val)); + for (auto& app_data : app_return_data) { + if (app_data.empty()) { + app_data = generate_random_bus_array(); + for (auto& val : app_data) { + circuit.add_public_return_data(circuit.add_variable(val)); + } + return; + } } }; /** - * @brief Populate the calldata and secondary calldata in the kernel from respectively the previous kernel and app - * return data. Update and populate the return data for the present kernel. + * @brief Populate the kernel calldata and app calldata columns from respectively the previous kernel and app return + * data. Update and populate the return data for the present kernel. */ void populate_kernel_databus(ClientCircuit& circuit) { - // Populate calldata from previous kernel return data (if it exists) + // Populate kernel calldata from previous kernel return data (if it exists) for (auto& val : kernel_return_data) { - circuit.add_public_calldata(circuit.add_variable(val)); + circuit.add_public_calldata(BusId::KERNEL_CALLDATA, circuit.add_variable(val)); } - // Populate secondary_calldata from app return data (if it exists), then clear the app return data - for (auto& val : app_return_data) { - circuit.add_public_secondary_calldata(circuit.add_variable(val)); + // Populate app calldata from app return data (if it exists), then clear the app return data + for (size_t idx = 0; idx < app_return_data.size(); ++idx) { + for (auto& val : app_return_data[idx]) { + circuit.add_public_calldata(static_cast(idx + 1), circuit.add_variable(val)); + } + app_return_data[idx].clear(); } - app_return_data.clear(); // Mock the return data for the present kernel circuit kernel_return_data = generate_random_bus_array(); @@ -73,12 +80,6 @@ class MockDatabusProducer { circuit.add_public_return_data(circuit.add_variable(val)); } }; - - /** - * @brief Add an 
arbitrary value to the app return data. This leads to a descrepency between the values used by the - * app itself and the secondary_calldata values in the kernel that will be set based on these tampered values. - */ - void tamper_with_app_return_data() { app_return_data.emplace_back(17); } }; /** @@ -133,6 +134,18 @@ class PrivateFunctionExecutionMockCircuitProducer { } } + PrivateFunctionExecutionMockCircuitProducer(std::vector leading_is_kernel_flags, bool large_first_app = false) + : is_kernel_flags(std::move(leading_is_kernel_flags)) + , large_first_app(large_first_app) + { + BB_ASSERT(!is_kernel_flags.empty(), "Mock circuit layout must contain at least one leading circuit"); + BB_ASSERT_EQ(is_kernel_flags[0], false, "Mock circuit layout must start with an app circuit"); + for (size_t i = 0; i < NUM_TRAILING_KERNELS; ++i) { + is_kernel_flags.emplace_back(true); + } + total_num_circuits = is_kernel_flags.size(); + } + /** * @brief Precompute the verification key for the given circuit. */ @@ -225,12 +238,14 @@ class PrivateFunctionExecutionMockCircuitProducer { if (is_trailing_kernel) { // Trailing kernels should be significantly smaller, with hiding kernel < 2^16 BB_ASSERT_LTE(log2_dyadic_size, - 16UL, + 17UL, - "Trailing kernel circuit size has exceeded expected bound (should be <= 2^16)."); + "Trailing kernel circuit size has exceeded expected bound (should be <= 2^17)."); vinfo("Log number of gates in a trailing kernel circuit is: ", log2_dyadic_size); } else { + const bool is_init_kernel = circuit_counter == 2; + const size_t expected_log2_dyadic_size = is_init_kernel ?
17UL : 18UL; BB_ASSERT_EQ(log2_dyadic_size, - 18UL, + expected_log2_dyadic_size, "There has been a change in the number of gates of a mock kernel circuit."); } } else { @@ -264,11 +279,6 @@ class PrivateFunctionExecutionMockCircuitProducer { auto [circuit, vk] = create_next_circuit_and_vk(ivc, settings, check_circuit_sizes); ivc.accumulate(circuit, vk); } - - /** - * @brief Tamper with databus data to facilitate failure testing - */ - void tamper_with_databus() { mock_databus.tamper_with_app_return_data(); } }; } // namespace diff --git a/barretenberg/cpp/src/barretenberg/chonk/test_bench_shared.hpp b/barretenberg/cpp/src/barretenberg/chonk/test_bench_shared.hpp index 233ac460ac71..9d79e8e3dffc 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/test_bench_shared.hpp +++ b/barretenberg/cpp/src/barretenberg/chonk/test_bench_shared.hpp @@ -7,15 +7,9 @@ namespace bb { -/** - * @brief Perform a specified number of circuit accumulation rounds - * - * @param NUM_CIRCUITS Number of circuits to accumulate (apps + kernels) - */ std::pair> accumulate_and_prove_with_precomputed_vks( - size_t num_app_circuits, auto& precomputed_vks, const bool large_first_app = true) + PrivateFunctionExecutionMockCircuitProducer& circuit_producer, auto& precomputed_vks) { - PrivateFunctionExecutionMockCircuitProducer circuit_producer(num_app_circuits, large_first_app); const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; Chonk ivc{ NUM_CIRCUITS }; @@ -33,11 +27,28 @@ std::pair> accumulate_and_p return { ivc.prove(), ivc.get_hiding_kernel_vk_and_hash() }; } -std::vector> precompute_vks(const size_t num_app_circuits, - const bool large_first_app = true) +/** + * @brief Perform a specified number of circuit accumulation rounds + * + * @param num_app_circuits Number of app circuits to accumulate + */ +std::pair> accumulate_and_prove_with_precomputed_vks( + size_t num_app_circuits, auto& precomputed_vks, const bool large_first_app = true) +{ + 
PrivateFunctionExecutionMockCircuitProducer circuit_producer(num_app_circuits, large_first_app); + return accumulate_and_prove_with_precomputed_vks(circuit_producer, precomputed_vks); +} + +std::pair> accumulate_and_prove_with_precomputed_vks( + std::vector leading_is_kernel_flags, auto& precomputed_vks, const bool large_first_app = false) +{ + PrivateFunctionExecutionMockCircuitProducer circuit_producer(std::move(leading_is_kernel_flags), large_first_app); + return accumulate_and_prove_with_precomputed_vks(circuit_producer, precomputed_vks); +} + +std::vector> precompute_vks( + PrivateFunctionExecutionMockCircuitProducer& circuit_producer) { - using CircuitProducer = PrivateFunctionExecutionMockCircuitProducer; - CircuitProducer circuit_producer(num_app_circuits, large_first_app); const size_t NUM_CIRCUITS = circuit_producer.total_num_circuits; Chonk ivc{ NUM_CIRCUITS }; @@ -46,7 +57,7 @@ std::vector> precompute_vk auto circuit = circuit_producer.create_next_circuit(ivc); const bool is_hiding_kernel = (j == NUM_CIRCUITS - 1); - auto vk = CircuitProducer::get_verification_key(circuit, is_hiding_kernel); + auto vk = PrivateFunctionExecutionMockCircuitProducer::get_verification_key(circuit, is_hiding_kernel); vkeys.push_back(vk); ivc.accumulate(circuit, vk); } @@ -54,4 +65,18 @@ std::vector> precompute_vk return vkeys; } +std::vector> precompute_vks(const size_t num_app_circuits, + const bool large_first_app = true) +{ + PrivateFunctionExecutionMockCircuitProducer circuit_producer(num_app_circuits, large_first_app); + return precompute_vks(circuit_producer); +} + +std::vector> precompute_vks( + std::vector leading_is_kernel_flags, const bool large_first_app = false) +{ + PrivateFunctionExecutionMockCircuitProducer circuit_producer(std::move(leading_is_kernel_flags), large_first_app); + return precompute_vks(circuit_producer); +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/mega_circuit_builder.test.cpp 
b/barretenberg/cpp/src/barretenberg/circuit_checker/mega_circuit_builder.test.cpp index 05dd9f15a08e..e923638d315e 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/mega_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/mega_circuit_builder.test.cpp @@ -165,8 +165,8 @@ TEST(MegaCircuitBuilder, GoblinEccOpQueueUltraOps) /** * @brief Check that the selector partitioning is correct for the mega circuit builder - * @details We check that for the arithmetic, delta_range, elliptic, memory, nnf, lookup, busread, poseidon2_external, - * poseidon2_internal blocks, and the other selectors are zero on that block. + * @details We check that for the arithmetic, delta_range, elliptic, memory, nnf, lookup, busread, poseidon2_external + * blocks, and the other selectors are zero on that block. */ TEST(MegaCircuitBuilder, CompleteSelectorPartitioningCheck) { @@ -203,8 +203,12 @@ TEST(MegaCircuitBuilder, CompleteSelectorPartitioningCheck) if (&block != &builder.blocks.poseidon2_external) { EXPECT_EQ(block.q_poseidon2_external()[i], 0); } - if (&block != &builder.blocks.poseidon2_internal) { - EXPECT_EQ(block.q_poseidon2_internal()[i], 0); + // The Mega compressed Poseidon2 relations are selected only inside `poseidon2_quad_internal`; + // all three selectors must be zero on every other block. 
+ if (&block != &builder.blocks.poseidon2_quad_internal) { + EXPECT_EQ(block.q_poseidon2_quad_internal()[i], 0); + EXPECT_EQ(block.q_poseidon2_quad_internal_terminal()[i], 0); + EXPECT_EQ(block.q_poseidon2_transition_entry()[i], 0); } } } @@ -272,9 +276,8 @@ TEST(MegaCircuitBuilder, EmptyCircuitFinalization) EXPECT_EQ(builder.blocks.memory.size(), 0); EXPECT_EQ(builder.blocks.nnf.size(), 0); EXPECT_EQ(builder.blocks.poseidon2_external.size(), 0); - EXPECT_EQ(builder.blocks.poseidon2_internal.size(), 0); - EXPECT_EQ(builder.get_calldata().size(), 0); - EXPECT_EQ(builder.get_secondary_calldata().size(), 0); + EXPECT_EQ(builder.get_calldata(BusId::KERNEL_CALLDATA).size(), 0); + EXPECT_EQ(builder.get_calldata(BusId::APP_CALLDATA).size(), 0); EXPECT_EQ(builder.get_return_data().size(), 0); EXPECT_TRUE(CircuitChecker::check(builder)); @@ -289,13 +292,13 @@ TEST(MegaCircuitBuilder, DatabusOutOfBoundsReadFails) // Add single entry to calldata auto val = builder.add_variable(fr(42)); - builder.add_public_calldata(val); + builder.add_public_calldata(BusId::KERNEL_CALLDATA, val); // Try to read at index 1 (out of bounds - only index 0 exists) auto bad_idx = builder.add_variable(fr(1)); // This should trigger an assertion in read_calldata - EXPECT_THROW(builder.read_calldata(bad_idx), std::runtime_error); + EXPECT_THROW(builder.read_calldata(BusId::KERNEL_CALLDATA, bad_idx), std::runtime_error); } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp index 30b9c10e5f18..71aaa07a188c 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp @@ -175,9 +175,13 @@ bool UltraCircuitChecker::check_block(Builder& builder, if (!result) { return report_fail("Failed Lookup check relation at row idx = ", idx); } - result = result && 
check_relation(values, params); - if (!result) { - return report_fail("Failed PoseidonInternal relation at row idx = ", idx); + if constexpr (!IsMegaBuilder) { + // Mega covers all internal rounds via the compressed block; there is no + // q_poseidon2_internal selector in MegaFlavor. + result = result && check_relation(values, params); + if (!result) { + return report_fail("Failed PoseidonInternal relation at row idx = ", idx); + } } result = result && check_relation(values, params); if (!result) { @@ -185,6 +189,22 @@ bool UltraCircuitChecker::check_block(Builder& builder, } if constexpr (IsMegaBuilder) { + result = result && check_relation(values, params); + if (!result) { + return report_fail("Failed PoseidonInitialExternal relation at row idx = ", idx); + } + result = result && check_relation(values, params); + if (!result) { + return report_fail("Failed PoseidonQuadInternal relation at row idx = ", idx); + } + result = result && check_relation(values, params); + if (!result) { + return report_fail("Failed PoseidonQuadInternalTerminal relation at row idx = ", idx); + } + result = result && check_relation(values, params); + if (!result) { + return report_fail("Failed PoseidonTransitionEntry relation at row idx = ", idx); + } result = result && check_databus_read(values, builder); if (!result) { return report_fail("Failed databus read at row idx = ", idx); @@ -242,7 +262,9 @@ template bool UltraCircuitChecker::check_databus_read(auto& v auto value = values.w_l; // Map bus_idx → wire-linear selector on the values struct (mirrors BusData::selector in the relation). - const std::array bus_selectors{ &values.q_l, &values.q_r, &values.q_o }; + const std::array bus_selectors{ + &values.q_l, &values.q_r, &values.q_o, &values.q_4, &values.q_m + }; // Locate the bus column being read (exactly one selector should be active on a busread row) and look up the // expected value from the builder's bus vector. 
@@ -360,10 +382,16 @@ void UltraCircuitChecker::populate_values( values.q_memory = block.q_memory()[idx]; values.q_nnf = block.q_nnf()[idx]; values.q_lookup = block.q_lookup()[idx]; - values.q_poseidon2_internal = block.q_poseidon2_internal()[idx]; values.q_poseidon2_external = block.q_poseidon2_external()[idx]; if constexpr (IsMegaBuilder) { + values.q_5 = block.q_5()[idx]; + values.q_poseidon2_external_initial = block.q_poseidon2_external_initial()[idx]; + values.q_poseidon2_quad_internal = block.q_poseidon2_quad_internal()[idx]; + values.q_poseidon2_quad_internal_terminal = block.q_poseidon2_quad_internal_terminal()[idx]; + values.q_poseidon2_transition_entry = block.q_poseidon2_transition_entry()[idx]; values.q_busread = block.q_busread()[idx]; + } else { + values.q_poseidon2_internal = block.q_poseidon2_internal()[idx]; } } diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.hpp b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.hpp index 43ef54a47ecc..ed59806ccabd 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.hpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.hpp @@ -6,7 +6,11 @@ #include "barretenberg/relations/memory_relation.hpp" #include "barretenberg/relations/non_native_field_relation.hpp" #include "barretenberg/relations/poseidon2_external_relation.hpp" +#include "barretenberg/relations/poseidon2_initial_external_relation.hpp" #include "barretenberg/relations/poseidon2_internal_relation.hpp" +#include "barretenberg/relations/poseidon2_quad_internal_relation.hpp" +#include "barretenberg/relations/poseidon2_quad_internal_terminal_relation.hpp" +#include "barretenberg/relations/poseidon2_transition_entry_relation.hpp" #include "barretenberg/relations/relation_parameters.hpp" #include "barretenberg/relations/ultra_arithmetic_relation.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" @@ -24,7 +28,11 @@ 
class UltraCircuitChecker { using NonNativeField = NonNativeFieldRelation; using DeltaRangeConstraint = DeltaRangeConstraintRelation; using PoseidonExternal = Poseidon2ExternalRelation; + using PoseidonInitialExternal = Poseidon2InitialExternalRelation; using PoseidonInternal = Poseidon2InternalRelation; + using PoseidonQuadInternal = Poseidon2QuadInternalRelation; + using PoseidonQuadInternalTerminal = Poseidon2QuadInternalTerminalRelation; + using PoseidonTransitionEntry = Poseidon2TransitionEntryRelation; using Params = RelationParameters; /** diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp index 24479d52b138..0cc9972c85c2 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.hpp @@ -183,12 +183,21 @@ template class GeminiProver_ { BB_BENCH_NAME("compute_batched"); Fr running_scalar(1); - // Batch base polynomials; updates running_scalar in place + // Batch base polynomials via a single fused parallel_for over the destination range, + // amortising N× parallel_for startup overhead into 1×. Updates running_scalar in place. auto batch = [&](Polynomial& batched, const RefVector& polynomials_to_batch) { - for (auto& poly : polynomials_to_batch) { - batched.add_scaled(poly, running_scalar); + const size_t n = polynomials_to_batch.size(); + std::vector> sources; + std::vector scalars; + sources.reserve(n); + scalars.reserve(n); + for (size_t i = 0; i < n; ++i) { + sources.emplace_back(polynomials_to_batch[i]); + scalars.push_back(running_scalar); running_scalar *= challenge; } + add_scaled_batch( + batched, std::span>(sources), std::span(scalars)); }; // Batch tails into a small accumulator with the correct rho power per tail. 
diff --git a/barretenberg/cpp/src/barretenberg/constants.hpp b/barretenberg/cpp/src/barretenberg/constants.hpp index 405f690b9e26..1a6d03a3c0f5 100644 --- a/barretenberg/cpp/src/barretenberg/constants.hpp +++ b/barretenberg/cpp/src/barretenberg/constants.hpp @@ -61,4 +61,16 @@ static constexpr uint32_t NUM_TRANSLATION_EVALUATIONS = 5; // The number of leading zero rows in the execution trace. Used to enable shifted polynomials. static constexpr size_t NUM_ZERO_ROWS = 1; + +// The maximum number of app circuits a single kernel can recursively verify in one accumulation group. +static constexpr uint8_t MAX_APPS_PER_KERNEL = 3; + +static constexpr size_t CHONK_MAX_NUM_CIRCUITS = 48 + /*trailing kernels*/ 3; + +static constexpr size_t BATCH_MERGE_PROOF_SIZE = + /*num subtables*/ 1 + + /*shift sizes*/ CHONK_MAX_NUM_CIRCUITS + + /*commitments*/ (4 * (4 * (CHONK_MAX_NUM_CIRCUITS + /*zk tables, merged tables*/ 2) + /*degree check*/ 1)) + + /*evals*/ (4 * (CHONK_MAX_NUM_CIRCUITS + 2) + 1) + + /*shplonk and kzg*/ 8; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/README.md b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/README.md index cf4d815d84f2..198223839525 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/README.md +++ b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/README.md @@ -8,7 +8,7 @@ Poseidon2 hash function for BN254 scalar field. Reference: https://eprint.iacr.o Fast hashing outside circuits: Merkle trees, protocol operations, witness generation. ### Circuit (`stdlib/hash/poseidon2/`) -The circuit implementation uses **native Poseidon2 to compute witness values**, then records them into custom gates. The round function is not re-implemented with arithmetic gates—instead, custom gate relations (`Poseidon2ExternalRelation`, `Poseidon2InternalRelation`) verify the witnesses satisfy Poseidon2 equations. 
+The circuit implementation uses **native Poseidon2 to compute witness values**, then records them into custom gates. The round function is not re-implemented with arithmetic gates. The canonical circuit documentation, including the Mega K=4 quad-internal layout and soundness argument, lives in `stdlib/hash/poseidon2/README.md`. ### TypeScript (`yarn-project/foundation/src/crypto/poseidon/`) Client-side hashing in Node.js and browser via WASM. The TypeScript wrapper (`poseidon2Hash`, `poseidon2Permutation`) calls `c_bind.cpp` exports through `bb.js`. Used by sequencer, PXE, and wallet for computing hashes that must match on-chain verification. diff --git a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2_quad_closed_form.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2_quad_closed_form.test.cpp new file mode 100644 index 000000000000..78a6f04e5dc2 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2_quad_closed_form.test.cpp @@ -0,0 +1,180 @@ +// Equivalence test: closed-form 4-round propagation vs. step iteration. +// +// The K=4 compressed Poseidon2 internal-round relation currently computes (out_0..out_3) from +// (w_l, w_r, w_o, w_4, q_l..q_4) via: +// 1) S-boxes u_k = (w_k + q_k)^5 for k = 0..3 +// 2) Vandermonde RHS b_1, b_2, b_3 (linear in w_r, w_o, w_4 and u_0, u_1, u_2) +// 3) Lagrange solve s_j^{(0)} = α_j^(1) b_1 + α_j^(2) b_2 + α_j^(3) b_3 +// 4) Four step iterations of the internal-MDS update on (s_1, s_2, s_3) +// 5) out_0 = D_1 u_3 + T_3, out_{1,2,3} = state[1..3] at round 4 +// +// Steps 2..5 are linear in (w_r, w_o, w_4, u_0..u_3) (everything is linear once the four S-boxes +// are taken as opaque inputs). They can be folded into a single linear map. This test verifies +// that the coefficient tables consumed by the relations agree with explicit step iteration on +// random inputs. 
+ +#include "barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp" +#include "barretenberg/ecc/curves/bn254/fr.hpp" + +#include +#include + +namespace { + +using FF = bb::fr; +using QuadParams = bb::crypto::Poseidon2QuadBn254Params; + +struct Out { + FF out_0, out_1, out_2, out_3; +}; + +// Reference: same body as Poseidon2QuadInternalRelationImpl::accumulate, evaluated +// in plain field arithmetic. +Out reference_step_iter(FF s1, FF s2, FF s3, FF u0, FF u1, FF u2, FF u3) +{ + auto step = [](FF& x1, FF& x2, FF& x3, const FF& u) { + FF sum = x1 + x2 + x3; + FF t = u + sum; + FF n1 = t + x1 * (QuadParams::D2 - FF(1)); + FF n2 = t + x2 * (QuadParams::D3 - FF(1)); + FF n3 = t + x3 * (QuadParams::D4 - FF(1)); + x1 = n1; + x2 = n2; + x3 = n3; + }; + step(s1, s2, s3, u0); + step(s1, s2, s3, u1); + step(s1, s2, s3, u2); + FF T_3 = s1 + s2 + s3; + FF out_0 = u3 * QuadParams::D1 + T_3; + step(s1, s2, s3, u3); + return { out_0, s1, s2, s3 }; +} + +Out closed_form(FF w_r, FF w_o, FF w_4, FF u0, FF u1, FF u2, FF u3) +{ + const auto& table = QuadParams::tables.closed_form; + auto eval = [&](size_t row) { + return table[row][0] * w_r + table[row][1] * w_o + table[row][2] * w_4 + table[row][3] * u0 + + table[row][4] * u1 + table[row][5] * u2 + table[row][6] * u3; + }; + return { eval(0), eval(1), eval(2), eval(3) }; +} + +// Reference path: derive (s_1^{(0)}, s_2^{(0)}, s_3^{(0)}) from (w_*, u_*) via the b_k formulas +// + Lagrange solve, then iterate the internal-MDS steps explicitly. 
+Out reference_from_wires(FF w_r, FF w_o, FF w_4, FF u0, FF u1, FF u2, FF u3) +{ + const FF D1 = QuadParams::D1; + const FF S = QuadParams::SIGMA; + FF b_1 = w_r - D1 * u0; + FF b_2 = -FF(2) * w_r + w_o + (FF(2) * D1 - FF(3)) * u0 - D1 * u1; + FF b_3 = -(S + FF(2)) * w_r - w_o + w_4 + ((S + FF(2)) * D1 - S - FF(3)) * u0 + (D1 - FF(3)) * u1 - D1 * u2; + FF s1 = QuadParams::alpha_1_1 * b_1 + QuadParams::alpha_1_2 * b_2 + QuadParams::alpha_1_3 * b_3; + FF s2 = QuadParams::alpha_2_1 * b_1 + QuadParams::alpha_2_2 * b_2 + QuadParams::alpha_2_3 * b_3; + FF s3 = QuadParams::alpha_3_1 * b_1 + QuadParams::alpha_3_2 * b_2 + QuadParams::alpha_3_3 * b_3; + return reference_step_iter(s1, s2, s3, u0, u1, u2, u3); +} + +} // namespace + +TEST(Poseidon2QuadClosedForm, ForwardVandermondeLhsMatchesWeightedSum) +{ + // Sanity: each forward-Vandermonde LHS row should equal the weighted sum of the + // out_1, out_2, out_3 closed-form rows. + const std::array, 3> weights = { + { { FF(1), FF(1), FF(1) }, + { QuadParams::D2, QuadParams::D3, QuadParams::D4 }, + { QuadParams::D2 * QuadParams::D2, QuadParams::D3 * QuadParams::D3, QuadParams::D4 * QuadParams::D4 } } + }; + for (size_t k = 0; k < 3; ++k) { + for (size_t i = 0; i < 7; ++i) { + FF expected = weights[k][0] * QuadParams::tables.closed_form[1][i] + + weights[k][1] * QuadParams::tables.closed_form[2][i] + + weights[k][2] * QuadParams::tables.closed_form[3][i]; + EXPECT_EQ(QuadParams::tables.forward_vandermonde_lhs[k][i], expected) << "row " << k << " col " << i; + } + } +} + +TEST(Poseidon2QuadClosedForm, MatchesStepIteration) +{ + for (int trial = 0; trial < 100; ++trial) { + FF w_r = FF::random_element(); + FF w_o = FF::random_element(); + FF w_4 = FF::random_element(); + FF u0 = FF::random_element(); + FF u1 = FF::random_element(); + FF u2 = FF::random_element(); + FF u3 = FF::random_element(); + + Out ref = reference_from_wires(w_r, w_o, w_4, u0, u1, u2, u3); + Out cf = closed_form(w_r, w_o, w_4, u0, u1, u2, u3); + + 
EXPECT_EQ(ref.out_0, cf.out_0) << "trial " << trial; + EXPECT_EQ(ref.out_1, cf.out_1) << "trial " << trial; + EXPECT_EQ(ref.out_2, cf.out_2) << "trial " << trial; + EXPECT_EQ(ref.out_3, cf.out_3) << "trial " << trial; + } +} + +// The terminal relation `Poseidon2QuadInternalTerminalRelationImpl::accumulate` exploits the +// fact that the U_3 coefficient of out_1, out_2, out_3 in the closed-form table is identically +// 1, allowing it to write `+ u_3` instead of `+ u_3 * C[k][6]` (lines 85-87 of +// poseidon2_quad_internal_terminal_relation.hpp). This invariant is hard-coded by `build_out_j` +// (`r[U_3] = FF(1)`); if a future refactor removes that line, the terminal relation becomes +// silently incorrect. This sentinel test pins the invariant. +TEST(Poseidon2QuadClosedForm, TerminalU3CoefIsOne) +{ + EXPECT_EQ(QuadParams::tables.closed_form[QuadParams::OUT_1][QuadParams::U_3], FF(1)); + EXPECT_EQ(QuadParams::tables.closed_form[QuadParams::OUT_2][QuadParams::U_3], FF(1)); + EXPECT_EQ(QuadParams::tables.closed_form[QuadParams::OUT_3][QuadParams::U_3], FF(1)); +} + +// Edge-case adversarial inputs. The 100 random trials in `MatchesStepIteration` are +// statistically sufficient (false-positive prob ~ 100/p), but they may miss bugs that only +// manifest at special field-arithmetic boundary values. This test exercises: +// - All-zero inputs (a fixed point of the linear dynamics). +// - 1 and -1 (smallest nonzero values). +// - Inputs producing u_k = 0 for various k (testing the "driver = 0" sub-cases). +// - p/2 and other large values (exercising any path that branches on representation). 
+TEST(Poseidon2QuadClosedForm, MatchesStepIterationOnEdgeCases) +{ + const FF zero = FF::zero(); + const FF one = FF(1); + const FF neg_one = -FF(1); + const FF p_half = -(one * (FF(2).invert())); // (-1)/2 = (p-1)/2 mod p + + struct Trial { + const char* name; + FF w_r, w_o, w_4, u0, u1, u2, u3; + }; + + const std::array trials = { { + { "all_zero", zero, zero, zero, zero, zero, zero, zero }, + { "all_one", one, one, one, one, one, one, one }, + { "all_neg_one", neg_one, neg_one, neg_one, neg_one, neg_one, neg_one, neg_one }, + { "p_half", p_half, p_half, p_half, p_half, p_half, p_half, p_half }, + { "u0_zero", one, one, one, zero, one, one, one }, + { "u3_zero", one, one, one, one, one, one, zero }, + { "all_u_zero", FF::random_element(), FF::random_element(), FF::random_element(), zero, zero, zero, zero }, + { "all_w_zero", + zero, + zero, + zero, + FF::random_element(), + FF::random_element(), + FF::random_element(), + FF::random_element() }, + } }; + + for (const auto& t : trials) { + Out ref = reference_from_wires(t.w_r, t.w_o, t.w_4, t.u0, t.u1, t.u2, t.u3); + Out cf = closed_form(t.w_r, t.w_o, t.w_4, t.u0, t.u1, t.u2, t.u3); + + EXPECT_EQ(ref.out_0, cf.out_0) << "trial " << t.name; + EXPECT_EQ(ref.out_1, cf.out_1) << "trial " << t.name; + EXPECT_EQ(ref.out_2, cf.out_2) << "trial " << t.name; + EXPECT_EQ(ref.out_3, cf.out_3) << "trial " << t.name; + } +} diff --git a/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp new file mode 100644 index 000000000000..6eebc963be1b --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp @@ -0,0 +1,313 @@ +// Derived parameters for the K=4 "quad" compressed Poseidon2 internal-round encoding on BN254. +// Treated like the base Poseidon2 constants: fixed, derivable from the sponge spec, pre-computed. 
+// +// See `barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md` for the algebraic +// derivation. The short version: +// +// The compressed K=4 row stores state[0] at 4 consecutive internal rounds. Solving for the +// non-S-boxed elements (s_1, s_2, s_3) at row-start reduces (via row-reduction) to a 3x3 +// Vandermonde system with nodes (D_2, D_3, D_4). Its Lagrange-basis inverse has 9 fixed +// coefficients α_j^(k) that let us write s_j = Σ_k α_j^(k) b_k where b_k are linear in wires. +// +// This file exposes those 9 coefficients, the derived diagonal constants used by the entry +// relation, and the closed-form propagation tables consumed by the quad relations. +// +// Static assertions guard invertibility: the three Vandermonde differences (D_3 - D_2), +// (D_4 - D_2), (D_4 - D_3) must all be nonzero. + +#pragma once + +#include "barretenberg/crypto/poseidon2/poseidon2_params.hpp" +#include "barretenberg/ecc/curves/bn254/fr.hpp" + +#include +#include + +namespace bb::crypto { + +struct Poseidon2QuadBn254Params { + using FF = Poseidon2Bn254ScalarFieldParams::FF; + static constexpr size_t VANDERMONDE_SIZE = Poseidon2Bn254ScalarFieldParams::t - 1; + + // Internal matrix diagonal D_i (computed from the stored `D_i - 1` values). + static constexpr FF D1 = FF(1) + Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal_minus_one[0]; + static constexpr FF D2 = FF(1) + Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal_minus_one[1]; + static constexpr FF D3 = FF(1) + Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal_minus_one[2]; + static constexpr FF D4 = FF(1) + Poseidon2Bn254ScalarFieldParams::internal_matrix_diagonal_minus_one[3]; + + static constexpr FF SIGMA = D2 + D3 + D4; // Σ = D_2 + D_3 + D_4, recurs in the relation algebra + + private: + // Vandermonde differences (used below and also asserted non-zero). 
+ static constexpr FF D2_minus_D3 = D2 - D3; + static constexpr FF D2_minus_D4 = D2 - D4; + static constexpr FF D3_minus_D4 = D3 - D4; + + // 1 / ((D_2 - D_3)(D_2 - D_4)) — denominator for α_1^(·) + static constexpr FF inv_denom_1 = (D2_minus_D3 * D2_minus_D4).invert(); + // 1 / ((D_3 - D_2)(D_3 - D_4)) — denominator for α_2^(·) + static constexpr FF inv_denom_2 = ((-D2_minus_D3) * D3_minus_D4).invert(); + // 1 / ((D_4 - D_2)(D_4 - D_3)) — denominator for α_3^(·) + static constexpr FF inv_denom_3 = ((-D2_minus_D4) * (-D3_minus_D4)).invert(); + + // Invertibility guard. det(V) = (D_3 - D_2)(D_4 - D_2)(D_4 - D_3). + static_assert(!D2_minus_D3.is_zero(), "Poseidon2 quad: D_2 == D_3, Vandermonde singular"); + static_assert(!D2_minus_D4.is_zero(), "Poseidon2 quad: D_2 == D_4, Vandermonde singular"); + static_assert(!D3_minus_D4.is_zero(), "Poseidon2 quad: D_3 == D_4, Vandermonde singular"); + + public: + // Lagrange basis coefficients α_j^(k). + // + // s_j = α_j^(1) * b_1 + α_j^(2) * b_2 + α_j^(3) * b_3 + // + // where b_k is the k-th right-hand side of the row-reduced Vandermonde system. These are + // the coefficients of the Lagrange polynomial at node D_{j+1} (taking nodes (D_2, D_3, D_4)): + // + // L_j(x) = α_j^(1) + α_j^(2) * x + α_j^(3) * x^2 + // = Π_{k ≠ j} (x - D_{k+1}) / (D_{j+1} - D_{k+1}) + // + // Concretely: + // α_1^(1) = D_3 * D_4 / ((D_2 - D_3)(D_2 - D_4)) + // α_1^(2) = -(D_3 + D_4) / ((D_2 - D_3)(D_2 - D_4)) + // α_1^(3) = 1 / ((D_2 - D_3)(D_2 - D_4)) + // (and analogously for α_2^(k), α_3^(k)) + // α_j^(1): constant term of L_j. 
+ static constexpr FF alpha_1_1 = D3 * D4 * inv_denom_1; + static constexpr FF alpha_2_1 = D2 * D4 * inv_denom_2; + static constexpr FF alpha_3_1 = D2 * D3 * inv_denom_3; + + // α_j^(2): linear term (negated sum of other nodes, divided by the denominator) + static constexpr FF alpha_1_2 = -(D3 + D4) * inv_denom_1; + static constexpr FF alpha_2_2 = -(D2 + D4) * inv_denom_2; + static constexpr FF alpha_3_2 = -(D2 + D3) * inv_denom_3; + + // α_j^(3): quadratic term (pure reciprocal of the denominator) + static constexpr FF alpha_1_3 = inv_denom_1; + static constexpr FF alpha_2_3 = inv_denom_2; + static constexpr FF alpha_3_3 = inv_denom_3; + + // Closed-form 4-round propagation coefficients. + // + // The four-round internal-block update on the non-S-boxed lanes (s_1, s_2, s_3) is linear + // once the four S-boxed scalars u_k = (w_k + c_k)^5 are taken as opaque inputs: + // + // step(v, u) = A v + u · 1, A = [[D_2,1,1],[1,D_3,1],[1,1,D_4]] + // + // After 4 rounds with inputs u_0..u_3, the state-at-round-4 components (out_1, out_2, out_3) + // and the state-at-round-3 row-sum T_3 (used by out_0 = D_1 u_3 + T_3) are all fixed linear + // combinations of (w_r, w_o, w_4, u_0, u_1, u_2, u_3), where the (w_r, w_o, w_4)-dependence + // enters through s^{(0)} = V^{-1} b and b_k = linear(w_*, u_0..u_2). Composing A^4 V^{-1} + // with the b_k formulas gives the 28 constants below, one per (output, input) cell. + // + // Equivalence to the step iteration is verified in a unit test (see `poseidon2_quad_closed_form.test.cpp`). + // + // Linear round-propagation vectors (A^k · 1)_j for k = 1, 2. + // + // Used by both the entry relation (which checks state[0] at rounds 1, 2 from a standard + // encoded predecessor) and the closed-form table builder below. These simple scalar formulas + // remain constexpr. 
+ // + // A_one[j] = (A · 1)_j = D_{j+1} + 2 + // A2_one[j] = (A^2 · 1)_j = D_{j+1}^2 + D_{j+1} + Σ + 4 + // sum_A_one = 1^T A · 1 = Σ + 6 (also = (A · 1) summed over rows) + static constexpr std::array A_one = { D2 + FF(2), D3 + FF(2), D4 + FF(2) }; + static constexpr std::array A2_one = { + D2 * D2 + D2 + SIGMA + FF(4), + D3* D3 + D3 + SIGMA + FF(4), + D4* D4 + D4 + SIGMA + FF(4), + }; + static constexpr FF sum_A_one = SIGMA + FF(6); + + // Closed-form coefficient table layout. Each row gives coefficients for the inputs + // (w_r, w_o, w_4, u_0, u_1, u_2, u_3), + // where u_k = (s_0^{(k)} + c_k)^5. + // + // closed_form[j] for j in {0,1,2,3}: coefficients of out_j, i.e. state[j] after four + // internal rounds. The terminal relation consumes all + // four rows; the interior relation consumes row 0. + // + // forward_vandermonde_lhs[k] for k in {0,1,2}: coefficients of the forward-Vandermonde + // combinations used by the interior relation: + // row 0 = out_1 + out_2 + out_3 + // row 1 = D_2 out_1 + D_3 out_2 + D_4 out_3 + // row 2 = D_2^2 out_1 + D_3^2 out_2 + D_4^2 out_3 + enum ClosedFormColumn : size_t { + W_R, + W_O, + W_4, + U_0, + U_1, + U_2, + U_3, + }; + enum ClosedFormOutput : size_t { + OUT_0, + OUT_1, + OUT_2, + OUT_3, + }; + static constexpr size_t CLOSED_FORM_INPUT_COUNT = VANDERMONDE_SIZE + Poseidon2Bn254ScalarFieldParams::t; + static_assert(CLOSED_FORM_INPUT_COUNT == U_3 + 1); + using ClosedFormRow = std::array; + using ClosedFormTable = std::array; + using ForwardVandermondeTable = std::array; + + private: + // Derive the coefficient tables once from the fixed Poseidon2 parameters. The relation code + // reads only the resulting `closed_form` and `forward_vandermonde_lhs` tables. 
+ struct Tables { + ClosedFormTable closed_form; + ForwardVandermondeTable forward_vandermonde_lhs; + }; + + using Mat = std::array, VANDERMONDE_SIZE>; + using Vec = std::array; + + static constexpr Mat matrix_multiply(const Mat& a, const Mat& b) + { + Mat r{}; + for (size_t i = 0; i < VANDERMONDE_SIZE; ++i) { + for (size_t j = 0; j < VANDERMONDE_SIZE; ++j) { + FF s = FF(0); + for (size_t k = 0; k < VANDERMONDE_SIZE; ++k) { + s += a[i][k] * b[k][j]; + } + r[i][j] = s; + } + } + return r; + } + + static constexpr Vec matrix_vector_multiply(const Mat& a, const Vec& v) + { + Vec r{}; + for (size_t i = 0; i < VANDERMONDE_SIZE; ++i) { + FF s = FF(0); + for (size_t k = 0; k < VANDERMONDE_SIZE; ++k) { + s += a[i][k] * v[k]; + } + r[i] = s; + } + return r; + } + + static constexpr Vec vector_matrix_multiply(const Vec& v, const Mat& a) + { + Vec r{}; + for (size_t j = 0; j < VANDERMONDE_SIZE; ++j) { + FF s = FF(0); + for (size_t k = 0; k < VANDERMONDE_SIZE; ++k) { + s += v[k] * a[k][j]; + } + r[j] = s; + } + return r; + } + + static constexpr FF vector_sum(const Vec& v) + { + FF result = FF(0); + for (const auto& entry : v) { + result += entry; + } + return result; + } + + static constexpr ClosedFormRow weighted_closed_form_sum(const Vec& weights, const ClosedFormTable& table) + { + ClosedFormRow r{}; + for (size_t i = 0; i < CLOSED_FORM_INPUT_COUNT; ++i) { + r[i] = weights[0] * table[OUT_1][i] + weights[1] * table[OUT_2][i] + weights[2] * table[OUT_3][i]; + } + return r; + } + + static Tables build_tables() + { + const Vec ones = { FF(1), FF(1), FF(1) }; + // A: internal-round update on (s_1, s_2, s_3). step(v, u) = A v + u·1. 
+ const Mat A = { { { D2, FF(1), FF(1) }, { FF(1), D3, FF(1) }, { FF(1), FF(1), D4 } } }; + const Mat A2 = matrix_multiply(A, A); + const Mat A3 = matrix_multiply(A2, A); + const Mat A4 = matrix_multiply(A3, A); + const Vec A_one = matrix_vector_multiply(A, ones); + const Vec A2_one = matrix_vector_multiply(A2, ones); + const Vec A3_one = matrix_vector_multiply(A3, ones); + + // V_inv (rows are Lagrange coefs α_j^(*)). + const Mat Vinv = { { { alpha_1_1, alpha_1_2, alpha_1_3 }, + { alpha_2_1, alpha_2_2, alpha_2_3 }, + { alpha_3_1, alpha_3_2, alpha_3_3 } } }; + // M = A^4 · V_inv: maps b → b-derived part of out_{1,2,3} at round 4. + const Mat M = matrix_multiply(A4, Vinv); + + // B_w: rows are w-coefs of b_1, b_2, b_3 on (w_r, w_o, w_4). + const Mat Bw = { { { FF(1), FF(0), FF(0) }, { -FF(2), FF(1), FF(0) }, { -(SIGMA + FF(2)), -FF(1), FF(1) } } }; + // B_u: rows are (u_0, u_1, u_2)-coefs of b_1, b_2, b_3. + const Mat Bu = { { { -D1, FF(0), FF(0) }, + { FF(2) * D1 - FF(3), -D1, FF(0) }, + { (SIGMA + FF(2)) * D1 - SIGMA - FF(3), D1 - FF(3), -D1 } } }; + + const Mat Mw = matrix_multiply(M, Bw); // w-coefs of out_{1,2,3} + const Mat MBu = matrix_multiply(M, Bu); // b-derived part of u-coefs + + // T_3 = sum of state[1..3] at round 3. + // q_T3 = (1^T A^3) · V_inv: projection coefficients for the b-derived part of T_3. + const Vec col_sum_A3 = vector_matrix_multiply(ones, A3); + const Vec q_T3 = vector_matrix_multiply(col_sum_A3, Vinv); + const FF sum_A_one = vector_sum(A_one); + const FF sum_A2_one = vector_sum(A2_one); + + const Vec row0_wire_coefficients = vector_matrix_multiply(q_T3, Bw); + const Vec row0_u_coefficients = vector_matrix_multiply(q_T3, Bu); + + // out_0 = D_1 u_3 + T_3. 
+ // T_3's wire-coefs: q_T3 · B_w (1×3 · 3×3 → 1×3) + // T_3's u-coefs: q_T3 · B_u + (sum_A2_one, sum_A_one, 3) inhomogeneous additions + ClosedFormRow row0{}; + for (size_t i = 0; i < VANDERMONDE_SIZE; ++i) { + row0[i] = row0_wire_coefficients[i]; + } + row0[U_0] = row0_u_coefficients[0] + sum_A2_one; + row0[U_1] = row0_u_coefficients[1] + sum_A_one; + row0[U_2] = row0_u_coefficients[2] + FF(3); + row0[U_3] = D1; + + // out_j (j=1,2,3): u_3 coefficient is identically 1 (free add at use site). + auto build_out_j = [&](size_t j) { + ClosedFormRow r{}; + r[W_R] = Mw[j][0]; + r[W_O] = Mw[j][1]; + r[W_4] = Mw[j][2]; + r[U_0] = MBu[j][0] + A3_one[j]; + r[U_1] = MBu[j][1] + A2_one[j]; + r[U_2] = MBu[j][2] + A_one[j]; + r[U_3] = FF(1); + return r; + }; + + ClosedFormTable closed_form_table{ row0, build_out_j(0), build_out_j(1), build_out_j(2) }; + + // Forward-Vandermonde LHS rows: linear combinations across out_{1,2,3} weighted by + // row 0: (1, 1, 1) → out_1 + out_2 + out_3 + // row 1: (D_2, D_3, D_4) → D_2 out_1 + D_3 out_2 + D_4 out_3 + // row 2: (D_2², D_3², D_4²) → D_2² out_1 + D_3² out_2 + D_4² out_3 + // Each row's coefficients on (w_*, u_*) are obtained by the same weighted sum applied + // to the corresponding (w_*, u_*) coefficients of out_1..out_3. + const std::array lhs_weights = { + { { FF(1), FF(1), FF(1) }, { D2, D3, D4 }, { D2 * D2, D3 * D3, D4 * D4 } } + }; + ForwardVandermondeTable lhs_table{}; + for (size_t k = 0; k < VANDERMONDE_SIZE; ++k) { + lhs_table[k] = weighted_closed_form_sum(lhs_weights[k], closed_form_table); + } + + return Tables{ closed_form_table, lhs_table }; + } + + public: + // Public coefficient tables consumed by the relations. 
+ static inline const Tables tables = build_tables(); +}; + +} // namespace bb::crypto diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp index c8450c7ff09f..3a9beb0f8071 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_to_constraint_buf.cpp @@ -802,10 +802,12 @@ BlockConstraint memory_init_to_block_constraint(Acir::Opcode::MemoryInit const& // array. if (std::holds_alternative(mem_init.block_type.value)) { uint32_t calldata_id = std::get(mem_init.block_type.value).value; - BB_ASSERT(calldata_id == 0 || calldata_id == 1, "acir_format::handle_memory_init: Unsupported calldata id"); + BB_ASSERT_LTE(calldata_id, + MAX_APPS_PER_KERNEL, + "acir_format::handle_memory_init: calldata id exceeds kernel + MAX_APPS_PER_KERNEL app columns"); block.type = BlockType::CallData; - block.calldata_id = calldata_id == 0 ? 
CallDataType::Primary : CallDataType::Secondary; + block.calldata_id = static_cast(calldata_id); } else if (std::holds_alternative(mem_init.block_type.value)) { block.type = BlockType::ReturnData; } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp index b492baa9c1c6..09ea9da9e5ff 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp @@ -165,17 +165,15 @@ void process_call_data_operations(Builder& builder, } }; - // Process primary or secondary calldata based on calldata_id - switch (constraint.calldata_id) { - case CallDataType::Primary: - process_calldata(databus.calldata); - break; - case CallDataType::Secondary: - process_calldata(databus.secondary_calldata); - break; - default: - bb::assert_failure("Databus only supports two calldata arrays."); - break; + // Process kernel or app calldata based on the ACIR calldata id. Id 0 is kernel calldata; app calldata ids start at + // 1 and map directly onto app_calldata[id - 1]. 
+ const auto calldata_id = static_cast(constraint.calldata_id); + if (calldata_id == static_cast(CallDataType::KernelCalldata)) { + process_calldata(databus.kernel_calldata); + } else { + const size_t app_calldata_idx = calldata_id - /*shift by kernel calldata*/ 1; + BB_ASSERT_LT(app_calldata_idx, MAX_APPS_PER_KERNEL, "Databus app calldata index out of bounds"); + process_calldata(databus.app_calldata[app_calldata_idx]); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.hpp index 4123c1e485f3..9051224267d7 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.hpp @@ -5,6 +5,7 @@ // ===================== #pragma once +#include "barretenberg/constants.hpp" #include "barretenberg/dsl/acir_format/witness_constant.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" #include @@ -17,10 +18,12 @@ enum AccessType : std::uint8_t { Write = 1, }; -enum CallDataType : std::uint8_t { - None = 0, - Primary = 1, - Secondary = 2, +enum CallDataType : std::uint32_t { + KernelCalldata = 0, + FirstAppCalldata = 1, + SecondAppCalldata = 2, + ThirdAppCalldata = 3, + None = bb::MAX_APPS_PER_KERNEL + 1, // Used for non-calldata blocks }; /** @@ -46,8 +49,8 @@ enum BlockType : std::uint8_t { * @details 1. init holds the initial values of the RAM/ROM/CallData/ReturnData table * 2. trace holds the sequence of memory operations (reads/writes) performed on the table * 3. type indicates the type of memory being constrained (RAM/ROM/CallData/ReturnData) - * 4. calldata_id (used only for CallData) indicates whether we are operating on primary (kernel) or secondary - * (app) calldata + * 4. calldata_id (used only for CallData) indicates whether we are operating on kernel calldata or an app + * calldata slot. 
The kernel calldata id is 0, app calldata ids are in [1, MAX_APPS_PER_KERNEL]. */ struct BlockConstraint { std::vector init; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp index 0507713f4e77..f055cb751974 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.test.cpp @@ -509,12 +509,14 @@ class CallDataTestingFunctions { } }; -using CallDataTestConfigs = testing::Types, - CallDataTestParams, - CallDataTestParams, - CallDataTestParams, - CallDataTestParams, - CallDataTestParams>; +using CallDataTestConfigs = testing::Types, + CallDataTestParams, + CallDataTestParams, + CallDataTestParams, + CallDataTestParams, + CallDataTestParams, + CallDataTestParams, + CallDataTestParams>; template class CallDataTests : public ::testing::Test, diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp index 56c67dcc1a58..b7ed640ce3a2 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp @@ -41,7 +41,8 @@ inline constexpr size_t ECDSA_SECP256R1 = 72611 + ZERO_GATE + (IsMegaBuilder inline constexpr size_t BLAKE2S = 2952 + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t BLAKE3 = 2158 + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t KECCAK_PERMUTATION = 17387 + ZERO_GATE + MEGA_OFFSET; -template inline constexpr size_t POSEIDON2_PERMUTATION = 73 + ZERO_GATE + MEGA_OFFSET; +template +inline constexpr size_t POSEIDON2_PERMUTATION = (IsMegaBuilder ? 
27 : 73) + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t MULTI_SCALAR_MUL = 3563 + ZERO_GATE; template inline constexpr size_t EC_ADD = 84 + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t BLOCK_ROM_READ = 9 + ZERO_GATE + MEGA_OFFSET; @@ -83,24 +84,24 @@ constexpr std::tuple HONK_RECURSION_CONSTANTS( } else if constexpr (std::is_same_v>) { switch (mode) { case PredicateTestCase::ConstantTrue: - return std::make_tuple(20817, 73); + return std::make_tuple(11848, 73); case PredicateTestCase::WitnessTrue: case PredicateTestCase::WitnessFalse: - return std::make_tuple(21874, 73); + return std::make_tuple(12905, 73); } } else if constexpr (std::is_same_v>) { switch (mode) { case PredicateTestCase::ConstantTrue: - return std::make_tuple(24993, 77); + return std::make_tuple(14506, 77); case PredicateTestCase::WitnessTrue: case PredicateTestCase::WitnessFalse: - return std::make_tuple(26146, 77); + return std::make_tuple(15659, 77); } } else if constexpr (std::is_same_v>) { if (mode != PredicateTestCase::ConstantTrue) { bb::assert_failure("Unhandled mode in MegaZKRecursiveFlavor."); } - return std::make_tuple(768328, 0); + return std::make_tuple(856820, 0); } else { bb::assert_failure("Unhandled recursive flavor."); } @@ -113,7 +114,7 @@ constexpr std::tuple HONK_RECURSION_CONSTANTS( // ======================================== // Gate count for Chonk recursive verification (Ultra with RollupIO) -inline constexpr size_t CHONK_RECURSION_GATES = 1474677; +inline constexpr size_t CHONK_RECURSION_GATES = 1563538; // ======================================== // Hypernova Recursion Constants @@ -123,30 +124,31 @@ inline constexpr size_t CHONK_RECURSION_GATES = 1474677; inline constexpr size_t MSM_ROWS_OFFSET = 2; // Init kernel gate counts (verifies OINK proof) -inline constexpr size_t INIT_KERNEL_GATE_COUNT = 24457; -inline constexpr size_t INIT_KERNEL_ECC_ROWS = 815 + MSM_ROWS_OFFSET; -inline constexpr size_t INIT_KERNEL_ULTRA_OPS = 86; +inline constexpr 
size_t INIT_KERNEL_GATE_COUNT = 13449; +inline constexpr size_t INIT_KERNEL_ECC_ROWS = 623 + MSM_ROWS_OFFSET; +inline constexpr size_t INIT_KERNEL_ULTRA_OPS = 70; // Inner kernel gate counts (verifies HN proof for previous kernel + HN for app) -inline constexpr size_t INNER_KERNEL_GATE_COUNT_HN = 59251; -inline constexpr size_t INNER_KERNEL_ECC_ROWS = 1634 + MSM_ROWS_OFFSET; -inline constexpr size_t INNER_KERNEL_ULTRA_OPS = 173; +inline constexpr size_t INNER_KERNEL_GATE_COUNT_HN = 31092; +inline constexpr size_t INNER_KERNEL_ECC_ROWS = 1312 + MSM_ROWS_OFFSET; +inline constexpr size_t INNER_KERNEL_ULTRA_OPS = 149; // Tail kernel gate counts (verifies HN_TAIL proof) -inline constexpr size_t TAIL_KERNEL_GATE_COUNT = 32249; -inline constexpr size_t TAIL_KERNEL_ECC_ROWS = 881 + MSM_ROWS_OFFSET; -inline constexpr size_t TAIL_KERNEL_ULTRA_OPS = 93; +inline constexpr size_t TAIL_KERNEL_GATE_COUNT = 17323; +inline constexpr size_t TAIL_KERNEL_ECC_ROWS = 656 + MSM_ROWS_OFFSET; +inline constexpr size_t TAIL_KERNEL_ULTRA_OPS = 72; // Hiding kernel gate counts (verifies HN_FINAL proof) -inline constexpr size_t HIDING_KERNEL_GATE_COUNT = 35622; -inline constexpr size_t HIDING_KERNEL_ECC_ROWS = 1374 + MSM_ROWS_OFFSET; -inline constexpr size_t HIDING_KERNEL_ULTRA_OPS = 124; +inline constexpr size_t HIDING_KERNEL_GATE_COUNT = 39627; +inline constexpr size_t HIDING_KERNEL_ECC_ROWS = 4872 + MSM_ROWS_OFFSET; +inline constexpr size_t HIDING_KERNEL_ULTRA_OPS = 334; // ======================================== // ECCVM Recursive Verifier Constants // ======================================== // Gate count for ECCVM recursive verifier (Ultra-arithmetized) +// Trigger rebuild inline constexpr size_t ECCVM_RECURSIVE_VERIFIER_GATE_COUNT = 220788; // ======================================== diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.cpp index 
256b6cc761b6..8e70a6b34d8c 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.cpp @@ -24,58 +24,67 @@ using namespace bb; * - INNER kernel: Two HN constraints (verifies previous kernel + new app) * - RESET kernel: Single HN constraint (verifies kernel only, resets accumulation) * - TAIL kernel: Single HN_TAIL constraint (final kernel before hiding kernel) - * - HIDING kernel: Single HN_FINAL constraint (adds ZK hiding) + * - HIDING kernel: Single HN_FINAL constraint (adds ZK hiding and verifies one batch merge proof) * * @param constraints The IVC recursion constraints extracted from an Aztec kernel's ACIR * @return Chonk instance with mock verification queue entries matching the constraint pattern */ std::shared_ptr create_mock_chonk_from_constraints(const std::vector& constraints) { - auto ivc = std::make_shared(std::max(constraints.size(), static_cast(4))); - + auto ivc = std::make_shared(std::max(constraints.size(), static_cast(MAX_APPS_PER_KERNEL + 1))); // Check constraint proof type. Throws if proof_type is not a valid HyperNova type auto constraint_has_type = [](const RecursionConstraint& c, Chonk::QUEUE_TYPE expected) { return proof_type_to_chonk_queue_type(c.proof_type) == expected; }; + BB_ASSERT(!constraints.empty(), "At least one recursion constraint is required to determine Chonk state"); + const bool is_init = constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::OINK); + const bool is_reset = (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN)); + const bool is_tail = (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN_TAIL)); + const bool is_hiding = + (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN_FINAL)); + const size_t upper_bound = is_init ? 
MAX_APPS_PER_KERNEL : MAX_APPS_PER_KERNEL + 1; + BB_ASSERT_LTE(constraints.size(), upper_bound, "Too many recursion constraints encountered when mocking IVC state"); + // Match constraint patterns to kernel types and populate appropriate mock data: // INIT kernel: Verifies first app circuit (no prior accumulator exists) - if (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::OINK)) { + if (is_init) { mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::OINK, /*is_kernel=*/false); + for (size_t idx = 1; idx < constraints.size(); idx++) { + BB_ASSERT(constraint_has_type(constraints[idx], Chonk::QUEUE_TYPE::HN), + "Subsequent constraints in init kernel must be HN type"); + mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN, /*is_kernel=*/false); + } return ivc; } // RESET kernel: Verifies only a previous kernel (resets the IVC accumulation) - if (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN)) { + if (is_reset) { mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN, /*is_kernel=*/true); return ivc; } - // TAIL kernel: Final kernel in the chain before generating tube proof - if (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN_TAIL)) { + // TAIL kernel: Final kernel in the chain before hiding kernel + if (is_tail) { mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN_TAIL, /*is_kernel=*/true); return ivc; } - // INNER kernel: Verifies previous kernel + new app circuit - if (constraints.size() == 2) { - BB_ASSERT(constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN), - "Inner kernel first constraint must be HN type"); - BB_ASSERT(constraint_has_type(constraints[1], Chonk::QUEUE_TYPE::HN), - "Inner kernel second constraint must be HN type"); - mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN, /*is_kernel=*/true); - mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN, /*is_kernel=*/false); - return ivc; - } - // HIDING kernel: Adds zero-knowledge hiding to the final 
proof - if (constraints.size() == 1 && constraint_has_type(constraints[0], Chonk::QUEUE_TYPE::HN_FINAL)) { + if (is_hiding) { mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN_FINAL, /*is_kernel=*/true); return ivc; } - throw_or_abort("Invalid set of IVC recursion constraints!"); + // INNER kernel: Verifies previous kernel + app circuits + bool is_kernel = true; + for (const auto& constraint : constraints) { + BB_ASSERT(constraint_has_type(constraint, Chonk::QUEUE_TYPE::HN), + "All constraints in inner kernel must be HN type"); + mock_chonk_accumulation(ivc, Chonk::QUEUE_TYPE::HN, /*is_kernel=*/is_kernel); + is_kernel = false; // First constraint verifies previous kernel, subsequent constraints verify apps + } return ivc; } @@ -139,8 +148,7 @@ Chonk::VerifierInputs create_mock_verification_queue_entry(const Chonk::QUEUE_TY * 1. Initializes the recursive verifier accumulator (challenge vector, evaluations, commitments) * - This is hashed in-circuit to bind the accumulator state * 2. Adds a mock verification queue entry (proof + VK) for the accumulated circuit - * 3. Adds a mock merge proof - * 4. For HN_FINAL: also adds a mock decider/PCS proof + * 3. 
For HN_FINAL: adds one mock batch merge proof and a mock decider/PCS proof * * @param ivc The Chonk instance to populate * @param type Verification queue type determining proof structure @@ -160,8 +168,8 @@ void mock_chonk_accumulation(const std::shared_ptr& ivc, Chonk::QUEUE_TYP Chonk::VerifierInputs entry = acir_format::create_mock_verification_queue_entry(type, is_kernel); ivc->verification_queue.emplace_back(entry); - ivc->goblin.merge_verification_queue.emplace_back(acir_format::create_mock_merge_proof()); if (type == Chonk::QUEUE_TYPE::HN_FINAL) { + ivc->goblin.batch_merge_proof = acir_format::create_mock_batch_merge_proof(); ivc->decider_proof = acir_format::create_mock_pcs_proof(); } ivc->num_circuits_accumulated++; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.hpp index 57610ec4dc29..3b0c1f8b1241 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.hpp @@ -37,13 +37,13 @@ using namespace bb; * @details Analyzes the constraint types to determine what mock data is needed (Oink proof, HN proof, etc.) * * @param constraints The IVC recursion constraints from an Aztec kernel program - * @return A Chonk instance with mock verification queue entries and merge proofs + * @return A Chonk instance with mock verification queue entries and for the hiding kernels a batch merge proof */ std::shared_ptr create_mock_chonk_from_constraints(const std::vector& constraints); /** * @brief Add mock accumulation data to a Chonk instance - * @details Populates the verification queue with a mock proof/VK entry and adds a mock merge proof. + * @details Populates the verification queue with a mock proof/VK entry. HN_FINAL also gets a mock batch merge proof. 
* Also initializes the recursive verifier accumulator which is hashed into the circuit. * * @param ivc The Chonk instance to populate diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.test.cpp index 7e7dcdc43287..8bfcdee19220 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/hypernova_recursion_constraint.test.cpp @@ -212,10 +212,9 @@ class HypernovaRecursionConstraintTest : public ::testing::Test { program.constraints.max_witness_index = static_cast(program.witness.size() - 1); program.constraints.num_acir_opcodes = static_cast(hn_recursion_constraints.size()); program.constraints.hn_recursion_constraints = hn_recursion_constraints; - program.constraints.original_opcode_indices = - hn_recursion_constraints.size() == 1 - ? AcirFormatOriginalOpcodeIndices{ .hn_recursion_constraints = { 0 } } - : AcirFormatOriginalOpcodeIndices{ .hn_recursion_constraints = { 0, 1 } }; + for (size_t idx = 0; idx < hn_recursion_constraints.size(); ++idx) { + program.constraints.original_opcode_indices.hn_recursion_constraints.push_back(static_cast(idx)); + } return program; } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp index 892f5c976b7d..0aae5c8887e8 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.cpp @@ -307,6 +307,39 @@ Goblin::MergeProof create_mock_merge_proof() return proof; } +HonkProof create_mock_batch_merge_proof() +{ + HonkProof proof; + + constexpr size_t NUM_WIRES = Goblin::BatchMergeRecursiveVerifier::NUM_WIRES; + constexpr size_t MAX_MERGE_SIZE = 
Goblin::BatchMergeRecursiveVerifier::MAX_MERGE_SIZE; + + // Commitments to the fixed-width list of subtables. + populate_field_elements_for_mock_commitments(proof, MAX_MERGE_SIZE * NUM_WIRES); + + // Commitments to the ZK masking table. + populate_field_elements_for_mock_commitments(proof, NUM_WIRES); + + // Number of real subtables. Keep it in [1, MAX_MERGE_SIZE] so recursive range checks can be constructed. + populate_field_elements(proof, 1, /*value=*/fr{ 1 }); + + // Shift sizes. + populate_field_elements(proof, 1, /*value=*/fr{ 2 }); + populate_field_elements(proof, MAX_MERGE_SIZE - 1, /*value=*/fr{ 0 }); + + // Merged table commitments and degree-check polynomial commitment. + populate_field_elements_for_mock_commitments(proof, NUM_WIRES + 1); + + // Evaluations: C_i(kappa), optional ZK C_i(kappa), T(kappa), and G(kappa^{-1}). + const size_t num_evaluations = (MAX_MERGE_SIZE * NUM_WIRES) + NUM_WIRES + NUM_WIRES + 1; + populate_field_elements(proof, num_evaluations); + + // Shplonk quotient commitment and KZG opening commitment. 
+ populate_field_elements_for_mock_commitments(proof, 2); + + return proof; +} + /** * @brief Create a mock pre-ipa proof which has the correct structure but is not necessarily valid * diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp index ab578e02ac68..63c1c65b8692 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.hpp @@ -104,6 +104,12 @@ std::pair> cons */ bb::Goblin::MergeProof create_mock_merge_proof(); +/** + * @brief Create a mock batch merge proof which has the correct structure but is not necessarily valid + * + */ +bb::HonkProof create_mock_batch_merge_proof(); + /** * @brief Create a mock pre-ipa proof which has the correct structure but is not necessarily valid * diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp index 19752c2db0aa..4207472fefa3 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/mock_verifier_inputs.test.cpp @@ -1,5 +1,6 @@ #include "barretenberg/dsl/acir_format/mock_verifier_inputs.hpp" #include "barretenberg/chonk/chonk_proof.hpp" +#include "barretenberg/flavor/mega_zk_flavor.hpp" #include "barretenberg/honk/proof_length.hpp" #include @@ -22,8 +23,12 @@ static_assert(HIDING_KERNEL_PUBLIC_INPUTS_SIZE == 28, "HIDING_KERNEL_IO_PUBLIC_INPUTS_SIZE changed - update constants.nr"); // Component proof lengths (used in Noir) -static_assert(MERGE_PROOF_SIZE == 42, "MERGE_PROOF_SIZE changed - update constants.nr"); -static_assert(ECCVMFlavor::PROOF_LENGTH == 608, "ECCVM proof size changed - update constants.nr"); +static_assert(MERGE_PROOF_SIZE == 42, + "MERGE_PROOF_SIZE changed - update 
CHONK_MERGE_PROOF_SIZE in constants.nr " + "and run `yarn remake-constants`"); +static_assert(ECCVMFlavor::PROOF_LENGTH == 608, + "ECCVM proof size changed - update CHONK_ECCVM_PROOF_LENGTH in constants.nr " + "and run `yarn remake-constants`"); static_assert(IPA_PROOF_LENGTH == 64, "IPA_PROOF_LENGTH changed - update constants.nr"); static_assert(TranslatorFlavor::PROOF_LENGTH == 483, "Translator proof size changed - update constants.nr"); @@ -32,7 +37,17 @@ static_assert( ProofLength::Honk::expected_proof_size>( UltraFlavor::VIRTUAL_LOG_N) == 410, "RECURSIVE_PROOF_LENGTH changed - update constants.nr"); -static_assert(ChonkProof::PROOF_LENGTH == 1315, "CHONK_PROOF_LENGTH changed - update constants.nr"); +static_assert(ChonkProof::PROOF_LENGTH == 1349, "CHONK_PROOF_LENGTH changed - update constants.nr"); +static_assert(ChonkProof::HIDING_OINK_LENGTH == 108, + "ChonkProof::HIDING_OINK_LENGTH changed - update CHONK_HIDING_OINK_LENGTH in constants.nr " + "and run `yarn remake-constants`"); +static_assert(ChonkProof::JOINT_PROOF_LENGTH == 499, + "ChonkProof::JOINT_PROOF_LENGTH changed - update CHONK_JOINT_PROOF_LENGTH in constants.nr " + "and run `yarn remake-constants`"); +static_assert(MegaFlavor::VerificationKey::calc_num_data_types() == 143, + "MEGA_VK_LENGTH_IN_FIELDS changed - update constants.nr"); +static_assert(MegaZKFlavor::VerificationKey::calc_num_data_types() == 143, + "MegaZK VK size changed - update constants.nr"); static_assert(ProofLength::MultilinearBatching::LENGTH == 136, "MultilinearBatching proof size changed - update constants.nr"); @@ -45,6 +60,15 @@ TEST_F(MockVerifierInputsTest, MockMergeProofSize) EXPECT_EQ(merge_proof.size(), MERGE_PROOF_SIZE); } +/** + * @brief Check that mock batch merge proof has the expected size + */ +TEST_F(MockVerifierInputsTest, MockBatchMergeProofSize) +{ + HonkProof batch_merge_proof = create_mock_batch_merge_proof(); + EXPECT_EQ(batch_merge_proof.size(), BATCH_MERGE_PROOF_SIZE); +} + /** * @brief Check that mock 
ECCVM proof has the expected size */ diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp index a12e9ffd9edf..47aa78abb04e 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp @@ -592,13 +592,13 @@ TYPED_TEST(OpcodeGateCountTests, BlockCallData) .value = WitnessOrConstant::from_index(3), // 10 }); - // Primary calldata + // Kernel calldata { BlockConstraint block_constraint{ .init = init, .trace = trace, .type = BlockType::CallData, - .calldata_id = CallDataType::Primary, + .calldata_id = CallDataType::KernelCalldata, }; AcirFormat constraint_system = constraint_to_acir_format(block_constraint); @@ -614,13 +614,13 @@ TYPED_TEST(OpcodeGateCountTests, BlockCallData) EXPECT_EQ(program.constraints.gates_per_opcode, std::vector({ BLOCK_CALLDATA })); } - // Secondary calldata + // App calldata { BlockConstraint block_constraint{ .init = init, .trace = trace, .type = BlockType::CallData, - .calldata_id = CallDataType::Secondary, + .calldata_id = CallDataType::FirstAppCalldata, }; AcirFormat constraint_system = constraint_to_acir_format(block_constraint); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/test_class.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/test_class.hpp index 89a12f07dfa3..749eb5d35f18 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/test_class.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/test_class.hpp @@ -105,7 +105,7 @@ inline Acir::BlockType block_type_to_acir_block_type(BlockType type, CallDataTyp // ROM and RAM both map to Memory in ACIR return Acir::BlockType{ .value = Acir::BlockType::Memory{} }; case BlockType::CallData: { - uint32_t id = (calldata_id == CallDataType::Primary) ? 
0 : 1; + uint32_t id = static_cast(calldata_id); return Acir::BlockType{ .value = Acir::BlockType::CallData{ .value = id } }; } case BlockType::ReturnData: diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/g2.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/g2.test.cpp index b7f46c27cd45..2a8006308441 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/g2.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/g2.test.cpp @@ -182,3 +182,49 @@ TEST(g2, GeneratorIsCorrect) fq("0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b") } }; EXPECT_EQ(generator, expected); } + +// The generator, infinity, and arbitrary scalar multiples of the generator must be accepted as +// members of the BN254 G2 prime-order subgroup. +TEST(g2, IsInPrimeSubgroupAcceptsSubgroupPoints) +{ + const g2::affine_element gen(Bn254G2Params::one_x, Bn254G2Params::one_y); + EXPECT_TRUE(gen.is_in_prime_subgroup()); + EXPECT_TRUE(g2::affine_element::infinity().is_in_prime_subgroup()); + + for (size_t i = 0; i < 4; ++i) { + const g2::affine_element P(g2::element(gen) * fr::random_element()); + EXPECT_TRUE(P.is_in_prime_subgroup()); + } +} + +// BN254 G2 has cofactor h2 ≈ 2^254, so on-curve does NOT imply prime-order subgroup membership. The hardcoded point +// below was constructed by sampling x = i + u (for the smallest positive integer i that yields a curve point) and +// recovering y via Fq2 sqrt; because only a 1/h2 fraction of E'(Fq2) lies in G_r, this specimen lies in a cofactor +// subgroup. Such a point must be rejected. Coordinates are in Montgomery form to match `Bn254G2Params::one_x` etc. 
+TEST(g2, IsInPrimeSubgroupRejectsCofactorPoint) +{ + const g2::affine_element off_subgroup{ + fq2{ fq(2), fq(1) }, + fq2{ fq("0x101f7278419308b95099eca02dcee0c5381f4d26d1d62313f057167f064101ce"), + fq("0x2b76c179599bb92a963dac85546a005a777f7c13f6a7b75d5918b6b5808f5fde") } + }; + ASSERT_TRUE(off_subgroup.on_curve()); + EXPECT_FALSE(off_subgroup.is_in_prime_subgroup()); + + // Sanity check that scalar multiplication via the Fr-typed `*` operator does NOT detect + // subgroup membership — multiplying by `Fr(0)` (the additive identity, which equals `r mod r`) + // gives infinity for every input, including off-subgroup points. This is precisely why + // is_in_prime_subgroup() routes through a uint256_t scalar instead. + EXPECT_TRUE((off_subgroup * fr::zero()).is_point_at_infinity()); +} + +// Off-curve coordinates must be rejected: the Weierstrass group law is unsound off-curve, so the +// [r]·P trick can return a false positive on attacker-supplied (x, y) that happens to satisfy +// y² = x³ + b' for some b' ≠ b with a prime-r factor in its order. +TEST(g2, IsInPrimeSubgroupRejectsOffCurvePoint) +{ + g2::affine_element off_curve(Bn254G2Params::one_x, Bn254G2Params::one_y); + off_curve.y += fq2::one(); + ASSERT_FALSE(off_curve.on_curve()); + EXPECT_FALSE(off_curve.is_in_prime_subgroup()); +} diff --git a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/pairing.hpp b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/pairing.hpp index c55643b6f545..0559234f9600 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/pairing.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/curves/bn254/pairing.hpp @@ -195,10 +195,13 @@ constexpr fq12 final_exponentiation_tricky_part(const fq12& elt); // ====================== // Pairing // -// NOTE: All points supplied for pairing calculations are checked to be on the curve. This is equivalent to a subgroup -// membership check for points in G1 = BN254. 
We don't implement subgroup membership checks for G2 because the only -// place in the codebase where we use pairings is in PairingPoints::check(), which takes two points P1, P2 in G1 -// and checks e(P1, [1]) * e(P2, [x]) = 1. The points [1] and [x] are taken from the SRS, so we know they belong to G2. +// NOTE: All points supplied for pairing calculations are checked to be on the curve. For G1 = BN254 this is equivalent +// to a subgroup membership check (cofactor 1). For G2 = BN254 twist, on-curve does NOT imply membership in the +// prime-order subgroup because the cofactor is non-trivial. The two G2 points consumed inside pairings come from +// PairingPoints::check(), which feeds them the SRS values [1]_2 and [x]_2 — the SRS ingress (bbapi::SrsInitSrs) +// invokes `affine_element::is_in_prime_subgroup()` on the user-supplied [x]_2 before installation, so by the time +// they reach this file they are already known to be in the prime-order subgroup. Any future code path that brings a +// new G2 point in from outside the SRS must run the subgroup check itself. // // ====================== diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp index fbe908ca6ae4..dc64aa6cbf80 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp @@ -92,6 +92,22 @@ template class alignas(64) affine [[nodiscard]] constexpr bool on_curve() const noexcept; + /** + * @brief Check that the point lies in the prime-order subgroup of size `Fr::modulus`. + * + * @details For curves whose cofactor is 1 (e.g. BN254 G1, Grumpkin) every on-curve point trivially + * satisfies this, so callers that already validated `on_curve()` can skip the call. 
For curves with + * non-trivial cofactor (notably BN254 G2, with cofactor h2 ≈ 2^254), `on_curve()` alone is + * insufficient: an attacker can supply a curve point that lies in a small subgroup of order + * dividing h2 and pass `on_curve()`. This routine performs the full subgroup check via `[r]·P == ∞`. + * + * @note Not constant-time. Intended for one-shot validation of public, externally-supplied points + * (e.g. at the bbapi boundary or when loading SRS bytes). + * + * @return true iff `*this` is the point at infinity or has order dividing `Fr::modulus`. + */ + [[nodiscard]] bool is_in_prime_subgroup() const noexcept; + static constexpr std::optional derive_from_x_coordinate(const Fq& x, bool sign_bit) noexcept; /** diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.test.cpp index 474c4f5e113d..66d77b6bf49d 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.test.cpp @@ -310,6 +310,19 @@ template class TestAffineElement : public testing::Test { auto reconstructed = FrCodec::deserialize_from_fields(limbs); EXPECT_EQ(reconstructed, point); } + + // The point at infinity, the generator, and any scalar multiple of the generator must all be + // recognized as members of the prime-order subgroup. 
+ static void test_is_in_prime_subgroup_accepts_subgroup_points() + { + EXPECT_TRUE(affine_element::infinity().is_in_prime_subgroup()); + EXPECT_TRUE(affine_element::one().is_in_prime_subgroup()); + + for (size_t i = 0; i < 8; ++i) { + affine_element P = affine_element(element::random_element()); + EXPECT_TRUE(P.is_in_prime_subgroup()); + } + } }; // using TestTypes = testing::Types; @@ -469,6 +482,12 @@ TYPED_TEST(TestAffineElement, DeserializeOffCurveThrows) TestFixture::test_deserialize_off_curve_throws(); } +// Verify is_in_prime_subgroup accepts known prime-order subgroup points +TYPED_TEST(TestAffineElement, IsInPrimeSubgroupAcceptsSubgroupPoints) +{ + TestFixture::test_is_in_prime_subgroup_accepts_subgroup_points(); +} + // Verify that from_compressed returns the (0,0) sentinel for x values with no valid y. TYPED_TEST(TestAffineElement, PointCompressionInvalidX) { diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp index 242cbb5dd4d8..336f65cc8efd 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp @@ -132,6 +132,35 @@ template constexpr bool affine_element: return (xxx == yy); } +template bool affine_element::is_in_prime_subgroup() const noexcept +{ + if (is_point_at_infinity()) { + return true; + } + // Weierstrass group law is unsound for off-curve coordinates, so the [r]·P trick can + // give a false positive on points that satisfy y² = x³ + b' for some b' ≠ b. Reject + // those up front. + if (!on_curve()) { + return false; + } + using Element = element; + + // To compute r * P, we convert modulus r to u256 and perform a left-to-right double-and-add. + constexpr uint256_t r = Fr::modulus; + const uint64_t r_msb = r.get_msb(); + + // Left-to-right double-and-add over the bits of r below the MSB. 
The MSB itself is consumed by + // initializing `acc` with `*this`. Loop terminates via unsigned underflow (i wraps past 0). + Element acc(*this); + for (uint64_t i = r_msb - 1; i < r_msb; --i) { + acc.self_dbl(); + if (r.get_bit(i)) { + acc += *this; + } + } + return acc.is_point_at_infinity(); +} + template constexpr bool affine_element::operator==(const affine_element& other) const noexcept { diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp index 5cfb5870960d..0925671702e9 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/element.hpp @@ -22,10 +22,12 @@ namespace bb::group_elements { * @brief element class. Implements ecc group arithmetic using Jacobian coordinates * See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l * - * Note: Currently subgroup checks are NOT IMPLEMENTED - * Our current implementation uses G1 points that have a cofactor of 1. - * All G2 points are precomputed (generator [1]_2 and trusted setup point [x]_2). - * Explicitly assume precomputed points are valid members of the prime-order subgroup for G2. + * Note: BN254 / Grumpkin G1 have cofactor 1, so on-curve membership coincides with prime-order + * subgroup membership. BN254 G2 has a non-trivial cofactor; an explicit subgroup check is provided + * by `affine_element::is_in_prime_subgroup()` and must be applied to externally-supplied G2 bytes + * (see bbapi). The arithmetic in this file does not rederive subgroup membership and assumes the + * caller already ensured operands are valid prime-order subgroup elements. 
+ * * @tparam Fq prime field the curve is defined over * @tparam Fr prime field whose characteristic equals the size of the prime-order elliptic curve subgroup * @tparam Params curve parameters diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp index f84796a54d68..eead3428a5ac 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/group.hpp @@ -24,10 +24,12 @@ namespace bb { * @brief group class. Represents an elliptic curve group element. * Group is parametrised by Fq and Fr * - * Note: Currently subgroup checks are NOT IMPLEMENTED - * Our current implementation uses G1 points that have a cofactor of 1. - * All G2 points are precomputed (generator [1]_2 and trusted setup point [x]_2). - * Explicitly assume precomputed points are valid members of the prime-order subgroup for G2. + * Note: BN254 / Grumpkin G1 have cofactor 1, so `affine_element::on_curve()` is itself a subgroup + * check. BN254 G2 has a non-trivial cofactor, so callers that accept externally-supplied G2 bytes + * must additionally invoke `affine_element::is_in_prime_subgroup()` to reject cofactor-subgroup + * points before they reach pairing-based verifiers; routine internal G2 arithmetic stays inside + * the prime-order subgroup because every starting point is the precomputed generator [1]_2 or the + * SRS point [x]_2. 
* * @tparam Fq * @tparam subgroup_field diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp index 512712190943..c5f50fdff5b2 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp @@ -85,29 +85,116 @@ void MSM::transform_scalar_and_get_nonzero_scalar_indices(std::span +void MSM::compute_scalar_slice_weights(std::span scalars, + std::span nonzero_indices, + uint32_t bits_per_slice, + std::vector& weights) noexcept +{ + // weight = ceil(bit_length / bps) + FIXED_PER_SCALAR_WEIGHT. The fixed term approximates the + // O(num_rounds) per-scalar overhead in build_schedule, sort_schedule, and reduce_buckets that + // doesn't scale with bit_length. Without it, threads assigned many lightweight scalars end up + // with disproportionate build/sort/reduce work (empirically observed via per-phase profiling). + // Max is ceil(NUM_BITS_IN_FIELD / 1) + FIXED. + static constexpr uint16_t FIXED_PER_SCALAR_WEIGHT = 4; + static_assert(NUM_BITS_IN_FIELD + FIXED_PER_SCALAR_WEIGHT <= std::numeric_limits::max(), + "slice-count weight overflows uint16_t"); + BB_ASSERT_GT(bits_per_slice, 0U); + + const size_t n = nonzero_indices.size(); + weights.resize(n); + + parallel_for([&](const ThreadChunk& chunk) { + for (size_t k : chunk.range(n)) { + const auto& scalar = scalars[nonzero_indices[k]]; + // Scalars were filtered for nonzero and are in non-Montgomery form, so get_msb() + // returns a valid bit index in [0, NUM_BITS_IN_FIELD). 
+ const uint64_t msb = uint256_t{ scalar.data[0], scalar.data[1], scalar.data[2], scalar.data[3] }.get_msb(); + const size_t bit_length = static_cast(msb) + 1; + weights[k] = + static_cast((bit_length + bits_per_slice - 1) / bits_per_slice) + FIXED_PER_SCALAR_WEIGHT; + } + }); +} + +template +std::vector::ThreadWorkUnits> MSM::partition_by_weight( + std::span> msm_scalar_weights, size_t num_threads) noexcept +{ + BB_ASSERT_GT(num_threads, 0U); + std::vector work_units(num_threads); + + size_t grand_total_weight = 0; + for (const auto& weights : msm_scalar_weights) { + for (uint16_t w : weights) { + grand_total_weight += w; + } + } + if (grand_total_weight == 0) { + return work_units; + } + + const size_t weight_per_thread = numeric::ceil_div(grand_total_weight, num_threads); + + size_t thread_accumulated_weight = 0; + size_t current_thread_idx = 0; + for (size_t i = 0; i < msm_scalar_weights.size(); ++i) { + const auto& weights = msm_scalar_weights[i]; + const size_t n = weights.size(); + + size_t start = 0; + for (size_t k = 0; k < n; ++k) { + thread_accumulated_weight += weights[k]; + + if (current_thread_idx < num_threads - 1 && thread_accumulated_weight >= weight_per_thread) { + work_units[current_thread_idx].push_back(MSMWorkUnit{ + .batch_msm_index = i, + .start_index = start, + .size = k + 1 - start, + }); + start = k + 1; + current_thread_idx++; + thread_accumulated_weight = 0; + } + } + if (start < n) { + work_units[current_thread_idx].push_back(MSMWorkUnit{ + .batch_msm_index = i, + .start_index = start, + .size = n - start, + }); + } + } + return work_units; +} + template std::vector::ThreadWorkUnits> MSM::get_work_units( std::span> scalars, std::vector>& msm_scalar_indices) noexcept { const size_t num_msms = scalars.size(); msm_scalar_indices.resize(num_msms); - for (size_t i = 0; i < num_msms; ++i) { - transform_scalar_and_get_nonzero_scalar_indices(scalars[i], msm_scalar_indices[i]); - } + // Weight scalars by their Pippenger cost (slice count + fixed 
overhead, see + // compute_scalar_slice_weights) to improve thread balancing. + std::vector> msm_scalar_weights(num_msms); size_t total_work = 0; - for (const auto& indices : msm_scalar_indices) { - total_work += indices.size(); + for (size_t i = 0; i < num_msms; ++i) { + transform_scalar_and_get_nonzero_scalar_indices(scalars[i], msm_scalar_indices[i]); + const size_t n = msm_scalar_indices[i].size(); + total_work += n; + if (n == 0) { + continue; + } + const uint32_t bps = get_optimal_log_num_buckets(n); + compute_scalar_slice_weights(scalars[i], msm_scalar_indices[i], bps, msm_scalar_weights[i]); } const size_t num_threads = get_num_cpus(); - std::vector work_units(num_threads); - - const size_t work_per_thread = numeric::ceil_div(total_work, num_threads); - const size_t work_of_last_thread = total_work - (work_per_thread * (num_threads - 1)); // Only use a single work unit if we don't have enough work for every thread if (num_threads > total_work) { + std::vector work_units(num_threads); for (size_t i = 0; i < num_msms; ++i) { work_units[0].push_back(MSMWorkUnit{ .batch_msm_index = i, @@ -118,37 +205,7 @@ std::vector::ThreadWorkUnits> MSM::get_work_units( return work_units; } - size_t thread_accumulated_work = 0; - size_t current_thread_idx = 0; - for (size_t i = 0; i < num_msms; ++i) { - size_t msm_work_remaining = msm_scalar_indices[i].size(); - const size_t initial_msm_work = msm_work_remaining; - - while (msm_work_remaining > 0) { - BB_ASSERT_LT(current_thread_idx, work_units.size()); - - const size_t total_thread_work = - (current_thread_idx == num_threads - 1) ? 
work_of_last_thread : work_per_thread; - const size_t available_thread_work = total_thread_work - thread_accumulated_work; - const size_t work_to_assign = std::min(available_thread_work, msm_work_remaining); - - work_units[current_thread_idx].push_back(MSMWorkUnit{ - .batch_msm_index = i, - .start_index = initial_msm_work - msm_work_remaining, - .size = work_to_assign, - }); - - thread_accumulated_work += work_to_assign; - msm_work_remaining -= work_to_assign; - - // Move to next thread if current thread is full - if (thread_accumulated_work >= total_thread_work) { - current_thread_idx++; - thread_accumulated_work = 0; - } - } - } - return work_units; + return partition_by_weight(msm_scalar_weights, num_threads); } /** diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp index 36352c269131..c03f679b61bc 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.hpp @@ -240,6 +240,14 @@ template class MSM { /** @brief Compute optimal bits per slice by minimizing cost over c in [1, MAX_SLICE_BITS) */ static uint32_t get_optimal_log_num_buckets(size_t num_points) noexcept; + /** @brief Partition per-MSM scalar weights into num_threads work units of approximately + * equal cumulative weight. + * @details Curve-independent and side-effect-free. The walk closes a work unit every time + * the running weight crosses the per-thread target, except on the last thread + * which absorbs any remainder so rounding drift doesn't leave work stranded. 
*/ + static std::vector partition_by_weight(std::span> msm_scalar_weights, + size_t num_threads) noexcept; + /** @brief Process sorted point schedule into bucket accumulators using batched affine additions */ static void batch_accumulate_points_into_buckets(std::span point_schedule, std::span points, @@ -288,7 +296,20 @@ template class MSM { static void transform_scalar_and_get_nonzero_scalar_indices(std::span scalars, std::vector& nonzero_scalar_indices) noexcept; - /** @brief Distribute multiple MSMs across threads with balanced point counts */ + /** @brief Compute per-scalar slice-count weights ceil(bit_length / bits_per_slice). + * @details Parallel over nonzero_indices. Scalars must be in non-Montgomery form (as left + * by transform_scalar_and_get_nonzero_scalar_indices). Weights drive thread + * partitioning in get_work_units. */ + static void compute_scalar_slice_weights(std::span scalars, + std::span nonzero_indices, + uint32_t bits_per_slice, + std::vector& weights) noexcept; + + /** @brief Distribute multiple MSMs across threads with balanced bucket-accumulation work. + * @details Per-thread assignment is a contiguous range of each MSM's nonzero-scalar + * indices, sized by cumulative slice-count weight ceil(bit_length / c). This is + * the actual number of nonzero c-bit slices a scalar contributes — the quantity + * that drives bucket-accumulation cost. 
*/ static std::vector get_work_units(std::span> scalars, std::vector>& msm_scalar_indices) noexcept; diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp index 2c7df6638998..24107df4ab30 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.test.cpp @@ -701,6 +701,151 @@ TYPED_TEST(ScalarMultiplicationTest, PippengerUnsafeFreeFunction) this->test_pippenger_unsafe_free_function(); } +// Curve-independent unit tests for the work-unit partitioner. +// partition_by_weight is the load-bearing balancing logic in get_work_units; pinning its +// behavior with synthetic weights makes regressions in the partition algorithm visible +// without needing a full MSM run. +namespace { + +using PartitionMSM = scalar_multiplication::MSM; +using WorkUnit = PartitionMSM::MSMWorkUnit; + +// Total weight assigned to a thread (sum of WorkUnit sizes weighted by the input vector). 
+size_t thread_weight(const std::vector& units, const std::vector>& weights) +{ + size_t total = 0; + for (const auto& u : units) { + for (size_t k = 0; k < u.size; ++k) { + total += weights[u.batch_msm_index][u.start_index + k]; + } + } + return total; +} + +} // namespace + +TEST(PartitionByWeight, NoMsmsReturnsEmptyThreads) +{ + auto units = PartitionMSM::partition_by_weight({}, 8); + ASSERT_EQ(units.size(), 8U); + for (const auto& t : units) { + EXPECT_TRUE(t.empty()); + } +} + +TEST(PartitionByWeight, AllEmptyMsmsReturnsEmptyThreads) +{ + std::vector> weights{ {}, {}, {} }; + auto units = PartitionMSM::partition_by_weight(weights, 4); + ASSERT_EQ(units.size(), 4U); + for (const auto& t : units) { + EXPECT_TRUE(t.empty()); + } +} + +TEST(PartitionByWeight, SingleThreadGetsEverything) +{ + std::vector> weights{ { 5, 5, 5, 5, 5 } }; + auto units = PartitionMSM::partition_by_weight(weights, 1); + ASSERT_EQ(units.size(), 1U); + ASSERT_EQ(units[0].size(), 1U); + EXPECT_EQ(units[0][0].batch_msm_index, 0U); + EXPECT_EQ(units[0][0].start_index, 0U); + EXPECT_EQ(units[0][0].size, 5U); +} + +TEST(PartitionByWeight, EvenSplitAcrossThreads) +{ + // 8 weights of 5 => total 40, target 10 per thread (4 threads), so 2 weights per thread. + std::vector> weights{ { 5, 5, 5, 5, 5, 5, 5, 5 } }; + auto units = PartitionMSM::partition_by_weight(weights, 4); + ASSERT_EQ(units.size(), 4U); + for (size_t t = 0; t < 4; ++t) { + ASSERT_EQ(units[t].size(), 1U) << "thread " << t; + EXPECT_EQ(units[t][0].size, 2U) << "thread " << t; + EXPECT_EQ(thread_weight(units[t], weights), 10U) << "thread " << t; + } +} + +TEST(PartitionByWeight, HeavyFirstWeightClosesFirstThreadEarly) +{ + // First weight alone exceeds the per-thread target; remainder is evenly split. + std::vector> weights{ { 100, 5, 5, 5, 5 } }; + auto units = PartitionMSM::partition_by_weight(weights, 4); + ASSERT_EQ(units.size(), 4U); + // Thread 0 should close after the heavy weight. 
+ ASSERT_FALSE(units[0].empty()); + EXPECT_EQ(units[0][0].start_index, 0U); + EXPECT_EQ(units[0][0].size, 1U); + // Total assigned across all threads must equal n. + size_t total_assigned = 0; + for (const auto& t : units) { + for (const auto& u : t) { + total_assigned += u.size; + } + } + EXPECT_EQ(total_assigned, 5U); +} + +TEST(PartitionByWeight, BoundaryStraddlesMsm) +{ + // Two MSMs of 4 weights of 5 each => total 40, 4 threads, target 10. + // Boundary should land mid-MSM if weights cross between MSMs. + std::vector> weights{ { 5, 5, 5, 5 }, { 5, 5, 5, 5 } }; + auto units = PartitionMSM::partition_by_weight(weights, 4); + ASSERT_EQ(units.size(), 4U); + size_t total_assigned = 0; + for (const auto& t : units) { + for (const auto& u : t) { + total_assigned += u.size; + } + } + EXPECT_EQ(total_assigned, 8U); + // Each thread should carry exactly weight 10. + for (size_t t = 0; t < 4; ++t) { + EXPECT_EQ(thread_weight(units[t], weights), 10U) << "thread " << t; + } +} + +TEST(PartitionByWeight, LastThreadAbsorbsRemainder) +{ + // weights {7,7,1}, num_threads=3 => total 15, target = ceil(15/3) = 5. + // Walk: T0 closes after weight 7, T1 closes after weight 7, then weight 1 trails. + // Without the "current_thread_idx < num_threads - 1" guard the partitioner would + // refuse to close T2 (running weight 1 < target 5) and the trailing weight would + // be lost. The guard makes T2 absorb it via the post-loop push. 
+ std::vector> weights{ { 7, 7, 1 } }; + auto units = PartitionMSM::partition_by_weight(weights, 3); + ASSERT_EQ(units.size(), 3U); + size_t total_assigned = 0; + for (const auto& t : units) { + for (const auto& u : t) { + total_assigned += u.size; + } + } + EXPECT_EQ(total_assigned, 3U); + ASSERT_EQ(units[2].size(), 1U); + EXPECT_EQ(units[2][0].start_index, 2U); + EXPECT_EQ(units[2][0].size, 1U); + EXPECT_EQ(thread_weight(units[2], weights), 1U); +} + +TEST(PartitionByWeight, MoreThreadsThanScalars) +{ + // 3 weights of 5 => total 15, 8 threads, target ceil(15/8)=2. + // Each weight (5) immediately crosses target => first 3 threads each get one scalar. + std::vector> weights{ { 5, 5, 5 } }; + auto units = PartitionMSM::partition_by_weight(weights, 8); + ASSERT_EQ(units.size(), 8U); + for (size_t t = 0; t < 3; ++t) { + ASSERT_EQ(units[t].size(), 1U) << "thread " << t; + EXPECT_EQ(units[t][0].size, 1U); + } + for (size_t t = 3; t < 8; ++t) { + EXPECT_TRUE(units[t].empty()) << "thread " << t; + } +} + // Non-templated test for explicit small inputs TEST(ScalarMultiplication, SmallInputsExplicit) { diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp index c8fa282fe79f..f0aa5678ac70 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp @@ -197,7 +197,7 @@ class NativeVerificationKey_ : public PrecomputedCommitments { * @brief Calculate the number of field elements needed for serialization * @return size_t Number of field elements */ - static size_t calc_num_data_types() + static constexpr size_t calc_num_data_types() { size_t commitments_size = PrecomputedCommitments::size() * Codec::template calc_num_fields(); size_t metadata_size = MetaData::NUM_FIELDS * Codec::template calc_num_fields(); diff --git a/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp index 
81508cb9266f..016c5acb1c64 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp @@ -24,7 +24,10 @@ #include "barretenberg/relations/non_native_field_relation.hpp" #include "barretenberg/relations/permutation_relation.hpp" #include "barretenberg/relations/poseidon2_external_relation.hpp" -#include "barretenberg/relations/poseidon2_internal_relation.hpp" +#include "barretenberg/relations/poseidon2_initial_external_relation.hpp" +#include "barretenberg/relations/poseidon2_quad_internal_relation.hpp" +#include "barretenberg/relations/poseidon2_quad_internal_terminal_relation.hpp" +#include "barretenberg/relations/poseidon2_transition_entry_relation.hpp" #include "barretenberg/relations/relation_tuple_helpers.hpp" #include "barretenberg/relations/ultra_arithmetic_relation.hpp" #include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" @@ -71,7 +74,10 @@ class MegaFlavor { bb::EccOpQueueRelation, bb::DatabusLookupRelation, bb::Poseidon2ExternalRelation, - bb::Poseidon2InternalRelation>; + bb::Poseidon2InitialExternalRelation, + bb::Poseidon2QuadInternalRelation, + bb::Poseidon2QuadInternalTerminalRelation, + bb::Poseidon2TransitionEntryRelation>; using Relations = Relations_; static constexpr size_t MAX_PARTIAL_RELATION_LENGTH = compute_max_partial_relation_length(); @@ -93,8 +99,10 @@ class MegaFlavor { * @details Used to build the proving key and verification key. 
* * These polynomials fall into several categories based on their origin: - * - **Circuit selectors** (q_m, q_c, q_l, q_r, q_o, q_4, q_busread, q_lookup, q_arith, q_delta_range, - * q_elliptic, q_memory, q_nnf, q_poseidon2_external, q_poseidon2_internal): Populated directly from + * - **Circuit selectors** (q_m, q_c, q_l, q_r, q_o, q_4, q_5, q_busread, q_lookup, q_arith, q_delta_range, + * q_elliptic, q_memory, q_nnf, q_poseidon2_external, q_poseidon2_external_initial, + * q_poseidon2_quad_internal, q_poseidon2_quad_internal_terminal, q_poseidon2_transition_entry): + * Populated directly from * the circuit builder's execution trace blocks. * - **Permutation polynomials** (sigma_1-4, id_1-4): Computed from wire copy cycles. * - **Table polynomials** (table_1-4): Populated from lookup tables in the circuit. @@ -109,40 +117,44 @@ class MegaFlavor { bool operator==(const PrecomputedEntities&) const = default; using DataType = DataType_; DEFINE_FLAVOR_MEMBERS(DataType, - q_m, // column 0 - q_c, // column 1 - q_l, // column 2 - q_r, // column 3 - q_o, // column 4 - q_4, // column 5 - q_busread, // column 6 - q_lookup, // column 7 - q_arith, // column 8 - q_delta_range, // column 9 - q_elliptic, // column 10 - q_memory, // column 11 - q_nnf, // column 12 - q_poseidon2_external, // column 13 - q_poseidon2_internal, // column 14 - sigma_1, // column 15 - sigma_2, // column 16 - sigma_3, // column 17 - sigma_4, // column 18 - id_1, // column 19 - id_2, // column 20 - id_3, // column 21 - id_4, // column 22 - table_1, // column 23 - table_2, // column 24 - table_3, // column 25 - table_4, // column 26 - lagrange_first, // column 27 - lagrange_last, // column 28 - lagrange_ecc_op, // column 29 // indicator poly for ecc op gates - databus_id // column 30 // id polynomial, i.e. 
id_i = i + q_m, // column 0 + q_c, // column 1 + q_l, // column 2 + q_r, // column 3 + q_o, // column 4 + q_4, // column 5 + q_5, // column 6 + q_busread, // column 7 + q_lookup, // column 8 + q_arith, // column 9 + q_delta_range, // column 10 + q_elliptic, // column 11 + q_memory, // column 12 + q_nnf, // column 13 + q_poseidon2_external, // column 14 + q_poseidon2_external_initial, // column 15 + q_poseidon2_quad_internal, // column 16 + q_poseidon2_quad_internal_terminal, // column 17 + q_poseidon2_transition_entry, // column 18 + sigma_1, // column 19 + sigma_2, // column 20 + sigma_3, // column 21 + sigma_4, // column 22 + id_1, // column 23 + id_2, // column 24 + id_3, // column 25 + id_4, // column 26 + table_1, // column 27 + table_2, // column 28 + table_3, // column 29 + table_4, // column 30 + lagrange_first, // column 31 + lagrange_last, // column 32 + lagrange_ecc_op, // column 33 // indicator poly for ecc op gates + databus_id // column 34 // id polynomial, i.e. id_i = i ) - auto get_non_gate_selectors() { return RefArray{ q_m, q_c, q_l, q_r, q_o, q_4 }; }; + auto get_non_gate_selectors() { return RefArray{ q_m, q_c, q_l, q_r, q_o, q_4, q_5 }; }; auto get_gate_selectors() { return RefArray{ @@ -154,7 +166,10 @@ class MegaFlavor { q_memory, q_nnf, q_poseidon2_external, - q_poseidon2_internal, + q_poseidon2_external_initial, + q_poseidon2_quad_internal, + q_poseidon2_quad_internal_terminal, + q_poseidon2_transition_entry, }; } auto get_selectors() { return concatenate(get_non_gate_selectors(), get_gate_selectors()); } @@ -178,23 +193,29 @@ class MegaFlavor { template class DerivedEntities { public: DEFINE_FLAVOR_MEMBERS(DataType, - z_perm, // column 4 - lookup_inverses, // column 5 - lookup_read_counts, // column 6 - lookup_read_tags, // column 7 - ecc_op_wire_1, // column 8 - ecc_op_wire_2, // column 9 - ecc_op_wire_3, // column 10 - ecc_op_wire_4, // column 11 - calldata, // column 12 - calldata_read_counts, // column 13 - calldata_inverses, // column 
14 - secondary_calldata, // column 15 - secondary_calldata_read_counts, // column 16 - secondary_calldata_inverses, // column 17 - return_data, // column 18 - return_data_read_counts, // column 19 - return_data_inverses); // column 20 + z_perm, // column 4 + lookup_inverses, // column 5 + lookup_read_counts, // column 6 + lookup_read_tags, // column 7 + ecc_op_wire_1, // column 8 + ecc_op_wire_2, // column 9 + ecc_op_wire_3, // column 10 + ecc_op_wire_4, // column 11 + kernel_calldata, // column 12 + kernel_calldata_read_counts, // column 13 + kernel_calldata_inverses, // column 14 + first_app_calldata, // column 15 + first_app_calldata_read_counts, // column 16 + first_app_calldata_inverses, // column 17 + second_app_calldata, // column 18 + second_app_calldata_read_counts, // column 19 + second_app_calldata_inverses, // column 20 + third_app_calldata, // column 21 + third_app_calldata_read_counts, // column 22 + third_app_calldata_inverses, // column 23 + return_data, // column 24 + return_data_read_counts, // column 25 + return_data_inverses); // column 26 auto get_to_be_shifted() { return RefArray{ z_perm }; }; }; @@ -219,22 +240,30 @@ class MegaFlavor { template auto databus_entities_for_bus() { if constexpr (bus_idx == 0) { - return RefArray{ this->calldata, this->calldata_read_counts }; + return RefArray{ this->kernel_calldata, this->kernel_calldata_read_counts }; } else if constexpr (bus_idx == 1) { - return RefArray{ this->secondary_calldata, this->secondary_calldata_read_counts }; + return RefArray{ this->first_app_calldata, this->first_app_calldata_read_counts }; + } else if constexpr (bus_idx == 2) { + return RefArray{ this->second_app_calldata, this->second_app_calldata_read_counts }; + } else if constexpr (bus_idx == 3) { + return RefArray{ this->third_app_calldata, this->third_app_calldata_read_counts }; } else { - static_assert(bus_idx == 2); + static_assert(bus_idx == 4); return RefArray{ this->return_data, this->return_data_read_counts }; } } 
template auto databus_inverse_for_bus() { if constexpr (bus_idx == 0) { - return RefArray{ this->calldata_inverses }; + return RefArray{ this->kernel_calldata_inverses }; } else if constexpr (bus_idx == 1) { - return RefArray{ this->secondary_calldata_inverses }; + return RefArray{ this->first_app_calldata_inverses }; + } else if constexpr (bus_idx == 2) { + return RefArray{ this->second_app_calldata_inverses }; + } else if constexpr (bus_idx == 3) { + return RefArray{ this->third_app_calldata_inverses }; } else { - static_assert(bus_idx == 2); + static_assert(bus_idx == 4); return RefArray{ this->return_data_inverses }; } } @@ -389,12 +418,18 @@ class MegaFlavor { ecc_op_wire_2 = "ECC_OP_WIRE_2"; ecc_op_wire_3 = "ECC_OP_WIRE_3"; ecc_op_wire_4 = "ECC_OP_WIRE_4"; - calldata = "CALLDATA"; - calldata_read_counts = "CALLDATA_READ_COUNTS"; - calldata_inverses = "CALLDATA_INVERSES"; - secondary_calldata = "SECONDARY_CALLDATA"; - secondary_calldata_read_counts = "SECONDARY_CALLDATA_READ_COUNTS"; - secondary_calldata_inverses = "SECONDARY_CALLDATA_INVERSES"; + kernel_calldata = "KERNEL_CALLDATA"; + kernel_calldata_read_counts = "KERNEL_CALLDATA_READ_COUNTS"; + kernel_calldata_inverses = "KERNEL_CALLDATA_INVERSES"; + first_app_calldata = "FIRST_APP_CALLDATA"; + first_app_calldata_read_counts = "FIRST_APP_CALLDATA_READ_COUNTS"; + first_app_calldata_inverses = "FIRST_APP_CALLDATA_INVERSES"; + second_app_calldata = "SECOND_APP_CALLDATA"; + second_app_calldata_read_counts = "SECOND_APP_CALLDATA_READ_COUNTS"; + second_app_calldata_inverses = "SECOND_APP_CALLDATA_INVERSES"; + third_app_calldata = "THIRD_APP_CALLDATA"; + third_app_calldata_read_counts = "THIRD_APP_CALLDATA_READ_COUNTS"; + third_app_calldata_inverses = "THIRD_APP_CALLDATA_INVERSES"; return_data = "RETURN_DATA"; return_data_read_counts = "RETURN_DATA_READ_COUNTS"; return_data_inverses = "RETURN_DATA_INVERSES"; @@ -404,6 +439,7 @@ class MegaFlavor { q_r = "Q_R"; q_o = "Q_O"; q_4 = "Q_4"; + q_5 = "Q_5"; q_m = "Q_M"; 
q_busread = "Q_BUSREAD"; q_lookup = "Q_LOOKUP"; @@ -413,7 +449,10 @@ class MegaFlavor { q_memory = "Q_MEMORY"; q_nnf = "Q_NNF"; q_poseidon2_external = "Q_POSEIDON2_EXTERNAL"; - q_poseidon2_internal = "Q_POSEIDON2_INTERNAL"; + q_poseidon2_external_initial = "Q_POSEIDON2_EXTERNAL_INITIAL"; + q_poseidon2_quad_internal = "Q_POSEIDON2_QUAD_INTERNAL"; + q_poseidon2_quad_internal_terminal = "Q_POSEIDON2_QUAD_INTERNAL_TERMINAL"; + q_poseidon2_transition_entry = "Q_POSEIDON2_TRANSITION_ENTRY"; sigma_1 = "SIGMA_1"; sigma_2 = "SIGMA_2"; sigma_3 = "SIGMA_3"; diff --git a/barretenberg/cpp/src/barretenberg/flavor/partially_evaluated_multivariates.hpp b/barretenberg/cpp/src/barretenberg/flavor/partially_evaluated_multivariates.hpp index c6017731c72d..0f67ddf9b713 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/partially_evaluated_multivariates.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/partially_evaluated_multivariates.hpp @@ -33,7 +33,8 @@ class PartiallyEvaluatedMultivariatesBase : public AllEntitiesBase { for (auto [poly, full_poly] : zip_view(this->get_all(), full_polynomials.get_all())) { // After the initial sumcheck round, the new size is CEIL(size/2). size_t desired_size = (full_poly.end_index() / 2) + (full_poly.end_index() % 2); - poly = Polynomial(desired_size, circuit_size / 2); + // partially_evaluate writes to [0, desired_size) before any read; backing memory can be left uninitialized. 
+ poly = Polynomial(desired_size, circuit_size / 2, 0, Polynomial::DontZeroMemory::FLAG); } } }; diff --git a/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp b/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp index 8b553aa2450e..63c7f427a38d 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp @@ -318,18 +318,24 @@ template struct MegaStructuredProofBase : StructuredProofHelpe Commitment ecc_op_wire_2_comm; Commitment ecc_op_wire_3_comm; Commitment ecc_op_wire_4_comm; - Commitment calldata_comm; - Commitment calldata_read_counts_comm; - Commitment secondary_calldata_comm; - Commitment secondary_calldata_read_counts_comm; + Commitment kernel_calldata_comm; + Commitment kernel_calldata_read_counts_comm; + Commitment first_app_calldata_comm; + Commitment first_app_calldata_read_counts_comm; + Commitment second_app_calldata_comm; + Commitment second_app_calldata_read_counts_comm; + Commitment third_app_calldata_comm; + Commitment third_app_calldata_read_counts_comm; Commitment return_data_comm; Commitment return_data_read_counts_comm; Commitment lookup_read_counts_comm; Commitment lookup_read_tags_comm; Commitment w_4_comm; Commitment lookup_inverses_comm; - Commitment calldata_inverses_comm; - Commitment secondary_calldata_inverses_comm; + Commitment kernel_calldata_inverses_comm; + Commitment first_app_calldata_inverses_comm; + Commitment second_app_calldata_inverses_comm; + Commitment third_app_calldata_inverses_comm; Commitment return_data_inverses_comm; Commitment z_perm_comm; std::vector> sumcheck_univariates; @@ -358,18 +364,24 @@ template struct MegaStructuredProofBase : StructuredProofHelpe ecc_op_wire_2_comm = this->template deserialize_from_buffer(proof_data, offset); ecc_op_wire_3_comm = this->template deserialize_from_buffer(proof_data, offset); ecc_op_wire_4_comm = this->template 
deserialize_from_buffer(proof_data, offset); - calldata_comm = this->template deserialize_from_buffer(proof_data, offset); - calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); - secondary_calldata_comm = this->template deserialize_from_buffer(proof_data, offset); - secondary_calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + kernel_calldata_comm = this->template deserialize_from_buffer(proof_data, offset); + kernel_calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + first_app_calldata_comm = this->template deserialize_from_buffer(proof_data, offset); + first_app_calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + second_app_calldata_comm = this->template deserialize_from_buffer(proof_data, offset); + second_app_calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + third_app_calldata_comm = this->template deserialize_from_buffer(proof_data, offset); + third_app_calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); return_data_comm = this->template deserialize_from_buffer(proof_data, offset); return_data_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); lookup_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); lookup_read_tags_comm = this->template deserialize_from_buffer(proof_data, offset); w_4_comm = this->template deserialize_from_buffer(proof_data, offset); lookup_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); - calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); - secondary_calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + kernel_calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + first_app_calldata_inverses_comm = this->template 
deserialize_from_buffer(proof_data, offset); + second_app_calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + third_app_calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); return_data_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); z_perm_comm = this->template deserialize_from_buffer(proof_data, offset); } @@ -384,18 +396,24 @@ template struct MegaStructuredProofBase : StructuredProofHelpe Base::serialize_to_buffer(ecc_op_wire_2_comm, proof_data); Base::serialize_to_buffer(ecc_op_wire_3_comm, proof_data); Base::serialize_to_buffer(ecc_op_wire_4_comm, proof_data); - Base::serialize_to_buffer(calldata_comm, proof_data); - Base::serialize_to_buffer(calldata_read_counts_comm, proof_data); - Base::serialize_to_buffer(secondary_calldata_comm, proof_data); - Base::serialize_to_buffer(secondary_calldata_read_counts_comm, proof_data); + Base::serialize_to_buffer(kernel_calldata_comm, proof_data); + Base::serialize_to_buffer(kernel_calldata_read_counts_comm, proof_data); + Base::serialize_to_buffer(first_app_calldata_comm, proof_data); + Base::serialize_to_buffer(first_app_calldata_read_counts_comm, proof_data); + Base::serialize_to_buffer(second_app_calldata_comm, proof_data); + Base::serialize_to_buffer(second_app_calldata_read_counts_comm, proof_data); + Base::serialize_to_buffer(third_app_calldata_comm, proof_data); + Base::serialize_to_buffer(third_app_calldata_read_counts_comm, proof_data); Base::serialize_to_buffer(return_data_comm, proof_data); Base::serialize_to_buffer(return_data_read_counts_comm, proof_data); Base::serialize_to_buffer(lookup_read_counts_comm, proof_data); Base::serialize_to_buffer(lookup_read_tags_comm, proof_data); Base::serialize_to_buffer(w_4_comm, proof_data); Base::serialize_to_buffer(lookup_inverses_comm, proof_data); - Base::serialize_to_buffer(calldata_inverses_comm, proof_data); - Base::serialize_to_buffer(secondary_calldata_inverses_comm, 
proof_data); + Base::serialize_to_buffer(kernel_calldata_inverses_comm, proof_data); + Base::serialize_to_buffer(first_app_calldata_inverses_comm, proof_data); + Base::serialize_to_buffer(second_app_calldata_inverses_comm, proof_data); + Base::serialize_to_buffer(third_app_calldata_inverses_comm, proof_data); Base::serialize_to_buffer(return_data_inverses_comm, proof_data); Base::serialize_to_buffer(z_perm_comm, proof_data); } diff --git a/barretenberg/cpp/src/barretenberg/goblin/BATCH_MERGE_PROTOCOL.md b/barretenberg/cpp/src/barretenberg/goblin/BATCH_MERGE_PROTOCOL.md new file mode 100644 index 000000000000..4702c17981ad --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/goblin/BATCH_MERGE_PROTOCOL.md @@ -0,0 +1,218 @@ +# Batch Merge Protocol + +For a more detailed explanation of Chonk, see [REFERENCE TO CHONK DOC]. + +During Chonk, circuits perform BN254 elliptic-curve operations that are delegated to Goblin rather than executed directly in the circuit. Each circuit exposes four `ecc_op_wire` commitments, one for each operation-table column. + +There are two distinct merge mechanisms: + +- The **Merge Protocol** in `MERGE_PROTOCOL.md` proves the latest pairwise merge relation. It checks one step of the form + $$ + M_j(X) = L_j(X) + X^\ell R_j(X) + $$ + for each column $j \in \{1,2,3,4\}$, together with a degree check for the left table. It should be read as the latest-merge protocol, including the soundness and degree-of-freedom analysis for that final merge. +- The **Batch Merge Protocol** proves, in one proof, that a committed aggregate table is the concatenation of all accumulated subtables bound by a running commitment hash, with a zero-knowledge prefix prepended by the batch merge prover. + +The batch merge protocol therefore does not replace the latest merge proof. Batch merge establishes the accumulated table up to the batch-merge output: all subtables bound by the running hash, plus the ZK prefix. 
The latest merge protocol is then responsible for the final pairwise merge involving the hiding-kernel table. + +## Relation to the Merge Protocol + +The Merge Protocol proves only a current/latest merge step. Given commitments to two tables $L_j$ and $R_j$, it proves that the output commitment opens to + +$$ +M_j(X) = L_j(X) + X^\ell R_j(X) +$$ + +for each wire column $j$, where $\ell$ is the unshifted size of $L_j$. + +The Batch Merge Protocol proves a different statement. It receives a running hash that binds a sequence of subtable commitments and proves that the output aggregate commitment opens to the concatenation of every accumulated subtable, preceded by the batch-merge ZK prefix: + +$$ +F_j(X) = f_{0,j}(X) + \sum_{i=1}^{N} X^{k_i} f_{i,j}(X), +\qquad +k_i = s_0 + \sum_{m < i} s_m. +$$ + +Here: + +- $j \in \{1,2,3,4\}$ indexes the op-queue columns. +- $f_{0,j}$ is the ZK-prefix column. +- $f_{i,j}$ is the $j$-th column of the $i$-th accumulated subtable. +- $s_0$ is the fixed ZK-prefix size. +- $s_i$ is the claimed size bound for subtable $i$. + +Thus: + +- Merge Protocol: latest pairwise merge only. +- Batch Merge Protocol: batched merge of all accumulated subtables plus the ZK prefix. + +## Running Commitment Hash + +Each kernel updates a running hash of the op-queue commitments it observes. If the previous hash is $h_{i-1}$ and the next subtable commitments are + +$$ +T_i = ([f_{i,1}], [f_{i,2}], [f_{i,3}], [f_{i,4}]), +$$ + +then the next hash is + +$$ +h_i = \text{Poseidon2}(h_{i-1}, T_i). +$$ + +The final kernel receives the resulting hash and passes it to the batch merge verifier. The verifier recomputes the same hash chain from the commitments supplied in the batch merge proof and checks that the selected hash value equals the public input hash. In the implementation this is optimized by reusing transcript challenges: `Transcript::get_challenge("HASH_i")` updates the transcript and yields the hash-chain element. 
The verifier compares the lower 127 bits of the supplied hash, so the hash-binding collision probability is $2^{-127}$. + +## Protocol Statement + +Let $M$ be the maximum number of subtables supported by the verifier, and let $N \leq M$ be the actual number of accumulated subtables sent by the prover. The prover and verifier work over four columns, but it is useful to write the statement per column. + +Public input: + +- A binding hash $h$ for the accumulated subtable commitments. + +Prover data: + +- Subtable polynomials $f_{i,j}$ for $i = 1,\ldots,N$ and $j = 1,\ldots,4$. +- ZK-prefix polynomials $f_{0,j}$. +- Aggregate polynomials + $$ + F_j(X) = f_{0,j}(X) + \sum_{i=1}^{N} X^{k_i} f_{i,j}(X), + \qquad + k_i = s_0 + \sum_{m < i} s_m. + $$ + +The claimed sizes $s_i$ for unused slots $i>N$ are zero and do not affect the concatenation. + +## Implemented Protocol + +The implementation uses flattened table indices. Index $0$ is the ZK prefix, and indices $1,\ldots,M$ are the possible accumulated subtables. For each table index $i$ and column $j$, let $C_{i,j}$ denote the corresponding polynomial: + +$$ +C_{0,j} = f_{0,j}, \qquad C_{i,j} = f_{i,j} \text{ for } i \geq 1. +$$ + +The verifier uses size parameters + +$$ +\sigma_0 = \texttt{UltraEccOpsTable::ZK\_ULTRA\_OPS}, +\qquad +\sigma_i = +\begin{cases} +s_i & i \leq N,\\ +0 & i > N. +\end{cases} +$$ + +### Prover + +1. Commit to all real accumulated subtable columns $[C_{i,j}]$ for $i=1,\ldots,N$. +2. Send identity commitments for unused slots $i=N+1,\ldots,M$. +3. Create and commit to the ZK-prefix columns $[C_{0,j}]$ using `ECCOpQueue::construct_zk_columns()`. +4. Send $N$ and the subtable sizes $s_i$ for all $M$ possible subtable slots. +5. Construct and commit to the merged table columns $[F_j]$. +6. Derive batching challenges $1,\alpha,\alpha^2,\ldots$. +7. Construct the degree-check polynomial over the active slots + $$ + G(X) = \sum_{i=0}^{N}\sum_{j=1}^{4} + \alpha_{i,j}\, X^{\sigma_i-1} C_{i,j}(X^{-1}), + $$ + where the $i=0$ terms are the ZK-prefix columns.
Unused columns are treated as zero by the verifier. +8. Derive an evaluation challenge $\kappa$. +9. Send evaluations $C_{i,j}(\kappa)$, $F_j(\kappa)$, and $G(\kappa^{-1})$. +10. Use Shplonk/KZG to prove all claimed openings. + +### Verifier + +The verifier recomputes the hash chain, receives commitments/evaluations, and performs three algebraic checks before reducing all openings to a single KZG pairing check. + +#### Concatenation Check + +For each column $j$, the verifier checks + +$$ +F_j(\kappa) = C_{0,j}(\kappa) + \kappa^{\sigma_0} C_{1,j}(\kappa) + \kappa^{\sigma_0+\sigma_1} C_{2,j}(\kappa) + \cdots + \kappa^{\sum_{m=0}^{M-1}\sigma_m} C_{M,j}(\kappa). +$$ + +In code this is evaluated with Horner's rule from the last table slot down to the ZK prefix. Since $\sigma_i=0$ and $C_{i,j}(\kappa)=0$ for unused slots, indices $i>N$ do not contribute. + +#### Degree Check + +For each committed column $C_{i,j}$, the reversed-polynomial identity gives + +$$ +\left(X^{\sigma_i-1} C_{i,j}(X^{-1})\right)(\kappa^{-1}) = \kappa^{1-\sigma_i} C_{i,j}(\kappa) +$$ + +The verifier checks the batched identity + +$$ +G(\kappa^{-1}) = \sum_{i=0}^{M}\sum_{j=1}^{4} \alpha_{i,j}\, \kappa^{1-\sigma_i} C_{i,j}(\kappa) +$$ + +This proves the degree bounds $\deg(C_{i,j}) < \sigma_i$ for all active table slots, except with the batching and Schwartz-Zippel failure probabilities. In particular, unused slots have $\sigma_i=0$, which means the right hand side has a term of the form +$$ +X^{-1} C_{i,j}(X^{-1}) +$$ +Unless $C_{i,j} = 0$, such a term contributes negative powers of $X$, which means the right hand side is not a polynomial, while the left hand side is (because it was committed to). + + +#### Hash Consistency Check + +The verifier constructs an indicator array for the prover-supplied $N$ and selects the calculated hash after the $N$-th subtable. It then checks that this selected hash equals the public binding hash. + +The verifier also enforces $1 \leq N \leq M$.
In recursive verification this is encoded by the product + +$$ +\prod_{i=1}^{M}(N-i)=0. +$$ + +## Adding ZK + +The batch merge output is part of the Goblin-facing accumulated operation table, so it must not reveal the real accumulated operations. Batch merge adds zero-knowledge by prepending a fixed-size ZK prefix $T_0$. + +This prefix is produced by `ECCOpQueue::construct_zk_columns()` and consists of: + +- one no-op; +- three random Ultra-only ops; +- one valid hiding op included in the ECCVM table. + +The prefix size is fixed: + +$$ +s_0 := \texttt{UltraEccOpsTable::ZK\_ULTRA\_OPS}. +$$ + +The prover therefore does not send $s_0$ as a variable size. The verifier uses the constant prefix size when computing concatenation offsets and degree-check powers. + +This prefix is the beginning-side ZK contribution. The hiding kernel later contributes the final random non-ops at the end of the table, and the latest Merge Protocol proves the corresponding final merge step. See `MERGE_PROTOCOL.md` for the latest-merge soundness and degree-of-freedom analysis. + +## Layout Notes + +The batch merge algebra above is written without duplicating the trace-layout discussion from `MERGE_PROTOCOL.md`. The implementation must still produce commitments with the layout expected downstream by the latest merge, Translator, and ECCVM checks. + +The important separation is: + +- Batch merge proves the hash-bound accumulated subtables plus the ZK prefix. +- Latest merge proves the final append/prepend relation for the current hiding-kernel table and performs the final layout alignment discussed in `MERGE_PROTOCOL.md`. + +## Soundness Considerations + +The prover controls several values: $N$, the subtable sizes $s_i$, the column commitments, the aggregate commitments, and the evaluations. The protocol constrains these as follows: + +- **Commitment binding:** KZG binds each sent commitment to a unique polynomial under the standard binding assumption. 
+- **Hash binding:** The public hash binds the active subtable commitments up to a $2^{-127}$ collision probability from the transcript-hash optimization. +- **Number of subtables:** The verifier enforces $1 \leq N \leq M$ and masks unused sizes with the indicator array. +- **Unused slots:** Slots $i>N$ are committed as identity and opened at zero. Their sizes are zeroed by the verifier. +- **Degree bounds:** The reversed-polynomial identity proves $\deg(C_{i,j}) < \sigma_i$ for each active slot. +- **Concatenation:** The random evaluation check proves that the aggregate commitments open to the concatenation determined by the same size parameters. +- **PCS openings:** Shplonk batches all openings, and KZG reduces the final claim to pairing points. + +The remaining failure probabilities are the usual Schwartz-Zippel probability for the random evaluation point, batching soundness for the degree-check challenge, Shplonk batching soundness, KZG binding, and the hash-chain collision probability described above. 
diff --git a/barretenberg/cpp/src/barretenberg/goblin/batch_merge.test.cpp b/barretenberg/cpp/src/barretenberg/goblin/batch_merge.test.cpp new file mode 100644 index 000000000000..28c9f9c115e7 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/goblin/batch_merge.test.cpp @@ -0,0 +1,616 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], commit: } +// external_1: { status: not started, auditors: [], commit: } +// external_2: { status: not started, auditors: [], commit: } +// ===================== + +#include "barretenberg/boomerang_value_detection/graph.hpp" +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" +#include "barretenberg/common/test.hpp" +#include "barretenberg/crypto/poseidon2/poseidon2.hpp" +#include "barretenberg/goblin/batch_merge_prover.hpp" +#include "barretenberg/goblin/batch_merge_verifier.hpp" +#include "barretenberg/op_queue/ecc_op_queue.hpp" +#include "barretenberg/srs/global_crs.hpp" +#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib/proof/proof.hpp" +#include "barretenberg/transcript/transcript.hpp" + +namespace bb { + +using NativeCurve = curve::BN254; +using NativeG1 = NativeCurve::AffineElement; + +static constexpr size_t NUM_WIRES = MegaExecutionTraceBlocks::NUM_WIRES; +static constexpr size_t NUM_FRS_COMM = NativeTranscript::Codec::template calc_num_fields(); + +template struct BuilderTypeHelper { + struct DummyBuilder {}; + using type = DummyBuilder; +}; + +template struct BuilderTypeHelper> { + using type = typename Curve::Builder; +}; + +enum class FaultMode : uint8_t { + NONE, + WRONG_MERGED_TABLE, // merged table commitment/evals/opening are self-consistent but table is wrong + BAD_DEGREE_CHECK_POLY, // degree-check commitment/eval/opening are self-consistent but polynomial is wrong + PADDING_NOT_INFINITY, // padded slot sends non-zero shift size and non-zero commitment/eval + 
SHIFT_SIZE_MINUS_ONE, // send k-1 as shift size for a subtable polynomial of size k + ZK_TABLE_DEGREE_TOO_HIGH, // zk table has degree above verifier hard-coded ZK shift + ZERO_SUBTABLES_CLAIM, // send 0 as number of subtables, + TOO_MANY_SUBTABLES, // send a number of subtables above the max that the verifier is configured for +}; + +void populate_subtable(const std::shared_ptr& op_queue, size_t num_ops) +{ + for (size_t i = 0; i < num_ops; ++i) { + op_queue->add_accumulate(NativeG1::random_element()); + op_queue->mul_accumulate(NativeG1::random_element(), bb::fr::random_element()); + op_queue->eq_and_reset(); + } +} + +std::shared_ptr make_op_queue_with_n_subtables(size_t n) +{ + const size_t max_op_queue_ops = 10; + auto op_queue = std::make_shared(); + for (size_t i = 0; i < n; ++i) { + if (i > 0) { + op_queue->initialize_new_subtable(); + } + populate_subtable(op_queue, ((1 + i) % max_op_queue_ops) + 1); // +1 to avoid empty subtables + op_queue->merge(); + } + return op_queue; +} + +/** + * Running hash over all MAX_SUBTABLES slots. + * Real subtables are in slots [0, ..., N-1]; padded slots [N, ..., MAX_SUBTABLES-1] + * are hashed as well (their commitments should be points at infinity). + */ +bb::fr compute_running_hash(const std::vector& proof, size_t N) +{ + std::vector round_inputs; + bb::fr previous_challenge(0); + bool is_first_challenge = true; + + for (size_t subtable_idx = 0; subtable_idx < N; ++subtable_idx) { + round_inputs.clear(); + if (!is_first_challenge) { + round_inputs.push_back(previous_challenge); + } + for (size_t col = 0; col < NUM_WIRES; ++col) { + const size_t global_col_idx = (subtable_idx * NUM_WIRES) + col; + const size_t base = (global_col_idx * NUM_FRS_COMM); + for (size_t j = 0; j < NUM_FRS_COMM; ++j) { + round_inputs.push_back(proof[base + j]); + } + } + + // Transcript logic: hash full round buffer, then split into two challenge parts; get_challenge uses part[0]. 
+ const bb::fr full_hash = crypto::Poseidon2::hash(round_inputs); + previous_challenge = full_hash; + is_first_challenge = false; + } + + return previous_challenge; +} + +/** + * Local prover copy used only in tests, with controlled fault injection points. + * Important: faults are applied before data is sent to transcript, so Fiat–Shamir remains consistent. + */ +class TweakableBatchMergeProver : public BatchMergeProver { + using Curve = curve::BN254; + using FF = Curve::ScalarField; + using PCS = KZG; + using Polynomial = bb::Polynomial; + using OpeningClaim = ProverOpeningClaim; + using Transcript = NativeTranscript; + + public: + explicit TweakableBatchMergeProver(const std::shared_ptr& op_queue, + size_t max_subtables, + FaultMode mode = FaultMode::NONE) + : BatchMergeProver(op_queue, max_subtables) + , fault_mode(mode) + {} + + MergeProof construct_proof() + { + const size_t M = max_subtables; + + // Step 1 + std::vector> subtable_cols = op_queue->construct_subtable_columns(); + + size_t N = subtable_cols.size(); + + std::vector shift_sizes(N); + size_t max_shift_size = 0; + for (size_t i = 0; i < N; ++i) { + shift_sizes[i] = subtable_cols[i][0].size(); + max_shift_size = std::max(max_shift_size, shift_sizes[i]); + } + + // Step 2: commit subtable columns + Polynomial zero_poly(0); + for (size_t idx = 0; idx < N; ++idx) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + const Polynomial& col_to_commit = + (fault_mode == FaultMode::ZERO_SUBTABLES_CLAIM) ? 
zero_poly : subtable_cols[idx][col]; + transcript->send_to_verifier("COLUMN_" + std::to_string(col) + "_" + std::to_string(idx), + pcs_commitment_key.commit(col_to_commit)); + } + [[maybe_unused]] FF _ = transcript->template get_challenge("HASH_" + std::to_string(idx)); + } + + Polynomial one_poly(1); + one_poly.at(0) = 1; + for (size_t idx = N; idx < M; ++idx) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + const bool non_infinity_padding = + (fault_mode == FaultMode::PADDING_NOT_INFINITY && idx == N && col == 0); + transcript->send_to_verifier("COLUMN_" + std::to_string(col) + "_" + std::to_string(idx), + pcs_commitment_key.commit(non_infinity_padding ? one_poly : zero_poly)); + } + [[maybe_unused]] FF _ = transcript->template get_challenge("HASH_" + std::to_string(idx)); + } + + // Step 2.b: Send the masking table + std::array zk_columns = op_queue->construct_zk_columns(); + + if (fault_mode == FaultMode::ZK_TABLE_DEGREE_TOO_HIGH) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + // Make zk column degree exceed verifier's hard-coded ZK shift (= ZK_ULTRA_OPS). 
+ Polynomial larger_zk_col(zk_columns[col], zk_columns[col].size() + 1); + larger_zk_col.at(larger_zk_col.size() - 1) = FF(1); + zk_columns[col] = std::move(larger_zk_col); + } + } + + for (size_t col = 0; col < NUM_WIRES; ++col) { + transcript->send_to_verifier("ZK_COLUMN_" + std::to_string(col), + pcs_commitment_key.commit(zk_columns[col])); + } + max_shift_size = std::max(max_shift_size, zk_columns[0].size()); + + // Step 2.c: Flatten the columns for easier utilisation + std::vector flattened_cols; + flattened_cols.reserve((subtable_cols.size() * NUM_WIRES) + NUM_WIRES); + for (size_t col = 0; col < NUM_WIRES; ++col) { + flattened_cols.push_back(std::move(zk_columns[col])); + } + for (auto& subtable_col : subtable_cols) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + if (fault_mode == FaultMode::ZERO_SUBTABLES_CLAIM) { + flattened_cols.push_back(Polynomial(1)); + } else { + flattened_cols.push_back(std::move(subtable_col[col])); + } + } + } + + // Step 3 + uint32_t sent_num_subtables = static_cast(N); + if (fault_mode == FaultMode::ZERO_SUBTABLES_CLAIM) { + sent_num_subtables = 0; + } + transcript->send_to_verifier("NUM_SUBTABLES", sent_num_subtables); + for (size_t i = 0; i < M; ++i) { + uint32_t sent_shift_size = static_cast(i < N ? 
shift_sizes[i] : 0); + if (fault_mode == FaultMode::PADDING_NOT_INFINITY && i == N && N < M) { + sent_shift_size = 1; + } + if (fault_mode == FaultMode::SHIFT_SIZE_MINUS_ONE && i == 0 && N > 0) { + BB_ASSERT_GT(shift_sizes[0], 0U); + sent_shift_size = static_cast(shift_sizes[0] - 1); + } + if (fault_mode == FaultMode::ZERO_SUBTABLES_CLAIM && i == N && N < M) { + sent_shift_size = 0; + } + transcript->send_to_verifier("SHIFT_SIZE_" + std::to_string(i), sent_shift_size); + } + + // Step 4: merged table + std::array merged_table(op_queue->construct_ultra_ops_table_columns()); + if (fault_mode == FaultMode::WRONG_MERGED_TABLE && !merged_table[0].is_empty()) { + merged_table[0].at(0) += FF(1); + } else if (fault_mode == FaultMode::ZERO_SUBTABLES_CLAIM) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + merged_table[col] = Polynomial(1); + } + } + for (size_t col = 0; col < NUM_WIRES; ++col) { + transcript->send_to_verifier("MERGED_COLUMN_" + std::to_string(col), + pcs_commitment_key.commit(merged_table[col])); + } + + // Step 5 + const FF degree_check_challenge = transcript->template get_challenge("DEGREE_CHECK_CHALLENGE"); + const size_t num_degree_check_challenges = (M * NUM_WIRES) + NUM_WIRES; + std::vector degree_check_challenges = { FF(1), degree_check_challenge }; + for (size_t idx = 2; idx < num_degree_check_challenges; ++idx) { + degree_check_challenges.push_back(degree_check_challenges.back() * degree_check_challenge); + } + + // Step 6: degree-check poly + if (fault_mode == FaultMode::TOO_MANY_SUBTABLES) { + // This is the case in which we test that if the prover sends more columns than the max number of tables + // then the verifier rejects + size_t diff = flattened_cols.size() - num_degree_check_challenges; + for (size_t idx = 0; idx < diff * NUM_WIRES; ++idx) { + // Add challenges for the extra columns sent by the prover + degree_check_challenges.push_back(degree_check_challenges.back() * degree_check_challenge); + } + } + + Polynomial degree_check_poly = 
+ compute_degree_check_polynomial(flattened_cols, degree_check_challenges, max_shift_size); + + if (fault_mode == FaultMode::TOO_MANY_SUBTABLES) { + // Remove the extra challenge added above to keep the degree check poly consistent with the rest of the + // proof + degree_check_challenges.pop_back(); + } + + if (fault_mode == FaultMode::BAD_DEGREE_CHECK_POLY && !degree_check_poly.is_empty()) { + degree_check_poly.at(0) += FF(1); + } + + transcript->send_to_verifier("DEGREE_CHECK_POLY", pcs_commitment_key.commit(degree_check_poly)); + + // Step 7 + const FF kappa = transcript->template get_challenge("KAPPA"); + const FF kappa_inv = kappa.invert(); + + // Step 8: evals + std::vector evals; + const size_t num_actual_flattened_cols = (N * NUM_WIRES) + NUM_WIRES; + const size_t num_flattened_col_evals = (M * NUM_WIRES) + NUM_WIRES; + for (size_t flat_idx = 0; flat_idx < num_flattened_col_evals; ++flat_idx) { + FF eval = FF(0); + if (flat_idx < num_actual_flattened_cols) { + eval = flattened_cols[flat_idx].evaluate(kappa); + } else if (fault_mode == FaultMode::PADDING_NOT_INFINITY && flat_idx == num_actual_flattened_cols) { + eval = FF(1); // matches one_poly commitment at the first padded slot + } + evals.push_back(eval); + transcript->send_to_verifier("C_EVAL_" + std::to_string(flat_idx), eval); + } + + for (size_t col = 0; col < NUM_WIRES; ++col) { + evals.push_back(merged_table[col].evaluate(kappa)); + transcript->send_to_verifier("MERGED_EVAL_" + std::to_string(col), evals.back()); + } + + evals.push_back(degree_check_poly.evaluate(kappa_inv)); + transcript->send_to_verifier("DEGREE_CHECK_EVAL", evals.back()); + + // Step 9 + const size_t num_opening_claims = ((M + 1) * NUM_WIRES) + 1 + NUM_WIRES; + std::vector opening_claims; + opening_claims.reserve(num_opening_claims); + + for (size_t idx = 0; idx < num_flattened_col_evals; ++idx) { + if (idx < num_actual_flattened_cols) { + opening_claims.push_back({ std::move(flattened_cols[idx]), { kappa, evals[idx] } }); + } 
else { + opening_claims.push_back({ Polynomial(1), { kappa, FF(0) } }); + } + } + + for (size_t idx = 0; idx < NUM_WIRES; ++idx) { + opening_claims.push_back( + { std::move(merged_table[idx]), { kappa, evals[(M * NUM_WIRES) + NUM_WIRES + idx] } }); + } + + opening_claims.push_back({ std::move(degree_check_poly), { kappa_inv, evals.back() } }); + + auto shplonk_opening_claim = ShplonkProver::prove(pcs_commitment_key, opening_claims, transcript); + + PCS::compute_opening_proof(pcs_commitment_key, shplonk_opening_claim, transcript); + return transcript->export_proof(); + } + + private: + FaultMode fault_mode; +}; + +// Custom parameter struct to hold both Curve type and NumSubtables value +template struct TestParam { + using CurveType = Curve; + static constexpr size_t NumSubtables = N; +}; + +// Specialize the fixture to extract both template parameters from TypeParam +template class BatchMergeTests : public testing::Test { + public: + using Curve = typename Param::CurveType; + static constexpr size_t NumSubtables = Param::NumSubtables; + using FF = typename Curve::ScalarField; + using Verifier = BatchMergeVerifier_; + using Proof = typename Verifier::Proof; + using Transcript = typename Verifier::Transcript; + static constexpr bool IsRecursive = Curve::is_stdlib_type; + using BuilderType = typename BuilderTypeHelper::type; + + static constexpr size_t VERIFIER_NUM_GATES = NumSubtables == 9 ? 6362 : 22624; + static constexpr size_t ZK_OFFSET = NumSubtables == 9 ? 
666 : 520; + + struct VerifyResult { + bool reduction_ok; + bool pairing_ok; + bool circuit_ok; + }; + + static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + + static Proof create_proof(BuilderType& builder, const std::vector& native_proof) + { + if constexpr (IsRecursive) { + stdlib::Proof stdlib_proof(builder, native_proof); + return stdlib_proof; + } else { + (void)builder; + return native_proof; + } + } + + static FF create_hash(BuilderType& builder, const bb::fr& native_hash) + { + if constexpr (IsRecursive) { + auto hash = FF::from_witness(&builder, native_hash); + hash.unset_free_witness_tag(); + return hash; + } else { + (void)builder; + return native_hash; + } + } + + static bool check_circuit(BuilderType& builder) + { + if constexpr (IsRecursive) { + return CircuitChecker::check(builder); + } else { + (void)builder; + return true; + } + } + + static VerifyResult prove_and_verify(const std::shared_ptr& op_queue, + FaultMode fault_mode = FaultMode::NONE, + bool wrong_hash = false, + bool check_manifest = false) + { + TranscriptManifest prover_manifest; + std::vector native_proof; + if (fault_mode == FaultMode::NONE) { + BatchMergeProver prover{ op_queue, NumSubtables }; + if (check_manifest) { + prover.transcript->enable_manifest(); + } + + native_proof = prover.construct_proof(); + if (check_manifest) { + prover_manifest = prover.transcript->get_manifest(); + } + } else { + TweakableBatchMergeProver prover{ op_queue, NumSubtables, fault_mode }; + if (check_manifest) { + prover.transcript->enable_manifest(); + } + + native_proof = prover.construct_proof(); + if (check_manifest) { + prover_manifest = prover.transcript->get_manifest(); + } + } + + bb::fr native_hash = compute_running_hash(native_proof, op_queue->num_subtables()); + if (wrong_hash) { + native_hash += bb::fr(1); + } + + BuilderType builder; + Proof proof = create_proof(builder, native_proof); + FF hash = create_hash(builder, native_hash); + + Verifier 
verifier; + if (check_manifest) { + verifier.transcript->enable_manifest(); + } + auto result = verifier.reduce_to_pairing_check(proof, hash); + + if (check_manifest) { + // Check consistency of manifests + auto verifier_manifest = verifier.transcript->get_manifest(); + EXPECT_EQ(prover_manifest.size(), verifier_manifest.size()); + for (size_t i = 0; i < prover_manifest.size(); ++i) { + EXPECT_EQ(prover_manifest[i], verifier_manifest[i]); + } + } + + if constexpr (Curve::is_stdlib_type) { + EXPECT_EQ(builder.get_num_finalized_gates_inefficient(), VERIFIER_NUM_GATES + ZK_OFFSET); + } + + return { result.reduction_succeeded, result.pairing_points.check(), check_circuit(builder) }; + } +}; + +using TestParams = ::testing::Types, + TestParam, + TestParam, 9>, + TestParam, CHONK_MAX_NUM_CIRCUITS>>; +TYPED_TEST_SUITE(BatchMergeTests, TestParams); + +// Completeness + +TYPED_TEST(BatchMergeTests, ValidProofPassesWithPadding) +{ + auto op_queue = make_op_queue_with_n_subtables(3); + auto res = TestFixture::prove_and_verify(op_queue, FaultMode::NONE, false, /*check_manifest*/ true); + EXPECT_TRUE(res.reduction_ok); + EXPECT_TRUE(res.pairing_ok); + EXPECT_TRUE(res.circuit_ok); +} + +TYPED_TEST(BatchMergeTests, ValidProofMaxSizePasses) +{ + auto op_queue = make_op_queue_with_n_subtables(TestFixture::NumSubtables); + auto res = TestFixture::prove_and_verify(op_queue); + EXPECT_TRUE(res.reduction_ok); + EXPECT_TRUE(res.pairing_ok); + EXPECT_TRUE(res.circuit_ok); +} + +// Soundness + +TYPED_TEST(BatchMergeTests, ZeroSubtablesFails) +{ + BB_DISABLE_ASSERTS(); + auto op_queue = make_op_queue_with_n_subtables(3); + auto res = TestFixture::prove_and_verify(op_queue, FaultMode::ZERO_SUBTABLES_CLAIM); + EXPECT_FALSE(res.reduction_ok); // Caught by product check + EXPECT_TRUE(res.pairing_ok); + if constexpr (TestFixture::IsRecursive) { + EXPECT_FALSE(res.circuit_ok); + } +} + +TYPED_TEST(BatchMergeTests, TooManySubtablesFails) +{ + if constexpr (!TestFixture::Curve::is_stdlib_type) { + 
GTEST_SKIP() << "This test in the native setting fails due to a deserialization failure. The verifier path is the "
+                        "same for native and recursive code, so it's enough to test the recursive code.";
+    } else {
+        BB_DISABLE_ASSERTS();
+        auto op_queue = make_op_queue_with_n_subtables(TestFixture::NumSubtables + 1);
+        auto res = TestFixture::prove_and_verify(op_queue, FaultMode::TOO_MANY_SUBTABLES);
+        EXPECT_FALSE(res.reduction_ok); // Caught by product check
+        EXPECT_FALSE(res.pairing_ok);   // Verifier uses fewer commitments than the one sent
+        if constexpr (TestFixture::IsRecursive) {
+            EXPECT_FALSE(res.circuit_ok); // Assertions fail
+        }
+    }
+}
+
+TYPED_TEST(BatchMergeTests, WrongMergedTableFails)
+{
+    auto op_queue = make_op_queue_with_n_subtables(2);
+    auto res = TestFixture::prove_and_verify(op_queue, FaultMode::WRONG_MERGED_TABLE);
+    EXPECT_FALSE(res.reduction_ok); // Caught by the concatenation check
+    EXPECT_TRUE(res.pairing_ok);
+    if constexpr (TestFixture::IsRecursive) {
+        EXPECT_FALSE(res.circuit_ok);
+    }
+}
+
+TYPED_TEST(BatchMergeTests, WrongHashFails)
+{
+    auto op_queue = make_op_queue_with_n_subtables(4);
+    auto res = TestFixture::prove_and_verify(op_queue, FaultMode::NONE, true);
+    EXPECT_FALSE(res.reduction_ok); // Caught by the hash check
+    EXPECT_TRUE(res.pairing_ok);
+    if constexpr (TestFixture::IsRecursive) {
+        EXPECT_FALSE(res.circuit_ok);
+    }
+}
+
+TYPED_TEST(BatchMergeTests, BadSubtableDegreeCheckFails)
+{
+    auto op_queue = make_op_queue_with_n_subtables(6);
+    auto res = TestFixture::prove_and_verify(op_queue, FaultMode::BAD_DEGREE_CHECK_POLY);
+    EXPECT_FALSE(res.reduction_ok); // Caught by the degree check
+    EXPECT_TRUE(res.pairing_ok);
+    if constexpr (TestFixture::IsRecursive) {
+        EXPECT_FALSE(res.circuit_ok);
+    }
+}
+
+TYPED_TEST(BatchMergeTests, PaddingTableNotInfinityFails)
+{
+    auto op_queue = make_op_queue_with_n_subtables(3);
+    auto res = TestFixture::prove_and_verify(op_queue, FaultMode::PADDING_NOT_INFINITY);
+    
EXPECT_FALSE(res.reduction_ok); // Caught by the degree check: shift sizes are zeroed out >= N + EXPECT_TRUE(res.pairing_ok); // PCS is consistent + if constexpr (TestFixture::IsRecursive) { + EXPECT_FALSE(res.circuit_ok); // Caught by the degree check: shift sizes are zeroed out >= N + } +} + +TYPED_TEST(BatchMergeTests, ShiftSizeMinusOneFailsReductionOnly) +{ + auto op_queue = make_op_queue_with_n_subtables(7); + auto res = TestFixture::prove_and_verify(op_queue, FaultMode::SHIFT_SIZE_MINUS_ONE); + EXPECT_FALSE(res.reduction_ok); // Caught by the degree check + EXPECT_TRUE(res.pairing_ok); + if constexpr (TestFixture::IsRecursive) { + EXPECT_FALSE(res.circuit_ok); + } +} + +TYPED_TEST(BatchMergeTests, ZKTableDegreeTooHighFailsReductionOnly) +{ + auto op_queue = make_op_queue_with_n_subtables(5); + auto res = TestFixture::prove_and_verify(op_queue, FaultMode::ZK_TABLE_DEGREE_TOO_HIGH); + EXPECT_FALSE(res.reduction_ok); // Caught by degree/concatenation reductions via hard-coded ZK shift. + EXPECT_TRUE(res.pairing_ok); // PCS opening remains self-consistent with sent commitments/evals. + if constexpr (TestFixture::IsRecursive) { + EXPECT_FALSE(res.circuit_ok); + } +} + +// Static analysis of the recursive verifier circuit: every variable must belong to a single connected +// component (no disjoint subgraphs) and there must be no variables that participate in only one gate +// (i.e. no unconstrained witnesses). 
+TYPED_TEST(BatchMergeTests, GraphDescription) +{ + if constexpr (!TestFixture::IsRecursive) { + GTEST_SKIP() << "Graph description analysis only applies to stdlib (recursive) verifier circuits."; + } else { + using BuilderType = typename TestFixture::BuilderType; + using FF = typename TestFixture::FF; + using Proof = typename TestFixture::Proof; + using Verifier = typename TestFixture::Verifier; + + auto op_queue = make_op_queue_with_n_subtables(5); + BatchMergeProver prover{ op_queue, TestFixture::NumSubtables }; + auto native_proof = prover.construct_proof(); + const bb::fr native_hash = compute_running_hash(native_proof, op_queue->num_subtables()); + + BuilderType builder; + Proof proof = TestFixture::create_proof(builder, native_proof); + FF hash = TestFixture::create_hash(builder, native_hash); + // The hash is consumed only via split_challenge, which yields a low/high pair via a single arithmetic + // gate: hash = lo + 2^127 * hi. The verifier subsequently uses only the low half, so hash itself + // appears in only that one gate. Pin it so the StaticAnalyzer doesn't flag it as unconstrained. + hash.fix_witness(); + + Verifier verifier; + auto result = verifier.reduce_to_pairing_check(proof, hash); + + // The pairing points are public outputs from the recursive verifier that will be verified externally via a + // pairing check. Their output coordinates may not appear in multiple constraint gates; fix_witness() pins + // them so the StaticAnalyzer doesn't flag the coordinate limbs as unconstrained. 
+ result.pairing_points.fix_witness(); + + builder.finalize_circuit(); + + using Analyzer = + std::conditional_t, cdg::MegaStaticAnalyzer, cdg::UltraStaticAnalyzer>; + auto graph = Analyzer(builder); + auto [cc, variables_in_one_gate] = graph.analyze_circuit(/*filter_cc=*/true); + + EXPECT_EQ(cc.size(), 1); + EXPECT_EQ(variables_in_one_gate.size(), 0); + } +} + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/batch_merge_prover.cpp b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_prover.cpp new file mode 100644 index 000000000000..7245b05b1990 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_prover.cpp @@ -0,0 +1,209 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], commit: } +// external_1: { status: not started, auditors: [], commit: } +// external_2: { status: not started, auditors: [], commit: } +// ===================== + +#include "batch_merge_prover.hpp" + +#include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" +#include + +namespace bb { + +BatchMergeProver::BatchMergeProver(const std::shared_ptr& op_queue, size_t max_subtables) + : transcript(std::make_shared()) + , op_queue(op_queue) + , max_subtables(max_subtables) +{ + // The commitment key must be large enough for the full merged table (plus the zk offset). 
+ pcs_commitment_key = CommitmentKey(op_queue->get_ultra_ops_table_num_rows() + UltraEccOpsTable::ZK_ULTRA_OPS); +} + +typename BatchMergeProver::Polynomial BatchMergeProver::compute_degree_check_polynomial( + const std::vector& flattened_columns, + const std::vector& degree_check_challenges, + const size_t max_size) +{ + // Zero initialization + Polynomial reversed_batched_poly(max_size); + std::vector reversed_columns; + reversed_columns.reserve(flattened_columns.size()); + for (const auto& poly : flattened_columns) { + reversed_columns.emplace_back(poly.reverse()); + } + + std::vector> reversed_column_spans; + std::vector scalars; + reversed_column_spans.reserve(flattened_columns.size()); + scalars.reserve(flattened_columns.size()); + for (size_t idx = 0; idx < flattened_columns.size(); ++idx) { + reversed_column_spans.emplace_back(reversed_columns[idx]); + scalars.push_back(degree_check_challenges[idx]); + } + + add_scaled_batch(reversed_batched_poly, + std::span>(reversed_column_spans), + std::span(scalars)); + + return reversed_batched_poly; +} + +typename BatchMergeProver::MergeProof BatchMergeProver::construct_proof() +{ + BB_BENCH_NAME("BatchMergeProver::construct_proof"); + const size_t M = max_subtables; + + // ------------------------------------------------------------------------- + // Step 1: Gather subtable column polynomials and their shift sizes + // ------------------------------------------------------------------------- + std::vector> subtable_cols = op_queue->construct_subtable_columns(); + + size_t N = subtable_cols.size(); + BB_ASSERT_LTE(N, M, "BatchMergeProver: more subtables than max_subtables"); + + std::vector shift_sizes(N); + size_t max_shift_size = 0; + for (size_t i = 0; i < N; ++i) { + shift_sizes[i] = subtable_cols[i][0].size(); // number of rows per poly + max_shift_size = std::max(max_shift_size, shift_sizes[i]); + } + + // ------------------------------------------------------------------------- + // Step 2: Commit to columns 
to be merged + // ------------------------------------------------------------------------- + for (size_t idx = 0; idx < N; ++idx) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + transcript->send_to_verifier("COLUMN_" + std::to_string(col) + "_" + std::to_string(idx), + pcs_commitment_key.commit(subtable_cols[idx][col])); + } + // update hash after each subtable to match verifier's transcript + [[maybe_unused]] FF _ = transcript->template get_challenge("HASH_" + std::to_string(idx)); + } + + Commitment infinity = Commitment::infinity(); + for (size_t idx = N; idx < M; ++idx) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + transcript->send_to_verifier("COLUMN_" + std::to_string(col) + "_" + std::to_string(idx), infinity); + } + // update hash after each subtable to match verifier's transcript + [[maybe_unused]] FF _ = transcript->template get_challenge("HASH_" + std::to_string(idx)); + } + + // ------------------------------------------------------------------------- + // Step 2.b: Send the masking table + // ------------------------------------------------------------------------- + std::array zk_columns = op_queue->construct_zk_columns(); + for (size_t col = 0; col < NUM_WIRES; ++col) { + transcript->send_to_verifier("ZK_COLUMN_" + std::to_string(col), pcs_commitment_key.commit(zk_columns[col])); + } + max_shift_size = std::max(max_shift_size, zk_columns[0].size()); + + // ------------------------------------------------------------------------- + // Step 2.c: Flatten the columns for easier utilization + // ------------------------------------------------------------------------- + std::vector flattened_cols; + flattened_cols.reserve((subtable_cols.size() * NUM_WIRES) + NUM_WIRES); + for (size_t col = 0; col < NUM_WIRES; ++col) { + flattened_cols.push_back(std::move(zk_columns[col])); + } + for (auto& subtable_col : subtable_cols) { + for (size_t col = 0; col < NUM_WIRES; col++) { + flattened_cols.push_back(std::move(subtable_col[col])); + } + } + + // 
------------------------------------------------------------------------- + // Step 3: Send N and shift sizes to the verifier + // ------------------------------------------------------------------------- + transcript->send_to_verifier("NUM_SUBTABLES", static_cast(N)); + for (size_t i = 0; i < M; ++i) { + transcript->send_to_verifier("SHIFT_SIZE_" + std::to_string(i), + static_cast(i < N ? shift_sizes[i] : 0)); + } + + // ------------------------------------------------------------------------- + // Step 4: Construct and commit to T (full merged table) + // ------------------------------------------------------------------------- + std::array merged_table(op_queue->construct_ultra_ops_table_columns()); + for (size_t col = 0; col < NUM_WIRES; ++col) { + transcript->send_to_verifier("MERGED_COLUMN_" + std::to_string(col), + pcs_commitment_key.commit(merged_table[col])); + } + + // ------------------------------------------------------------------------- + // Step 5: Compute degree check batching challenges 1, α, α^2, .., α^{(M + 1) * NUM_WIRES -1} + // ------------------------------------------------------------------------- + const FF degree_check_challenge = transcript->template get_challenge("DEGREE_CHECK_CHALLENGE"); + const size_t num_degree_check_challenges = (M + 1) * NUM_WIRES; + std::vector degree_check_challenges = { FF(1), degree_check_challenge }; + for (size_t idx = 2; idx < num_degree_check_challenges; idx++) { + degree_check_challenges.push_back(degree_check_challenges.back() * degree_check_challenge); + } + + // ------------------------------------------------------------------------- + // Step 6: Compute G = sum_i α_i * C_i(1 / X) * X^{shift_size_i - 1}, commit, send [G] + // ------------------------------------------------------------------------- + Polynomial degree_check_poly = + compute_degree_check_polynomial(flattened_cols, degree_check_challenges, max_shift_size); + transcript->send_to_verifier("DEGREE_CHECK_POLY", 
pcs_commitment_key.commit(degree_check_poly)); + + // ------------------------------------------------------------------------- + // Step 7: Evaluation challenge κ + // ------------------------------------------------------------------------- + const FF kappa = transcript->template get_challenge("KAPPA"); + const FF kappa_inv = kappa.invert(); + + // ------------------------------------------------------------------------- + // Step 8: Compute and send evaluations C_i(κ), T(κ), G(κ^{-1}) + // ------------------------------------------------------------------------- + // C_i_col(κ) + std::vector evals; + const size_t num_actual_flattened_cols = (N * NUM_WIRES) + NUM_WIRES; + const size_t num_flattened_col_evals = (M * NUM_WIRES) + NUM_WIRES; + for (size_t col = 0; col < num_flattened_col_evals; ++col) { + evals.push_back(col < num_actual_flattened_cols ? flattened_cols[col].evaluate(kappa) : FF(0)); + transcript->send_to_verifier("C_EVAL_" + std::to_string(col), evals.back()); + } + + // T_col(κ) + for (size_t col = 0; col < NUM_WIRES; ++col) { + evals.push_back(merged_table[col].evaluate(kappa)); + transcript->send_to_verifier("MERGED_EVAL_" + std::to_string(col), evals.back()); + } + + // G_col(κ^{-1}) + evals.push_back(degree_check_poly.evaluate(kappa_inv)); + transcript->send_to_verifier("DEGREE_CHECK_EVAL", evals.back()); + + // ------------------------------------------------------------------------- + // Step 9: Shplonk to open + // zk columns + // for C_i(κ) + // T(κ) + // for G(κ^{-1}) + // ------------------------------------------------------------------------- + const size_t num_opening_claims = ((M + 2) * NUM_WIRES) + 1; + std::vector opening_claims; + opening_claims.reserve(num_opening_claims); + for (size_t idx = 0; idx < num_flattened_col_evals; ++idx) { + if (idx >= num_actual_flattened_cols || flattened_cols[idx].size() == 0) { + // We use Polynomial(1) to avoid failures in Shplonk due to accessing empty polynomials + opening_claims.push_back({ 
Polynomial(1), { kappa, FF(0) } }); + } else { + opening_claims.push_back({ std::move(flattened_cols[idx]), { kappa, evals[idx] } }); + } + } + for (size_t idx = 0; idx < NUM_WIRES; ++idx) { + opening_claims.push_back({ std::move(merged_table[idx]), { kappa, evals[((M + 1) * NUM_WIRES) + idx] } }); + } + opening_claims.push_back({ std::move(degree_check_poly), { kappa_inv, evals.back() } }); + + auto shplonk_opening_claim = ShplonkProver::prove(pcs_commitment_key, opening_claims, transcript); + + PCS::compute_opening_proof(pcs_commitment_key, shplonk_opening_claim, transcript); + + return transcript->export_proof(); +} + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/batch_merge_prover.hpp b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_prover.hpp new file mode 100644 index 000000000000..5d4e2173331f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_prover.hpp @@ -0,0 +1,74 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], commit: } +// external_1: { status: not started, auditors: [], commit: } +// external_2: { status: not started, auditors: [], commit: } +// ===================== + +#pragma once + +#include "barretenberg/commitment_schemes/claim.hpp" +#include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" +#include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/goblin/merge_prover.hpp" +#include "barretenberg/honk/proof_system/types/proof.hpp" +#include "barretenberg/op_queue/ecc_op_queue.hpp" +#include "barretenberg/transcript/transcript.hpp" + +namespace bb { + +/** + * @brief Batch merge prover + * + * @details This prover proves that the full merged table T is the correct concatenation of all N accumulated subtables + * C_0, ..., C_{N-1} (padded to MAX_SUBTABLES M). 
+ */ +class BatchMergeProver { + protected: + using Curve = curve::BN254; + using FF = Curve::ScalarField; + using Commitment = Curve::AffineElement; + using Polynomial = bb::Polynomial; + using CommitmentKey = bb::CommitmentKey; + using PCS = KZG; + using OpeningClaim = ProverOpeningClaim; + using Transcript = NativeTranscript; + using ShplonkProver = ShplonkProver_; + + public: + using MergeProof = std::vector; + + static constexpr size_t NUM_WIRES = MegaExecutionTraceBlocks::NUM_WIRES; + + /** + * @param op_queue The ECC op queue containing all accumulated subtables (N subtables, in append order). + * @param transcript Shared prover transcript. + * @param max_subtables M: the fixed maximum number of subtables (CHONK_MAX_ACCUMULATION_STEPS). + */ + explicit BatchMergeProver(const std::shared_ptr& op_queue, size_t max_subtables); + + /** + * @brief Construct the batch merge proof. + * + * @details Proves that the full merged table T is the correct concatenation of all N subtables + * C_0, ..., C_{N-1} stored in the op_queue in append order (C_0 oldest, C_{N-1} most recently merged) together with + * an additional zero-knowledge commitment C_zk (prepended at the beginning). 
+ * + */ + MergeProof construct_proof(); + + // Exposed for test access + CommitmentKey pcs_commitment_key; + + // Public for testing purposes + std::shared_ptr transcript; + + protected: + std::shared_ptr op_queue; + size_t max_subtables; // M + + static Polynomial compute_degree_check_polynomial(const std::vector& flattened_columns, + const std::vector& degree_check_challenges, + const size_t max_size); +}; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/batch_merge_verifier.cpp b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_verifier.cpp new file mode 100644 index 000000000000..1d4d14092a9a --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_verifier.cpp @@ -0,0 +1,409 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], commit: } +// external_1: { status: not started, auditors: [], commit: } +// external_2: { status: not started, auditors: [], commit: } +// ===================== + +#include "batch_merge_verifier.hpp" +#include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" +#include "barretenberg/stdlib/primitives/curves/bn254.hpp" +#include "barretenberg/stdlib/proof/proof.hpp" + +namespace bb { + +template +typename BatchMergeVerifier_::ReductionResult BatchMergeVerifier_:: + reduce_to_pairing_check(const Proof& proof, const FF hash) +{ + BB_BENCH_NAME("BatchMergeVerifier::reduce_to_pairing_check"); + + transcript->load_proof(proof); + + // Get the lowest 127 bits of the hash + // We compare the calculated hashes against this value so that we can reuse the transcript hash calculations + // A collision happens with probability 2^{-127} + const FF binding_hash = std::get<0>(Transcript::Codec::split_challenge(hash)); + + // ------------------------------------------------------------------------- + // Step 1: Receive commitments to columns to be merged + // ------------------------------------------------------------------------- + std::vector> subtable_cols(MAX_MERGE_SIZE, 
std::vector(NUM_WIRES)); + std::vector calculated_hashes; + for (size_t idx = 0; idx < MAX_MERGE_SIZE; ++idx) { + for (size_t col = 0; col < NUM_WIRES; ++col) { + subtable_cols[idx][col] = transcript->template receive_from_prover( + "COLUMN_" + std::to_string(col) + "_" + std::to_string(idx)); + } + calculated_hashes.push_back(transcript->template get_challenge("HASH_" + std::to_string(idx))); + } + + // ------------------------------------------------------------------------- + // Step 1.b: Receive commitments to the masking table + // ------------------------------------------------------------------------- + std::array zk_columns; + for (size_t col = 0; col < NUM_WIRES; ++col) { + zk_columns[col] = transcript->template receive_from_prover("ZK_COLUMN_" + std::to_string(col)); + } + + // ------------------------------------------------------------------------- + // Step 1.c: Flatten the columns for easier utilization + // ------------------------------------------------------------------------- + std::vector flattened_cols; + flattened_cols.reserve(NUM_EVALS_FROM_COLUMNS); + for (size_t col = 0; col < NUM_WIRES; ++col) { + flattened_cols.push_back(std::move(zk_columns[col])); + } + for (auto& subtable_col : subtable_cols) { + for (size_t col = 0; col < NUM_WIRES; col++) { + flattened_cols.push_back(std::move(subtable_col[col])); + } + } + + // ------------------------------------------------------------------------- + // Step 2: Receive N and shift sizes from the proof + // ------------------------------------------------------------------------- + const FF N = transcript->template receive_from_prover("NUM_SUBTABLES"); + + // ------------------------------------------------------------------------- + // Step 2.a: Enforce 1 <= N <= MAX_MERGE_SIZE + // ------------------------------------------------------------------------- + FF running_product = FF(1); + for (size_t idx = 0; idx < MAX_MERGE_SIZE; idx++) { + running_product *= (N - FF(idx + 1)); + } + + bool 
is_valid_num_subtables = true; + if constexpr (IsRecursive) { + is_valid_num_subtables = running_product.get_value().is_zero(); + running_product.assert_equal(FF(0)); + } else { + is_valid_num_subtables = running_product.is_zero(); + } + + std::vector shift_sizes; + shift_sizes.reserve(NUM_COLUMN_TABLES); + shift_sizes.push_back(FF(UltraEccOpsTable::ZK_ULTRA_OPS)); + // Array s.t. indicator_array[i] = (i < N) + std::vector indicator_array = compute_indicator_array(N); + + for (size_t i = 0; i < MAX_MERGE_SIZE; ++i) { + size_t idx = 1 + i; + shift_sizes.push_back(transcript->template receive_from_prover("SHIFT_SIZE_" + std::to_string(i))); + shift_sizes[idx] = shift_sizes[idx] * indicator_array[i]; // zero out shift sizes for unused subtables + } + + // ------------------------------------------------------------------------- + // Step 3: Receive [T] commitments from proof + // ------------------------------------------------------------------------- + TableCommitments merged_commitments; + for (size_t col = 0; col < NUM_WIRES; ++col) { + merged_commitments[col] = + transcript->template receive_from_prover("MERGED_COLUMN_" + std::to_string(col)); + } + + // ------------------------------------------------------------------------- + // Step 4: Compute degree check challenges 1, α, α^2, .., α^{(M + 1) * NUM_WIRES-1} + // ------------------------------------------------------------------------- + std::vector degree_check_challenges; + degree_check_challenges.reserve(NUM_EVALS_FROM_COLUMNS); + const FF degree_check_challenge = transcript->template get_challenge("DEGREE_CHECK_CHALLENGE"); + degree_check_challenges = { FF(1), degree_check_challenge }; + for (size_t idx = 2; idx < NUM_EVALS_FROM_COLUMNS; idx++) { + degree_check_challenges.push_back(degree_check_challenges.back() * degree_check_challenge); + } + + // ------------------------------------------------------------------------- + // Step 5: Receive [G] commitments from proof + // 
------------------------------------------------------------------------- + Commitment degree_check_commitment = transcript->template receive_from_prover("DEGREE_CHECK_POLY"); + + // ------------------------------------------------------------------------- + // Step 6: Compute evaluation challenge κ, powers of kappa and their inverses + // ------------------------------------------------------------------------- + const FF kappa = transcript->template get_challenge("KAPPA"); + const FF kappa_inv = kappa.invert(); + + std::vector powers_of_kappa; + powers_of_kappa.reserve(shift_sizes.size()); + for (const FF& shift_size : shift_sizes) { + if constexpr (IsRecursive) { + // Shift sizes are at most 2^CONST_OP_QUEUE_LOG_SIZE so the implicit range constraint enforced by pow is + // always satisfied + powers_of_kappa.push_back(kappa.template pow(shift_size)); + } else { + BB_ASSERT_LT( + static_cast(shift_size), 1UL << (CONST_OP_QUEUE_LOG_SIZE + 1), "Shift size is too large"); + powers_of_kappa.push_back(kappa.pow(shift_size)); + } + } + + std::vector powers_of_kappa_inv; + powers_of_kappa_inv.reserve(powers_of_kappa.size()); + if constexpr (IsRecursive) { + for (const FF& kappa_pow : powers_of_kappa) { + powers_of_kappa_inv.push_back(kappa_pow.invert()); + } + } else { + powers_of_kappa_inv = powers_of_kappa; + FF::batch_invert(powers_of_kappa_inv); + } + + // ------------------------------------------------------------------------- + // Step 7: Receive evaluations + // ------------------------------------------------------------------------- + // C_i_col(κ) + std::vector evals; + evals.reserve(NUM_EVALS); + for (size_t i = 0; i < NUM_EVALS_FROM_COLUMNS; ++i) { + const FF received_eval = transcript->template receive_from_prover("C_EVAL_" + std::to_string(i)); + evals.push_back(received_eval); + } + + // T_col(κ) + for (size_t col = 0; col < NUM_WIRES; ++col) { + evals.push_back(transcript->template receive_from_prover("MERGED_EVAL_" + std::to_string(col))); + } + + // 
G_col(κ^{-1}) + evals.push_back(transcript->template receive_from_prover("DEGREE_CHECK_EVAL")); + + // ------------------------------------------------------------------------- + // Step 8: Verify concatenation identity, degree identity, and hash consistency + // ------------------------------------------------------------------------- + + std::vector origin_tags; + if constexpr (IsRecursive) { + // To prevent an OriginTag false positive, we re-tag the powers of kappa with the round + // provenance of evals + for (FF& kappa_pow : powers_of_kappa) { + origin_tags.push_back(kappa_pow.get_origin_tag()); + kappa_pow.set_origin_tag(evals[0].get_origin_tag()); + } + for (FF& kappa_pow : powers_of_kappa_inv) { + kappa_pow.set_origin_tag(evals[0].get_origin_tag()); + } + } + + const bool concatenation_verified = check_concatenation_identity(evals, powers_of_kappa); + const bool degree_check_verified = + check_degree_identity(evals, powers_of_kappa_inv, kappa, degree_check_challenges); + const bool hash_verified = check_hash_consistency(binding_hash, calculated_hashes, indicator_array); + + // Reset origin tags + if constexpr (IsRecursive) { + for (auto [kappa_pow, origin_tag] : zip_view(powers_of_kappa, origin_tags)) { + kappa_pow.set_origin_tag(origin_tag); + } + for (auto [kappa_pow, origin_tag] : zip_view(powers_of_kappa_inv, origin_tags)) { + kappa_pow.set_origin_tag(origin_tag); + } + } + + // ------------------------------------------------------------------------- + // Run Shplonk and reduce to KZG pairing check + // ------------------------------------------------------------------------- + std::vector> opening_claims; + opening_claims.reserve(NUM_OPENING_CLAIMS); + for (size_t idx = 0; idx < NUM_EVALS_FROM_COLUMNS; ++idx) { + opening_claims.push_back(OpeningClaim{ { kappa, evals[idx] }, flattened_cols[idx] }); + } + for (size_t idx = 0; idx < NUM_WIRES; ++idx) { + opening_claims.push_back( + OpeningClaim{ { kappa, evals[NUM_EVALS_FROM_COLUMNS + idx] }, 
merged_commitments[idx] }); + } + opening_claims.push_back(OpeningClaim{ { kappa_inv, evals.back() }, degree_check_commitment }); + + ShplonkVerifier shplonk_verifier = ShplonkVerifier::reduce_verification_no_finalize(opening_claims, transcript); + + Commitment g1_identity; + if constexpr (IsRecursive) { + g1_identity = Commitment::one(kappa.get_context()); + } else { + g1_identity = Commitment::one(); + } + BatchOpeningClaim batch_claim = shplonk_verifier.export_batch_opening_claim(g1_identity); + + BB_ASSERT(batch_claim.commitments.size() == MERGE_BATCHED_CLAIM_SIZE); + BB_ASSERT(batch_claim.scalars.size() == MERGE_BATCHED_CLAIM_SIZE); + + PairingPoints pairing_points = PCS::reduce_verify_batch_opening_claim(std::move(batch_claim), transcript); + + vinfo("BatchMergeVerifier: concatenation check passed: ", concatenation_verified ? "true" : "false"); + vinfo("BatchMergeVerifier: degree check passed: ", degree_check_verified ? "true" : "false"); + vinfo("BatchMergeVerifier: hash check passed: ", hash_verified ? "true" : "false"); + vinfo("BatchMergeVerifier: is N in [1, MAX_MERGE_SIZE]: ", is_valid_num_subtables ? "true" : "false"); + + return { pairing_points, + merged_commitments, + degree_check_verified && concatenation_verified && hash_verified && is_valid_num_subtables }; +} + +template +std::vector::FF> BatchMergeVerifier_:: + compute_indicator_array(const FF& N) const +{ + // Array s.t. indicator_array[i] = (i < N) + std::vector indicator_array; + if constexpr (IsRecursive) { + BB_ASSERT_GT(N.get_value(), 0U); + + // Create the array + // Note that N is automatically range constrained because we assert that 1 <= N <= MAX_MERGE_SIZE + for (size_t idx = 0; idx < MAX_MERGE_SIZE; idx++) { + const FF idx_wit = FF(idx); + indicator_array.push_back(idx_wit.template ranged_less_than(N)); + } + } else { + BB_ASSERT_GT(static_cast(N), 0U); + for (size_t idx = 0; idx < MAX_MERGE_SIZE; idx++) { + indicator_array.push_back(idx < static_cast(N) ? 
FF(1) : FF(0)); + } + } + + return indicator_array; +} + +template +std::vector::FF> BatchMergeVerifier_:: + compute_dirac_array(const std::vector& indicator_array) const +{ + // Shift to the left the indicator array (i < N) to get shifted_indicator_array[i] = (i < N - 1) + std::vector shifted_indicator_array; + shifted_indicator_array.reserve(MAX_MERGE_SIZE); + for (size_t i = 0; i < MAX_MERGE_SIZE - 1; ++i) { + shifted_indicator_array.push_back(indicator_array[i + 1]); + } + shifted_indicator_array.push_back(FF(0)); + + // Construct array s.t. dirac_array[i] = (i == N - 1) + std::vector dirac_array; + dirac_array.reserve(MAX_MERGE_SIZE); + for (size_t i = 0; i < MAX_MERGE_SIZE; ++i) { + dirac_array.push_back(indicator_array[i] - shifted_indicator_array[i]); + } + + return dirac_array; +} + +template +bool BatchMergeVerifier_::check_concatenation_identity( + std::vector& evals, const std::vector& pow_kappa_subtable_size) const +{ + bool concatenation_verified = true; + for (size_t j = 0; j < NUM_WIRES; ++j) { + FF concatenation_diff = evals[((NUM_COLUMN_TABLES - 1) * NUM_WIRES) + j]; + // Horner: i from N-1 down to 0 — accum ← accum · κ^{size_i} + T_{i,j}(κ). 
+ for (size_t i_rev = 1; i_rev < NUM_COLUMN_TABLES; ++i_rev) { + const size_t i = NUM_COLUMN_TABLES - 1 - i_rev; + concatenation_diff *= pow_kappa_subtable_size[i]; + concatenation_diff += evals[(i * NUM_WIRES) + j]; + } + concatenation_diff -= evals[NUM_EVALS_FROM_COLUMNS + j]; + + if constexpr (IsRecursive) { + concatenation_verified &= concatenation_diff.get_value() == 0; + concatenation_diff.assert_equal(FF(0), + "assert_equal: merge concatenation identity failed in Merge Verifier"); + } else { + concatenation_verified &= concatenation_diff == 0; + } + } + return concatenation_verified; +} + +template +bool BatchMergeVerifier_::check_degree_identity( + std::vector& evals, + const std::vector& powers_of_kappa_inv, + const FF& kappa, + const std::vector& degree_check_challenges) const +{ + FF degree_check_diff(0); + for (size_t i = 0; i < powers_of_kappa_inv.size(); ++i) { + for (size_t j = 0; j < NUM_WIRES; ++j) { + degree_check_diff += + degree_check_challenges[(i * NUM_WIRES) + j] * powers_of_kappa_inv[i] * evals[(i * NUM_WIRES) + j]; + } + } + degree_check_diff *= kappa; + degree_check_diff -= evals.back(); + + bool degree_check_verified = true; + if constexpr (IsRecursive) { + degree_check_verified &= degree_check_diff.get_value() == 0; + degree_check_diff.assert_equal(FF(0), "assert_equal: merge degree identity failed in Merge Verifier"); + } else { + degree_check_verified &= degree_check_diff == 0; + } + + return degree_check_verified; +} + +template +typename BatchMergeVerifier_::FF BatchMergeVerifier_::ecc_op_hash_step( + const std::vector& col_commitments, const std::optional& prev_hash) +{ + std::vector hash_inputs; + if (prev_hash.has_value()) { + if constexpr (IsRecursive) { + const FF& h = prev_hash.value(); + h.set_origin_tag(OriginTag::constant()); + hash_inputs.push_back(h); + } else { + hash_inputs.push_back(prev_hash.value()); + } + } + for (const auto& com : col_commitments) { + auto com_serialized = 
Transcript::Codec::serialize_to_fields(com); + if constexpr (IsRecursive) { + for (auto& el : com_serialized) { + el.set_origin_tag(OriginTag::constant()); + } + } + hash_inputs.insert(hash_inputs.end(), com_serialized.begin(), com_serialized.end()); + } + if constexpr (IsRecursive) { + FF hash_result = stdlib::poseidon2::hash(hash_inputs); + hash_result.unset_free_witness_tag(); + hash_result.set_origin_tag(OriginTag::constant()); + return hash_result; + } else { + return crypto::Poseidon2::hash(hash_inputs); + } +} + +template +bool BatchMergeVerifier_::check_hash_consistency(const FF& hash, + const std::vector& calculated_hashes, + const std::vector& indicator_array) const +{ + // Construct array s.t. dirac_array[i] = (i == N - 1) + std::vector dirac_array = compute_dirac_array(indicator_array); + + // Compute element-wise product of extended_hash and dirac_array + FF expected_hash = dirac_array[0] * calculated_hashes[0]; + for (size_t i = 1; i < MAX_MERGE_SIZE; ++i) { + expected_hash += calculated_hashes[i] * dirac_array[i]; + } + + FF hash_diff = expected_hash - hash; + bool verified = true; + if constexpr (IsRecursive) { + verified = hash_diff.get_value() == 0; + hash_diff.assert_equal(FF(0), "BatchMergeVerifier: column commitments hash mismatch"); + } else { + verified = hash_diff == FF(0); + } + + return verified; +} + +// Explicit template instantiations +template class BatchMergeVerifier_; +template class BatchMergeVerifier_, CHONK_MAX_NUM_CIRCUITS>; + +// For testing +template class BatchMergeVerifier_; +template class BatchMergeVerifier_, 9>; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/batch_merge_verifier.hpp b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_verifier.hpp new file mode 100644 index 000000000000..129691b5f9f6 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/goblin/batch_merge_verifier.hpp @@ -0,0 +1,138 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], commit: } +// 
external_1: { status: not started, auditors: [], commit: } +// external_2: { status: not started, auditors: [], commit: } +// ===================== + +#pragma once + +#include "barretenberg/commitment_schemes/claim.hpp" +#include "barretenberg/commitment_schemes/kzg/kzg.hpp" +#include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" +#include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/honk/proof_system/types/proof.hpp" +#include "barretenberg/srs/global_crs.hpp" +#include "barretenberg/transcript/transcript.hpp" + +namespace bb { + +/** + * @brief Unified batch verifier for the batch Goblin ECC op queue merge protocol. + * @details Works for both native verification and recursive (in-circuit) verification. + * + * @tparam Curve The curve type (native curve::BN254 or stdlib bn254) + * @tparam MaxMergeSize The maximum number of subtables that can be merged + */ +template class BatchMergeVerifier_ { + public: + using FF = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + using PCS = bb::KZG; + using PairingPoints = + std::conditional_t, bb::PairingPoints>; + using Proof = std::vector; + using Transcript = TranscriptFor_t; + using ShplonkVerifier = ShplonkVerifier_; + + static constexpr size_t NUM_WIRES = MegaExecutionTraceBlocks::NUM_WIRES; + static constexpr size_t MAX_MERGE_SIZE = MaxMergeSize; + static constexpr size_t LOG_MAX_MERGE_SIZE = static_cast(numeric::get_msb(MAX_MERGE_SIZE)); + static constexpr bool IsRecursive = Curve::is_stdlib_type; + static constexpr size_t NUM_COLUMN_TABLES = MAX_MERGE_SIZE + 1; // ZK table + subtables + static constexpr size_t NUM_EVALS_FROM_COLUMNS = NUM_COLUMN_TABLES * NUM_WIRES; + static constexpr size_t NUM_EVALS = + ((MAX_MERGE_SIZE + 2) * NUM_WIRES) + 1; // ZK table, subtables, merged tables, degree check poly + static constexpr size_t NUM_OPENING_CLAIMS = NUM_EVALS; + static constexpr size_t MERGE_BATCHED_CLAIM_SIZE = NUM_OPENING_CLAIMS + 2; // Add Shplonk quotient + 
identity + + using TableCommitments = std::array; + + /** + * @brief Result of batch merge verification. + */ + struct ReductionResult { + PairingPoints pairing_points; + TableCommitments merged_commitments; // [T_0]..[T_{NUM_WIRES-1}] + bool reduction_succeeded = false; + }; + + // Public for testing purposes + std::shared_ptr transcript; + + explicit BatchMergeVerifier_() + : transcript(std::make_shared()) + {} + + /** + * @brief Reduce the batch merge proof to a pairing check. + * + * + * @param proof Batch merge proof. + * @param hash Running hash of the column commitments [C_0]..[C_{M-1}] + * @return ReductionResult with pairing points and merged table commitments. + */ + [[nodiscard("Verification result should be checked")]] ReductionResult reduce_to_pairing_check(const Proof& proof, + const FF hash); + + /** + * @brief Compute one step of the ECC op running hash + * @details Returns Poseidon2([prev_hash, serialize_to_fields(col_commitments)]). + */ + static FF ecc_op_hash_step(const std::vector& col_commitments, + const std::optional& prev_hash = std::nullopt); + + private: + /** + * @brief Compute array of length M := MaxMergeSize s.t. indicator_array[i] = (i < N). + * + * @param N + * @return std::vector + */ + std::vector compute_indicator_array(const FF& N) const; + + /** + * @brief Compute array of length M := MaxMergeSize s.t. dirac_array[i] = (i == N - 1) + * + * @param indicator_array + * @return std::vector + */ + std::vector compute_dirac_array(const std::vector& indicator_array) const; + + /** + * @brief Verify the concatenation identity T(κ) = Σ_i C_i(κ) · κ^{offset_i} for every column. + * @details offset_i = Σ_{j& evals, const std::vector& pow_kappa_subtable_size) const; + + /** + * @brief Verify the degree identity + * G(κ⁻¹) = Σ_{i,col} α_{i,col} · C_i_col(κ) · κ^{1 − shift_sizes[j]} + * @details This is a single combined check across all subtables and columns. 
+ */ + bool check_degree_identity(std::vector& evals, + const std::vector& powers_of_kappa_inv, + const FF& kappa, + const std::vector& degree_check_challenges) const; + + /** + * @brief Verify that the column commitments in the proof match the running hash from accumulation. + * @details This ensures that the commitments provided in the proof are consistent with the expected + * running hash computed during the accumulation process. + */ + bool check_hash_consistency(const FF& hash, + const std::vector& calculated_hashes, + const std::vector& indicator_array) const; +}; + +// Type aliases for convenience +using BatchMergeVerifier = BatchMergeVerifier_; + +namespace stdlib::recursion::goblin { +template +using BatchMergeRecursiveVerifier = BatchMergeVerifier_, CHONK_MAX_NUM_CIRCUITS>; +} // namespace stdlib::recursion::goblin + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp b/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp index bda0d945258e..e6d966bac0c5 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin.cpp @@ -25,11 +25,11 @@ Goblin::Goblin(const std::shared_ptr& transcript) : transcript(transcript) {} -void Goblin::prove_merge(const std::shared_ptr& transcript, const MergeSettings merge_settings) +Goblin::MergeProof Goblin::prove_merge(const std::shared_ptr& transcript) const { BB_BENCH_NAME("Goblin::prove_merge"); - MergeProver merge_prover{ op_queue, transcript, merge_settings }; - merge_verification_queue.push_back(merge_prover.construct_proof()); + MergeProver merge_prover{ op_queue, transcript }; + return merge_prover.construct_proof(); } void Goblin::prove_eccvm() @@ -70,14 +70,9 @@ GoblinProof Goblin::prove() { BB_BENCH_NAME("Goblin::prove"); - prove_merge(transcript, MergeSettings::APPEND); // Use shared transcript for merge proving + goblin_proof.merge_proof = prove_merge(transcript); // Use shared transcript for merge proving info("Goblin: num 
ultra ops = ", op_queue->get_ultra_ops_count()); - BB_ASSERT_EQ(merge_verification_queue.size(), - 1U, - "Goblin::prove: merge_verification_queue should contain only a single proof at this stage."); - goblin_proof.merge_proof = merge_verification_queue.back(); - vinfo("prove eccvm..."); prove_eccvm(); vinfo("finished eccvm proving."); @@ -88,29 +83,45 @@ GoblinProof Goblin::prove() } /** - * @brief Recursively verify the next merge proof in the queue. - * @details Merge proofs are verified in FIFO order to match the circuit accumulation order. - * Each kernel verifies the merge proof from its corresponding app circuit. Since circuits - * are accumulated in sequence (e.g., App₀ → Kernel₀ → App₁ → Kernel₁ → ..., though - * in practice there can be repeated kernels such as inner → reset), the merge proofs must be - * verified in the same order to maintain consistency of the op queue commitments. + * @brief Generate proof of the batch merge + * + * @details During Chonk, we accumulate all the ecc ops into subtables. After having accumulated the tail circuit, we + * generate a proof of the batch merge: we take the tables T_1, .., T_N (where T_N is the table of ecc ops coming from + * the tail circuit) and we generate a proof that T_zk || T_1 || .. || T_N = T, where T_zk is a table generated on the + * fly by the prover to make the merged table T zero-knowledge. The consistency between the commitments sent by the + * prover in the batch merge and the ones generated during Chonk accumulation is enforced via a hash check: each kernel + * updates a running hash using the commitments to the ecc op tables of the circuits it folds. The final hash is passed + * to the batch merge verifier, which uses it to enforce the consistency between the data sent by the prover and the one + * used during accumulation. 
+ * */ -std::pair Goblin::recursively_verify_merge( - MegaBuilder& builder, - const RecursiveMergeCommitments& merge_commitments, - const std::shared_ptr& transcript, - const MergeSettings merge_settings) +void Goblin::prove_batch_merge() { - BB_ASSERT(!merge_verification_queue.empty()); - const MergeProof& merge_proof = merge_verification_queue.front(); - const stdlib::Proof stdlib_merge_proof(builder, merge_proof); + BB_BENCH_NAME("Goblin::prove_batch_merge"); + BatchMergeProver prover{ op_queue, CHONK_MAX_NUM_CIRCUITS }; + batch_merge_proof = prover.construct_proof(); +} - MergeRecursiveVerifier merge_verifier{ merge_settings, transcript }; - auto merge_result = merge_verifier.reduce_to_pairing_check(stdlib_merge_proof, merge_commitments); +/** + * @brief Recursively verify the batch merge proof + * + * @param builder + * @param hash Hash computed by the kernels during Chonk accumulation + * + * @details The hash commits to the data used during accumulation and is used by the batch merge verifier to enforce + * consistency between the data sent by the prover and the one used during accumulation. 
+ * @return std::pair + */ +std::pair Goblin::recursively_verify_batch_merge( + MegaBuilder& builder, const BatchMergeRecursiveVerifier::FF& hash) const +{ + BB_ASSERT(!batch_merge_proof.empty(), "Goblin::recursively_verify_batch_merge: no batch merge proof available"); + const stdlib::Proof stdlib_proof(builder, batch_merge_proof); - merge_verification_queue.pop_front(); // remove the processed proof from the queue + BatchMergeRecursiveVerifier verifier; + auto result = verifier.reduce_to_pairing_check(stdlib_proof, hash); - return { merge_result.pairing_points, merge_result.merged_commitments }; + return { result.pairing_points, result.merged_commitments }; } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp b/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp index 543e78266051..fed3a6e869e9 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin.hpp @@ -9,6 +9,8 @@ #include "barretenberg/eccvm/eccvm_flavor.hpp" #include "barretenberg/eccvm/eccvm_prover.hpp" #include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/goblin/batch_merge_prover.hpp" +#include "barretenberg/goblin/batch_merge_verifier.hpp" #include "barretenberg/goblin/merge_prover.hpp" #include "barretenberg/goblin/merge_verifier.hpp" #include "barretenberg/goblin/types.hpp" @@ -38,12 +40,15 @@ class Goblin { using ECCVMProvingKey = ECCVMFlavor::ProvingKey; using TranslatorBuilder = TranslatorCircuitBuilder; using MergeProof = MergeProver::MergeProof; + using BatchMergeProof = BatchMergeProver::MergeProof; using ECCVMVerificationKey = ECCVMFlavor::VerificationKey; using TranslatorVerificationKey = TranslatorFlavor::VerificationKey; using MergeRecursiveVerifier = stdlib::recursion::goblin::MergeRecursiveVerifier; + using BatchMergeRecursiveVerifier = stdlib::recursion::goblin::BatchMergeRecursiveVerifier; using PairingPoints = MergeRecursiveVerifier::PairingPoints; using TableCommitments = 
MergeVerifier::TableCommitments; using RecursiveTableCommitments = MergeRecursiveVerifier::TableCommitments; + using BatchRecursiveTableCommitments = BatchMergeRecursiveVerifier::TableCommitments; using MergeCommitments = MergeVerifier::InputCommitments; using RecursiveMergeCommitments = MergeRecursiveVerifier::InputCommitments; using RecursiveCommitment = MergeRecursiveVerifier::Commitment; @@ -59,7 +64,7 @@ class Goblin { fq evaluation_challenge_x; // challenge for evaluating the translation polynomials std::shared_ptr transcript; // shared between ECCVM and Translator - std::deque merge_verification_queue; // queue of merge proofs to be verified + BatchMergeProof batch_merge_proof; // delayed batch merge proof for Chonk struct VerificationKey { std::shared_ptr eccvm_verification_key = std::make_shared(); @@ -70,13 +75,11 @@ class Goblin { Goblin(const std::shared_ptr& transcript = std::make_shared()); /** - * @brief Construct a merge proof for the goblin ECC ops in the provided circuit; append the proof to the - * merge_verification_queue. - * - * @param transcript + * @brief Construct a single-step merge proof for the most recently merged subtable. + * @details In the Chonk flow this is invoked only for the final fixed-location append of the hiding kernel + * subtable; multi-subtable merges are handled by prove_batch_merge(). */ - void prove_merge(const std::shared_ptr& transcript = std::make_shared(), - const MergeSettings merge_settings = MergeSettings::PREPEND); + MergeProof prove_merge(const std::shared_ptr& transcript = std::make_shared()) const; /** * @brief Construct an ECCVM proof and IPA opening proof. @@ -99,20 +102,34 @@ class Goblin { GoblinProof prove(); /** - * @brief Recursively verify the next merge proof in the merge verification queue. - * @details Proofs are verified in a FIFO manner + * @brief Recursively verify the most recent single-step merge proof. 
+ @details In Chonk this is invoked once per IVC, recursively verifying the hiding kernel's fixed-location + append against the prior aggregate table. * * @param builder The circuit in which the recursive verification will be performed. - * @param inputs_commitments The commitment used by the Merge verifier + * @param merge_commitments The commitments used by the Merge verifier (subtable + prior aggregate) * @param transcript The transcript to be passed to the MergeRecursiveVerifier. - * @param merge_settings How the most recent ecc op subtable is going to be merged into the table of ecc ops * @return Pair of PairingPoints and commitments to the merged tables as read from the proof by the Merge verifier */ std::pair recursively_verify_merge( MegaBuilder& builder, const RecursiveMergeCommitments& merge_commitments, - const std::shared_ptr& transcript, - const MergeSettings merge_settings = MergeSettings::PREPEND); + const std::shared_ptr& transcript); + + /** + * @brief Construct a batched merge proof for all subtables accumulated during the IVC. + * @details Proves in a single shot that the full merged table is the correct concatenation of all per-circuit + * subtables. Run once at the end of the IVC. + */ + void prove_batch_merge(); + + /** + * @brief Recursively verify the batched merge proof inside the hiding kernel. + * @details `hash` is the running ECC-op hash chained over all per-circuit subtable commitments observed + * during accumulation; the in-circuit verifier checks the proof's column commitments against it. 
+ */ + std::pair recursively_verify_batch_merge( + MegaBuilder& builder, const BatchMergeRecursiveVerifier::FF& hash) const; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.cpp b/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.cpp index 9386174b39b8..50c01d4776df 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.cpp @@ -20,7 +20,7 @@ typename GoblinVerifier_::ReductionResult GoblinVerifier_::reduce_ { BB_BENCH_NAME("GoblinVerifier::reduce"); // Step 1: Verify the merge proof - MergeVerifier merge_verifier{ merge_settings, transcript }; + MergeVerifier merge_verifier{ transcript }; auto merge_result = merge_verifier.reduce_to_pairing_check(proof.merge_proof, merge_commitments); vinfo("Goblin: Merge reduced to pairing check successfully: ", merge_result.reduction_succeeded ? "true" : "false"); diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.hpp b/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.hpp index d48ef3690f7c..96390bb89d02 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.hpp @@ -71,16 +71,13 @@ template class GoblinVerifier_ { * @param transcript Shared transcript for Fiat-Shamir * @param proof The complete Goblin proof containing Merge, ECCVM, IPA, and Translator proofs * @param merge_commitments The input commitments for the Merge verifier (t and T_prev tables) - * @param merge_settings How the ecc op subtable was merged (PREPEND or APPEND) */ GoblinVerifier_(std::shared_ptr transcript, const GoblinProof& proof, - const MergeCommitments& merge_commitments, - MergeSettings merge_settings) + const MergeCommitments& merge_commitments) : transcript(std::move(transcript)) , proof(proof) , merge_commitments(merge_commitments) - , merge_settings(merge_settings) {} /** @@ -105,7 +102,6 @@ template class 
GoblinVerifier_ { std::shared_ptr transcript; GoblinProof proof; MergeCommitments merge_commitments; - MergeSettings merge_settings; }; // Type aliases for convenience diff --git a/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.test.cpp index 993df8f96547..4e6be6894799 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/goblin_verifier.test.cpp @@ -64,14 +64,16 @@ class GoblinRecursiveVerifierTests : public testing::Test { Goblin goblin; GoblinMockCircuits::construct_and_merge_mock_circuits(goblin, num_circuits); + goblin.op_queue->construct_zk_columns(); // Merge the ecc ops from the newly constructed circuit auto goblin_proof = goblin.prove(); // Subtable values and commitments - needed for (Recursive)MergeVerifier MergeCommitments merge_commitments; auto t_current = goblin.op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = goblin.op_queue->construct_previous_ultra_ops_table_columns(); - CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); + auto T_prev = goblin.op_queue->construct_table_columns_up_to_tail(); + CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows() + + UltraEccOpsTable::ZK_ULTRA_OPS); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = pcs_commitment_key.commit(T_prev[idx]); @@ -105,7 +107,7 @@ TEST_F(GoblinRecursiveVerifierTests, NativeVerification) auto [proof, merge_commitments, _] = create_goblin_prover_output(); auto transcript = std::make_shared(); - bb::GoblinVerifier verifier(transcript, proof, merge_commitments, MergeSettings::APPEND); + bb::GoblinVerifier verifier(transcript, proof, merge_commitments); auto result = verifier.reduce_to_pairing_check_and_ipa_opening(); // Check 
pairing points (aggregate merge + translator) @@ -132,9 +134,7 @@ TEST_F(GoblinRecursiveVerifierTests, Basic) auto transcript = std::make_shared(); GoblinStdlibProof stdlib_proof(builder, proof); - bb::GoblinRecursiveVerifier verifier{ - transcript, stdlib_proof, recursive_merge_commitments, MergeSettings::APPEND - }; + bb::GoblinRecursiveVerifier verifier{ transcript, stdlib_proof, recursive_merge_commitments }; auto output = verifier.reduce_to_pairing_check_and_ipa_opening(); // Aggregate merge + translator pairing points @@ -181,9 +181,7 @@ TEST_F(GoblinRecursiveVerifierTests, IndependentVKHash) auto transcript = std::make_shared(); GoblinStdlibProof stdlib_proof(builder, proof); - bb::GoblinRecursiveVerifier verifier{ - transcript, stdlib_proof, recursive_merge_commitments, MergeSettings::APPEND - }; + bb::GoblinRecursiveVerifier verifier{ transcript, stdlib_proof, recursive_merge_commitments }; auto output = verifier.reduce_to_pairing_check_and_ipa_opening(); // Aggregate merge + translator pairing points @@ -241,9 +239,7 @@ TEST_F(GoblinRecursiveVerifierTests, MergeToTranslatorBindingFailure) auto transcript = std::make_shared(); GoblinStdlibProof stdlib_proof(builder, proof); - bb::GoblinRecursiveVerifier verifier{ - transcript, stdlib_proof, recursive_merge_commitments, MergeSettings::APPEND - }; + bb::GoblinRecursiveVerifier verifier{ transcript, stdlib_proof, recursive_merge_commitments }; auto goblin_rec_verifier_output = verifier.reduce_to_pairing_check_and_ipa_opening(); // Aggregate merge + translator pairing points @@ -277,9 +273,7 @@ TEST_F(GoblinRecursiveVerifierTests, ECCVMToTranslatorBindingFailure) auto transcript = std::make_shared(); GoblinStdlibProof stdlib_proof(builder, proof); - bb::GoblinRecursiveVerifier verifier{ - transcript, stdlib_proof, recursive_merge_commitments, MergeSettings::APPEND - }; + bb::GoblinRecursiveVerifier verifier{ transcript, stdlib_proof, recursive_merge_commitments }; [[maybe_unused]] auto 
goblin_rec_verifier_output = verifier.reduce_to_pairing_check_and_ipa_opening(); EXPECT_FALSE(CircuitChecker::check(builder)); diff --git a/barretenberg/cpp/src/barretenberg/goblin/merge.test.cpp b/barretenberg/cpp/src/barretenberg/goblin/merge.test.cpp index 6877a6043b80..a68d3fb3adaf 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/merge.test.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/merge.test.cpp @@ -50,6 +50,25 @@ template class MergeTests : public testing::Test { enum class TamperProofMode : uint8_t { None, Shift, MCommitment, LEval }; + static std::shared_ptr construct_final_merge_op_queue(const size_t num_subtables_up_to_tail = 1) + { + using InnerFlavor = MegaFlavor; + using InnerBuilder = typename InnerFlavor::CircuitBuilder; + + auto op_queue = std::make_shared(); + for (size_t idx = 0; idx < num_subtables_up_to_tail; ++idx) { + InnerBuilder circuit{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(circuit); + op_queue->merge(); + } + + op_queue->construct_zk_columns(); + + InnerBuilder hiding_circuit{ op_queue }; + GoblinMockCircuits::construct_simple_circuit(hiding_circuit); + return op_queue; + } + /** * @brief Convert a stdlib type to its native value * @details In native context, returns value as-is; in recursive context, extracts the native value @@ -151,19 +170,18 @@ template class MergeTests : public testing::Test { * @details Creates a merge proof, optionally tampers with it, then verifies in the appropriate context */ static void prove_and_verify_merge(const std::shared_ptr& op_queue, - const MergeSettings settings = MergeSettings::PREPEND, const TamperProofMode tampering_mode = TamperProofMode::None, const bool expected = true) { // Create native merge proof auto prover_transcript = std::make_shared(); - MergeProver merge_prover{ op_queue, prover_transcript, settings }; + MergeProver merge_prover{ op_queue, prover_transcript }; auto native_proof = merge_prover.construct_proof(); tamper_with_proof(native_proof, 
tampering_mode); // Construct shifted column polynomials matching the circuit's ecc_op_wire layout auto t_current = op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = op_queue->construct_previous_ultra_ops_table_columns(); + auto T_prev = op_queue->construct_table_columns_up_to_tail(); std::array native_t_commitments; std::array native_T_prev_commitments; @@ -191,7 +209,7 @@ template class MergeTests : public testing::Test { // Verify the proof auto transcript = std::make_shared(); - MergeVerifierType verifier{ settings, transcript }; + MergeVerifierType verifier{ transcript }; auto result = verifier.reduce_to_pairing_check(proof, input_commitments); // Perform pairing check and verify @@ -220,15 +238,11 @@ template class MergeTests : public testing::Test { */ static void test_merge_proof_size() { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - InnerBuilder builder; - GoblinMockCircuits::construct_simple_circuit(builder); + auto op_queue = construct_final_merge_op_queue(); // Construct a merge proof and ensure its size matches expectation auto transcript = std::make_shared(); - MergeProver merge_prover{ builder.op_queue, transcript }; + MergeProver merge_prover{ op_queue, transcript }; auto merge_proof = merge_prover.construct_proof(); EXPECT_EQ(merge_proof.size(), MERGE_PROOF_SIZE); @@ -239,111 +253,48 @@ template class MergeTests : public testing::Test { */ static void test_single_merge() { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - auto op_queue = std::make_shared(); - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); + auto op_queue = construct_final_merge_op_queue(); prove_and_verify_merge(op_queue); } /** - * @brief Test multiple merge proofs with prepend mode + * @brief Test a final merge proof with multiple historical subtables up to the tail. 
*/ - static void test_multiple_merges_prepend() + static void test_multiple_merges() { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - auto op_queue = std::make_shared(); - - // First circuit - InnerBuilder circuit1{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit1); - prove_and_verify_merge(op_queue); - - // Second circuit - InnerBuilder circuit2{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit2); - prove_and_verify_merge(op_queue); - - // Third circuit - InnerBuilder circuit3{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit3); + auto op_queue = construct_final_merge_op_queue(/*num_subtables_up_to_tail=*/3); prove_and_verify_merge(op_queue); } - /** - * @brief Test merge proof with append mode - */ - static void test_merge_prepend_then_append() - { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - auto op_queue = std::make_shared(); - - // First circuit with prepend - InnerBuilder circuit1{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit1); - prove_and_verify_merge(op_queue); - - // Second circuit with prepend - InnerBuilder circuit2{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit2); - prove_and_verify_merge(op_queue); - - // Third circuit with append - InnerBuilder circuit3{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit3); - prove_and_verify_merge(op_queue, MergeSettings::APPEND); - } - /** * @brief Test failure when degree(l) > shift_size (as read from the proof) */ - static void test_degree_check_failure(const MergeSettings settings = MergeSettings::PREPEND) + static void test_degree_check_failure() { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - auto op_queue = std::make_shared(); - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); + auto op_queue = 
construct_final_merge_op_queue(); - prove_and_verify_merge(op_queue, settings, TamperProofMode::Shift, false); + prove_and_verify_merge(op_queue, TamperProofMode::Shift, false); } /** * @brief Test failure when m ≠ l + X^k r */ - static void test_merge_failure(const MergeSettings settings = MergeSettings::PREPEND) + static void test_merge_failure() { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - auto op_queue = std::make_shared(); - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); + auto op_queue = construct_final_merge_op_queue(); - prove_and_verify_merge(op_queue, settings, TamperProofMode::MCommitment, false); + prove_and_verify_merge(op_queue, TamperProofMode::MCommitment, false); } /** * @brief Test failure when g_j(kappa) ≠ kappa^{k-1} * l_j(1/kappa) */ - static void test_eval_failure(const MergeSettings settings = MergeSettings::PREPEND) + static void test_eval_failure() { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - auto op_queue = std::make_shared(); - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); + auto op_queue = construct_final_merge_op_queue(); - prove_and_verify_merge(op_queue, settings, TamperProofMode::LEval, false); + prove_and_verify_merge(op_queue, TamperProofMode::LEval, false); } /** @@ -372,7 +323,8 @@ template class MergeTests : public testing::Test { op_queue->merge(); // Right table = the merged previous table; Left table = empty - std::array right_table = op_queue->construct_ultra_ops_table_columns(); + std::array right_table = + op_queue->construct_ultra_ops_table_columns(/*include_zk_ops=*/false); std::array left_table; std::array merged_table; for (size_t idx = 0; idx < NUM_WIRES; idx++) { @@ -474,16 +426,15 @@ template class MergeTests : public testing::Test { auto native_proof = prover_transcript->export_proof(); - // === Verify with unmodified 
verifier (which has BB_ASSERT_GT for shift_size > 0) === - // Build input commitments: t = empty (zero commits), T_prev = right table + // Build input commitments: T_prev = empty (zero commits), t = right table InputCommitments input_commitments; for (size_t idx = 0; idx < NUM_WIRES; idx++) { - input_commitments.t_commitments[idx] = ck.commit(left_table[idx]); - input_commitments.T_prev_commitments[idx] = ck.commit(right_table[idx]); + input_commitments.t_commitments[idx] = ck.commit(right_table[idx]); + input_commitments.T_prev_commitments[idx] = ck.commit(left_table[idx]); } auto verifier_transcript = std::make_shared(); - MergeVerifierType verifier{ MergeSettings::PREPEND, verifier_transcript }; + MergeVerifierType verifier{ verifier_transcript }; auto result = verifier.reduce_to_pairing_check(native_proof, input_commitments); bool pairing_verified = result.pairing_points.check(); @@ -510,44 +461,24 @@ TYPED_TEST(MergeTests, SingleMerge) TestFixture::test_single_merge(); } -TYPED_TEST(MergeTests, MultipleMergesPrepend) -{ - TestFixture::test_multiple_merges_prepend(); -} - -TYPED_TEST(MergeTests, MergePrependThenAppend) -{ - TestFixture::test_merge_prepend_then_append(); -} - -TYPED_TEST(MergeTests, DegreeCheckFailurePrepend) +TYPED_TEST(MergeTests, MultipleMerges) { - TestFixture::test_degree_check_failure(MergeSettings::PREPEND); + TestFixture::test_multiple_merges(); } -TYPED_TEST(MergeTests, DegreeCheckFailureAppend) +TYPED_TEST(MergeTests, DegreeCheckFailure) { - TestFixture::test_degree_check_failure(MergeSettings::APPEND); + TestFixture::test_degree_check_failure(); } -TYPED_TEST(MergeTests, MergeFailurePrepend) +TYPED_TEST(MergeTests, MergeFailure) { - TestFixture::test_merge_failure(MergeSettings::PREPEND); + TestFixture::test_merge_failure(); } -TYPED_TEST(MergeTests, MergeFailureAppend) +TYPED_TEST(MergeTests, EvalFailure) { - TestFixture::test_merge_failure(MergeSettings::APPEND); -} - -TYPED_TEST(MergeTests, EvalFailurePrepend) -{ - 
TestFixture::test_eval_failure(MergeSettings::PREPEND); -} - -TYPED_TEST(MergeTests, EvalFailureAppend) -{ - TestFixture::test_eval_failure(MergeSettings::APPEND); + TestFixture::test_eval_failure(); } TYPED_TEST(MergeTests, HonestEmptyLeftTable) @@ -577,31 +508,25 @@ TYPED_TEST(MergeTests, DifferentTranscriptOriginTagFailure) using BuilderType = typename TestFixture::BuilderType; using MergeVerifierType = typename TestFixture::MergeVerifierType; using Transcript = typename TestFixture::Transcript; - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; constexpr size_t NUM_WIRES = TestFixture::NUM_WIRES; // Create single builder for both verifiers (realistic - both in same circuit) BuilderType builder; // === Generate two separate merge proofs (simulating two independent merge operations) === - auto op_queue_1 = std::make_shared(); - InnerBuilder circuit_1{ op_queue_1 }; - GoblinMockCircuits::construct_simple_circuit(circuit_1); + auto op_queue_1 = TestFixture::construct_final_merge_op_queue(); auto prover_transcript_1 = std::make_shared(); MergeProver prover_1{ op_queue_1, prover_transcript_1 }; auto proof_1 = prover_1.construct_proof(); - auto op_queue_2 = std::make_shared(); - InnerBuilder circuit_2{ op_queue_2 }; - GoblinMockCircuits::construct_simple_circuit(circuit_2); + auto op_queue_2 = TestFixture::construct_final_merge_op_queue(); auto prover_transcript_2 = std::make_shared(); MergeProver prover_2{ op_queue_2, prover_transcript_2 }; auto proof_2 = prover_2.construct_proof(); // Get native commitments for proof 1 (shifted to match circuit ecc_op_wire layout) auto t_1 = op_queue_1->construct_current_ultra_ops_subtable_columns(); - auto T_prev_1 = op_queue_1->construct_previous_ultra_ops_table_columns(); + auto T_prev_1 = op_queue_1->construct_table_columns_up_to_tail(); std::array native_t_commitments_1; std::array native_T_prev_commitments_1; for (size_t idx = 0; idx < NUM_WIRES; idx++) { @@ -611,7 +536,7 @@ 
TYPED_TEST(MergeTests, DifferentTranscriptOriginTagFailure) // === Create first verifier with its own transcript instance === auto transcript_1 = std::make_shared(); - [[maybe_unused]] MergeVerifierType verifier_1{ MergeSettings::PREPEND, transcript_1 }; + [[maybe_unused]] MergeVerifierType verifier_1{ transcript_1 }; [[maybe_unused]] auto proof_1_recursive = TestFixture::create_proof(builder, proof_1); @@ -627,7 +552,7 @@ TYPED_TEST(MergeTests, DifferentTranscriptOriginTagFailure) // === Create second verifier with a DIFFERENT transcript instance === // This simulates having two independent merge verifiers in the same circuit auto transcript_2 = std::make_shared(); - MergeVerifierType verifier_2{ MergeSettings::PREPEND, transcript_2 }; + MergeVerifierType verifier_2{ transcript_2 }; auto proof_2_recursive = TestFixture::create_proof(builder, proof_2); @@ -738,13 +663,7 @@ class MergeTranscriptTests : public ::testing::Test { */ TEST_F(MergeTranscriptTests, ProverManifestConsistency) { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - // Construct a simple circuit to generate merge proof - auto op_queue = std::make_shared(); - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); + auto op_queue = MergeTests::construct_final_merge_op_queue(); // Construct merge proof with manifest enabled auto transcript = std::make_shared(); @@ -770,13 +689,7 @@ TEST_F(MergeTranscriptTests, ProverManifestConsistency) */ TEST_F(MergeTranscriptTests, VerifierManifestConsistency) { - using InnerFlavor = MegaFlavor; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - - // Construct a simple circuit - auto op_queue = std::make_shared(); - InnerBuilder circuit{ op_queue }; - GoblinMockCircuits::construct_simple_circuit(circuit); + auto op_queue = MergeTests::construct_final_merge_op_queue(); // Generate merge proof with prover manifest enabled auto prover_transcript = std::make_shared(); @@ 
-787,7 +700,7 @@ TEST_F(MergeTranscriptTests, VerifierManifestConsistency) // Construct commitments for verifier (shifted to match circuit ecc_op_wire layout) MergeVerifier::InputCommitments merge_commitments; auto t_current = op_queue->construct_current_ultra_ops_subtable_columns(); - auto T_prev = op_queue->construct_previous_ultra_ops_table_columns(); + auto T_prev = op_queue->construct_table_columns_up_to_tail(); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { merge_commitments.t_commitments[idx] = merge_prover.pcs_commitment_key.commit(t_current[idx]); merge_commitments.T_prev_commitments[idx] = merge_prover.pcs_commitment_key.commit(T_prev[idx]); @@ -796,7 +709,7 @@ TEST_F(MergeTranscriptTests, VerifierManifestConsistency) // Verify proof with verifier manifest enabled auto verifier_transcript = std::make_shared(); verifier_transcript->enable_manifest(); - MergeVerifier merge_verifier{ MergeSettings::PREPEND, verifier_transcript }; + MergeVerifier merge_verifier{ verifier_transcript }; auto result = merge_verifier.reduce_to_pairing_check(merge_proof, merge_commitments); // Verification should succeed diff --git a/barretenberg/cpp/src/barretenberg/goblin/merge_prover.cpp b/barretenberg/cpp/src/barretenberg/goblin/merge_prover.cpp index 5e4254917944..62ef391290bd 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/merge_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/merge_prover.cpp @@ -16,29 +16,23 @@ namespace bb { * @details We require an SRS at least as large as the current ultra ecc ops table * TODO(https://github.com/AztecProtocol/barretenberg/issues/1267): consider possible efficiency improvements */ -MergeProver::MergeProver(const std::shared_ptr& op_queue, - std::shared_ptr transcript, - MergeSettings settings) +MergeProver::MergeProver(const std::shared_ptr& op_queue, std::shared_ptr transcript) : transcript(std::move(transcript)) , op_queue(op_queue) - , settings(settings) { - // Merge the current subtable (for which a merge 
proof is being constructed) prior to - // procedeing with proving. - if (settings == MergeSettings::APPEND) { - op_queue->merge(settings, op_queue->get_append_offset()); + // MergeProver is used only for the final merge, where the hiding kernel subtable is appended at a fixed offset. + const size_t append_offset = op_queue->get_append_offset(); + fixed_append_shift_size = UltraEccOpsTable::ZK_ULTRA_OPS + (append_offset * UltraEccOpsTable::NUM_ROWS_PER_OP); + op_queue->merge_fixed_append(append_offset); - } else { - op_queue->merge(settings); - } - - pcs_commitment_key = CommitmentKey(op_queue->get_ultra_ops_table_num_rows()); + pcs_commitment_key = CommitmentKey(op_queue->get_ultra_ops_table_num_rows() + UltraEccOpsTable::ZK_ULTRA_OPS); }; MergeProver::Polynomial MergeProver::compute_degree_check_polynomial( - const std::array& left_table, const std::vector& degree_check_challenges) + const std::array& left_table, const std::vector& degree_check_challenges) const { - Polynomial reversed_batched_left_tables(left_table[0].size()); + // The left table has a fixed length, so we need to compute the reverse according to that length + Polynomial reversed_batched_left_tables(fixed_append_shift_size); for (size_t idx = 0; idx < NUM_WIRES; idx++) { reversed_batched_left_tables.add_scaled(left_table[idx], degree_check_challenges[idx]); } @@ -153,8 +147,8 @@ MergeProver::OpeningClaim MergeProver::compute_shplonk_opening_claim( * @details Proves that M_j(X) = L_j(X) + X^k * R_j(X) and deg(L_j) < k for j = 1,2,3,4. * Uses degree-check polynomial G(X) and Shplonk for batched openings. * - * For PREPEND: L = subtable (t), R = previous table (T_prev) - * For APPEND: L = previous table (T_prev), R = subtable (t) + * L = aggregate table up to and including the tail subtable (T_tail), R = the hiding kernel's subtable (t, + * appended at a fixed offset and carrying APPEND_TRACE_OFFSET leading zeros), M = the resulting full table (T). 
* * @see MERGE_PROTOCOL.md for complete protocol specification. * @return MergeProver::MergeProof @@ -166,19 +160,12 @@ MergeProver::MergeProof MergeProver::construct_proof() std::array right_table; std::array merged_table = op_queue->construct_ultra_ops_table_columns(); // T - if (settings == MergeSettings::PREPEND) { - left_table = op_queue->construct_current_ultra_ops_subtable_columns(); // t - right_table = op_queue->construct_previous_ultra_ops_table_columns(); // T_prev - } else { - left_table = op_queue->construct_previous_ultra_ops_table_columns(); // T_prev - right_table = op_queue->construct_current_ultra_ops_subtable_columns(); // t (hiding kernel subtable, - // carries MegaZKFlavor::TRACE_OFFSET - // leading zeros internally) - } + left_table = op_queue->construct_table_columns_up_to_tail(); // T_tail + right_table = op_queue->construct_current_ultra_ops_subtable_columns(); // t (fixed append carries + // APPEND_TRACE_OFFSET leading zeros) // Send shift_size to the verifier - const size_t shift_size = left_table[0].size(); - transcript->send_to_verifier("shift_size", static_cast(shift_size)); + transcript->send_to_verifier("shift_size", static_cast(fixed_append_shift_size)); // Compute commitments [M_j] and send to the verifier for (size_t idx = 0; idx < NUM_WIRES; ++idx) { diff --git a/barretenberg/cpp/src/barretenberg/goblin/merge_prover.hpp b/barretenberg/cpp/src/barretenberg/goblin/merge_prover.hpp index b62c2fe37857..252a7217225a 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/merge_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/merge_prover.hpp @@ -17,8 +17,11 @@ namespace bb { /** - * @brief Prover class for the Goblin ECC op queue transcript merge protocol - * + * @brief Prover for the single-step Goblin ECC op queue merge protocol. + * @details Proves that the most recently merged subtable concatenates correctly with the prior aggregate table to + * form the new aggregate table. 
Used in the Chonk flow only for the final merge of the hiding kernel's subtable + * (placed at a fixed offset). For the multi-subtable batched merge proven once at the end of an IVC, see + * BatchMergeProver. */ class MergeProver { using Curve = curve::BN254; @@ -35,9 +38,7 @@ class MergeProver { using MergeProof = std::vector; static constexpr size_t NUM_WIRES = MegaExecutionTraceBlocks::NUM_WIRES; - explicit MergeProver(const std::shared_ptr& op_queue, - std::shared_ptr transcript, - MergeSettings settings = MergeSettings::PREPEND); + explicit MergeProver(const std::shared_ptr& op_queue, std::shared_ptr transcript); BB_PROFILE MergeProof construct_proof(); @@ -48,7 +49,7 @@ class MergeProver { private: std::shared_ptr transcript; std::shared_ptr op_queue; - MergeSettings settings; + size_t fixed_append_shift_size = 0; std::vector labels_degree_check = { "LEFT_TABLE_DEGREE_CHECK_0", "LEFT_TABLE_DEGREE_CHECK_1", @@ -77,8 +78,8 @@ class MergeProver { * @param degree_check_challenges * @return Polynomial */ - static Polynomial compute_degree_check_polynomial(const std::array& left_table, - const std::vector& degree_check_challenges); + Polynomial compute_degree_check_polynomial(const std::array& left_table, + const std::vector& degree_check_challenges) const; /** * @brief Compute the batched Shplonk quotient polynomial. diff --git a/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.cpp b/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.cpp index b2dcfb66bfd2..bd714c42ed46 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.cpp @@ -109,7 +109,8 @@ BatchOpeningClaim MergeVerifier_::compute_shplonk_opening_claim( * * @see MERGE_PROTOCOL.md for complete protocol specification. 
* @param proof The merge proof to verify - * @param input_commitments Commitments to subtable (t) and previous table (T_prev) + * @param input_commitments Commitments to the subtable being merged (t) and to the aggregate table prior to this + * merge (T_prev, covering all subtables up to and including the tail) * @return VerificationResult containing pairing points, merged table commitments, and check results */ template @@ -131,14 +132,11 @@ typename MergeVerifier_::ReductionResult MergeVerifier_::reduce_to // The vector is composed of: [L_1], .., [L_4], [R_1], .., [R_4], [M_1], .., [M_4], [G] std::vector table_commitments; table_commitments.reserve((3 * NUM_WIRES) + 1); - for (size_t idx = 0; idx < NUM_WIRES; ++idx) { - table_commitments.emplace_back(settings == MergeSettings::PREPEND ? input_commitments.t_commitments[idx] - : input_commitments.T_prev_commitments[idx]); - } - for (size_t idx = 0; idx < NUM_WIRES; ++idx) { - table_commitments.emplace_back(settings == MergeSettings::PREPEND ? 
input_commitments.T_prev_commitments[idx] - : input_commitments.t_commitments[idx]); - } + table_commitments.insert(table_commitments.end(), + input_commitments.T_prev_commitments.begin(), + input_commitments.T_prev_commitments.end()); + table_commitments.insert( + table_commitments.end(), input_commitments.t_commitments.begin(), input_commitments.t_commitments.end()); for (size_t idx = 0; idx < NUM_WIRES; ++idx) { table_commitments.emplace_back( transcript->template receive_from_prover("MERGED_TABLE_" + std::to_string(idx))); diff --git a/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.hpp b/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.hpp index dcbfdf80d8df..b87a1712741d 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/merge_verifier.hpp @@ -16,8 +16,10 @@ namespace bb { /** - * @brief Unified verifier class for the Goblin ECC op queue transcript merge protocol - * @details Works for both native verification and recursive (in-circuit) verification + * @brief Verifier for the single-step Goblin ECC op queue merge protocol. + * @details Works for both native verification and recursive (in-circuit) verification. In the Chonk flow this + * verifier is used only for the final merge of the hiding kernel's subtable; the multi-subtable batched merge + * proven once at the end of an IVC is handled by BatchMergeVerifier_. * @tparam Curve The curve type (native curve::BN254 or stdlib bn254) */ template class MergeVerifier_ { @@ -43,9 +45,10 @@ template class MergeVerifier_ { /** * Commitments used by the verifier to run the verification algorithm. 
They contain: - * - `t_commitments`: the subtable commitments data, containing the commitments to t_j read from the transcript by - * the HN verifier with which the Merge verifier shares a transcript - * - `T_prev_commitments`: the commitments to the full op_queue table after the previous iteration of merge + * - `t_commitments`: commitments to the subtable being merged (t_j), read from the transcript by the HN + * verifier with which the Merge verifier shares a transcript + * - `T_prev_commitments`: commitments to the aggregate op_queue table prior to this merge (i.e. covering all + * subtables up to and including the tail subtable, but excluding the one currently being appended) */ struct InputCommitments { TableCommitments t_commitments; @@ -63,13 +66,10 @@ template class MergeVerifier_ { bool reduction_succeeded = false; // Aggregate of degree and concatenation checks }; - MergeSettings settings; std::shared_ptr transcript; - explicit MergeVerifier_(const MergeSettings settings = MergeSettings::PREPEND, - std::shared_ptr transcript = std::make_shared()) - : settings(settings) - , transcript(std::move(transcript)) + explicit MergeVerifier_(std::shared_ptr transcript = std::make_shared()) + : transcript(std::move(transcript)) {} /** diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp index 93d0df6ef0d9..968285021d4a 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp @@ -8,6 +8,7 @@ #include "barretenberg/srs/global_crs.hpp" #include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" #include "barretenberg/stdlib/hash/keccak/keccak.hpp" +#include "barretenberg/stdlib/hash/poseidon2/poseidon2.hpp" #include "barretenberg/stdlib/hash/sha256/sha256.hpp" #include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" #include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" @@ -50,6 
+51,24 @@ template void generate_sha256_test_circuit(Builder& builder, } } +/** + * @brief Generate a test circuit that computes a single poseidon2 hash over a vector of `num_inputs` field elements. + */ +template void generate_poseidon2_hash_test_circuit(Builder& builder, size_t num_inputs) +{ + using field_ct = stdlib::field_t; + using witness_ct = stdlib::witness_t; + + std::vector inputs; + inputs.reserve(num_inputs); + for (size_t i = 0; i < num_inputs; i++) { + inputs.emplace_back(witness_ct(&builder, bb::fr(i + 1))); + } + + auto out = stdlib::poseidon2::hash(inputs); + out.set_public(); +} + class GoblinMockCircuits { public: using Curve = curve::BN254; @@ -139,22 +158,10 @@ class GoblinMockCircuits { static void construct_and_merge_mock_circuits(Goblin& goblin, const size_t num_circuits = 3) { - using Fq = curve::Grumpkin::ScalarField; for (size_t idx = 0; idx < num_circuits - 1; ++idx) { MegaCircuitBuilder builder{ goblin.op_queue }; - if (idx == num_circuits - 2) { - // The tail circuit's subtable is prepended last and sits at the top of the aggregate op queue table. - // The initial no-op gives the Translator's op queue wires their 2 shiftability-required leading zeros. 
- builder.queue_ecc_no_op(); - // Add random ops at START for Translator ZK (lands at beginning of op queue table) - randomise_op_queue(builder, TranslatorCircuitBuilder::NUM_RANDOM_OPS_START); - // Add hiding op for ECCVM ZK (prepended to ECCVM ops at row 1) - builder.queue_ecc_hiding_op(Fq::random_element(), Fq::random_element()); - } construct_simple_circuit(builder); - goblin.prove_merge(); - // Pop the merge proof from the queue, Goblin will be verified at the end - goblin.merge_verification_queue.pop_front(); + goblin.op_queue->merge(); } MegaCircuitBuilder builder{ goblin.op_queue }; GoblinMockCircuits::construct_simple_circuit(builder); diff --git a/barretenberg/cpp/src/barretenberg/goblin_avm/goblin_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/goblin_avm/goblin_verifier.test.cpp index 809a35681539..70669da886e6 100644 --- a/barretenberg/cpp/src/barretenberg/goblin_avm/goblin_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin_avm/goblin_verifier.test.cpp @@ -96,7 +96,7 @@ class GoblinAvmRecursiveVerifierTests : public testing::Test { // Commit to op_queue columns. 
TableCommitments table_commitments; - auto ultra_ops_table_columns = goblin.op_queue->construct_ultra_ops_table_columns(); + auto ultra_ops_table_columns = goblin.op_queue->construct_ultra_ops_table_columns(/*include_zk_ops*/ false); CommitmentKey pcs_commitment_key(goblin.op_queue->get_ultra_ops_table_num_rows()); for (size_t idx = 0; idx < MegaFlavor::NUM_WIRES; idx++) { table_commitments[idx] = pcs_commitment_key.commit(ultra_ops_table_columns[idx]); diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp b/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp index bd3e9648eee9..1e11af85d9ab 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/permutation_lib.hpp @@ -134,44 +134,47 @@ PermutationMapping compute_permutation_mapping( // Represents the idx of a variable in circuit_constructor.variables std::span real_variable_tags = circuit_constructor.real_variable_tags; - // Go through each cycle - for (size_t cycle_idx = 0; cycle_idx < wire_copy_cycles.size(); ++cycle_idx) { - // We go through the cycle and fill-out/modify `mapping`. Following the generalized permutation algorithm, we - // take separate care of first/last node handling. - const CyclicPermutation& cycle = wire_copy_cycles[cycle_idx]; - const auto cycle_size = cycle.size(); - if (cycle_size == 0) { - continue; - } + // Cycles are disjoint by construction of the generalized permutation argument: every (gate_idx, wire_idx) position + // belongs to exactly one variable, hence to exactly one cycle. Per-(col, row) writes from different cycles never + // alias, so parallelising across cycle_idx is safe without per-thread staging or merge. 
+ parallel_for_heuristic( + wire_copy_cycles.size(), + [&](size_t cycle_idx) { + const CyclicPermutation& cycle = wire_copy_cycles[cycle_idx]; + const auto cycle_size = cycle.size(); + if (cycle_size == 0) { + return; + } - const cycle_node& first_node = cycle[0]; - const cycle_node& last_node = cycle[cycle_size - 1]; + const cycle_node& first_node = cycle[0]; + const cycle_node& last_node = cycle[cycle_size - 1]; - const auto first_row = static_cast(first_node.gate_idx); - const auto first_col = first_node.wire_idx; - const auto last_row = static_cast(last_node.gate_idx); - const auto last_col = last_node.wire_idx; + const auto first_row = static_cast(first_node.gate_idx); + const auto first_col = first_node.wire_idx; + const auto last_row = static_cast(last_node.gate_idx); + const auto last_col = last_node.wire_idx; - // First node: id gets tagged with the cycle's variable tag - mapping.ids[first_col].is_tag[first_row] = true; - mapping.ids[first_col].row_idx[first_row] = real_variable_tags[cycle_idx]; + // First node: id gets tagged with the cycle's variable tag + mapping.ids[first_col].is_tag[first_row] = true; + mapping.ids[first_col].row_idx[first_row] = real_variable_tags[cycle_idx]; - // Last node: sigma gets tagged and points to tau(tag) instead of wrapping to first node - mapping.sigmas[last_col].is_tag[last_row] = true; - mapping.sigmas[last_col].row_idx[last_row] = circuit_constructor.tau().at(real_variable_tags[cycle_idx]); + // Last node: sigma gets tagged and points to tau(tag) instead of wrapping to first node + mapping.sigmas[last_col].is_tag[last_row] = true; + mapping.sigmas[last_col].row_idx[last_row] = circuit_constructor.tau().at(real_variable_tags[cycle_idx]); - // All nodes except the last: sigma points to the next node in the cycle - for (size_t node_idx = 0; node_idx + 1 < cycle_size; ++node_idx) { - const cycle_node& current_node = cycle[node_idx]; - const cycle_node& next_node = cycle[node_idx + 1]; + // All nodes except the last: sigma 
points to the next node in the cycle + for (size_t node_idx = 0; node_idx + 1 < cycle_size; ++node_idx) { + const cycle_node& current_node = cycle[node_idx]; + const cycle_node& next_node = cycle[node_idx + 1]; - const auto current_row = static_cast(current_node.gate_idx); - const auto current_col = current_node.wire_idx; - // Point current node to next node. - mapping.sigmas[current_col].row_idx[current_row] = next_node.gate_idx; - mapping.sigmas[current_col].col_idx[current_row] = static_cast(next_node.wire_idx); - } - } + const auto current_row = static_cast(current_node.gate_idx); + const auto current_col = current_node.wire_idx; + // Point current node to next node. + mapping.sigmas[current_col].row_idx[current_row] = next_node.gate_idx; + mapping.sigmas[current_col].col_idx[current_row] = static_cast(next_node.wire_idx); + } + }, + /*heuristic_cost=*/thread_heuristics::FF_COPY_COST * 8); // Add information about public inputs so that the cycles can be altered later; See the construction of the // permutation polynomials for details. 
This _only_ effects sigma_0, the 0th sigma polynomial, as the structure of diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp index 901b6266fc56..a0c8886c53e3 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/execution_trace_block.hpp @@ -315,9 +315,10 @@ template class ExecutionTraceBlock { Selector& q_2() { return non_gate_selectors[3]; }; Selector& q_3() { return non_gate_selectors[4]; }; Selector& q_4() { return non_gate_selectors[5]; }; + Selector& q_5() { return non_gate_selectors[6]; }; protected: - std::array, 6> non_gate_selectors; + std::array, 7> non_gate_selectors; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/gate_data.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/gate_data.hpp index e5eb13a515f4..ffc8d414ce5e 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/gate_data.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/gate_data.hpp @@ -110,6 +110,15 @@ template struct poseidon2_external_gate_ { size_t round_idx; }; +// Initial linear layer gate for Poseidon2. Wires hold the raw permutation input; the next row +// holds M_E * input and is consumed by the first external-round gate. +template struct poseidon2_initial_external_gate_ { + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; +}; + // Internal gate data for poseidon2 internal round template struct poseidon2_internal_gate_ { uint32_t a; @@ -118,4 +127,39 @@ template struct poseidon2_internal_gate_ { uint32_t d; size_t round_idx; }; + +// K=4 compressed internal-round gate: processes FOUR consecutive internal rounds per row. +// Wires: a = state[0] at round 4i+0, b = state[0] at round 4i+1, +// c = state[0] at round 4i+2, d = state[0] at round 4i+3. 
+// (s_1, s_2, s_3) at row start are reconstructed inside the relation via a 3x3 Vandermonde solve. +// +// Round constants on the row (see Poseidon2QuadInternalRelationImpl): +// q_l, q_r, q_o, q_4 = c_{4i}, c_{4i+1}, c_{4i+2}, c_{4i+3} // this quad's 4 S-box constants +// q_m, q_c, q_5 = c_{4(i+1)}, c_{4(i+1)+1}, c_{4(i+1)+2} // next quad's first 3 constants +// // (unused on terminal row) +template struct poseidon2_quad_internal_gate_ { + uint32_t a; // state[0] at round 4i+0 + uint32_t b; // state[0] at round 4i+1 + uint32_t c; // state[0] at round 4i+2 + uint32_t d; // state[0] at round 4i+3 + size_t round_idx_start; // absolute round_constants index of round 4i (this quad's 1st round) + size_t next_pair_start; // absolute round_constants index of round 4(i+1) (next quad's 1st round); + // ignored when is_terminal = true + bool is_terminal; // true on the last compressed row (successor is standard-encoded) +}; + +// Entry transition gate: standard-encoded state (s_0, s_1, s_2, s_3) at round `round_idx_start` +// whose successor is the first K=4 compressed row. The relation forces the successor's +// w_r_shift, w_o_shift, w_4_shift to state[0] at rounds start+1, start+2, start+3 respectively. 
+// +// Round constants on the row: +// q_l, q_r, q_o = c_{start}, c_{start+1}, c_{start+2} (first 3 internal round constants) +// q_4, q_m, q_c, q_5 = 0 (unused) +template struct poseidon2_transition_entry_gate_ { + uint32_t a; // s_0 + uint32_t b; // s_1 + uint32_t c; // s_2 + uint32_t d; // s_3 + size_t round_idx_start; // absolute round_constants index of the first internal round +}; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp index b9cb166ec021..d32faa088106 100644 --- a/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/execution_trace/mega_execution_trace.hpp @@ -29,7 +29,10 @@ class MegaTraceBlock : public ExecutionTraceBlock { virtual SelectorType& q_memory() { return this->zero_selectors[5]; }; virtual SelectorType& q_nnf() { return this->zero_selectors[6]; }; virtual SelectorType& q_poseidon2_external() { return this->zero_selectors[7]; }; - virtual SelectorType& q_poseidon2_internal() { return this->zero_selectors[8]; }; + virtual SelectorType& q_poseidon2_external_initial() { return this->zero_selectors[8]; }; + virtual SelectorType& q_poseidon2_quad_internal() { return this->zero_selectors[9]; }; + virtual SelectorType& q_poseidon2_quad_internal_terminal() { return this->zero_selectors[10]; }; + virtual SelectorType& q_poseidon2_transition_entry() { return this->zero_selectors[11]; }; virtual const SelectorType& q_busread() const { return this->zero_selectors[0]; }; virtual const SelectorType& q_lookup() const { return this->zero_selectors[1]; }; @@ -39,7 +42,10 @@ class MegaTraceBlock : public ExecutionTraceBlock { virtual const SelectorType& q_memory() const { return this->zero_selectors[5]; }; virtual const SelectorType& q_nnf() const { return this->zero_selectors[6]; }; virtual const SelectorType& q_poseidon2_external() const { 
return this->zero_selectors[7]; }; - virtual const SelectorType& q_poseidon2_internal() const { return this->zero_selectors[8]; }; + virtual const SelectorType& q_poseidon2_external_initial() const { return this->zero_selectors[8]; }; + virtual const SelectorType& q_poseidon2_quad_internal() const { return this->zero_selectors[9]; }; + virtual const SelectorType& q_poseidon2_quad_internal_terminal() const { return this->zero_selectors[10]; }; + virtual const SelectorType& q_poseidon2_transition_entry() const { return this->zero_selectors[11]; }; RefVector get_gate_selectors() { @@ -52,7 +58,10 @@ class MegaTraceBlock : public ExecutionTraceBlock { q_memory(), q_nnf(), q_poseidon2_external(), - q_poseidon2_internal(), + q_poseidon2_external_initial(), + q_poseidon2_quad_internal(), + q_poseidon2_quad_internal_terminal(), + q_poseidon2_transition_entry(), }; } @@ -65,6 +74,7 @@ class MegaTraceBlock : public ExecutionTraceBlock { q_2(), q_3(), q_4(), + q_5(), q_busread(), q_lookup(), q_arith(), @@ -73,7 +83,10 @@ class MegaTraceBlock : public ExecutionTraceBlock { q_memory(), q_nnf(), q_poseidon2_external(), - q_poseidon2_internal(), + q_poseidon2_external_initial(), + q_poseidon2_quad_internal(), + q_poseidon2_quad_internal_terminal(), + q_poseidon2_transition_entry(), }; } @@ -99,7 +112,7 @@ class MegaTraceBlock : public ExecutionTraceBlock { virtual void set_gate_selector([[maybe_unused]] const fr& value) {} private: - std::array, 9> zero_selectors; + std::array, 12> zero_selectors; }; class MegaTracePublicInputBlock : public MegaTraceBlock {}; @@ -118,7 +131,10 @@ class MegaTraceBusReadBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ 
-139,7 +155,10 @@ class MegaTraceLookupBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ -160,7 +179,10 @@ class MegaTraceArithmeticBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ -181,7 +203,10 @@ class MegaTraceDeltaRangeBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ -202,7 +227,10 @@ class MegaTraceEllipticBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ -223,7 +251,10 @@ class MegaTraceMemoryBlock : public MegaTraceBlock { gate_selector.emplace_back(value); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + 
q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ -244,7 +275,10 @@ class MegaTraceNonNativeFieldBlock : public MegaTraceBlock { q_memory().emplace_back(0); gate_selector.emplace_back(value); q_poseidon2_external().emplace_back(0); - q_poseidon2_internal().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: @@ -254,7 +288,11 @@ class MegaTraceNonNativeFieldBlock : public MegaTraceBlock { class MegaTracePoseidon2ExternalBlock : public MegaTraceBlock { public: SelectorType& q_poseidon2_external() override { return gate_selector; } + SelectorType& q_poseidon2_external_initial() override { return initial_selector; } + const SelectorType& q_poseidon2_external() const override { return gate_selector; } + const SelectorType& q_poseidon2_external_initial() const override { return initial_selector; } + // Activates q_poseidon2_external on the row; used for ordinary external-round rows. void set_gate_selector(const fr& value) override { q_busread().emplace_back(0); @@ -265,17 +303,45 @@ class MegaTracePoseidon2ExternalBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); gate_selector.emplace_back(value); - q_poseidon2_internal().emplace_back(0); + initial_selector.emplace_back(0); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); + } + + // Activates q_poseidon2_external_initial on the row; used for the initial-linear-layer row + // sitting immediately before the first external-round row of each Poseidon2 hash. 
+ void set_initial_gate_selector(const fr& value) + { + q_busread().emplace_back(0); + q_lookup().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + gate_selector.emplace_back(0); + initial_selector.emplace_back(value); + q_poseidon2_quad_internal().emplace_back(0); + q_poseidon2_quad_internal_terminal().emplace_back(0); + q_poseidon2_transition_entry().emplace_back(0); } private: SlabVectorSelector gate_selector; + SlabVectorSelector initial_selector; }; -class MegaTracePoseidon2InternalBlock : public MegaTraceBlock { +class MegaTracePoseidon2QuadInternalBlock : public MegaTraceBlock { public: - SelectorType& q_poseidon2_internal() override { return gate_selector; } - + SelectorType& q_poseidon2_quad_internal() override { return interior_selector; } + SelectorType& q_poseidon2_quad_internal_terminal() override { return terminal_selector; } + SelectorType& q_poseidon2_transition_entry() override { return entry_selector; } + const SelectorType& q_poseidon2_quad_internal() const override { return interior_selector; } + const SelectorType& q_poseidon2_quad_internal_terminal() const override { return terminal_selector; } + const SelectorType& q_poseidon2_transition_entry() const override { return entry_selector; } + + // Activates q_poseidon2_quad_internal on the row; used for interior compressed rows. 
void set_gate_selector(const fr& value) override { q_busread().emplace_back(0); @@ -286,11 +352,50 @@ class MegaTracePoseidon2InternalBlock : public MegaTraceBlock { q_memory().emplace_back(0); q_nnf().emplace_back(0); q_poseidon2_external().emplace_back(0); - gate_selector.emplace_back(value); + q_poseidon2_external_initial().emplace_back(0); + interior_selector.emplace_back(value); + terminal_selector.emplace_back(0); + entry_selector.emplace_back(0); + } + + // Activates q_poseidon2_quad_internal_terminal on the row; used for the terminal compressed row. + void set_terminal_gate_selector(const fr& value) + { + q_busread().emplace_back(0); + q_lookup().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + interior_selector.emplace_back(0); + terminal_selector.emplace_back(value); + entry_selector.emplace_back(0); + } + + // Activates q_poseidon2_transition_entry on the row; used for the standard->compressed entry row. 
+ void set_entry_gate_selector(const fr& value) + { + q_busread().emplace_back(0); + q_lookup().emplace_back(0); + q_arith().emplace_back(0); + q_delta_range().emplace_back(0); + q_elliptic().emplace_back(0); + q_memory().emplace_back(0); + q_nnf().emplace_back(0); + q_poseidon2_external().emplace_back(0); + q_poseidon2_external_initial().emplace_back(0); + interior_selector.emplace_back(0); + terminal_selector.emplace_back(0); + entry_selector.emplace_back(value); } private: - SlabVectorSelector gate_selector; + SlabVectorSelector interior_selector; + SlabVectorSelector terminal_selector; + SlabVectorSelector entry_selector; }; /** @@ -318,14 +423,23 @@ struct MegaTraceBlockData { MegaTraceMemoryBlock memory; MegaTraceNonNativeFieldBlock nnf; MegaTracePoseidon2ExternalBlock poseidon2_external; - MegaTracePoseidon2InternalBlock poseidon2_internal; + MegaTracePoseidon2QuadInternalBlock poseidon2_quad_internal; static constexpr size_t NUM_BLOCKS = 11; std::vector get_labels() const { - return { "ecc_op", "busread", "lookup", "pub_inputs", "arithmetic", "delta_range", - "elliptic", "memory", "nnf", "poseidon2_external", "poseidon2_internal" }; + return { "ecc_op", + "busread", + "lookup", + "pub_inputs", + "arithmetic", + "delta_range", + "elliptic", + "memory", + "nnf", + "poseidon2_external", + "poseidon2_quad_internal" }; } auto get() @@ -340,7 +454,7 @@ struct MegaTraceBlockData { &memory, &nnf, &poseidon2_external, - &poseidon2_internal }); + &poseidon2_quad_internal }); } auto get() const @@ -355,12 +469,15 @@ struct MegaTraceBlockData { &memory, &nnf, &poseidon2_external, - &poseidon2_internal }); + &poseidon2_quad_internal }); } auto get_gate_blocks() const { - return RefArray(std::array{ + // Order must match get_gate_selectors() in MegaFlavor: poseidon2_external appears twice + // (regular + initial) and poseidon2_quad_internal appears three times (interior / + // terminal / entry). 
+ return RefArray(std::array{ &busread, &lookup, &arithmetic, @@ -368,8 +485,11 @@ struct MegaTraceBlockData { &elliptic, &memory, &nnf, - &poseidon2_external, - &poseidon2_internal, + &poseidon2_external, // q_poseidon2_external + &poseidon2_external, // q_poseidon2_external_initial + &poseidon2_quad_internal, // q_poseidon2_quad_internal + &poseidon2_quad_internal, // q_poseidon2_quad_internal_terminal + &poseidon2_quad_internal, // q_poseidon2_transition_entry }); } @@ -406,7 +526,7 @@ class MegaExecutionTraceBlocks : public MegaTraceBlockData { info("memory :\t", this->memory.size()); info("nnf :\t", this->nnf.size()); info("poseidon ext :\t", this->poseidon2_external.size()); - info("poseidon int :\t", this->poseidon2_internal.size()); + info("poseidon quad :\t", this->poseidon2_quad_internal.size()); info(""); info("Total size: ", get_total_size()); } diff --git a/barretenberg/cpp/src/barretenberg/honk/relation_checker.hpp b/barretenberg/cpp/src/barretenberg/honk/relation_checker.hpp index 5bbc044a8b83..3d3fc12786b3 100644 --- a/barretenberg/cpp/src/barretenberg/honk/relation_checker.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/relation_checker.hpp @@ -167,18 +167,74 @@ template <> class RelationChecker : public RelationChecker { { using FF = MegaFlavor::FF; - // Start with all relations that are shared with Ultra - AllSubrelationFailures all_subrelation_failures = RelationChecker::check_all(polynomials, params); + AllSubrelationFailures all_subrelation_failures; + + // Linearly independent relations shared with Ultra --- EXCEPT Poseidon2InternalRelation, + // which is not present in MegaFlavor (Mega covers all internal rounds via the compressed + // quad-internal block). 
+ auto arith = Base::check>(polynomials, params, "UltraArithmetic"); + if (!arith.empty()) { + all_subrelation_failures["UltraArithmetic"] = arith; + } + auto perm = Base::check>(polynomials, params, "UltraPermutation"); + if (!perm.empty()) { + all_subrelation_failures["UltraPermutation"] = perm; + } + auto delta_range = Base::check>(polynomials, params, "DeltaRangeConstraint"); + if (!delta_range.empty()) { + all_subrelation_failures["UltraDeltaRange"] = delta_range; + } + auto elliptic = Base::check>(polynomials, params, "Elliptic"); + if (!elliptic.empty()) { + all_subrelation_failures["UltraElliptic"] = elliptic; + } + auto memory = Base::check>(polynomials, params, "Memory"); + if (!memory.empty()) { + all_subrelation_failures["UltraMemory"] = memory; + } + auto nnf = Base::check>(polynomials, params, "NonNativeField"); + if (!nnf.empty()) { + all_subrelation_failures["NonNativeField"] = nnf; + } + auto p2_ext = Base::check>(polynomials, params, "Poseidon2External"); + if (!p2_ext.empty()) { + all_subrelation_failures["UltraPoseidon2External"] = p2_ext; + } + auto p2_initial_ext = + Base::check>(polynomials, params, "Poseidon2InitialExternal"); + if (!p2_initial_ext.empty()) { + all_subrelation_failures["Poseidon2InitialExternal"] = p2_initial_ext; + } + + // Compressed quad-internal relations (Mega-only, replacing Poseidon2InternalRelation). + auto p2_quad = Base::check>(polynomials, params, "Poseidon2QuadInternal"); + if (!p2_quad.empty()) { + all_subrelation_failures["Poseidon2QuadInternal"] = p2_quad; + } + auto p2_quad_term = Base::check>( + polynomials, params, "Poseidon2QuadInternalTerminal"); + if (!p2_quad_term.empty()) { + all_subrelation_failures["Poseidon2QuadInternalTerminal"] = p2_quad_term; + } + auto p2_entry = + Base::check>(polynomials, params, "Poseidon2TransitionEntry"); + if (!p2_entry.empty()) { + all_subrelation_failures["Poseidon2TransitionEntry"] = p2_entry; + } + + // Linearly-dependent log-derivative lookup (shared with Ultra). 
+ auto logderiv = Base::check, true>(polynomials, params, "LogDerivLookup"); + if (!logderiv.empty()) { + all_subrelation_failures["UltraLogDerivative"] = logderiv; + } - // Mega-specific relations - // There is one relation that does not `have_linearly_dependent`. + // Mega-specific relations. auto mega_ecc_op_queue_subrelation_failures = Base::check>(polynomials, params, "EccOpQueue"); if (!mega_ecc_op_queue_subrelation_failures.empty()) { all_subrelation_failures["MegaEccOpQueue"] = mega_ecc_op_queue_subrelation_failures; } - // There is one one relation that satisfies `have_linearly_dependent` auto mega_databus_lookup_subrelation_failures = Base::check, true>(polynomials, params, "DatabusLookup"); if (!mega_databus_lookup_subrelation_failures.empty()) { diff --git a/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp b/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp index f1803b17f02e..4aa381d53fd0 100644 --- a/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/types/public_inputs_type.hpp @@ -6,6 +6,7 @@ #pragma once +#include "barretenberg/constants.hpp" #include namespace bb { @@ -53,12 +54,15 @@ static constexpr std::size_t INVALID_PUBLIC_INPUTS_SIZE = 0; static constexpr std::size_t MEGA_EXECUTION_TRACE_NUM_WIRES = 4; // Number of bb::fr elements used to represent the public inputs of an INIT/INNER/RESET/TAIL kernel -static constexpr std::size_t KERNEL_PUBLIC_INPUTS_SIZE = - /*pairing_inputs*/ PAIRING_POINTS_SIZE + - /*kernel_return_data*/ GOBLIN_GROUP_PUBLIC_INPUTS_SIZE + - /*app_return_data*/ GOBLIN_GROUP_PUBLIC_INPUTS_SIZE + - /*table_commitments*/ (MEGA_EXECUTION_TRACE_NUM_WIRES * GOBLIN_GROUP_PUBLIC_INPUTS_SIZE) + - /*output_hn_accum_hash*/ FR_PUBLIC_INPUTS_SIZE; +// verifying num_apps application circuits in its accumulation group. 
+constexpr std::size_t kernel_public_inputs_size(std::size_t num_apps) +{ + return /*pairing_inputs*/ PAIRING_POINTS_SIZE + + /*kernel_return_data*/ GOBLIN_GROUP_PUBLIC_INPUTS_SIZE + + /*app_return_data[num_apps]*/ (num_apps * GOBLIN_GROUP_PUBLIC_INPUTS_SIZE) + + /*ecc_op_hash*/ FR_PUBLIC_INPUTS_SIZE + + /*output_hn_accum_hash*/ FR_PUBLIC_INPUTS_SIZE; +} // Number of bb::fr elements used to represent the default public inputs, i.e., the pairing points static constexpr std::size_t DEFAULT_PUBLIC_INPUTS_SIZE = PAIRING_POINTS_SIZE; diff --git a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp index 8951b38af7e0..5248dc9dd6a1 100644 --- a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp @@ -91,18 +91,24 @@ Polynomial HypernovaFoldingProver::batch_polynomials max_end = std::max(max_end, polynomials_to_batch[idx].end_index()); } + // Treat polynomials_to_batch[0] as the destination's starting state (its scalar is implicitly 1). + // The remaining N-1 sources are fused into a single parallel_for via add_scaled_batch. 
+ std::vector> sources; + sources.reserve(N - 1); + for (size_t i = 1; i < N; ++i) { + sources.emplace_back(polynomials_to_batch[i]); + } + auto tail_scalars = std::span(challenges).subspan(1); + + auto sources_span = std::span>(sources); if (min_start < polynomials_to_batch[0].start_index() || max_end > polynomials_to_batch[0].end_index()) { Polynomial result(max_end - min_start, full_batched_size, min_start); result += polynomials_to_batch[0]; - for (size_t idx = 1; idx < N; idx++) { - result.add_scaled(polynomials_to_batch[idx], challenges[idx]); - } + add_scaled_batch(result, sources_span, tail_scalars); return result; } - for (size_t idx = 1; idx < N; idx++) { - polynomials_to_batch[0].add_scaled(polynomials_to_batch[idx], challenges[idx]); - } + add_scaled_batch(polynomials_to_batch[0], sources_span, tail_scalars); return polynomials_to_batch[0]; }; diff --git a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.test.cpp index 65057dff5c11..67a7292b0570 100644 --- a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.test.cpp @@ -189,7 +189,8 @@ class HypernovaFoldingVerifierTests : public ::testing::Test { for (const auto& wire : { "ECC_OP_WIRE_1", "ECC_OP_WIRE_2", "ECC_OP_WIRE_3", "ECC_OP_WIRE_4" }) { manifest.add_entry(round, wire, frs_per_G); } - for (const auto& bus : { "CALLDATA", "SECONDARY_CALLDATA", "RETURN_DATA" }) { + for (const auto& bus : + { "KERNEL_CALLDATA", "FIRST_APP_CALLDATA", "SECOND_APP_CALLDATA", "THIRD_APP_CALLDATA", "RETURN_DATA" }) { manifest.add_entry(round, bus, frs_per_G); manifest.add_entry(round, std::string(bus) + "_READ_COUNTS", frs_per_G); } @@ -206,8 +207,10 @@ class HypernovaFoldingVerifierTests : public ::testing::Test { manifest.add_challenge(round, "alpha"); manifest.add_challenge(round, "HypernovaFoldingProver:gate_challenge"); 
manifest.add_entry(round, "LOOKUP_INVERSES", frs_per_G); - manifest.add_entry(round, "CALLDATA_INVERSES", frs_per_G); - manifest.add_entry(round, "SECONDARY_CALLDATA_INVERSES", frs_per_G); + manifest.add_entry(round, "KERNEL_CALLDATA_INVERSES", frs_per_G); + manifest.add_entry(round, "FIRST_APP_CALLDATA_INVERSES", frs_per_G); + manifest.add_entry(round, "SECOND_APP_CALLDATA_INVERSES", frs_per_G); + manifest.add_entry(round, "THIRD_APP_CALLDATA_INVERSES", frs_per_G); manifest.add_entry(round, "RETURN_DATA_INVERSES", frs_per_G); manifest.add_entry(round, "Z_PERM", frs_per_G); round++; @@ -226,7 +229,7 @@ class HypernovaFoldingVerifierTests : public ::testing::Test { for (size_t i = 0; i < MegaFlavor::NUM_SHIFTED_ENTITIES - 1; ++i) { manifest.add_challenge(round, "shifted_challenge_" + std::to_string(i)); } - manifest.add_entry(round, "Sumcheck:evaluations", 57); + manifest.add_entry(round, "Sumcheck:evaluations", MegaFlavor::NUM_ALL_ENTITIES); round++; // Round 25: Sumcheck:alpha + MLB accumulator data (Sumcheck:alpha is consecutive challenge) diff --git a/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp b/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp index e54ce8da56ca..e4d377ae683f 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp +++ b/barretenberg/cpp/src/barretenberg/numeric/random/engine.cpp @@ -6,7 +6,9 @@ #include "engine.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/throw_or_abort.hpp" #include +#include #include #include #include @@ -82,6 +84,9 @@ template std::array std::array 0) { current_offset += read_bytes; bytes_left -= static_cast(read_bytes); + continue; } + // read_bytes <= 0: failure or EOF. On platforms that report EINTR via errno, retry a + // bounded number of times; any other failure (including read_bytes == 0, e.g. a + // sealed/stubbed urandom returning EOF) is fatal. 
+#if !defined(_WIN32) + if (read_bytes == -1 && errno == EINTR && eintr_retries++ < MAX_EINTR_RETRIES) { + continue; + } +#endif + throw_or_abort("CSPRNG read failed: cannot retrieve entropy from system source"); } random_buffer_wrapper.offset = 0; } diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128.test.cpp b/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128.test.cpp index ecf457ad9756..c3dc53198747 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128.test.cpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128.test.cpp @@ -78,6 +78,9 @@ TEST(uint128, DivAndMod) b.data[3] = (i > 0) ? 0 : b.data[3]; b.data[2] = (i > 1) ? 0 : b.data[2]; b.data[1] = (i > 2) ? 0 : b.data[1]; + if (b == 0) { + b = 1; + } uint128_t q = a / b; uint128_t r = a % b; @@ -89,18 +92,10 @@ TEST(uint128, DivAndMod) } uint128_t a = engine.get_random_uint128(); - uint128_t b = 0; - + uint128_t b = a; uint128_t q = a / b; uint128_t r = a % b; - EXPECT_EQ(q, uint128_t(0)); - EXPECT_EQ(r, uint128_t(0)); - - b = a; - q = a / b; - r = a % b; - EXPECT_EQ(q, uint128_t(1)); EXPECT_EQ(r, uint128_t(0)); } diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128_impl.hpp b/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128_impl.hpp index 2d5214b63f8d..ea12c3748bcc 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint128/uint128_impl.hpp @@ -9,6 +9,7 @@ #include "../bitop/get_msb.hpp" #include "./uint128.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/throw_or_abort.hpp" namespace bb::numeric { constexpr std::pair uint128_t::mul_wide(const uint32_t a, const uint32_t b) @@ -83,7 +84,10 @@ constexpr uint32_t uint128_t::mac_discard_hi(const uint32_t a, constexpr std::pair uint128_t::divmod(const uint128_t& b) const { - if (*this == 0 || b == 0) { + if (b == 0) { + throw_or_abort("uint128_t::divmod: divisor must be nonzero"); 
+ } + if (*this == 0) { return { 0, 0 }; } if (b == 1) { @@ -163,6 +167,9 @@ constexpr std::pair uint128_t::mul_extended(const uint128_ */ constexpr uint128_t uint128_t::slice(const uint64_t start, const uint64_t end) const { + // Plain assert is used here because BB_ASSERT_DEBUG defines a std::ostringstream, which is + // a non-literal type and therefore disallowed in the body of a constexpr function before C++23. + assert(start <= end); const uint64_t range = end - start; const uint128_t mask = (range == 128) ? -uint128_t(1) : (uint128_t(1) << range) - 1; return ((*this) >> start) & mask; diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp index f679dfd1c4b5..90e6468dea2f 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.hpp @@ -63,7 +63,7 @@ class alignas(32) uint256_t { {} constexpr uint256_t(uint256_t&& other) noexcept = default; - explicit constexpr uint256_t(const std::string& input) noexcept + explicit constexpr uint256_t(const std::string& input) { /* Quick and dirty conversion from a single character to its hex equivelent */ constexpr auto HexCharToInt = [](uint8_t Input) { diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.test.cpp b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.test.cpp index 710d3e0d83f6..b1c2903e71b7 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.test.cpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256.test.cpp @@ -97,6 +97,9 @@ TEST(uint256, DivAndMod) b.data[3] = (i > 0) ? 0 : b.data[3]; b.data[2] = (i > 1) ? 0 : b.data[2]; b.data[1] = (i > 2) ? 
0 : b.data[1]; + if (b == 0) { + b = 1; + } uint256_t q = a / b; uint256_t r = a % b; @@ -108,18 +111,10 @@ TEST(uint256, DivAndMod) } uint256_t a = engine.get_random_uint256(); - uint256_t b = 0; - + uint256_t b = a; uint256_t q = a / b; uint256_t r = a % b; - EXPECT_EQ(q, uint256_t(0)); - EXPECT_EQ(r, uint256_t(0)); - - b = a; - q = a / b; - r = a % b; - EXPECT_EQ(q, uint256_t(1)); EXPECT_EQ(r, uint256_t(0)); } diff --git a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp index 21b706759fe4..6e0cecdac05e 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uint256/uint256_impl.hpp @@ -134,7 +134,10 @@ constexpr std::array uint256_t::wasm_convert(const uin #endif constexpr std::pair uint256_t::divmod(const uint256_t& b) const { - if (*this == 0 || b == 0) { + if (b == 0) { + throw_or_abort("uint256_t::divmod: divisor must be nonzero"); + } + if (*this == 0) { return { 0, 0 }; } if (b == 1) { @@ -189,7 +192,10 @@ constexpr std::pair uint256_t::divmod(const uint256_t& b) */ constexpr std::pair uint256_t::divmod(uint64_t b) const { - if (*this == 0 || b == 0) { + if (b == 0) { + throw_or_abort("uint256_t::divmod: divisor must be nonzero"); + } + if (*this == 0) { return { 0, 0 }; } if (b == 1) { @@ -329,7 +335,9 @@ constexpr std::pair uint256_t::mul_extended(const uint256_ */ constexpr uint256_t uint256_t::slice(const uint64_t start, const uint64_t end) const { - assert(start < end); + // Plain assert is used here because BB_ASSERT_DEBUG defines a std::ostringstream, which is + // a non-literal type and therefore disallowed in the body of a constexpr function before C++23. + assert(start <= end); const uint64_t range = end - start; const uint256_t mask = (range == 256) ? 
-uint256_t(1) : (uint256_t(1) << range) - 1; return ((*this) >> start) & mask; diff --git a/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx.hpp b/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx.hpp index 9e95aee67ba1..5bf55babe908 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx.hpp @@ -80,6 +80,9 @@ template class uintx { */ constexpr uintx slice(const uint64_t start, const uint64_t end) const { + // Plain assert is used here because BB_ASSERT_DEBUG defines a std::ostringstream, which is + // a non-literal type and therefore disallowed in the body of a constexpr function before C++23. + assert(start <= end); const uint64_t range = end - start; const uintx mask = (uintx(1) << range) - 1; return ((*this) >> start) & mask; diff --git a/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp b/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp index b4faf8a927c2..7ec76dfccb7b 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/uintx/uintx_impl.hpp @@ -63,7 +63,8 @@ std::pair, uintx> uintx::divmod_base(cons /** * Computes invmod. Only for internal usage within the class. * This is an insecure version of the algorithm that doesn't take into account the 0 case and cases when modulus is - *close to the top margin. + * close to the top margin. The result is only meaningful when *this and modulus are coprime; non-coprime inputs + * return an unspecified value (callers must guarantee coprimality or guard against the result being used). 
* * @param modulus The modulus of the ring * @@ -104,9 +105,7 @@ template uintx uintx::unsafe_invmod(cons template uintx uintx::invmod(const uintx& modulus) const { BB_ASSERT((*this) != 0); - if (modulus == 0) { - return 0; - } + BB_ASSERT(modulus != 0); if (modulus.get_msb() >= (2 * base_uint::length() - 1)) { uintx> a_expanded(*this); uintx> modulus_expanded(modulus); diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp index 7e05175a3518..d2c6fc003ba8 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.hpp @@ -31,8 +31,7 @@ namespace bb { * - Updates the native accumulator (shadow computation for verification) * - Appends to both ECCVM and Ultra tables * - * Tables grow via prepending subtables (one per circuit in an IVC). The deque-based storage avoids - * expensive memory reallocation. See ecc_ops_table.hpp for details. + * Tables grow by appending subtables (one per circuit in an IVC). See ecc_ops_table.hpp for details. * * TODO(https://github.com/AztecProtocol/barretenberg/issues/1267): consider possible efficiency improvements */ @@ -50,13 +49,14 @@ class ECCOpQueue { EccvmOpsTable eccvm_ops_table; // table of ops in the ECCVM format UltraEccOpsTable ultra_ops_table; // table of ops in the Ultra-arithmetization format - // Storage for the reconstructed eccvm ops table in contiguous memory. (Intended to be constructed once and for all - // prior to ECCVM construction to avoid repeated prepending of subtables in physical memory). + // Storage for the reconstructed eccvm ops table in contiguous memory. (Intended to be constructed once and for + // all prior to ECCVM construction to avoid repeated traversal of the per-subtable storage.) std::vector eccvm_ops_reconstructed; - // Storage for the reconstructed ultra ops table in contiguous memory. 
(Intended to be constructed once and for all - // prior to Translator circuit construction to avoid repeated prepending of subtables in physical memory). - std::vector ultra_ops_reconstructed; + // Storage for the reconstructed ultra ops tables in contiguous memory. (Intended to be constructed once and for + // all prior to Translator circuit construction to avoid repeated traversal of the per-subtable storage.) + std::vector ultra_ops_zk_reconstructed; // Chonk table + std::vector ultra_ops_no_zk_reconstructed; // AVM table // Tracks number of muls and size of eccvm in real time as the op queue is updated EccvmRowTracker eccvm_row_tracker; @@ -78,6 +78,8 @@ class ECCOpQueue { ultra_ops_table.create_new_subtable(); } + size_t num_subtables() const { return eccvm_ops_table.num_subtables(); } + size_t get_current_subtable_size() const { return ultra_ops_table.get_current_subtable_size(); } /** @@ -89,25 +91,47 @@ class ECCOpQueue { size_t get_append_offset() const { constexpr size_t reserved_op_slots = UltraEccOpsTable::APPEND_TRACE_OFFSET / UltraEccOpsTable::NUM_ROWS_PER_OP; - return OP_QUEUE_SIZE - get_current_subtable_size() - reserved_op_slots; + constexpr size_t zk_op_slots = UltraEccOpsTable::ZK_ULTRA_OPS / UltraEccOpsTable::NUM_ROWS_PER_OP; + return OP_QUEUE_SIZE - get_current_subtable_size() - reserved_op_slots - zk_op_slots; + } + + void merge() + { + eccvm_ops_table.merge(); + ultra_ops_table.merge(); } - void merge(MergeSettings settings = MergeSettings::PREPEND, std::optional ultra_fixed_offset = std::nullopt) + void merge_fixed_append(size_t ultra_fixed_offset) { - eccvm_ops_table.merge(settings); - ultra_ops_table.merge(settings, ultra_fixed_offset); + eccvm_ops_table.merge(); + ultra_ops_table.merge_with_fixed_append_offset(ultra_fixed_offset); + } + + std::array, ULTRA_TABLE_WIDTH> construct_zk_columns() + { + auto [column_polynomials, hiding_op] = ultra_ops_table.construct_zk_columns(); + this->hiding_op_for_eccvm = hiding_op; + this->has_hiding_op = 
true; + + return column_polynomials; + } + + std::vector, ULTRA_TABLE_WIDTH>> construct_subtable_columns() const + { + return ultra_ops_table.construct_subtable_columns(); } // Construct column polynomials for the full aggregate ultra ops table - std::array, ULTRA_TABLE_WIDTH> construct_ultra_ops_table_columns() const + std::array, ULTRA_TABLE_WIDTH> construct_ultra_ops_table_columns( + const bool include_zk_ops = true) const { - return ultra_ops_table.construct_table_columns(); + return ultra_ops_table.construct_table_columns(include_zk_ops); } - // Construct column polynomials for the aggregate table excluding the most recent subtable - std::array, ULTRA_TABLE_WIDTH> construct_previous_ultra_ops_table_columns() const + // Construct column polynomials for the aggregate table up to and including the tail subtable. + std::array, ULTRA_TABLE_WIDTH> construct_table_columns_up_to_tail() const { - return ultra_ops_table.construct_previous_table_columns(); + return ultra_ops_table.construct_table_columns_up_to_tail(); } // Construct column polynomials for the most recently merged subtable @@ -119,19 +143,26 @@ class ECCOpQueue { // Reconstruct the full table of eccvm ops in contiguous memory from the independent subtables void construct_full_eccvm_ops_table() { eccvm_ops_reconstructed = eccvm_ops_table.get_reconstructed(); } - // Reconstruct the full table of ultra ops in contiguous memory from the independent subtables - void construct_full_ultra_ops_table() { ultra_ops_reconstructed = ultra_ops_table.get_reconstructed(); } + // Reconstruct the ZK-prefixed full table of ultra ops in contiguous memory from the independent subtables. + void construct_zk_reconstructed_ultra_ops_table() + { + ultra_ops_zk_reconstructed = ultra_ops_table.get_zk_reconstructed_ultra_ops(); + } + // Reconstruct the non-ZK full table of ultra ops in contiguous memory from the independent subtables. 
+ void construct_no_zk_reconstructed_ultra_ops_table() + { + ultra_ops_no_zk_reconstructed = ultra_ops_table.get_no_zk_reconstructed_ultra_ops(); + } + + // Excludes the optional ZK prefix; see UltraEccOpsTable::num_ultra_rows size_t get_ultra_ops_table_num_rows() const { return ultra_ops_table.num_ultra_rows(); } size_t get_ultra_ops_count() const { return ultra_ops_table.num_ops(); } // actual operation count without padding - size_t get_current_ultra_ops_subtable_num_rows() const { return ultra_ops_table.current_ultra_subtable_size(); } - size_t get_previous_ultra_ops_table_num_rows() const { return ultra_ops_table.previous_ultra_table_size(); } - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1339): Consider making the ultra and eccvm ops - // getters more memory efficient + // Excludes the optional ZK prefix, same as get_ultra_ops_table_num_rows. + size_t get_ultra_ops_table_num_rows_up_to_tail() const { return ultra_ops_table.ultra_table_size_up_to_tail(); } // Get the full table of ECCVM ops in contiguous memory; construct it if it has not been constructed already. - // The hiding op (set via append_hiding_op) is always prepended at index 0. + // The hiding op is always prepended at index 0. 
std::vector& get_eccvm_ops() { if (eccvm_ops_reconstructed.empty()) { @@ -145,12 +176,20 @@ class ECCOpQueue { return eccvm_ops_reconstructed; } - std::vector& get_ultra_ops() + std::vector& get_no_zk_reconstructed_ultra_ops() + { + if (ultra_ops_no_zk_reconstructed.empty()) { + construct_no_zk_reconstructed_ultra_ops_table(); + } + return ultra_ops_no_zk_reconstructed; + } + + std::vector& get_zk_reconstructed_ultra_ops() { - if (ultra_ops_reconstructed.empty()) { - construct_full_ultra_ops_table(); + if (ultra_ops_zk_reconstructed.empty()) { + construct_zk_reconstructed_ultra_ops_table(); } - return ultra_ops_reconstructed; + return ultra_ops_zk_reconstructed; } /** @@ -249,9 +288,8 @@ class ECCOpQueue { /** * @brief Writes a no-op to the ultra ops table but adds no eccvm operations. * - * @details Used by the tail kernel to ensure the op queue wires in Translator are shiftable: the no-op - * contributes two zero rows at the start of the tail subtable, which ends up at the top of the final aggregate - * table (because the tail is prepended last), giving the Translator's op queue wires two leading zero rows. + * @details Adds two zero rows (one no-op = NUM_ROWS_PER_OP rows) to the ultra ops table. Translator needs two + * leading zero rows for polynomial shiftability. */ UltraOp no_op_ultra_only() { @@ -319,7 +357,8 @@ class ECCOpQueue { * on-curve check is similarly gated. q_reset = 1 is required for Translator compatibility (only opcodes {0,3,4,8} * are allowed). * - * This method should be called ONCE per IVC in the tail kernel, after the random non-ops. + * This method writes the same hiding op to both the ECCVM and Ultra tables in one step, ensuring the two + * representations agree (required for the translation check). 
* * @param Px Random field element (not necessarily a valid x-coordinate on BN254) * @param Py Random field element (not necessarily a valid y-coordinate on BN254) @@ -327,34 +366,10 @@ class ECCOpQueue { */ UltraOp append_hiding_op(const Fq& Px, const Fq& Py) { - // Create an ECCVM operation with q_eq = 1, q_reset = 1 (opcode = 3) and the random Px, Py values. - // We construct the base_point directly with the raw coordinates - it may not be on the curve. - // Note: reset = true is required for Translator compatibility (only opcodes {0,3,4,8} are allowed) - EccOpCode op_code{ .eq = true, .reset = true }; // q_eq = 1, q_reset = 1 - Point base_point; - base_point.x = Px; - base_point.y = Py; - // Note: We don't call is_point_at_infinity() or any curve operations on this point - - // Store the hiding op for ECCVM - it will be prepended to the front during reconstruction (index 0 -> row 1) - hiding_op_for_eccvm = ECCVMOperation{ .op_code = op_code, .base_point = base_point }; - has_hiding_op = true; + auto [ultra_op, eccvm_op] = UltraEccOpsTable::make_hiding_op_pair(Px, Py); - // Push to Ultra ops through normal flow (appends to current subtable) - // Decompose Px, Py (Fq) into hi-lo chunks (Fr) - const size_t CHUNK_SIZE = 2 * stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION; - uint256_t x_256(Px); - uint256_t y_256(Py); - UltraOp ultra_op{ - .op_code = op_code, - .x_lo = Fr(x_256.slice(0, CHUNK_SIZE)), - .x_hi = Fr(x_256.slice(CHUNK_SIZE, CHUNK_SIZE * 2)), - .y_lo = Fr(y_256.slice(0, CHUNK_SIZE)), - .y_hi = Fr(y_256.slice(CHUNK_SIZE, CHUNK_SIZE * 2)), - .z_1 = Fr(0), - .z_2 = Fr(0), - .return_is_infinity = false, - }; + hiding_op_for_eccvm = eccvm_op; + has_hiding_op = true; ultra_ops_table.push(ultra_op); // Do NOT update the accumulator - the hiding op doesn't perform any actual EC computation @@ -363,17 +378,15 @@ class ECCOpQueue { private: // === Hiding Op State === - // The hiding op is handled asymmetrically but ends up at the same functional relative position in 
both: - // - ECCVM: Stored here and prepended at index 0 during get_eccvm_ops() reconstruction - // - Ultra: Pushed to ultra_ops_table at index 4 (after 1 no-op + 3 random padding ops) - // - // Both end up with hiding op as the first "real" op because: - // - ECCVM: prepending puts it at index 0; padding ops don't exist in ECCVM table - // - Translator: skips first 4 Ultra ops (padding), so accumulation starts at the hiding op - // - // This alignment is required for the translation check (ECCVM and Translator must compute - // the same accumulated_result). ECCVM places it at row 1 (lagrange_second) where on-curve - // and eq constraints are gated off, allowing non-curve (x, y) values. + // The hiding op exists in both the ECCVM and Ultra tables (same Px, Py values, opcode q_eq=q_reset=1) so the + // translation check holds. It is set by exactly one of two entry points, depending on the proving flow: + // - Chonk: UltraEccOpsTable::construct_zk_columns() builds the full ZK prefix (1 no-op + 3 random + 1 hiding) + // at the front of the reconstructed Ultra table; the hiding op lands at index 4. + // - Goblin AVM: append_hiding_op() pushes the Ultra side into the current subtable directly, with no surrounding + // prefix. + // In both cases the ECCVM side is stored here and prepended to the reconstructed ECCVM table at index 0 by + // get_eccvm_ops(), placing it at row 1 (lagrange_second) where the on-curve and eq constraints are gated off + // so that non-curve (x, y) values are accepted. 
ECCVMOperation hiding_op_for_eccvm; bool has_hiding_op = false; diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp index 0629ecc6b6b4..69a82589214b 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_op_queue.test.cpp @@ -30,42 +30,33 @@ class ECCOpQueueTest { * @brief Check that the table column polynomials reconstructed by the op queue have the correct relationship * */ - static void check_table_column_polynomials(const std::shared_ptr& op_queue, - MergeSettings settings, - std::optional ultra_fixed_offset = std::nullopt) + static void check_final_table_column_polynomials(const std::shared_ptr& op_queue, + std::optional ultra_fixed_offset = std::nullopt) { - // Construct column polynomials corresponding to the full table (T), the previous table (T_prev), and the - // current subtable (t_current) + // Construct column polynomials corresponding to the full table (T), the table up to and including the tail + // (T_tail, the second to last table), and the current subtable (t_current). T and T_tail both include the ZK + // preamble. 
auto table_polynomials = op_queue->construct_ultra_ops_table_columns(); - auto prev_table_polynomials = op_queue->construct_previous_ultra_ops_table_columns(); + auto tail_table_polynomials = op_queue->construct_table_columns_up_to_tail(); auto subtable_polynomials = op_queue->construct_current_ultra_ops_subtable_columns(); - // Check T(x) = t_current(x) + x^k * T_prev(x) at a single random challenge point + // Check T(x) = T_tail(x) + x^k * t_current(x) at a single random challenge point Fr eval_challenge = Fr::random_element(); - for (auto [table_poly, prev_table_poly, subtable_poly] : - zip_view(table_polynomials, prev_table_polynomials, subtable_polynomials)) { + for (auto [table_poly, tail_table_poly, subtable_poly] : + zip_view(table_polynomials, tail_table_polynomials, subtable_polynomials)) { const Fr table_eval = table_poly.evaluate(eval_challenge); // T(x) - // Check that the previous table polynomial is constructed correctly according to the merge settings by - // checking the identity at a single point - if (settings == MergeSettings::PREPEND) { - // T(x) = t_current(x) + x^k * T_prev(x), where k is the size of the current subtable - const size_t current_subtable_size = op_queue->get_current_ultra_ops_subtable_num_rows(); // k - const Fr subtable_eval = subtable_poly.evaluate(eval_challenge); // t_current(x) - const Fr shifted_previous_table_eval = prev_table_poly.evaluate(eval_challenge) * - eval_challenge.pow(current_subtable_size); // x^k * T_prev(x) - EXPECT_EQ(table_eval, subtable_eval + shifted_previous_table_eval); - } else { - // APPEND merge performs concatenation directly to end of previous table or at a specified fixed offset - const size_t prev_table_size = op_queue->get_previous_ultra_ops_table_num_rows(); // k - const size_t shift_magnitude = ultra_fixed_offset.has_value() - ? 
ultra_fixed_offset.value() * bb::UltraEccOpsTable::NUM_ROWS_PER_OP - : prev_table_size; // k - // T(x) = T_prev(x) + x^k * t_current(x), where k is the shift magnitude - const Fr prev_table_eval = prev_table_poly.evaluate(eval_challenge); // T_prev(x) - const Fr shifted_subtable_eval = - subtable_poly.evaluate(eval_challenge) * eval_challenge.pow(shift_magnitude); // x^k * t_current(x) - EXPECT_EQ(table_eval, shifted_subtable_eval + prev_table_eval); - } + // APPEND merge performs concatenation directly to end of previous table or at a specified fixed offset. + const size_t tail_table_size = op_queue->get_ultra_ops_table_num_rows_up_to_tail(); // k + const size_t shift_magnitude = + ultra_fixed_offset.has_value() + ? bb::UltraEccOpsTable::ZK_ULTRA_OPS + + (ultra_fixed_offset.value() * bb::UltraEccOpsTable::NUM_ROWS_PER_OP) + : tail_table_size; // k + // T(x) = T_tail(x) + x^k * t_current(x), where k is the shift magnitude. + const Fr tail_table_eval = tail_table_poly.evaluate(eval_challenge); // T_tail(x) + const Fr shifted_subtable_eval = + subtable_poly.evaluate(eval_challenge) * eval_challenge.pow(shift_magnitude); // x^k * t_current(x) + EXPECT_EQ(table_eval, shifted_subtable_eval + tail_table_eval); } } @@ -74,13 +65,18 @@ class ECCOpQueueTest { * * @param op_queue */ - static void check_opcode_consistency_with_eccvm(const std::shared_ptr& op_queue) + static void check_opcode_consistency_with_eccvm(const std::shared_ptr& op_queue, + const bool include_zk_ops = false) { - auto ultra_table = op_queue->get_ultra_ops(); + auto ultra_table = + include_zk_ops ? 
op_queue->get_zk_reconstructed_ultra_ops() : op_queue->get_no_zk_reconstructed_ultra_ops(); auto eccvm_table = op_queue->get_eccvm_ops(); size_t j = 0; for (const auto& ultra_op : ultra_table) { + if (ultra_op.op_code.is_random_op) { + continue; + } if (ultra_op.op_code.value() == 0) { continue; } @@ -130,9 +126,8 @@ TEST(ECCOpQueueTest, InternalAccumulatorCorrectness) EXPECT_TRUE(op_queue.get_accumulator().is_point_at_infinity()); } -// Check that the ECC op queue correctly constructs the table column polynomials for the full table, the previous table, -// and the current subtable via successive prepending of subtables -TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependOnly) +// Check that the ECC op queue correctly reconstructs subtables via successive appending of subtables. +TEST(ECCOpQueueTest, ColumnPolynomialConstruction) { using Fq = curve::Grumpkin::ScalarField; @@ -143,81 +138,43 @@ TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependOnly) const size_t NUM_SUBTABLES = 5; for (size_t i = 0; i < NUM_SUBTABLES; ++i) { op_queue->initialize_new_subtable(); - // For prepend: the last subtable becomes the first in the final table. - // Add hiding op at the START of the last subtable so it lands at index 0. - if (i == NUM_SUBTABLES - 1) { + // Add hiding op to the first subtable so the Ultra and ECCVM opcode streams have matching order. 
+ if (i == 0) { op_queue->append_hiding_op(Fq::random_element(), Fq::random_element()); } ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue, /*initialize=*/false); - MergeSettings settings = MergeSettings::PREPEND; - op_queue->merge(settings); - ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); + op_queue->merge(); } + op_queue->construct_zk_columns(); ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue); } -TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependThenAppend) +TEST(ECCOpQueueTest, ColumnPolynomialConstructionUpToTailWithZkThenFixedAppend) { - using Fq = curve::Grumpkin::ScalarField; - // Instantiate an EccOpQueue and populate it with several subtables of ECC ops auto op_queue = std::make_shared(); - // Check that the table polynomials have the correct form after each subtable concatenation - const size_t NUM_SUBTABLES = 2; - for (size_t i = 0; i < NUM_SUBTABLES; ++i) { + // Construct app/kernel subtables followed by the tail subtable. + const size_t NUM_SUBTABLES_THROUGH_TAIL = 3; + for (size_t i = 0; i < NUM_SUBTABLES_THROUGH_TAIL; ++i) { op_queue->initialize_new_subtable(); - // For prepend: the last prepended subtable (i=1) becomes first in the final table. - // Add hiding op at the START of that subtable so it lands at index 0. 
- if (i == NUM_SUBTABLES - 1) { - op_queue->append_hiding_op(Fq::random_element(), Fq::random_element()); - } ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue, /*initialize=*/false); - MergeSettings settings = MergeSettings::PREPEND; - op_queue->merge(settings); - ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); + op_queue->merge(); } - // Do a single append operation (goes at end, after prepended subtables) - ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); - MergeSettings settings = MergeSettings::APPEND; - op_queue->merge(settings); - ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); - - ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue); -} - -TEST(ECCOpQueueTest, ColumnPolynomialConstructionPrependThenAppendAtFixedOffset) -{ - using Fq = curve::Grumpkin::ScalarField; - - // Instantiate an EccOpQueue and populate it with several subtables of ECC ops - auto op_queue = std::make_shared(); - - // Check that the table polynomials have the correct form after each subtable concatenation - const size_t NUM_SUBTABLES = 2; - for (size_t i = 0; i < NUM_SUBTABLES; ++i) { - op_queue->initialize_new_subtable(); - // For prepend: the last prepended subtable (i=1) becomes first in the final table. - // Add hiding op at the START of that subtable so it lands at index 0. - if (i == NUM_SUBTABLES - 1) { - op_queue->append_hiding_op(Fq::random_element(), Fq::random_element()); - } - ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue, /*initialize=*/false); - MergeSettings settings = MergeSettings::PREPEND; - op_queue->merge(settings); - ECCOpQueueTest::check_table_column_polynomials(op_queue, settings); - } + op_queue->construct_zk_columns(); - // Do a single append operation at a fixed offset (sufficiently large as to not overlap with the existing table) + // Do a single append operation at a fixed offset for the hiding kernel subtable. 
const size_t ultra_fixed_offset = op_queue->get_ultra_ops_table_num_rows() + 20; ECCOpQueueTest::populate_an_arbitrary_subtable_of_ops(op_queue); - MergeSettings settings = MergeSettings::APPEND; - op_queue->merge(settings, ultra_fixed_offset); - ECCOpQueueTest::check_table_column_polynomials(op_queue, settings, ultra_fixed_offset); + op_queue->merge_fixed_append(ultra_fixed_offset); + auto table_up_to_tail = op_queue->construct_table_columns_up_to_tail(); + EXPECT_EQ(table_up_to_tail[0].size(), + bb::UltraEccOpsTable::ZK_ULTRA_OPS + op_queue->get_ultra_ops_table_num_rows_up_to_tail()); + ECCOpQueueTest::check_final_table_column_polynomials(op_queue, ultra_fixed_offset); - ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue); + ECCOpQueueTest::check_opcode_consistency_with_eccvm(op_queue, /*include_zk_ops=*/true); } // Verify correct handling of point at infinity in add and mul operations @@ -305,9 +262,10 @@ TEST(ECCOpQueueTest, HidingOpPositionConsistency) op_queue->eq_and_reset(); op_queue->merge(); - // Get the reconstructed tables + // Get the reconstructed ECCVM table and raw Ultra table. This test is checking the explicitly appended hiding op + // in the raw subtable, not the Chonk ZK-prefixed reconstruction. 
const auto& eccvm_ops = op_queue->get_eccvm_ops(); - const auto& ultra_ops = op_queue->get_ultra_ops(); + const auto& ultra_ops = op_queue->get_no_zk_reconstructed_ultra_ops(); // === ECCVM Table Checks === // Hiding op should be at index 0 (prepended during get_eccvm_ops()) @@ -318,7 +276,7 @@ TEST(ECCOpQueueTest, HidingOpPositionConsistency) EXPECT_EQ(eccvm_hiding_op.base_point.y, hiding_y); // === Ultra Table Checks === - // Without tail kernel padding, the hiding op should be at index 2: + // By construction, the hiding op should be at index 2: // index 0: add_accumulate(P1) // index 1: mul_accumulate(P2, z) // index 2: append_hiding_op (eq+reset opcode) diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp index 176a054cd5c0..2db17c6c4d93 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.hpp @@ -12,14 +12,13 @@ #include "barretenberg/eccvm/eccvm_builder_types.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include "barretenberg/stdlib/primitives/bigfield/constants.hpp" -#include +#include namespace bb { -/** - * @brief The MergeSettings define whether an current subtable will be added at the beginning (PREPEND) or at the end - * (APPEND) of the EccOpQueue. - */ -enum MergeSettings { PREPEND, APPEND }; +// Constants determining the structure of the zk columns. These must match the structure expected by Translator. +static constexpr size_t ECC_NUM_RANDOM_OPS_START = 3; +static constexpr size_t ECC_NUM_NO_OPS_START = 1; +static constexpr size_t ECC_NUM_HIDING_OPS_START = 1; /** * @brief Defines the opcodes for ECC operations used in both the Ultra and ECCVM formats. There are three opcodes that @@ -109,16 +108,14 @@ struct ECCVMOperation { /** * @brief A table of ECC operations - * @details The table is constructed via concatenation of subtables of ECC operations. 
The table concatentation protocol - * (Merge protocol) requires that the concatenation be achieved via PRE-pending successive tables. To avoid the need for - * expensive memory reallocations associated with physically prepending, the subtables are stored as a std::deque that - * can be traversed to reconstruct the columns of the aggregate tables as needed (e.g. in corresponding polynomials). + * @details The table is constructed via append-only concatenation of subtables of ECC operations. Subtables are stored + * in chronological order. * * @tparam OpFormat Format of the ECC operations stored in the table */ template class EccOpsTable { using Subtable = std::vector; - std::deque table; + std::vector table; Subtable current_subtable; // used to store the current subtable before it is added to the table public: size_t size() const @@ -183,19 +180,9 @@ template class EccOpsTable { return reconstructed_table; } - void merge(MergeSettings settings = MergeSettings::PREPEND) + void merge() { - if (current_subtable.empty()) { - return; // nothing to merge - } - - // Based on merge settings add the current subtable to either the beginning or end of the full table - if (settings == MergeSettings::PREPEND) { - table.push_front(std::move(current_subtable)); - } else { - table.push_back(std::move(current_subtable)); - } - + table.push_back(std::move(current_subtable)); current_subtable.clear(); // clear the current subtable after merging BB_ASSERT(current_subtable.empty(), "current subtable should be empty after merging. 
Check the merge logic."); } @@ -224,6 +211,8 @@ class UltraEccOpsTable { public: static constexpr size_t TABLE_WIDTH = 4; // dictated by the number of wires in the Ultra arithmetization static constexpr size_t NUM_ROWS_PER_OP = 2; // A single ECC op is split across two width-4 rows + static constexpr size_t ZK_ULTRA_OPS = + (ECC_NUM_RANDOM_OPS_START + ECC_NUM_NO_OPS_START + ECC_NUM_HIDING_OPS_START) * NUM_ROWS_PER_OP; // Leading-zero preamble on the APPEND subtable. Matches the appender flavor's TRACE_OFFSET, i.e. the // number of leading zeros carried by its ecc_op_wire polynomial commitments. Sourced from @@ -232,158 +221,262 @@ class UltraEccOpsTable { static constexpr size_t APPEND_TRACE_OFFSET = NUM_DISABLED_ROWS_IN_SUMCHECK; static_assert(APPEND_TRACE_OFFSET % NUM_ROWS_PER_OP == 0); + /** + * @brief Build a hiding op as paired Ultra and ECCVM operations from raw Fq coordinates. + * + * @details Uses opcode q_eq=q_reset=1 (value 3) for Translator compatibility. The base point is constructed + * directly from (Px, Py); these are not required to lie on the curve since on-curve and equality constraints + * are gated off at the row where the hiding op lands (lagrange_second in ECCVM). z_1 and z_2 are zero in the + * Ultra representation since the hiding op performs no scalar multiplication. 
+ */ + static std::pair make_hiding_op_pair(const curve::BN254::BaseField& Px, + const curve::BN254::BaseField& Py) + { + using Fr = curve::BN254::ScalarField; + using Point = curve::BN254::AffineElement; + + EccOpCode op_code{ .eq = true, .reset = true }; + Point base_point; + base_point.x = Px; + base_point.y = Py; + + constexpr size_t CHUNK_SIZE = 2 * stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION; + uint256_t x_256(Px); + uint256_t y_256(Py); + UltraOp ultra_op{ + .op_code = op_code, + .x_lo = Fr(x_256.slice(0, CHUNK_SIZE)), + .x_hi = Fr(x_256.slice(CHUNK_SIZE, CHUNK_SIZE * 2)), + .y_lo = Fr(y_256.slice(0, CHUNK_SIZE)), + .y_hi = Fr(y_256.slice(CHUNK_SIZE, CHUNK_SIZE * 2)), + .z_1 = Fr(0), + .z_2 = Fr(0), + .return_is_infinity = false, + }; + ECCVMOperation eccvm_op{ .op_code = op_code, .base_point = base_point }; + return { ultra_op, eccvm_op }; + } + private: using Curve = curve::BN254; using Fr = Curve::ScalarField; using UltraOpsTable = EccOpsTable; using ColumnPolynomials = std::array, TABLE_WIDTH>; - std::optional current_subtable_idx; // index of the most recently merged subtable (nullopt if empty merge) UltraOpsTable table; + std::vector zk_ops; // ops used to mask real ops in Chonk - // For fixed-location append functionality. - // APPEND mode places the current subtable at a fixed position at the end of the table, ensuring a - // constant total table size (used for zero-knowledge when the appending circuit is ZK). See - // chonk/README.md "Constant Merged Table Size for ZK". (Only applicable for ultra ops.) + // Set by merge_with_fixed_append_offset to record the row offset (in NUM_ROWS_PER_OP units) at which the + // most recent subtable should be placed when constructing the full table polynomials. 
Setting this value + // also ensures that subsequent reconstructions/polynomial constructions include the APPEND_TRACE_OFFSET + // leading-zero preamble for the appended subtable, so the resulting commitments line up with the + // appender flavor's ecc_op_wire commitments. See chonk/README.md "Constant Merged Table Size for ZK". std::optional fixed_append_offset; - bool has_fixed_append = false; - - // Size of the appended subtable (including its APPEND_TRACE_OFFSET preamble) in rows, if any. - size_t appended_subtable_span() const - { - BB_ASSERT(has_fixed_append, "Appended subtable span called without fixed append"); - BB_ASSERT(!table.get().empty(), "Appended subtable span called on empty table"); - - return APPEND_TRACE_OFFSET + (table.get().back().size() * NUM_ROWS_PER_OP); - } public: // Returns the number of ECC operations in the table size_t num_ops() const { return table.size(); } - // Returns the number of rows in the Ultra execution trace (each op occupies NUM_ROWS_PER_OP rows) + // Returns the number of rows in the Ultra execution trace (each op occupies NUM_ROWS_PER_OP rows). + // NOTE: this count covers the merged subtables only and EXCLUDES the ZK prefix (zk_ops, size ZK_ULTRA_OPS). + // Callers that need the full polynomial size (e.g. for sizing a commitment key) must add ZK_ULTRA_OPS. size_t num_ultra_rows() const { - size_t base_size = (table.size() * NUM_ROWS_PER_OP) + (has_fixed_append ? 
APPEND_TRACE_OFFSET : 0); - if (has_fixed_append && fixed_append_offset.has_value()) { - // Include zeros gap and final subtable at fixed location (subtable span includes preamble) - return std::max(base_size, (fixed_append_offset.value() * NUM_ROWS_PER_OP) + appended_subtable_span()); + if (!has_fixed_append_offset()) { + return table.size() * NUM_ROWS_PER_OP; } - return base_size; + BB_ASSERT(!table.get().empty(), "Fixed-append set but no subtables present"); + // Last subtable starts at fixed_append_offset (in op units), preceded by APPEND_TRACE_OFFSET zero rows. + const size_t last_subtable_rows = table.get().back().size() * NUM_ROWS_PER_OP; + return (fixed_append_offset.value() * NUM_ROWS_PER_OP) + APPEND_TRACE_OFFSET + last_subtable_rows; } - size_t current_ultra_subtable_size() const + size_t ultra_table_size_up_to_tail() const { - if (!current_subtable_idx.has_value()) { - return 0; + BB_ASSERT_EQ( + table.get_current_subtable_size(), + 0UL, + "Current subtable should be merged before computing the size of table of operations up to the tail."); + BB_ASSERT_GT(table.num_subtables(), 1UL, "Cannot compute tail table size without at least two tables."); + size_t size = 0; + for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; ++subtable_idx) { + size += table.get()[subtable_idx].size() * NUM_ROWS_PER_OP; } - const bool is_appended_subtable = has_fixed_append && current_subtable_idx.value() == table.num_subtables() - 1; - const size_t ops_rows = table.get()[current_subtable_idx.value()].size() * NUM_ROWS_PER_OP; - return is_appended_subtable ? 
APPEND_TRACE_OFFSET + ops_rows : ops_rows; + return size; } - size_t previous_ultra_table_size() const { return (num_ultra_rows() - current_ultra_subtable_size()); } void create_new_subtable(size_t size_hint = 0) { table.create_new_subtable(size_hint); } void push(const UltraOp& op) { table.push(op); } - void merge(MergeSettings settings = MergeSettings::PREPEND, std::optional offset = std::nullopt) + bool has_fixed_append_offset() const { return fixed_append_offset.has_value(); } + bool has_zk_ops() const { return !zk_ops.empty(); } + void merge() { - const size_t num_subtables_before = table.num_subtables(); - if (settings == MergeSettings::APPEND) { - // All appends are treated as fixed-location for ultra ops - BB_ASSERT(!has_fixed_append, "Can only perform fixed-location append once"); - // Set fixed location at which to append ultra ops. If nullopt --> append right after prepended tables - fixed_append_offset = offset; - has_fixed_append = true; - table.merge(settings); - current_subtable_idx = table.num_subtables() - 1; - } else { // MergeSettings::PREPEND - table.merge(settings); - // Only update current_subtable_idx if a subtable was actually added (non-empty merge) - current_subtable_idx = - (table.num_subtables() > num_subtables_before) ? std::optional(0) : std::nullopt; + BB_ASSERT(!has_fixed_append_offset(), "Cannot perform regular merge after fixed-location append"); + table.merge(); + } + void merge_with_fixed_append_offset(size_t offset) + { + BB_ASSERT(!has_fixed_append_offset(), "Can only perform fixed-location append once"); + + size_t prior_subtables_size = 0; + for (const auto& subtable : table.get()) { + prior_subtables_size += subtable.size(); } + BB_ASSERT_LTE(prior_subtables_size, + offset, + "Merged table size exceeds fixed append offset. This means that there are too many ops before " + "the last subtable. 
The last subtable doesn't fit at the end of the op queue."); + + fixed_append_offset = offset; + table.merge(); } size_t get_current_subtable_size() const { return table.get_current_subtable_size(); } - std::vector get_reconstructed() const + std::vector get_no_zk_reconstructed_ultra_ops() const { - if (has_fixed_append && fixed_append_offset.has_value()) { - return get_reconstructed_with_fixed_append(); - } - return table.get_reconstructed(); + return get_reconstructed(/*include_zk_ops=*/false); } - std::vector get_reconstructed_with_fixed_append() const - { - BB_ASSERT(get_current_subtable_size() == 0, - "current subtable should be merged before reconstructing the full table of operations."); + std::vector get_zk_reconstructed_ultra_ops() const { return get_reconstructed(/*include_zk_ops=*/true); } + + private: + // Reconstruct the full table of ultra ops in contiguous memory. When include_zk_ops is set, the result includes + // the ZK prefix at the front. Under fixed-location append, the result then has gap no-ops up to the fixed offset, + // the APPEND_TRACE_OFFSET zero preamble, then the most recently merged subtable. 
+ std::vector get_reconstructed(const bool include_zk_ops) const + { + BB_ASSERT_EQ(get_current_subtable_size(), + 0UL, + "current subtable should be merged before reconstructing the full table of operations."); + BB_ASSERT(!include_zk_ops || has_zk_ops(), "ZK ops must be constructed before reconstructing the Ultra table."); std::vector reconstructed_table; reconstructed_table.reserve(1 << CONST_OP_QUEUE_LOG_SIZE); - for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; subtable_idx++) { - const auto& subtable = table.get()[subtable_idx]; - for (const auto& op : subtable) { - reconstructed_table.push_back(op); - } + if (include_zk_ops) { + reconstructed_table.insert(reconstructed_table.end(), zk_ops.begin(), zk_ops.end()); } - // Fill gap with no-ops up to fixed_append_offset, then add no-ops to match the APPEND_TRACE_OFFSET preamble - if (has_fixed_append && fixed_append_offset.has_value()) { - size_t current_size = reconstructed_table.size(); - size_t target_offset = fixed_append_offset.value(); - BB_ASSERT_LTE(current_size, target_offset, "Current table size is larger than fixed append offset."); - - constexpr size_t preamble_op_slots = APPEND_TRACE_OFFSET / NUM_ROWS_PER_OP; - reconstructed_table.insert( - reconstructed_table.end(), target_offset + preamble_op_slots - current_size, UltraOp{ /*no-op*/ }); + if (!has_fixed_append_offset()) { + for (const auto& subtable : table.get()) { + reconstructed_table.insert(reconstructed_table.end(), subtable.begin(), subtable.end()); + } + return reconstructed_table; } - // Add the final subtable (appended at fixed location) - const auto& final_subtable = table.get()[table.num_subtables() - 1]; - for (const auto& op : final_subtable) { - reconstructed_table.push_back(op); + // Previously-merged subtables (everything except the most recent) + for (size_t idx = 0; idx + 1 < table.num_subtables(); ++idx) { + const auto& subtable = table.get()[idx]; + reconstructed_table.insert(reconstructed_table.end(), 
subtable.begin(), subtable.end()); } + + // Pad with no-ops up to fixed offset + APPEND_TRACE_OFFSET preamble + constexpr size_t preamble_op_slots = APPEND_TRACE_OFFSET / NUM_ROWS_PER_OP; + const size_t zk_offset_ops = include_zk_ops ? zk_ops.size() : 0; + const size_t target_op_count = fixed_append_offset.value() + zk_offset_ops + preamble_op_slots; + BB_ASSERT_LTE( + reconstructed_table.size(), target_op_count, "Current table size is larger than fixed append offset."); + reconstructed_table.insert( + reconstructed_table.end(), target_op_count - reconstructed_table.size(), UltraOp{ /* no-op */ }); + + // Final subtable + const auto& final_subtable = table.get().back(); + reconstructed_table.insert(reconstructed_table.end(), final_subtable.begin(), final_subtable.end()); return reconstructed_table; } - // Construct column polynomials for the full ultra ecc ops table - ColumnPolynomials construct_table_columns() const + public: + std::pair construct_zk_columns() { - const size_t poly_size = num_ultra_rows(); + BB_ASSERT(!has_zk_ops(), "ZK ops should only be constructed once."); + + // Construct the table of ops + for (size_t idx = 0; idx < ECC_NUM_NO_OPS_START; idx++) { + zk_ops.push_back(UltraOp{ /* no_op */ }); + } - if (has_fixed_append) { - return construct_column_polynomials_with_fixed_append(poly_size); + // Each random op contributes 8 fresh Fr values to the column polynomials, masking commitments and + // evaluations of the columns in the merge protocol and Translator. 
+ for (size_t idx = 0; idx < ECC_NUM_RANDOM_OPS_START; idx++) { + zk_ops.push_back(UltraOp{ .op_code = EccOpCode{ .is_random_op = true, + .random_value_1 = Fr::random_element(), + .random_value_2 = Fr::random_element() }, + .x_lo = Fr::random_element(), + .x_hi = Fr::random_element(), + .y_lo = Fr::random_element(), + .y_hi = Fr::random_element(), + .z_1 = Fr::random_element(), + .z_2 = Fr::random_element(), + .return_is_infinity = false }); } - return construct_column_polynomials_from_subtables(poly_size, 0, table.num_subtables()); + using Fq = curve::BN254::BaseField; + auto [hiding_ultra_op, hiding_eccvm_op] = make_hiding_op_pair(Fq::random_element(), Fq::random_element()); + zk_ops.push_back(hiding_ultra_op); + + const size_t poly_size = (zk_ops.size() * NUM_ROWS_PER_OP); + BB_ASSERT_EQ(poly_size, ZK_ULTRA_OPS); + + // Construct the column polynomials + ColumnPolynomials column_polynomials; + for (auto& poly : column_polynomials) { + poly = Polynomial(poly_size); + } + + size_t i = 0; + for (const auto& op : zk_ops) { + write_op_to_polynomials(column_polynomials, op, i); + i += NUM_ROWS_PER_OP; + } + + return { column_polynomials, hiding_eccvm_op }; } - // Construct column polynomials for the aggregate table excluding the most recent subtable - ColumnPolynomials construct_previous_table_columns() const + // Construct column polynomials for all subtables + std::vector construct_subtable_columns() const { - const size_t poly_size = previous_ultra_table_size(); - if (!current_subtable_idx.has_value()) { - // Empty merge: the entire table is "previous" - return construct_column_polynomials_from_subtables(poly_size, 0, table.num_subtables()); + std::vector subtable_columns; + + for (size_t idx = 0; idx < table.num_subtables(); idx++) { + const auto& subtable = table.get()[idx]; + const size_t poly_size = (subtable.size() * NUM_ROWS_PER_OP); + ColumnPolynomials columns = construct_columns_in_range(poly_size, idx, idx + 1); + 
subtable_columns.push_back(std::move(columns)); } - const size_t idx = current_subtable_idx.value(); - const size_t subtable_start_idx = idx == 0 ? 1 : 0; - const size_t subtable_end_idx = idx == 0 ? table.num_subtables() : table.num_subtables() - 1; - return construct_column_polynomials_from_subtables(poly_size, subtable_start_idx, subtable_end_idx); + return subtable_columns; + } + + // Construct column polynomials for the full ultra ecc ops table + ColumnPolynomials construct_table_columns(const bool include_zk_ops = true) const + { + BB_ASSERT(!include_zk_ops || has_zk_ops(), + "ZK ops must be constructed before constructing the full Ultra table with ZK ops."); + return construct_columns_in_range( + num_ultra_rows(), 0, table.num_subtables(), include_zk_ops, fixed_append_offset); + } + + // Construct column polynomials for the aggregate table up to and including the tail subtable. + ColumnPolynomials construct_table_columns_up_to_tail() const + { + BB_ASSERT(has_zk_ops(), "ZK ops should have been constructed before constructing the table up to tail"); + BB_ASSERT_GT(table.num_subtables(), + 1UL, + "There should be at least two subtables (including the tail) to construct the table up to tail"); + BB_ASSERT_GT(table.num_subtables(), 0UL, "Cannot construct table up to tail without a current subtable"); + + return construct_columns_in_range( + ultra_table_size_up_to_tail(), 0, table.num_subtables() - 1, /*include_zk_ops=*/true); } - // Construct the columns of the current subtable (first or last depending on prepend/append). - // For the APPEND path the returned polynomials carry APPEND_TRACE_OFFSET leading zero rows so their + // Construct the columns of the most recently merged subtable. + // Under fixed-location append, the returned polynomials carry APPEND_TRACE_OFFSET leading zero rows so their // commitments match the appender's ecc_op_wire commitments. 
ColumnPolynomials construct_current_ultra_ops_subtable_columns() const { - if (!current_subtable_idx.has_value()) { - return ColumnPolynomials{}; - } - const bool is_appended_subtable = has_fixed_append && current_subtable_idx.value() == table.num_subtables() - 1; - const size_t leading_zeros = is_appended_subtable ? APPEND_TRACE_OFFSET : 0; - const size_t poly_size = current_ultra_subtable_size(); + BB_ASSERT(table.num_subtables() > 0, "Cannot construct current subtable columns with no merged subtables"); + const size_t leading_zeros = has_fixed_append_offset() ? APPEND_TRACE_OFFSET : 0; + const auto& subtable = table.get().back(); + const size_t poly_size = leading_zeros + (subtable.size() * NUM_ROWS_PER_OP); ColumnPolynomials column_polynomials; if (poly_size == 0) { @@ -394,7 +487,6 @@ class UltraEccOpsTable { } size_t row = leading_zeros; - const auto& subtable = table.get()[current_subtable_idx.value()]; for (const auto& op : subtable) { write_op_to_polynomials(column_polynomials, op, row); row += NUM_ROWS_PER_OP; @@ -423,69 +515,65 @@ class UltraEccOpsTable { } /** - * @brief Construct polynomials with fixed-location append - * @details Process prepended subtables first, then place the appended subtable at the fixed offset. The appended - * subtable's ops are preceded by APPEND_TRACE_OFFSET zero rows. + * @brief Construct column polynomials covering subtables [start, end), optionally with a ZK prefix and an + * optional fixed-location placement of the last-in-range subtable. + * + * @details Layout (rows in NUM_ROWS_PER_OP units, advancing left to right): + * [optional ZK prefix] [subtables [start, sequential_end)] [optional gap] [optional last-in-range subtable + * preceded by APPEND_TRACE_OFFSET] + * + * If `fixed_append_offset_for_last` is set, sequential_end = end - 1 and the last-in-range subtable is placed + * at row `(zk_size + offset * NUM_ROWS_PER_OP + APPEND_TRACE_OFFSET)`. Any intervening rows are left at the + * zero-initialized default. 
Otherwise sequential_end = end and there is no gap. */ - ColumnPolynomials construct_column_polynomials_with_fixed_append(const size_t poly_size) const + ColumnPolynomials construct_columns_in_range( + const size_t poly_size, + const size_t subtable_start_idx, + const size_t subtable_end_idx, + const bool include_zk_ops = false, + const std::optional fixed_append_offset_for_last = std::nullopt) const { + const size_t final_poly_size = poly_size + (include_zk_ops ? ZK_ULTRA_OPS : 0); + ColumnPolynomials column_polynomials; - if (poly_size == 0) { + if (final_poly_size == 0) { return column_polynomials; } for (auto& poly : column_polynomials) { - poly = Polynomial(poly_size); // Initialized to zeros - } - - // Process all prepended subtables (all except last) - size_t i = 0; - for (size_t subtable_idx = 0; subtable_idx < table.num_subtables() - 1; subtable_idx++) { - const auto& subtable = table.get()[subtable_idx]; - for (const auto& op : subtable) { - write_op_to_polynomials(column_polynomials, op, i); - i += NUM_ROWS_PER_OP; - } + poly = Polynomial(final_poly_size); } - // Place the appended subtable at the fixed offset (skipping its leading-zero preamble). - size_t append_position = fixed_append_offset.has_value() ? 
fixed_append_offset.value() * NUM_ROWS_PER_OP : i; - const auto& appended_subtable = table.get()[table.num_subtables() - 1]; + size_t row = 0; - size_t j = append_position + APPEND_TRACE_OFFSET; - for (const auto& op : appended_subtable) { - write_op_to_polynomials(column_polynomials, op, j); - j += NUM_ROWS_PER_OP; + if (include_zk_ops) { + BB_ASSERT(has_zk_ops(), "ZK ops should have been constructed before including them in the columns"); + for (const auto& op : zk_ops) { + write_op_to_polynomials(column_polynomials, op, row); + row += NUM_ROWS_PER_OP; + } } - // Any gap between prepended tables and appended table remains zeros (from initialization) - return column_polynomials; - } - - /** - * @brief Construct polynomials corresponding to the columns of the reconstructed ultra ops table for the given - * range of subtables - * @param target_columns - */ - ColumnPolynomials construct_column_polynomials_from_subtables(const size_t poly_size, - const size_t subtable_start_idx, - const size_t subtable_end_idx) const - { - ColumnPolynomials column_polynomials; - if (poly_size == 0) { - return column_polynomials; - } - for (auto& poly : column_polynomials) { - poly = Polynomial(poly_size); + // Lay out subtables sequentially. If a fixed-append target is set, exclude the last-in-range subtable + // from the sequential pass; it is placed at the fixed offset below. + const size_t sequential_end = + fixed_append_offset_for_last.has_value() ? 
subtable_end_idx - 1 : subtable_end_idx; + for (size_t idx = subtable_start_idx; idx < sequential_end; ++idx) { + for (const auto& op : table.get()[idx]) { + write_op_to_polynomials(column_polynomials, op, row); + row += NUM_ROWS_PER_OP; + } } - size_t i = 0; - for (size_t subtable_idx = subtable_start_idx; subtable_idx < subtable_end_idx; ++subtable_idx) { - const auto& subtable = table.get()[subtable_idx]; - for (const auto& op : subtable) { - write_op_to_polynomials(column_polynomials, op, i); - i += NUM_ROWS_PER_OP; + if (fixed_append_offset_for_last.has_value()) { + const size_t zk_prefix_rows = include_zk_ops ? ZK_ULTRA_OPS : 0; + size_t append_row = + zk_prefix_rows + (fixed_append_offset_for_last.value() * NUM_ROWS_PER_OP) + APPEND_TRACE_OFFSET; + for (const auto& op : table.get()[subtable_end_idx - 1]) { + write_op_to_polynomials(column_polynomials, op, append_row); + append_row += NUM_ROWS_PER_OP; } } + return column_polynomials; } }; diff --git a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp index 9ec2bb2063f5..27e39214530d 100644 --- a/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp +++ b/barretenberg/cpp/src/barretenberg/op_queue/ecc_ops_table.test.cpp @@ -61,7 +61,7 @@ class EccOpsTableTest : public ::testing::Test { } }; - // Mock ultra ops table that constructs a concatenated table from successively prepended subtables + // Mock ultra ops table that constructs a concatenated table from successively appended subtables. struct MockUltraOpsTable { std::array, 4> columns; void append(const UltraOp& op) @@ -101,7 +101,7 @@ class EccOpsTableTest : public ::testing::Test { size_t size() const { return columns[0].size(); } }; - // Mock eccvm ops table that constructs a concatenated table from successively prepended subtables + // Mock eccvm ops table that constructs a concatenated table from successively appended subtables. 
struct MockEccvmOpsTable { std::vector eccvm_ops; @@ -116,8 +116,8 @@ class EccOpsTableTest : public ::testing::Test { }; }; -// Ensure UltraOpsTable correctly constructs a concatenated table from successively prepended subtables -TEST(EccOpsTableTest, UltraOpsTablePrependOnly) +// Ensure UltraOpsTable correctly constructs a concatenated table from successively appended subtables. +TEST(EccOpsTableTest, UltraOpsTable) { using Fr = fr; using TableGenerator = EccOpsTableTest::UltraOpTableGenerator; @@ -139,8 +139,7 @@ TEST(EccOpsTableTest, UltraOpsTablePrependOnly) ultra_ops_table.merge(); } - std::reverse(subtables.begin(), subtables.end()); - // Construct the mock ultra ops table which contains the subtables ordered in reverse (as if prepended) + // Construct the mock ultra ops table which contains the subtables in append order. EccOpsTableTest::MockUltraOpsTable expected_ultra_ops_table(subtables); // Check that the ultra ops table internal to the op queue has the correct size @@ -148,62 +147,19 @@ TEST(EccOpsTableTest, UltraOpsTablePrependOnly) EXPECT_EQ(ultra_ops_table.num_ops(), expected_num_ops); // Construct polynomials corresponding to the columns of the ultra ops table + ultra_ops_table.construct_zk_columns(); std::array, 4> ultra_ops_table_polynomials = ultra_ops_table.construct_table_columns(); + std::array, 4> no_zk_ultra_ops_table_polynomials = + ultra_ops_table.construct_table_columns(/*include_zk_ops=*/false); // Check that the ultra ops table constructed by the op queue matches the expected table - for (auto [expected_column, poly] : zip_view(expected_ultra_ops_table.columns, ultra_ops_table_polynomials)) { - for (auto [expected_value, value] : zip_view(expected_column, poly.coeffs())) { - EXPECT_EQ(expected_value, value); - } - } -} - -TEST(EccOpsTableTest, UltraOpsPrependThenAppend) -{ - using Fr = fr; - using TableGenerator = EccOpsTableTest::UltraOpTableGenerator; - - // Construct sets of ultra ops, each representing those added by a single 
circuit - const size_t NUM_SUBTABLES = 3; - std::vector subtable_op_counts = { 4, 2, 7 }; - - TableGenerator table_generator; - auto subtables = table_generator.generate_subtables(NUM_SUBTABLES, subtable_op_counts); - - // Construct the concatenated table internal to the op queue - UltraEccOpsTable ultra_ops_table; - std::array merge_settings = { MergeSettings::PREPEND, - MergeSettings::PREPEND, - MergeSettings::APPEND }; - for (const auto& [subtable_ops, setting] : zip_view(subtables, merge_settings)) { - ultra_ops_table.create_new_subtable(); - for (const auto& op : subtable_ops) { - ultra_ops_table.push(op); - } - ultra_ops_table.merge(setting); - } - - std::vector> ordered_subtables; - for (auto [subtable, setting] : zip_view(subtables, merge_settings)) { - auto it = setting == MergeSettings::PREPEND ? ordered_subtables.begin() : ordered_subtables.end(); - ordered_subtables.insert(it, subtable); - } - - // Construct the mock ultra ops table. The final APPEND carries APPEND_TRACE_OFFSET preamble rows. 
- EccOpsTableTest::MockUltraOpsTable expected_ultra_ops_table(ordered_subtables, - /*last_subtable_has_preamble=*/true); - - // Check that the ultra ops table internal to the op queue has the correct size - auto expected_num_ops = std::accumulate(subtable_op_counts.begin(), subtable_op_counts.end(), size_t(0)); - EXPECT_EQ(ultra_ops_table.num_ops(), expected_num_ops); - - // Construct polynomials corresponding to the columns of the ultra ops table - std::array, 4> ultra_ops_table_polynomials = ultra_ops_table.construct_table_columns(); - - // Check that the ultra ops table constructed by the op queue matches the expected table - for (auto [expected_column, poly] : zip_view(expected_ultra_ops_table.columns, ultra_ops_table_polynomials)) { - for (auto [expected_value, value] : zip_view(expected_column, poly.coeffs())) { - EXPECT_EQ(expected_value, value); + for (auto [expected_column, poly, no_zk_poly] : + zip_view(expected_ultra_ops_table.columns, ultra_ops_table_polynomials, no_zk_ultra_ops_table_polynomials)) { + EXPECT_EQ(poly.size(), UltraEccOpsTable::ZK_ULTRA_OPS + expected_column.size()); + EXPECT_EQ(no_zk_poly.size(), expected_column.size()); + for (size_t row = 0; row < expected_column.size(); ++row) { + EXPECT_EQ(expected_column[row], poly.at(UltraEccOpsTable::ZK_ULTRA_OPS + row)); + EXPECT_EQ(expected_column[row], no_zk_poly.at(row)); } } } @@ -222,23 +178,24 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendNoGap) // Construct the concatenated table with fixed-location append (no explicit offset) UltraEccOpsTable ultra_ops_table; - std::array merge_settings = { MergeSettings::PREPEND, - MergeSettings::PREPEND, - MergeSettings::APPEND }; - for (size_t i = 0; i < NUM_SUBTABLES; ++i) { ultra_ops_table.create_new_subtable(); for (const auto& op : subtables[i]) { ultra_ops_table.push(op); } - // For APPEND (last subtable), don't provide an offset (default to right after prepended tables) - ultra_ops_table.merge(merge_settings[i]); + if (i == NUM_SUBTABLES - 
1) { + // No-gap fixed-append: place the appended subtable immediately after the prior subtables. + const size_t no_gap_offset = subtable_op_counts[0] + subtable_op_counts[1]; + ultra_ops_table.merge_with_fixed_append_offset(no_gap_offset); + } else { + ultra_ops_table.merge(); + } } - // Expected order: subtable[1], subtable[0], subtable[2] (no gap). The final APPEND carries + // Expected order: subtable[0], subtable[1], subtable[2] (no gap). The final APPEND carries // APPEND_TRACE_OFFSET preamble rows. - std::vector> ordered_subtables = { subtables[1], subtables[0], subtables[2] }; + std::vector> ordered_subtables = { subtables[0], subtables[1], subtables[2] }; // Construct the mock ultra ops table EccOpsTableTest::MockUltraOpsTable expected_ultra_ops_table(ordered_subtables, @@ -249,12 +206,14 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendNoGap) EXPECT_EQ(ultra_ops_table.num_ops(), expected_num_ops); // Construct polynomials corresponding to the columns of the ultra ops table + ultra_ops_table.construct_zk_columns(); std::array, 4> ultra_ops_table_polynomials = ultra_ops_table.construct_table_columns(); // Check that the ultra ops table matches the expected table for (auto [expected_column, poly] : zip_view(expected_ultra_ops_table.columns, ultra_ops_table_polynomials)) { - for (auto [expected_value, value] : zip_view(expected_column, poly.coeffs())) { - EXPECT_EQ(expected_value, value); + EXPECT_EQ(poly.size(), UltraEccOpsTable::ZK_ULTRA_OPS + expected_column.size()); + for (size_t row = 0; row < expected_column.size(); ++row) { + EXPECT_EQ(expected_column[row], poly.at(UltraEccOpsTable::ZK_ULTRA_OPS + row)); } } } @@ -275,15 +234,11 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) // Construct the concatenated table with fixed-location append at specific offset UltraEccOpsTable ultra_ops_table; - std::array merge_settings = { MergeSettings::PREPEND, - MergeSettings::PREPEND, - MergeSettings::APPEND }; - - // Define a fixed offset at which to 
append the table (must be greater than the total size of the prepended tables) + // Define a fixed offset at which to append the table (must be greater than the total size of the prior tables). const size_t fixed_offset = 20; const size_t fixed_offset_num_rows = fixed_offset * ULTRA_ROWS_PER_OP; - const size_t prepended_size = (subtable_op_counts[0] + subtable_op_counts[1]) * ULTRA_ROWS_PER_OP; - BB_ASSERT(fixed_offset_num_rows > prepended_size); + const size_t prior_subtables_size = (subtable_op_counts[0] + subtable_op_counts[1]) * ULTRA_ROWS_PER_OP; + BB_ASSERT(fixed_offset_num_rows > prior_subtables_size); // Construct the ultra ops table for (size_t i = 0; i < NUM_SUBTABLES; ++i) { @@ -292,11 +247,10 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) ultra_ops_table.push(op); } - // For APPEND (last subtable), provide a fixed offset - if (merge_settings[i] == MergeSettings::APPEND) { - ultra_ops_table.merge(merge_settings[i], fixed_offset); + if (i == NUM_SUBTABLES - 1) { + ultra_ops_table.merge_with_fixed_append_offset(fixed_offset); } else { - ultra_ops_table.merge(merge_settings[i]); + ultra_ops_table.merge(); } } @@ -308,32 +262,33 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) constexpr size_t LEADING_ZEROS = UltraEccOpsTable::APPEND_TRACE_OFFSET; size_t expected_poly_size = fixed_offset_num_rows + LEADING_ZEROS + (subtable_op_counts[2] * ULTRA_ROWS_PER_OP); EXPECT_EQ(ultra_ops_table.num_ultra_rows(), expected_poly_size); + ultra_ops_table.construct_zk_columns(); + const size_t zk_prefix_rows = UltraEccOpsTable::ZK_ULTRA_OPS; // Construct polynomials corresponding to the columns of the ultra ops table std::array, 4> ultra_ops_table_polynomials = ultra_ops_table.construct_table_columns(); // Verify each polynomial has the expected size for (const auto& poly : ultra_ops_table_polynomials) { - EXPECT_EQ(poly.size(), expected_poly_size); + EXPECT_EQ(poly.size(), zk_prefix_rows + expected_poly_size); } // Construct expected table with 
zeros in the gap - // Order: subtable[1], subtable[0], zeros, subtable[2] - std::vector> ordered_subtables = { subtables[1], subtables[0] }; - EccOpsTableTest::MockUltraOpsTable expected_prepended_table(ordered_subtables); - - // Check prepended subtables are at the beginning - for (auto [ultra_op_poly, expected_poly] : - zip_view(ultra_ops_table_polynomials, expected_prepended_table.columns)) { - for (size_t row = 0; row < prepended_size; ++row) { - EXPECT_EQ(ultra_op_poly.at(row), expected_poly[row]); + // Order: subtable[0], subtable[1], zeros, subtable[2] + std::vector> ordered_subtables = { subtables[0], subtables[1] }; + EccOpsTableTest::MockUltraOpsTable expected_prior_table(ordered_subtables); + + // Check prior subtables are at the beginning. + for (auto [ultra_op_poly, expected_poly] : zip_view(ultra_ops_table_polynomials, expected_prior_table.columns)) { + for (size_t row = 0; row < prior_subtables_size; ++row) { + EXPECT_EQ(ultra_op_poly.at(zk_prefix_rows + row), expected_poly[row]); } } - // Check gap from prepended tables up to (fixed_offset + preamble) is filled with zeros. + // Check gap from prior tables up to (fixed_offset + preamble) is filled with zeros. 
for (auto ultra_op_poly : ultra_ops_table_polynomials) { - for (size_t row = prepended_size; row < fixed_offset_num_rows + LEADING_ZEROS; ++row) { - EXPECT_EQ(ultra_op_poly.at(row), Fr::zero()); + for (size_t row = prior_subtables_size; row < fixed_offset_num_rows + LEADING_ZEROS; ++row) { + EXPECT_EQ(ultra_op_poly.at(zk_prefix_rows + row), Fr::zero()); } } @@ -342,7 +297,8 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) EccOpsTableTest::MockUltraOpsTable expected_appended_table(appended_subtables); for (auto [ultra_op_poly, expected_poly] : zip_view(ultra_ops_table_polynomials, expected_appended_table.columns)) { for (size_t row = 0; row < subtable_op_counts[2] * ULTRA_ROWS_PER_OP; row++) { - EXPECT_EQ(ultra_op_poly.at(fixed_offset_num_rows + LEADING_ZEROS + row), expected_poly[row]); + EXPECT_EQ(ultra_op_poly.at(zk_prefix_rows + fixed_offset_num_rows + LEADING_ZEROS + row), + expected_poly[row]); } } @@ -352,11 +308,11 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) std::vector expected_reconstructed; expected_reconstructed.reserve(expected_num_ops + fixed_offset); - // Order: subtable[1], subtable[0], no-ops range (including APPEND_TRACE_OFFSET preamble), subtable[2] - for (const auto& op : subtables[1]) { + // Order: subtable[0], subtable[1], no-ops range (including APPEND_TRACE_OFFSET preamble), subtable[2] + for (const auto& op : subtables[0]) { expected_reconstructed.push_back(op); } - for (const auto& op : subtables[0]) { + for (const auto& op : subtables[1]) { expected_reconstructed.push_back(op); } @@ -372,14 +328,15 @@ TEST(EccOpsTableTest, UltraOpsFixedLocationAppendWithGap) expected_reconstructed.push_back(op); } - EXPECT_EQ(expected_reconstructed.size(), ultra_ops_table.get_reconstructed().size()); + const auto reconstructed = ultra_ops_table.get_no_zk_reconstructed_ultra_ops(); + EXPECT_EQ(expected_reconstructed.size(), reconstructed.size()); // Compare to the op-queue's reconstruction (should include the gap as no-ops) - 
EXPECT_EQ(expected_reconstructed, ultra_ops_table.get_reconstructed()); + EXPECT_EQ(expected_reconstructed, reconstructed); } } -// Ensure EccvmOpsTable correctly constructs a concatenated table from successively prepended subtables +// Ensure EccvmOpsTable correctly constructs a concatenated table from successively appended subtables TEST(EccOpsTableTest, EccvmOpsTable) { @@ -403,8 +360,7 @@ TEST(EccOpsTableTest, EccvmOpsTable) eccvm_ops_table.merge(); } - std::reverse(subtables.begin(), subtables.end()); - // Construct the mock eccvm ops table which contains the subtables ordered in reverse (as if prepended) + // Construct the mock eccvm ops table which contains the subtables in append order. EccOpsTableTest::MockEccvmOpsTable expected_eccvm_ops_table(subtables); // Check that the table has the correct size @@ -420,9 +376,9 @@ TEST(EccOpsTableTest, EccvmOpsTable) EXPECT_EQ(expected_eccvm_ops_table.eccvm_ops, eccvm_ops_table.get_reconstructed()); } -// Ensure EccvmOpsTable correctly constructs a concatenated table from successively prepended and then appended +// Ensure EccvmOpsTable correctly constructs a concatenated table from successively appended // subtables -TEST(EccOpsTableTest, EccvmOpsTablePrependThenAppend) +TEST(EccOpsTableTest, EccvmOpsTableAppendOnly) { // Construct sets of eccvm ops, each representing those added by a single circuit @@ -435,27 +391,18 @@ TEST(EccOpsTableTest, EccvmOpsTablePrependThenAppend) TableGenerator table_generator; auto subtables = table_generator.generate_subtables(NUM_SUBTABLES, subtable_op_counts); - std::array merge_settings = { MergeSettings::PREPEND, - MergeSettings::PREPEND, - MergeSettings::APPEND }; // Construct the concatenated eccvm ops table EccvmOpsTable eccvm_ops_table; - for (const auto& [subtable_ops, setting] : zip_view(subtables, merge_settings)) { + for (const auto& subtable_ops : subtables) { eccvm_ops_table.create_new_subtable(); for (const auto& op : subtable_ops) { eccvm_ops_table.push(op); } - 
eccvm_ops_table.merge(setting); - } - - std::vector> ordered_subtables; - for (auto [subtable, setting] : zip_view(subtables, merge_settings)) { - auto it = setting == MergeSettings::PREPEND ? ordered_subtables.begin() : ordered_subtables.end(); - ordered_subtables.insert(it, subtable); + eccvm_ops_table.merge(); } - // Construct the mock ultra ops table which contains the subtables ordered in reverse (as if prepended) - EccOpsTableTest::MockEccvmOpsTable expected_eccvm_ops_table(ordered_subtables); + // Construct the mock eccvm ops table which contains the subtables in append order. + EccOpsTableTest::MockEccvmOpsTable expected_eccvm_ops_table(subtables); // Check that the table has the correct size auto expected_num_ops = std::accumulate(subtable_op_counts.begin(), subtable_op_counts.end(), size_t(0)); diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp index 0b0bf79f9b27..a22ff4875daa 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp @@ -284,6 +284,52 @@ void Polynomial::add_scaled_chunk(const ThreadChunk& chunk, } } +template +void add_scaled_batch(Polynomial& dst, + std::span> sources, + std::span scalars) +{ + BB_BENCH_NAME("add_scaled_batch"); + BB_ASSERT_EQ(sources.size(), scalars.size(), "sources and scalars must have the same length"); + if (sources.empty()) { + return; + } + + size_t min_start = sources[0].start_index; + size_t max_end = sources[0].end_index(); + for (size_t i = 1; i < sources.size(); ++i) { + min_start = std::min(min_start, sources[i].start_index); + max_end = std::max(max_end, sources[i].end_index()); + } + BB_ASSERT_LTE(dst.start_index(), min_start); + BB_ASSERT_GTE(dst.end_index(), max_end); + + const size_t union_size = max_end - min_start; + parallel_for([&](const ThreadChunk& chunk) { + BB_BENCH_TRACY_NAME("add_scaled_batch/chunk"); + auto chunk_indices = 
chunk.range(union_size, min_start); + if (chunk_indices.empty()) { + return; + } + auto chunk_start = chunk_indices.front(); + auto chunk_end = chunk_indices.back(); + + for (size_t k = 0; k < sources.size(); ++k) { + const auto& src = sources[k]; + const Fr& c = scalars[k]; + const size_t src_start = src.start_index; + const size_t src_end = src.end_index(); + + const size_t idx_start = std::max(chunk_start, src_start); + const size_t idx_end = std::min(chunk_end + 1, src_end); + + for (size_t i = idx_start; i < idx_end; ++i) { + dst.at(i) += c * src[i]; + } + } + }); +} + template Polynomial Polynomial::shifted() const { BB_ASSERT_GTE(coefficients_.start_, static_cast(1)); @@ -308,4 +354,11 @@ template Polynomial Polynomial::reverse() const template class Polynomial; template class Polynomial; + +template void add_scaled_batch(Polynomial& dst, + std::span> sources, + std::span scalars); +template void add_scaled_batch(Polynomial& dst, + std::span> sources, + std::span scalars); } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp index cd97628e0e04..7ac33b963303 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp @@ -126,6 +126,15 @@ template class Polynomial { } return p; } + /** + * @brief Overload of `shiftable` that leaves the backing memory uninitialized. + * @details Use only when the caller writes every cell in [NUM_ZERO_ROWS, NUM_ZERO_ROWS + size) + * before any read. 
+ */ + static Polynomial shiftable(size_t size, size_t virtual_size, DontZeroMemory flag) + { + return Polynomial(/*actual size*/ size - NUM_ZERO_ROWS, virtual_size, /*shiftable offset*/ NUM_ZERO_ROWS, flag); + } // Allow polynomials to be entirely reset/dormant Polynomial() = default; @@ -403,6 +412,22 @@ template class Polynomial { // Namely, it supports polynomial shifts and 'virtual' zeroes past a size up until a 'virtual' size. SharedShiftedVirtualZeroesArray coefficients_; }; + +/** + * @brief Fused parallel batched add: dst += sum_i scalars[i] * sources[i]. + * + * Equivalent to invoking dst.add_scaled(sources[i], scalars[i]) for each i, but issues a single + * parallel_for over the destination range and visits every source within each chunk. Amortises + * the per-call parallel_for startup cost from N× down to 1×, which dominates the cost of small-N + * batched add-scaled patterns at high core counts. + * + * Each source must satisfy add_scaled's precondition: dst's index range covers the source's. 
+ */ +template +void add_scaled_batch(Polynomial& dst, + std::span> sources, + std::span scalars); + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) template std::shared_ptr _allocate_aligned_memory(size_t n_elements) { diff --git a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp index 122af1c441e1..122243a676ff 100644 --- a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation.hpp @@ -112,28 +112,46 @@ template class DatabusLookupRelationImpl { // Interface for easy access of databus components by column (bus_idx) template struct BusData; - // Specialization for calldata (bus_idx = 0) + // Specialization for kernel_calldata (bus_idx = 0) template struct BusData { - static auto& values(const AllEntities& in) { return in.calldata; } + static auto& values(const AllEntities& in) { return in.kernel_calldata; } static auto& selector(const AllEntities& in) { return in.q_l; } - static auto& inverses(AllEntities& in) { return in.calldata_inverses; } - static auto& inverses(const AllEntities& in) { return in.calldata_inverses; } // const version - static auto& read_counts(const AllEntities& in) { return in.calldata_read_counts; } + static auto& inverses(AllEntities& in) { return in.kernel_calldata_inverses; } + static auto& inverses(const AllEntities& in) { return in.kernel_calldata_inverses; } // const version + static auto& read_counts(const AllEntities& in) { return in.kernel_calldata_read_counts; } }; - // Specialization for secondary_calldata (bus_idx = 1) + // Specialization for first_app_calldata (bus_idx = 1) template struct BusData { - static auto& values(const AllEntities& in) { return in.secondary_calldata; } + static auto& values(const AllEntities& in) { return in.first_app_calldata; } static auto& selector(const AllEntities& in) { return in.q_r; } - static auto& 
inverses(AllEntities& in) { return in.secondary_calldata_inverses; } - static auto& inverses(const AllEntities& in) { return in.secondary_calldata_inverses; } // const version - static auto& read_counts(const AllEntities& in) { return in.secondary_calldata_read_counts; } + static auto& inverses(AllEntities& in) { return in.first_app_calldata_inverses; } + static auto& inverses(const AllEntities& in) { return in.first_app_calldata_inverses; } // const version + static auto& read_counts(const AllEntities& in) { return in.first_app_calldata_read_counts; } }; - // Specialization for return data (bus_idx = 2) + // Specialization for second_app_calldata (bus_idx = 2) template struct BusData { - static auto& values(const AllEntities& in) { return in.return_data; } + static auto& values(const AllEntities& in) { return in.second_app_calldata; } static auto& selector(const AllEntities& in) { return in.q_o; } + static auto& inverses(AllEntities& in) { return in.second_app_calldata_inverses; } + static auto& inverses(const AllEntities& in) { return in.second_app_calldata_inverses; } // const version + static auto& read_counts(const AllEntities& in) { return in.second_app_calldata_read_counts; } + }; + + // Specialization for third_app_calldata (bus_idx = 3) + template struct BusData { + static auto& values(const AllEntities& in) { return in.third_app_calldata; } + static auto& selector(const AllEntities& in) { return in.q_4; } + static auto& inverses(AllEntities& in) { return in.third_app_calldata_inverses; } + static auto& inverses(const AllEntities& in) { return in.third_app_calldata_inverses; } // const version + static auto& read_counts(const AllEntities& in) { return in.third_app_calldata_read_counts; } + }; + + // Specialization for return data (bus_idx = 4) + template struct BusData { + static auto& values(const AllEntities& in) { return in.return_data; } + static auto& selector(const AllEntities& in) { return in.q_m; } static auto& inverses(AllEntities& in) { return 
in.return_data_inverses; } static auto& inverses(const AllEntities& in) { return in.return_data_inverses; } // const version static auto& read_counts(const AllEntities& in) { return in.return_data_read_counts; } @@ -143,7 +161,7 @@ template class DatabusLookupRelationImpl { * @brief Compute scalar for read term in log derivative lookup argument * * @details The selector indicating read from bus column \f$j\f$ is given by - * \f$q_{\text{busread}} \cdot q_j\f$, where \f$j \in \{1, 2, 3\}\f$. + * \f$q_{\text{busread}} \cdot q_j\f$, where \f$j \in \{1, 2, 3, 4, 5\}\f$. * */ template diff --git a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp index 3e560312c52b..cfd5f78443f2 100644 --- a/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp +++ b/barretenberg/cpp/src/barretenberg/relations/databus_lookup_relation_consistency.test.cpp @@ -32,21 +32,33 @@ struct DatabusInputElements { FF q_busread; // Column selectors (determine which bus column is being read) - FF q_l; // calldata selector - FF q_r; // secondary_calldata selector - FF q_o; // return_data selector - - // Calldata (bus_idx = 0) - FF calldata; - FF calldata_read_counts; - FF calldata_inverses; - - // Secondary calldata (bus_idx = 1) - FF secondary_calldata; - FF secondary_calldata_read_counts; - FF secondary_calldata_inverses; - - // Return data (bus_idx = 2) + FF q_l; // kernel calldata selector + FF q_r; // first app calldata selector + FF q_o; // second app calldata selector + FF q_4; // third app calldata selector + FF q_m; // return data selector + + // Kernel calldata (bus_idx = 0) + FF kernel_calldata; + FF kernel_calldata_read_counts; + FF kernel_calldata_inverses; + + // First app calldata (bus_idx = 1) + FF first_app_calldata; + FF first_app_calldata_read_counts; + FF first_app_calldata_inverses; + + // Second app calldata (bus_idx 
= 2) + FF second_app_calldata; + FF second_app_calldata_read_counts; + FF second_app_calldata_inverses; + + // Third app calldata (bus_idx = 3) + FF third_app_calldata; + FF third_app_calldata_read_counts; + FF third_app_calldata_inverses; + + // Return data (bus_idx = 4) FF return_data; FF return_data_read_counts; FF return_data_inverses; @@ -61,40 +73,52 @@ struct DatabusInputElements { result.q_l = FF::random_element(); result.q_r = FF::random_element(); result.q_o = FF::random_element(); - result.calldata = FF::random_element(); - result.calldata_read_counts = FF::random_element(); - result.calldata_inverses = FF::random_element(); - result.secondary_calldata = FF::random_element(); - result.secondary_calldata_read_counts = FF::random_element(); - result.secondary_calldata_inverses = FF::random_element(); + result.q_4 = FF::random_element(); + result.q_m = FF::random_element(); + result.kernel_calldata = FF::random_element(); + result.kernel_calldata_read_counts = FF::random_element(); + result.kernel_calldata_inverses = FF::random_element(); + result.first_app_calldata = FF::random_element(); + result.first_app_calldata_read_counts = FF::random_element(); + result.first_app_calldata_inverses = FF::random_element(); + result.second_app_calldata = FF::random_element(); + result.second_app_calldata_read_counts = FF::random_element(); + result.second_app_calldata_inverses = FF::random_element(); + result.third_app_calldata = FF::random_element(); + result.third_app_calldata_read_counts = FF::random_element(); + result.third_app_calldata_inverses = FF::random_element(); result.return_data = FF::random_element(); result.return_data_read_counts = FF::random_element(); result.return_data_inverses = FF::random_element(); return result; } - // Create inputs representing a valid read gate for calldata + // Create inputs representing a valid read gate for kernel_calldata static DatabusInputElements get_valid_calldata_read() { DatabusInputElements result{}; - // Set up a 
read from calldata at index 5, value 42 - result.w_l = FF(42); // value being read - result.w_r = FF(5); // index - result.databus_id = FF(5); // same index in the bus - result.calldata = FF(42); // value in bus matches + // Set up a read from kernel_calldata at index 5, value 42 + result.w_l = FF(42); // value being read + result.w_r = FF(5); // index + result.databus_id = FF(5); // same index in the bus + result.kernel_calldata = FF(42); // value in bus matches - // Enable read gate for calldata + // Enable read gate for kernel_calldata result.q_busread = FF(1); - result.q_l = FF(1); // calldata selector + result.q_l = FF(1); // kernel_calldata selector result.q_r = FF(0); result.q_o = FF(0); + result.q_4 = FF(0); + result.q_m = FF(0); // Read counts - result.calldata_read_counts = FF(1); + result.kernel_calldata_read_counts = FF(1); // Other columns inactive - result.secondary_calldata_read_counts = FF(0); + result.first_app_calldata_read_counts = FF(0); + result.second_app_calldata_read_counts = FF(0); + result.third_app_calldata_read_counts = FF(0); result.return_data_read_counts = FF(0); return result; @@ -104,7 +128,7 @@ struct DatabusInputElements { class DatabusLookupRelationConsistency : public testing::Test { public: using Relation = DatabusLookupRelationImpl; - static constexpr size_t NUM_SUBRELATIONS = 9; // 3 subrelations per bus column, 3 columns + static constexpr size_t NUM_SUBRELATIONS = Relation::SUBRELATION_PARTIAL_LENGTHS.size(); /** * @brief Validate that the relation's accumulate function produces expected values @@ -122,12 +146,13 @@ class DatabusLookupRelationConsistency : public testing::Test { /** * @brief Helper to compute all expected subrelation values for a given input */ -static std::array compute_expected_values(const DatabusInputElements& in, const RelationParameters& params) +static std::array compute_expected_values( + const DatabusInputElements& in, const RelationParameters& params) { const auto& beta = params.beta; const auto& 
gamma = params.gamma; - std::array expected_values; + std::array expected_values; std::fill(expected_values.begin(), expected_values.end(), FF(0)); // Read term (same for all columns): value + index * beta + gamma @@ -152,15 +177,24 @@ static std::array compute_expected_values(const DatabusInputElements& in, expected_values[bus_idx * 3 + 2] = (is_read * table_term - read_counts * lookup_term) * inverses; }; - // Bus column 0 (calldata) - compute_column_subrelations(0, in.q_l, in.calldata, in.calldata_read_counts, in.calldata_inverses); + // Bus column 0 (kernel_calldata) + compute_column_subrelations( + 0, in.q_l, in.kernel_calldata, in.kernel_calldata_read_counts, in.kernel_calldata_inverses); + + // Bus column 1 (first_app_calldata) + compute_column_subrelations( + 1, in.q_r, in.first_app_calldata, in.first_app_calldata_read_counts, in.first_app_calldata_inverses); + + // Bus column 2 (second_app_calldata) + compute_column_subrelations( + 2, in.q_o, in.second_app_calldata, in.second_app_calldata_read_counts, in.second_app_calldata_inverses); - // Bus column 1 (secondary_calldata) + // Bus column 3 (third_app_calldata) compute_column_subrelations( - 1, in.q_r, in.secondary_calldata, in.secondary_calldata_read_counts, in.secondary_calldata_inverses); + 3, in.q_4, in.third_app_calldata, in.third_app_calldata_read_counts, in.third_app_calldata_inverses); - // Bus column 2 (return_data) - compute_column_subrelations(2, in.q_o, in.return_data, in.return_data_read_counts, in.return_data_inverses); + // Bus column 4 (return_data) + compute_column_subrelations(4, in.q_m, in.return_data, in.return_data_read_counts, in.return_data_inverses); return expected_values; } @@ -197,16 +231,28 @@ TEST_F(DatabusLookupRelationConsistency, InactiveGates) in.q_l = FF(0); in.q_r = FF(0); in.q_o = FF(0); - in.calldata_read_counts = FF(0); - in.secondary_calldata_read_counts = FF(0); + in.q_4 = FF(0); + in.q_m = FF(0); + in.kernel_calldata_read_counts = FF(0); + 
in.first_app_calldata_read_counts = FF(0); + in.second_app_calldata_read_counts = FF(0); + in.third_app_calldata_read_counts = FF(0); in.return_data_read_counts = FF(0); // Set other values non-zero to ensure they don't affect inactive gates in.w_l = FF(42); in.w_r = FF(5); in.databus_id = FF(5); - in.calldata = FF(42); - in.calldata_inverses = FF(0); // inverse should be 0 when inactive + in.kernel_calldata = FF(42); + in.kernel_calldata_inverses = FF(0); // inverse should be 0 when inactive + in.first_app_calldata = FF(100); + in.first_app_calldata_inverses = FF(0); + in.second_app_calldata = FF(200); + in.second_app_calldata_inverses = FF(0); + in.third_app_calldata = FF(300); + in.third_app_calldata_inverses = FF(0); + in.return_data = FF(400); + in.return_data_inverses = FF(0); std::array accumulator{}; Relation::accumulate(accumulator, in, parameters, FF(1)); @@ -229,31 +275,35 @@ TEST_F(DatabusLookupRelationConsistency, ValidInverseComputation) DatabusInputElements in{}; - // Set up a read gate for calldata + // Set up a read gate for kernel_calldata in.q_busread = FF(1); - in.q_l = FF(1); // calldata selector + in.q_l = FF(1); // kernel_calldata selector in.q_r = FF(0); in.q_o = FF(0); // Value and index FF value = FF(42); FF index = FF(5); - in.w_l = value; // value being read - in.w_r = index; // index - in.databus_id = index; // same index in the bus - in.calldata = value; // value in bus matches + in.w_l = value; // value being read + in.w_r = index; // index + in.databus_id = index; // same index in the bus + in.kernel_calldata = value; // value in bus matches // Compute the correct inverse auto lookup_term = value + index * beta + gamma; auto table_term = value + index * beta + gamma; // same since value and index match auto inverse = (lookup_term * table_term).invert(); - in.calldata_inverses = inverse; + in.kernel_calldata_inverses = inverse; - in.calldata_read_counts = FF(1); + in.kernel_calldata_read_counts = FF(1); // Other columns inactive - 
in.secondary_calldata_read_counts = FF(0); - in.secondary_calldata_inverses = FF(0); + in.first_app_calldata_read_counts = FF(0); + in.first_app_calldata_inverses = FF(0); + in.second_app_calldata_read_counts = FF(0); + in.second_app_calldata_inverses = FF(0); + in.third_app_calldata_read_counts = FF(0); + in.third_app_calldata_inverses = FF(0); in.return_data_read_counts = FF(0); in.return_data_inverses = FF(0); @@ -286,7 +336,7 @@ TEST_F(DatabusLookupRelationConsistency, MismatchedReadWriteTerms) DatabusInputElements in{}; - // Set up a read gate for calldata + // Set up a read gate for kernel_calldata in.q_busread = FF(1); in.q_l = FF(1); in.q_r = FF(0); @@ -300,16 +350,20 @@ TEST_F(DatabusLookupRelationConsistency, MismatchedReadWriteTerms) in.w_l = read_value; in.w_r = index; in.databus_id = index; - in.calldata = bus_value; + in.kernel_calldata = bus_value; auto lookup_term = read_value + index * beta + gamma; auto table_term = bus_value + index * beta + gamma; auto inverse = (lookup_term * table_term).invert(); - in.calldata_inverses = inverse; - - in.calldata_read_counts = FF(1); - in.secondary_calldata_read_counts = FF(0); - in.secondary_calldata_inverses = FF(0); + in.kernel_calldata_inverses = inverse; + + in.kernel_calldata_read_counts = FF(1); + in.first_app_calldata_read_counts = FF(0); + in.first_app_calldata_inverses = FF(0); + in.second_app_calldata_read_counts = FF(0); + in.second_app_calldata_inverses = FF(0); + in.third_app_calldata_read_counts = FF(0); + in.third_app_calldata_inverses = FF(0); in.return_data_read_counts = FF(0); in.return_data_inverses = FF(0); @@ -343,19 +397,23 @@ TEST_F(DatabusLookupRelationConsistency, InverseUnconstrainedAtInactiveRows) in.q_l = FF(0); in.q_r = FF(0); in.q_o = FF(0); - in.calldata_read_counts = FF(0); - in.secondary_calldata_read_counts = FF(0); + in.kernel_calldata_read_counts = FF(0); + in.first_app_calldata_read_counts = FF(0); + in.second_app_calldata_read_counts = FF(0); + 
in.third_app_calldata_read_counts = FF(0); in.return_data_read_counts = FF(0); // Set inverses to arbitrary nonzero values — should not matter - in.calldata_inverses = FF(999); - in.secondary_calldata_inverses = FF(777); - in.return_data_inverses = FF(555); + in.kernel_calldata_inverses = FF(999); + in.first_app_calldata_inverses = FF(777); + in.second_app_calldata_inverses = FF(555); + in.third_app_calldata_inverses = FF(333); + in.return_data_inverses = FF(111); in.w_l = FF(42); in.w_r = FF(5); in.databus_id = FF(5); - in.calldata = FF(42); + in.kernel_calldata = FF(42); std::array accumulator{}; Relation::accumulate(accumulator, in, parameters, FF(1)); @@ -397,13 +455,17 @@ TEST_F(DatabusLookupRelationConsistency, WrongInverseOnReadRowFails) in.w_l = value; in.w_r = index; in.databus_id = index; - in.calldata = value; + in.kernel_calldata = value; // Set a WRONG inverse (just some arbitrary value, not 1/(L*T)) - in.calldata_inverses = FF(777); - in.calldata_read_counts = FF(0); // pure read row, no write - in.secondary_calldata_read_counts = FF(0); - in.secondary_calldata_inverses = FF(0); + in.kernel_calldata_inverses = FF(777); + in.kernel_calldata_read_counts = FF(0); // pure read row, no write + in.first_app_calldata_read_counts = FF(0); + in.first_app_calldata_inverses = FF(0); + in.second_app_calldata_read_counts = FF(0); + in.second_app_calldata_inverses = FF(0); + in.third_app_calldata_read_counts = FF(0); + in.third_app_calldata_inverses = FF(0); in.return_data_read_counts = FF(0); in.return_data_inverses = FF(0); @@ -442,16 +504,20 @@ TEST_F(DatabusLookupRelationConsistency, WrongInverseOnWriteRowFails) FF value = FF(42); FF index = FF(5); in.databus_id = index; - in.calldata = value; + in.kernel_calldata = value; in.w_l = FF(0); // irrelevant (no read gate) in.w_r = FF(0); // Row has nonzero read_count (it's been read from elsewhere) but wrong inverse - in.calldata_read_counts = FF(3); - in.calldata_inverses = FF(999); // WRONG - - 
in.secondary_calldata_read_counts = FF(0); - in.secondary_calldata_inverses = FF(0); + in.kernel_calldata_read_counts = FF(3); + in.kernel_calldata_inverses = FF(999); // WRONG + + in.first_app_calldata_read_counts = FF(0); + in.first_app_calldata_inverses = FF(0); + in.second_app_calldata_read_counts = FF(0); + in.second_app_calldata_inverses = FF(0); + in.third_app_calldata_read_counts = FF(0); + in.third_app_calldata_inverses = FF(0); in.return_data_read_counts = FF(0); in.return_data_inverses = FF(0); @@ -490,7 +556,7 @@ TEST_F(DatabusLookupRelationConsistency, CorrectInverseOnWriteRow) FF value = FF(42); FF index = FF(5); in.databus_id = index; - in.calldata = value; + in.kernel_calldata = value; in.w_l = FF(0); in.w_r = FF(0); @@ -498,11 +564,15 @@ TEST_F(DatabusLookupRelationConsistency, CorrectInverseOnWriteRow) auto table_term = value + index * beta + gamma; // Correct inverse - in.calldata_inverses = (lookup_term * table_term).invert(); - in.calldata_read_counts = FF(3); - - in.secondary_calldata_read_counts = FF(0); - in.secondary_calldata_inverses = FF(0); + in.kernel_calldata_inverses = (lookup_term * table_term).invert(); + in.kernel_calldata_read_counts = FF(3); + + in.first_app_calldata_read_counts = FF(0); + in.first_app_calldata_inverses = FF(0); + in.second_app_calldata_read_counts = FF(0); + in.second_app_calldata_inverses = FF(0); + in.third_app_calldata_read_counts = FF(0); + in.third_app_calldata_inverses = FF(0); in.return_data_read_counts = FF(0); in.return_data_inverses = FF(0); diff --git a/barretenberg/cpp/src/barretenberg/relations/poseidon2_initial_external_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/poseidon2_initial_external_relation.hpp new file mode 100644 index 000000000000..3bcc759c2251 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/poseidon2_initial_external_relation.hpp @@ -0,0 +1,86 @@ +#pragma once +#include "barretenberg/relations/relation_types.hpp" + +namespace bb { + +/** + * @brief 
Initial-linear-layer relation for Poseidon2 (Mega). + * + * @details Poseidon2 begins with a linear-only application of the external matrix. Given input + * \f$ \mathbf{x} = (x_0, x_1, x_2, x_3) \f$, this relation enforces + * \f$ \mathbf{y} = M_E \cdot \mathbf{x} \f$. + * + * The row's wires hold the raw input; the next row's wires hold M_E · input. That next row is + * the first external-round row, which consumes M_E · input as its starting state. + * + * Subrelations (each × q_poseidon2_external_initial × gate separator, partial degree 3): + * A_0: 5 w_l + 7 w_r + w_o + 3 w_4 = w_l_shift + * A_1: 4 w_l + 6 w_r + w_o + w_4 = w_r_shift + * A_2: w_l + 3 w_r + 5 w_o + 7 w_4 = w_o_shift + * A_3: w_l + w_r + 4 w_o + 6 w_4 = w_4_shift + */ +template class Poseidon2InitialExternalRelationImpl { + public: + using FF = FF_; + + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 3, // A_0 + 3, // A_1 + 3, // A_2 + 3, // A_3 + }; + + template inline static bool skip(const AllEntities& in) + { + return in.q_poseidon2_external_initial.is_zero(); + } + + template + void static accumulate(ContainerOverSubrelations& evals, + const AllEntities& in, + const Parameters&, + const FF& scaling_factor) + { + using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + using CoeffAcc = typename Accumulator::CoefficientAccumulator; + + const auto x_0 = CoeffAcc(in.w_l); + const auto x_1 = CoeffAcc(in.w_r); + const auto x_2 = CoeffAcc(in.w_o); + const auto x_3 = CoeffAcc(in.w_4); + const auto y_0 = CoeffAcc(in.w_l_shift); + const auto y_1 = CoeffAcc(in.w_r_shift); + const auto y_2 = CoeffAcc(in.w_o_shift); + const auto y_3 = CoeffAcc(in.w_4_shift); + + const auto q_sel = CoeffAcc(in.q_poseidon2_external_initial); + const auto q_by_scaling = Accumulator(q_sel * scaling_factor); + + // Shared partial sums for M_E: + // y0 = 5x0 + 7x1 + x2 + 3x3 = (4x0 + 6x1 + x2 + x3) + (x0 + x1 + 2x3) + // y1 = 4x0 + 6x1 + x2 + x3 = (2x0 + 2x1) + (2x0 + 2x1) + (2x1 + x2 + x3) + // y2 = 
x0 + 3x1 + 5x2 + 7x3 = (2x1 + x2 + x3) + (x0 + x1 + 4x2 + 6x3) + // y3 = x0 + x1 + 4x2 + 6x3 + auto t0 = x_0 + x_1; // x0 + x1 + auto t1 = x_2 + x_3; // x2 + x3 + auto t2 = x_1 + x_1 + t1; // 2x1 + x2 + x3 + auto t3 = x_3 + x_3 + t0; // x0 + x1 + 2x3 + + auto y3_calc = t1 + t1; + y3_calc = y3_calc + y3_calc + t3; // 4x2 + 4x3 + (x0 + x1 + 2x3) = x0 + x1 + 4x2 + 6x3 + auto y1_calc = t0 + t0; + y1_calc = y1_calc + y1_calc + t2; // 4x0 + 4x1 + (2x1 + x2 + x3) = 4x0 + 6x1 + x2 + x3 + auto y0_calc = t3 + y1_calc; // (x0 + x1 + 2x3) + (4x0 + 6x1 + x2 + x3) = 5x0 + 7x1 + x2 + 3x3 + auto y2_calc = t2 + y3_calc; // (2x1 + x2 + x3) + (x0 + x1 + 4x2 + 6x3) = x0 + 3x1 + 5x2 + 7x3 + + // Each subrelation: q_sel · (y_k_calc - y_k) = 0. + std::get<0>(evals) += q_by_scaling * Accumulator(y0_calc - y_0); + std::get<1>(evals) += q_by_scaling * Accumulator(y1_calc - y_1); + std::get<2>(evals) += q_by_scaling * Accumulator(y2_calc - y_2); + std::get<3>(evals) += q_by_scaling * Accumulator(y3_calc - y_3); + } +}; + +template using Poseidon2InitialExternalRelation = Relation>; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/poseidon2_quad_internal_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/poseidon2_quad_internal_relation.hpp new file mode 100644 index 000000000000..c3521d8ccba8 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/poseidon2_quad_internal_relation.hpp @@ -0,0 +1,193 @@ +#pragma once +#include "barretenberg/crypto/poseidon2/poseidon2_params.hpp" +#include "barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp" +#include "relation_types.hpp" + +namespace bb { + +/** + * @brief K=4 compressed internal-round relation for Poseidon2. + * + * @details Each active row stores state[0] at four consecutive internal rounds: + * w_l = s_0^{(0)}, w_r = s_0^{(1)}, w_o = s_0^{(2)}, w_4 = s_0^{(3)} + * and uses q_l, q_r, q_o, q_4 as the four current-round constants. 
For a non-terminal row, q_m, + * q_c, q_5 contain the next quad's first three constants. + * + * `Poseidon2QuadBn254Params` provides closed-form coefficients for the state after four rounds: + * (out_0, out_1, out_2, out_3). + * This relation connects that output to the next compressed row: + * A_0: out_0 = w_l_shift (direct) + * A_1: out_1 + out_2 + out_3 = b_1_next + * A_2: D_2 out_1 + D_3 out_2 + D_4 out_3 = b_2_next + * A_3: D_2^2 out_1 + D_3^2 out_2 + D_4^2 out_3 = b_3_next + * where b_k_next are the Vandermonde right-hand sides reconstructed from the shifted row. + * + * High-level picture. The relation never recovers any hidden-lane vector — at runtime there is + * no matrix inversion and no committed s_1, s_2, s_3 anywhere. Instead, both sides of the + * cross-row hidden-lane equation are computed as linear combinations of committed wires, and + * only those linear combinations are compared. The trick is: + * + * 1. Predicted output (from current row): the full state vector at internal round 4(i+1) — + * i.e. one round past the four rounds covered by this quad — is denoted + * (out_0, out_1, out_2, out_3). + * Each component is a fixed linear combination of the current row's committed lane-0 chain + * and S-box outputs (u_0..u_3), precomputed as the closed-form matrix C in + * `Poseidon2QuadBn254Params::tables.closed_form`. Then apply V to the predicted hidden + * lanes (out_1, out_2, out_3) — that's also a fixed linear combination of the same wires, + * precomputed as `forward_vandermonde_lhs`. Call the result LHS_k for k = 1, 2, 3. + * 2. Encoded next-row input (from next row): the next row's start-of-row hidden lanes + * (s_1', s_2', s_3') are NOT committed. 
But Theorem (1) of QUAD_THEOREM.md says + * V · (s_1', s_2', s_3')^T = (b_1', b_2', b_3') + * where the b'-formulas express b_k' as an explicit linear combination of the next row's + * committed lane-0 chain and S-box outputs (using the next quad's first three round + * constants, carried on this row in q_m, q_c, q_5 because Mega lacks shifted selectors). + * So V · (next row's hidden input) is computable without ever committing the hidden input + * — call this RHS_k. + * 3. Set them equal: + * - lane 0: out_0 = w_l_shift directly (subrelation A_0). + * - lanes 1..3: LHS_k = RHS_k for k = 1, 2, 3 (subrelations A_1, A_2, A_3). + * Both sides are polynomials in committed wires; the verifier evaluates them and checks + * equality. No hidden lanes are ever materialized; no V^{-1} is ever applied at runtime. + * + * Why equality of encodings suffices. We're really enforcing + * V · (out_1, out_2, out_3)^T = V · (s_1', s_2', s_3')^T. + * Because V is invertible (D_2, D_3, D_4 pairwise distinct, statically asserted in + * `poseidon2_quad_params.hpp`), this is mathematically equivalent to the desired + * (out_1, out_2, out_3) = (s_1', s_2', s_3'). + * + * Degree: each subrelation has degree 5 in any single sumcheck variable (all S-boxes land on + * distinct wires). Plus selector + gate separator = 7. + */ +template class Poseidon2QuadInternalRelationImpl { + public: + using FF = FF_; + using QuadParams = crypto::Poseidon2QuadBn254Params; + + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 7, // A_0: out_0 - w_l_shift + 7, // A_1: forward Vandermonde row 1 + 7, // A_2: forward Vandermonde row 2 + 7, // A_3: forward Vandermonde row 3 + }; + + // Constants used by the shifted-row Vandermonde RHS reconstruction. 
+ static constexpr fr D1 = QuadParams::D1; + static constexpr fr SIGMA_PLUS_2 = QuadParams::SIGMA + fr(2); // Σ + 2 + static constexpr fr B3_U0_COEF = SIGMA_PLUS_2 * D1 - QuadParams::SIGMA - fr(3); // (Σ+2) D_1 - Σ - 3 + static constexpr fr D1_MINUS_3 = D1 - fr(3); // D_1 - 3 + + /** + * @brief Skip when the selector is identically zero on this row. + */ + template inline static bool skip(const AllEntities& in) + { + return in.q_poseidon2_quad_internal.is_zero(); + } + + template + void static accumulate(ContainerOverSubrelations& evals, + const AllEntities& in, + const Parameters&, + const FF& scaling_factor) + { + using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + using CoeffAcc = typename Accumulator::CoefficientAccumulator; + + // Wire values: current row's committed lane-0 chain (state[0] at the four rounds covered + // by this quad). + const auto w_l = CoeffAcc(in.w_l); // s_0^(0): row's lane-0 at round 4i + const auto w_r = CoeffAcc(in.w_r); // s_0^(1): row's lane-0 at round 4i+1 + const auto w_o = CoeffAcc(in.w_o); // s_0^(2): row's lane-0 at round 4i+2 + const auto w_4 = CoeffAcc(in.w_4); // s_0^(3): row's lane-0 at round 4i+3 + + // Next row's committed lane-0 chain. + const auto w_l_shift = CoeffAcc(in.w_l_shift); // next row's s_0^(0) (= state[0] at round 4(i+1)) + const auto w_r_shift = CoeffAcc(in.w_r_shift); // next row's s_0^(1) + const auto w_o_shift = CoeffAcc(in.w_o_shift); // next row's s_0^(2) + const auto w_4_shift = CoeffAcc(in.w_4_shift); // next row's s_0^(3) + + // Round constants (current row), used to compute u_0..u_3. + const auto q_l = CoeffAcc(in.q_l); // c_{4i} + const auto q_r = CoeffAcc(in.q_r); // c_{4i+1} + const auto q_o = CoeffAcc(in.q_o); // c_{4i+2} + const auto q_4 = CoeffAcc(in.q_4); // c_{4i+3} + // Next quad's first three round constants. Needed to compute u_0', u_1', u_2', which + // appear in the b'-formulas (RHS of the cross-row encoding equation). 
Carried on the + // current row in q_m, q_c, q_5 because Mega has no shifted selectors. + const auto q_m = CoeffAcc(in.q_m); // c_{4(i+1)} + const auto q_c = CoeffAcc(in.q_c); // c_{4(i+1)+1} + const auto q_5 = CoeffAcc(in.q_5); // c_{4(i+1)+2} + + const auto q_sel = CoeffAcc(in.q_poseidon2_quad_internal); + + // Helper: compute fifth power as Accumulator (degree 5 in the input wire). + auto pow5 = [](const Accumulator& x) -> Accumulator { + auto sq = x.sqr(); + auto quart = sq.sqr(); + return quart * x; + }; + + // ── Current row: u_k = (s_0^{(k)} + c_k)^5. The four S-box outputs feed the closed-form + // prediction of the row-end state. ── + auto u_0 = pow5(Accumulator(w_l + q_l)); // u_0 = (s_0^(0) + c_{4i})^5 + auto u_1 = pow5(Accumulator(w_r + q_r)); // u_1 = (s_0^(1) + c_{4i+1})^5 + auto u_2 = pow5(Accumulator(w_o + q_o)); // u_2 = (s_0^(2) + c_{4i+2})^5 + auto u_3 = pow5(Accumulator(w_4 + q_4)); // u_3 = (s_0^(3) + c_{4i+3})^5 + + // ── Next row's S-box outputs. Together with the next row's lane-0 wires they let us + // forward-compute V · (next row's hidden lanes) via the b'-formulas, without ever + // materializing the hidden lanes themselves. Only three (not four) because the + // b'-formulas only depend on u_0', u_1', u_2'. ── + auto u_0_next = pow5(Accumulator(w_l_shift + q_m)); // u_0' = (next_s_0^(0) + c_{4(i+1)})^5 + auto u_1_next = pow5(Accumulator(w_r_shift + q_c)); // u_1' = (next_s_0^(1) + c_{4(i+1)+1})^5 + auto u_2_next = pow5(Accumulator(w_o_shift + q_5)); // u_2' = (next_s_0^(2) + c_{4(i+1)+2})^5 + auto u_0_next_D1 = u_0_next * D1; // CSE: D_1 · u_0' is reused in A_1 and A_2 + + // Precomputed coefficient vectors. Each is indexed [W_R, W_O, W_4, U_0, U_1, U_2, U_3]. 
+ const auto& cf0 = QuadParams::tables.closed_form[0]; // out_0 (row-end lane 0) + const auto& l0 = QuadParams::tables.forward_vandermonde_lhs[0]; // out_1 + out_2 + out_3 + const auto& l1 = QuadParams::tables.forward_vandermonde_lhs[1]; // D_2 out_1 + D_3 out_2 + D_4 out_3 + const auto& l2 = QuadParams::tables.forward_vandermonde_lhs[2]; // D_2² out_1 + D_3² out_2 + D_4² out_3 + + // Wire-only parts of the four subrelations. Each subrelation A_K has the form + // A_K = (LHS_K computed from current row) - (RHS_K computed from next row). + // wpK_full collects all the wire-only terms (no u_*) from BOTH sides at once; the u_* + // terms are folded in below in aK_body. + // A_0: LHS = out_0, RHS = w_l_shift. + // A_1: LHS = out_1 + out_2 + out_3, RHS = b_1' = w_r' - D_1 u_0'. + // A_2: LHS = D_2 out_1 + D_3 out_2 + D_4 out_3, RHS = b_2' = w_o' - 2 w_r' + (2 D_1 - 3) u_0' - D_1 u_1'. + // A_3: LHS = D_2² out_1 + D_3² out_2 + D_4² out_3, RHS = b_3' = w_4' - w_o' - (Σ+2) w_r' + ... + // The LHS wire contributions come from cf0 / l0 / l1 / l2 (forward V applied to predicted + // output); the RHS wire contributions are the shifted lane-0 wires multiplied by the + // coefficients with which they appear in the b'-formulas above. + auto wp0_full = w_r * cf0[0] + w_o * cf0[1] + w_4 * cf0[2] - w_l_shift; + auto wp1_full = w_r * l0[0] + w_o * l0[1] + w_4 * l0[2] - w_r_shift; + auto wp2_full = w_r * l1[0] + w_o * l1[1] + w_4 * l1[2] - w_o_shift + w_r_shift + w_r_shift; + auto wp3_full = w_r * l2[0] + w_o * l2[1] + w_4 * l2[2] - w_4_shift + w_o_shift + w_r_shift * SIGMA_PLUS_2; + + const auto q_times_scaling_m = q_sel * scaling_factor; + const auto q_times_scaling = Accumulator(q_times_scaling_m); + + // A_0: out_0 - w_l_shift = 0. + auto a0_body = u_0 * cf0[3] + u_1 * cf0[4] + u_2 * cf0[5] + u_3 * cf0[6] + Accumulator(wp0_full); + std::get<0>(evals) += q_times_scaling * a0_body; + + // A_1: (out_1 + out_2 + out_3) - b_1_next = 0. 
+ auto a1_body = u_0 * l0[3] + u_1 * l0[4] + u_2 * l0[5] + u_3 * l0[6] + u_0_next_D1 + Accumulator(wp1_full); + std::get<1>(evals) += q_times_scaling * a1_body; + + // A_2: (D_2 out_1 + D_3 out_2 + D_4 out_3) - b_2_next = 0. + auto a2_body = u_0 * l1[3] + u_1 * l1[4] + u_2 * l1[5] + u_3 * l1[6] - (u_0_next_D1 + u_0_next_D1) + + (u_0_next + u_0_next + u_0_next) + u_1_next * D1 + Accumulator(wp2_full); + std::get<2>(evals) += q_times_scaling * a2_body; + + // A_3: (D_2^2 out_1 + D_3^2 out_2 + D_4^2 out_3) - b_3_next = 0. + auto a3_body = u_0 * l2[3] + u_1 * l2[4] + u_2 * l2[5] + u_3 * l2[6] - u_0_next * B3_U0_COEF - + u_1_next * D1_MINUS_3 + u_2_next * D1 + Accumulator(wp3_full); + std::get<3>(evals) += q_times_scaling * a3_body; + } +}; + +template using Poseidon2QuadInternalRelation = Relation>; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/poseidon2_quad_internal_terminal_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/poseidon2_quad_internal_terminal_relation.hpp new file mode 100644 index 000000000000..dc3330182994 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/poseidon2_quad_internal_terminal_relation.hpp @@ -0,0 +1,99 @@ +#pragma once +#include "barretenberg/crypto/poseidon2/poseidon2_params.hpp" +#include "barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp" +#include "relation_types.hpp" + +namespace bb { + +/** + * @brief Terminal variant of the K=4 compressed internal-round relation. + * + * @details Same four-round closed-form computation as `Poseidon2QuadInternalRelationImpl`, but + * the successor is the standard-encoded bridge row rather than another compressed row. The four + * subrelations directly match (out_0, out_1, out_2, out_3) against + * (w_l_shift, w_r_shift, w_o_shift, w_4_shift). + * + * This ties the compressed chain's output state (state[0..3] after 56 internal rounds) to + * witnesses that the first final-external gate consumes via shared witness indices. 
+ * + * Selector layout on the terminal row: + * q_l = c_{4i}, q_r = c_{4i+1}, q_o = c_{4i+2}, q_4 = c_{4i+3} // this final quad + * q_m, q_c, q_5 = 0 (unused — no next quad) + */ +template class Poseidon2QuadInternalTerminalRelationImpl { + public: + using FF = FF_; + using QuadParams = crypto::Poseidon2QuadBn254Params; + + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ 7, 7, 7, 7 }; + + template inline static bool skip(const AllEntities& in) + { + return in.q_poseidon2_quad_internal_terminal.is_zero(); + } + + template + void static accumulate(ContainerOverSubrelations& evals, + const AllEntities& in, + const Parameters& /*params*/, + const FF& scaling_factor) + { + using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + using CoeffAcc = typename Accumulator::CoefficientAccumulator; + + const auto w_l = CoeffAcc(in.w_l); + const auto w_r = CoeffAcc(in.w_r); + const auto w_o = CoeffAcc(in.w_o); + const auto w_4 = CoeffAcc(in.w_4); + + const auto w_l_shift = CoeffAcc(in.w_l_shift); + const auto w_r_shift = CoeffAcc(in.w_r_shift); + const auto w_o_shift = CoeffAcc(in.w_o_shift); + const auto w_4_shift = CoeffAcc(in.w_4_shift); + + const auto q_l = CoeffAcc(in.q_l); + const auto q_r = CoeffAcc(in.q_r); + const auto q_o = CoeffAcc(in.q_o); + const auto q_4 = CoeffAcc(in.q_4); + + const auto q_sel = CoeffAcc(in.q_poseidon2_quad_internal_terminal); + + auto pow5 = [](const Accumulator& x) -> Accumulator { + auto sq = x.sqr(); + auto quart = sq.sqr(); + return quart * x; + }; + + // S-boxes for the four rounds. + auto u_0 = pow5(Accumulator(w_l + q_l)); + auto u_1 = pow5(Accumulator(w_r + q_r)); + auto u_2 = pow5(Accumulator(w_o + q_o)); + auto u_3 = pow5(Accumulator(w_4 + q_4)); + + // Closed-form output rows, with shifted bridge-row terms folded into the wire part. 
+ const auto& C = QuadParams::tables.closed_form; + auto wp_0 = w_r * C[0][0] + w_o * C[0][1] + w_4 * C[0][2] - w_l_shift; + auto wp_1 = w_r * C[1][0] + w_o * C[1][1] + w_4 * C[1][2] - w_r_shift; + auto wp_2 = w_r * C[2][0] + w_o * C[2][1] + w_4 * C[2][2] - w_o_shift; + auto wp_3 = w_r * C[3][0] + w_o * C[3][1] + w_4 * C[3][2] - w_4_shift; + + const auto q_by_scaling_m = q_sel * scaling_factor; + const auto q_by_scaling = Accumulator(q_by_scaling_m); + + // Subrelation bodies: out_k - w_*_shift = 0. + auto a0_body = u_0 * C[0][3] + u_1 * C[0][4] + u_2 * C[0][5] + u_3 * C[0][6] + Accumulator(wp_0); + auto a1_body = u_0 * C[1][3] + u_1 * C[1][4] + u_2 * C[1][5] + u_3 + Accumulator(wp_1); + auto a2_body = u_0 * C[2][3] + u_1 * C[2][4] + u_2 * C[2][5] + u_3 + Accumulator(wp_2); + auto a3_body = u_0 * C[3][3] + u_1 * C[3][4] + u_2 * C[3][5] + u_3 + Accumulator(wp_3); + + std::get<0>(evals) += q_by_scaling * a0_body; + std::get<1>(evals) += q_by_scaling * a1_body; + std::get<2>(evals) += q_by_scaling * a2_body; + std::get<3>(evals) += q_by_scaling * a3_body; + } +}; + +template +using Poseidon2QuadInternalTerminalRelation = Relation>; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/relations/poseidon2_transition_entry_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/poseidon2_transition_entry_relation.hpp new file mode 100644 index 000000000000..2f6568819fc2 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/poseidon2_transition_entry_relation.hpp @@ -0,0 +1,134 @@ +#pragma once +#include "barretenberg/crypto/poseidon2/poseidon2_params.hpp" +#include "barretenberg/crypto/poseidon2/poseidon2_quad_params.hpp" +#include "relation_types.hpp" + +namespace bb { + +/** + * @brief Entry transition relation for the K=4 compressed Poseidon2 internal block. + * + * @details The entry row holds the external-round output in standard encoding: + * (w_l, w_r, w_o, w_4) = (s_0, s_1, s_2, s_3) at the internal-block entry. 
+ * Its wires share witness indices with the `poseidon2_external` propagate row, so they + * are copy-constrained to the true external output by the permutation relation. + * + * The successor is the first compressed row, whose wires encode state[0] at 4 consecutive rounds: + * (w_l_shift, w_r_shift, w_o_shift, w_4_shift) + * = (state[0] at round 0, round 1, round 2, round 3). + * + * w_l_shift = state[0] at round 0 = s_0 is enforced by the permutation relation because it shares + * the entry row's w_l witness. The three subrelations below cover rounds 1, 2, 3. + * + * Each subrelation computes state[0] at the target round using the previous shifted wire, keeping + * the per-variable degree at 5. + * + * Subrelations (each × q_poseidon2_transition_entry × gate separator, degree 7): + * + * A_0 (round 1): + * w_r_shift = D_1 u_0 + w_r + w_o + w_4, u_0 = (w_l + q_l)^5 + * + * A_1 (round 2): + * w_o_shift = D_1 u_1 + 3 u_0 + (D_2+2) w_r + (D_3+2) w_o + (D_4+2) w_4 + * where u_1 = (w_r_shift + q_r)^5. + * + * A_2 (round 3): + * w_4_shift = D_1 u_2 + 3 u_1 + (Σ + 6) u_0 + * + (D_2^2 + D_2 + Σ + 4) w_r + * + (D_3^2 + D_3 + Σ + 4) w_o + * + (D_4^2 + D_4 + Σ + 4) w_4 + * where u_2 = (w_o_shift + q_o)^5. 
+ * + * Selector layout on the entry row: + * q_l = c_{rounds_f_begin + 0} // 1st internal round constant + * q_r = c_{rounds_f_begin + 1} + * q_o = c_{rounds_f_begin + 2} + * q_4, q_m, q_c, q_5 = 0 (unused) + */ +template class Poseidon2TransitionEntryRelationImpl { + public: + using FF = FF_; + using QuadParams = crypto::Poseidon2QuadBn254Params; + + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 7, // A_0: w_r_shift check (state[0] at round 1) + 7, // A_1: w_o_shift check (state[0] at round 2) + 7, // A_2: w_4_shift check (state[0] at round 3) + }; + + static constexpr fr D1 = QuadParams::D1; + + // Linear round-propagation vectors shared with the closed-form interior relation: + // A_one[j] = D_{j+1} + 2 — wire coefs in A_1 (state[0] at round 2) + // A2_one[j] = D_{j+1}^2 + D_{j+1} + Σ + 4 — wire coefs in A_2 (state[0] at round 3) + // sum_A_one = Σ + 6 — u_0 coefficient in A_2 + static constexpr auto& A_one = QuadParams::A_one; + static constexpr auto& A2_one = QuadParams::A2_one; + static constexpr fr sum_A_one = QuadParams::sum_A_one; + + template inline static bool skip(const AllEntities& in) + { + return in.q_poseidon2_transition_entry.is_zero(); + } + + template + void static accumulate(ContainerOverSubrelations& evals, + const AllEntities& in, + const Parameters& /*params*/, + const FF& scaling_factor) + { + using Accumulator = std::tuple_element_t<0, ContainerOverSubrelations>; + using CoeffAcc = typename Accumulator::CoefficientAccumulator; + + const auto w_l = CoeffAcc(in.w_l); + const auto w_r = CoeffAcc(in.w_r); + const auto w_o = CoeffAcc(in.w_o); + const auto w_4 = CoeffAcc(in.w_4); + + const auto w_r_shift = CoeffAcc(in.w_r_shift); + const auto w_o_shift = CoeffAcc(in.w_o_shift); + const auto w_4_shift = CoeffAcc(in.w_4_shift); + + const auto q_l = CoeffAcc(in.q_l); + const auto q_r = CoeffAcc(in.q_r); + const auto q_o = CoeffAcc(in.q_o); + const auto q_sel = CoeffAcc(in.q_poseidon2_transition_entry); + + auto pow5 = [](const 
Accumulator& x) -> Accumulator { + auto sq = x.sqr(); + auto quart = sq.sqr(); + return quart * x; + }; + + // u_0 = (w_l + q_l)^5 + auto u_0 = pow5(Accumulator(w_l + q_l)); + // u_1 = (w_r_shift + q_r)^5 + auto u_1 = pow5(Accumulator(w_r_shift + q_r)); + // u_2 = (w_o_shift + q_o)^5 + auto u_2 = pow5(Accumulator(w_o_shift + q_o)); + + const auto q_by_scaling_m = q_sel * scaling_factor; + const auto q_by_scaling = Accumulator(q_by_scaling_m); + + // Wire parts of the three subrelations, including shifted-row targets. + auto wp_0 = w_r + w_o + w_4 - w_r_shift; + auto wp_1 = w_r * A_one[0] + w_o * A_one[1] + w_4 * A_one[2] - w_o_shift; + auto wp_2 = w_r * A2_one[0] + w_o * A2_one[1] + w_4 * A2_one[2] - w_4_shift; + + // A_0: D_1 u_0 + (w_r + w_o + w_4) - w_r_shift = 0. + auto a0_body = u_0 * D1 + Accumulator(wp_0); + std::get<0>(evals) += q_by_scaling * a0_body; + + // A_1: D_1 u_1 + 3 u_0 + (A·1)_j-weighted wire combo - w_o_shift = 0. + auto a1_body = u_1 * D1 + u_0 * fr(3) + Accumulator(wp_1); + std::get<1>(evals) += q_by_scaling * a1_body; + + // A_2: D_1 u_2 + 3 u_1 + (Σ+6) u_0 + (A^2·1)_j-weighted wire combo - w_4_shift = 0. + auto a2_body = u_2 * D1 + u_1 * fr(3) + u_0 * sum_A_one + Accumulator(wp_2); + std::get<2>(evals) += q_by_scaling * a2_body; + } +}; + +template using Poseidon2TransitionEntryRelation = Relation>; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp index dcef92ac7df3..1766ac1465c0 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/bn254_crs_data.hpp @@ -30,6 +30,34 @@ inline g1::affine_element get_bn254_g1_second_element() return from_buffer(g1_second_data); } +/** + * @brief Raw 128-byte serialization of the BN254 G2 trusted-setup point [x]_2. 
+ * @details Identical to the contents of `bn254_g2.dat` distributed at + * https://crs.aztec-cdn.foundation/g2.dat. Exposed as a public constant so callers can + * SHA-256-pin the exact CDN bytes (see `BN254_G2_ELEMENT_SHA256` below). + */ +inline constexpr std::array BN254_G2_ELEMENT_BYTES = { + 0x01, 0x18, 0xc4, 0xd5, 0xb8, 0x37, 0xbc, 0xc2, 0xbc, 0x89, 0xb5, 0xb3, 0x98, 0xb5, 0x97, 0x4e, 0x9f, 0x59, 0x44, + 0x07, 0x3b, 0x32, 0x07, 0x8b, 0x7e, 0x23, 0x1f, 0xec, 0x93, 0x88, 0x83, 0xb0, 0x26, 0x0e, 0x01, 0xb2, 0x51, 0xf6, + 0xf1, 0xc7, 0xe7, 0xff, 0x4e, 0x58, 0x07, 0x91, 0xde, 0xe8, 0xea, 0x51, 0xd8, 0x7a, 0x35, 0x8e, 0x03, 0x8b, 0x4e, + 0xfe, 0x30, 0xfa, 0xc0, 0x93, 0x83, 0xc1, 0x22, 0xfe, 0xbd, 0xa3, 0xc0, 0xc0, 0x63, 0x2a, 0x56, 0x47, 0x5b, 0x42, + 0x14, 0xe5, 0x61, 0x5e, 0x11, 0xe6, 0xdd, 0x3f, 0x96, 0xe6, 0xce, 0xa2, 0x85, 0x4a, 0x87, 0xd4, 0xda, 0xcc, 0x5e, + 0x55, 0x04, 0xfc, 0x63, 0x69, 0xf7, 0x11, 0x0f, 0xe3, 0xd2, 0x51, 0x56, 0xc1, 0xbb, 0x9a, 0x72, 0x85, 0x9c, 0xf2, + 0xa0, 0x46, 0x41, 0xf9, 0x9b, 0xa4, 0xee, 0x41, 0x3c, 0x80, 0xda, 0x6a, 0x5f, 0xe4 +}; + +/** + * @brief SHA-256 hash of `BN254_G2_ELEMENT_BYTES`. + * @details Pinned so any G2 ingress (network download, on-disk cache, bbapi caller) can verify it + * is delivering the canonical Aztec trusted-setup [x]_2. Mirrors the `BN254_G1_CHUNK_HASHES` + * mechanism used for the (much larger) G1 CRS. Update this constant only in lockstep with + * `BN254_G2_ELEMENT_BYTES`; the test `CrsFactory.Bn254G2HashMatchesPinnedBytes` enforces this. + */ +inline constexpr std::array BN254_G2_ELEMENT_SHA256 = { 0x01, 0x79, 0x7b, 0xfc, 0x4d, 0xe5, 0xa9, 0x6f, + 0x0e, 0x51, 0x6a, 0x9e, 0xa4, 0x53, 0x7d, 0x18, + 0x78, 0x6d, 0xc3, 0x0c, 0xb9, 0x91, 0xac, 0xa4, + 0x27, 0x4c, 0x95, 0x82, 0x2b, 0x69, 0xc3, 0x2f }; + /** * @brief Reference BN254 G2 element from the trusted setup CRS * @details This is the single G2 point used in the BN254 CRS for verification. 
@@ -37,18 +65,7 @@ inline g1::affine_element get_bn254_g1_second_element() */ inline g2::affine_element get_bn254_g2_crs_element() { - // Hardcoded G2 element (128 bytes) - see reference URL above - static constexpr uint8_t g2_data[128] = { - 0x01, 0x18, 0xc4, 0xd5, 0xb8, 0x37, 0xbc, 0xc2, 0xbc, 0x89, 0xb5, 0xb3, 0x98, 0xb5, 0x97, 0x4e, - 0x9f, 0x59, 0x44, 0x07, 0x3b, 0x32, 0x07, 0x8b, 0x7e, 0x23, 0x1f, 0xec, 0x93, 0x88, 0x83, 0xb0, - 0x26, 0x0e, 0x01, 0xb2, 0x51, 0xf6, 0xf1, 0xc7, 0xe7, 0xff, 0x4e, 0x58, 0x07, 0x91, 0xde, 0xe8, - 0xea, 0x51, 0xd8, 0x7a, 0x35, 0x8e, 0x03, 0x8b, 0x4e, 0xfe, 0x30, 0xfa, 0xc0, 0x93, 0x83, 0xc1, - 0x22, 0xfe, 0xbd, 0xa3, 0xc0, 0xc0, 0x63, 0x2a, 0x56, 0x47, 0x5b, 0x42, 0x14, 0xe5, 0x61, 0x5e, - 0x11, 0xe6, 0xdd, 0x3f, 0x96, 0xe6, 0xce, 0xa2, 0x85, 0x4a, 0x87, 0xd4, 0xda, 0xcc, 0x5e, 0x55, - 0x04, 0xfc, 0x63, 0x69, 0xf7, 0x11, 0x0f, 0xe3, 0xd2, 0x51, 0x56, 0xc1, 0xbb, 0x9a, 0x72, 0x85, - 0x9c, 0xf2, 0xa0, 0x46, 0x41, 0xf9, 0x9b, 0xa4, 0xee, 0x41, 0x3c, 0x80, 0xda, 0x6a, 0x5f, 0xe4 - }; - return from_buffer(g2_data); + return from_buffer(BN254_G2_ELEMENT_BYTES.data()); } /** diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/crs_factory.test.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/crs_factory.test.cpp index 4d89330f886c..5d1585a6175b 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/crs_factory.test.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/crs_factory.test.cpp @@ -29,9 +29,8 @@ void check_bn254_consistency(const fs::path& crs_download_path, size_t num_point // Use get_bn254_g1_data to load reference points (handles compressed/uncompressed automatically) auto g1_points = bb::get_bn254_g1_data(bb::srs::bb_crs_path(), num_points, /*allow_download=*/false); - // read G2 - auto g2_buf = read_file(bb::srs::bb_crs_path() / "bn254_g2.dat", sizeof(g2::affine_element)); - auto g2_point = from_buffer(g2_buf); + // read and verify G2 (SHA-256-pinned + subgroup-checked) + auto g2_point = 
bb::get_bn254_g2_data(bb::srs::bb_crs_path()); // build in-memory CRS MemBn254CrsFactory mem_crs(g1_points, g2_point); @@ -123,6 +122,64 @@ TEST(CrsFactory, DISABLED_Bn254Fallback) fs::remove_all(temp_crs_path); } +// The hardcoded `[x]_2` baked into the BB native binary must be a member of the BN254 G2 prime-order subgroup. +TEST(CrsFactory, Bn254HardcodedG2IsInPrimeSubgroup) +{ + auto g2_point = bb::srs::get_bn254_g2_crs_element(); + ASSERT_TRUE(g2_point.on_curve()); + EXPECT_TRUE(g2_point.is_in_prime_subgroup()); +} + +// Locks `BN254_G2_ELEMENT_SHA256` to the actual hash of `BN254_G2_ELEMENT_BYTES`. If anyone edits +// the bytes without recomputing the hash (or vice versa), this fails and forces them to fix it. +TEST(CrsFactory, Bn254G2HashMatchesPinnedBytes) +{ + auto hash = bb::crypto::sha256( + std::span(bb::srs::BN254_G2_ELEMENT_BYTES.data(), bb::srs::BN254_G2_ELEMENT_BYTES.size())); + EXPECT_EQ(hash, bb::srs::BN254_G2_ELEMENT_SHA256); +} + +// Round-trip: the on-disk `bn254_g2.dat` provisioned by `barretenberg/crs/bootstrap.sh` must +// match the pinned canonical bytes byte-for-byte and pass subgroup validation. This catches +// corruption, accidental SRS swaps, or an outdated CDN payload. +TEST(CrsFactory, Bn254G2DataLoadsAndVerifies) +{ + auto g2_point = bb::get_bn254_g2_data(bb::srs::bb_crs_path()); + EXPECT_EQ(g2_point, bb::srs::get_bn254_g2_crs_element()); +} + +// A tampered `bn254_g2.dat` (corrupted single byte) must be rejected by the SHA-256 check. 
+TEST(CrsFactory, Bn254G2CorruptionDetected) +{ + const std::filesystem::path temp_path = "barretenberg_srs_test_crs_g2_corruption"; + fs::remove_all(temp_path); + fs::create_directories(temp_path); + + auto corrupted = + std::vector(bb::srs::BN254_G2_ELEMENT_BYTES.begin(), bb::srs::BN254_G2_ELEMENT_BYTES.end()); + corrupted[64] ^= 0xFF; + bb::write_file(temp_path / "bn254_g2.dat", corrupted); + + EXPECT_THROW_OR_ABORT(bb::get_bn254_g2_data(temp_path), "SHA-256 mismatch"); + + fs::remove_all(temp_path); +} + +// Check that a `bn254_g2.dat` containing the point at infinity is rejected, even though it is technically on-curve. +TEST(CrsFactory, Bn254G2InfinityRejected) +{ + const std::filesystem::path temp_path = "barretenberg_srs_test_crs_g2_infinity"; + fs::remove_all(temp_path); + fs::create_directories(temp_path); + + std::vector infinity_bytes(128, 0xFF); + bb::write_file(temp_path / "bn254_g2.dat", infinity_bytes); + + EXPECT_THROW_OR_ABORT(bb::get_bn254_g2_data(temp_path), "point at infinity"); + + fs::remove_all(temp_path); +} + TEST(CrsFactory, Bn254CompressedChunkHashFirstChunk) { // Download the first chunk of compressed CRS from CDN and verify its hash. diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp b/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp index 4b95fcefb2a9..a1e807ccf0e1 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.cpp @@ -69,6 +69,18 @@ size_t round_up_to_chunk_boundary(size_t num_points) * @details Verifies all complete chunks in parallel across available cores with early-exit * on first mismatch. Also verifies the partial last chunk (if present) so every downloaded byte * is covered. Uses std::span to avoid per-chunk memory allocation. 
+ * + * @note Intentionally invoked from a single site — `download_bn254_g1_data` below — and not from + * the cache-hit, MemBn254Crs ctor, or `bbapi::SrsInitSrs` paths. This is the only path that + * (a) fetches CRS bytes over plain HTTP, and (b) does not consume bytes that have already been + * hash-verified upstream: + * - the cache-hit paths (uncompressed and compressed) read bytes that were verified by this + * function on a previous run before being cached; + * - `bbapi::SrsInitSrs` (bb.js / WASM) receives bytes that bb.js itself fetched over HTTPS, + * where TLS provides transport integrity; + * - `MemBn254Crs` is constructed in-process from already-validated points. + * Therefore re-anchoring at every additional call site is redundant; this anchor exists + * specifically to compensate for the plain-HTTP fetch on the fresh-download path. */ void verify_bn254_crs_integrity(const std::vector& data) { @@ -264,4 +276,35 @@ std::vector get_bn254_g1_data(const std::filesystem::path& p return get_bn254_g1_data(path, num_points, allow_download, CRS_PRIMARY_URL, CRS_FALLBACK_URL); } +// Loads the canonical 128-byte serialization of [x]_2 from disk and verifies it against the pinned +// SHA-256 and the BN254 G2 prime-order subgroup. +g2::affine_element get_bn254_g2_data(const std::filesystem::path& path) +{ + constexpr size_t G2_BYTES = 128; + auto g2_path = path / "bn254_g2.dat"; + if (get_file_size(g2_path) != G2_BYTES) { + throw_or_abort("bn254 g2 data not found at " + path.string() + + " or has wrong size. Run barretenberg/crs/bootstrap.sh to provision."); + } + auto data = read_file(g2_path, G2_BYTES); + auto point = from_buffer(data.data()); + + // Reject the point at infinity: it is a member of every subgroup (so subgroup check passes) + // but `e(−W, O) = 1` for every W, which collapses the KZG verifier's pairing check and lets + // a malicious prover forge arbitrary openings. 
+ if (point.is_point_at_infinity()) { + throw_or_abort("bn254 g2 cannot be the point at infinity"); + } + + // Verify SHA-256 hash of the raw bytes matches the pinned constant for canonical [x]_2. + auto hash = bb::crypto::sha256(std::span(data.data(), data.size())); + if (hash != bb::srs::BN254_G2_ELEMENT_SHA256) { + throw_or_abort("bn254 g2 SHA-256 mismatch: payload does not match the canonical [x]_2"); + } + if (!point.is_in_prime_subgroup()) { + throw_or_abort("bn254 g2 deserialized to a point outside the prime-order subgroup"); + } + return point; +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.hpp b/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.hpp index 807c3ef4d60d..38893eaf0e41 100644 --- a/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.hpp +++ b/barretenberg/cpp/src/barretenberg/srs/factories/get_bn254_crs.hpp @@ -17,5 +17,5 @@ std::vector get_bn254_g1_data(const std::filesystem::path& p const std::string& primary_url, const std::string& fallback_url); -g2::affine_element get_bn254_g2_data(const std::filesystem::path& path, bool allow_download = true); +g2::affine_element get_bn254_g2_data(const std::filesystem::path& path); } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/QUAD_THEOREM.md b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/QUAD_THEOREM.md new file mode 100644 index 000000000000..3f530bc36b87 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/QUAD_THEOREM.md @@ -0,0 +1,397 @@ +# The Core Soundness Theorem (Abstract Form) + +This note states, in pure linear-algebra terms with no reference to Poseidon2, +the single mathematical fact that makes the K=4 compressed internal-round +layout sound. 
+ +For the Poseidon2-specific construction this theorem underpins — the trace +layout, the explicit $b_k$ formulas, the closed-form coefficient table $C$, +the entry/interior/terminal subrelations, and the witness-materialization +strategy — see [README.md](README.md), in particular the +[Mega Internal Compression](README.md#mega-internal-compression) and +[Soundness Argument](README.md#soundness-argument) sections. + +## Setup + +Fix a field $\mathbb{F}$. + +Fix four scalars $D_1, D_2, D_3, D_4 \in \mathbb{F}$. The only constraint that +matters is + +$$D_2,\ D_3,\ D_4 \text{ are pairwise distinct.}$$ + +$D_1$ is unconstrained — it plays no role in the bijection, only in the output +formula. + +Fix an arbitrary "S-box" function $\sigma : \mathbb{F} \to \mathbb{F}$. For +Poseidon2 on BN254, $\sigma(x) = x^5$. The theorem does **not** use any +property of $\sigma$ other than that it is a deterministic function. The same +soundness argument therefore works for any choice of $\sigma$. (Somewhat surprisingly, it works for the zero function!) + +Define the $3 \times 3$ matrix $A$ acting on $\mathbb{F}^3$ by + +$$A = \mathrm{diag}(D_2, D_3, D_4) + (J - I),$$ + +where $J$ is the $3 \times 3$ all-ones matrix and $I$ the $3 \times 3$ +identity. Equivalently, for $\vec{v} \in \mathbb{F}^3$ and $i \in \{1, 2, 3\}$, + +$$(A \vec{v})_i = D_i\, v_i + \sum_{j \ne i} v_j.$$ + +Written out: + +$$ +A = \begin{bmatrix} D_2 & 1 & 1 \\ 1 & D_3 & 1 \\ 1 & 1 & D_4 \end{bmatrix}. +$$ + +Let $\vec{1}$ denote the all-ones column vector in $\mathbb{F}^3$. + +## The Dynamical System + +The state space is $\mathbb{F} \times \mathbb{F}^3$. We write a state as +$(s, \vec{v})$ with $s \in \mathbb{F}$ (the *observed lane*) and $\vec{v} \in \mathbb{F}^3$ +(the *hidden lanes*). + +Each step is parametrized by a "round constant" $c \in \mathbb{F}$. 
Given a +current state $(s, \vec{v})$ and round constant $c$, the next state is + +$$\mathrm{next}(s, \vec{v}, c) = \bigl(\, D_1\, \sigma(s + c) + \vec{1}^\top \vec{v},\ \ A \vec{v} + \sigma(s + c) \cdot \vec{1}\, \bigr).$$ + +In words: compute the S-box output $u := \sigma(s + c)$ (which is notably independent of $\vec{v}$). Then the new observed lane is $D_1 u + \vec{1}^\top \vec{v}$, and the new hidden lanes are $A \vec{v}$ shifted by $u$ in every coordinate. + +This is exactly the Poseidon2 internal round acting on +$(\text{state}[0],\ \text{state}[1..3])$ with $\sigma(x) = x^5$ and round +constant $c$. + +## The Four-Step Iteration + +Fix four round constants $c_0, c_1, c_2, c_3 \in \mathbb{F}$. Starting from a +state $(s_0, \vec{v}_0)$, iterate the step four times: + +$$(s_{k+1},\ \vec{v}_{k+1}) = \mathrm{next}(s_k,\ \vec{v}_k,\ c_k), \qquad k = 0, 1, 2, 3.$$ + +Define the four S-box outputs + +$$u_k := \sigma(s_k + c_k), \qquad k = 0, 1, 2, 3.$$ + +The *lane-0 chain* of the iteration is the tuple $(s_0, s_1, s_2, s_3, s_4)$. +The *row commitments* the prover supplies are + +$$(s_0,\ s_1,\ s_2,\ s_3),$$ + +the four observed-lane values *before* each S-box. Crucially, the prover does +**not** commit $\vec{v}_0$. The *row output* is the full final state + +$$\mathrm{out} := (s_4, \vec{v}_4) \in \mathbb{F} \times \mathbb{F}^3.$$ + +## A Crucial Point: Treating the $s_k$ as Independent (or why is the reconstruction of the hidden $\vec{v}_0$ linear?) + +Before stating the theorem, it is essential to be precise about what is "committed" versus "derived" — otherwise the statement is wrong. + +A naive reading of the dynamical system says: starting from $(s_0, \vec{v}_0)$ +together with the round constants, the entire iteration is determined. So +$s_1, s_2, s_3$ are **functions** of $(s_0, \vec{v}_0)$, computed by cascading +$\sigma$ four times. 
Under this reading, the map $\vec{v}_0 \mapsto (s_1, s_2, s_3)$ +(with $s_0, c_k$ fixed) is **highly nonlinear** — each $s_{k+1}$ depends on +$\sigma(s_k + c_k)$, which depends on $\sigma(s_{k-1} + c_{k-1})$, and so on, +giving a tower of $\sigma$'s with combined degree $5^3$ if $\sigma(x) = x^5$. + +That naive reading is **not** what the circuit checks. The compressed-row +relation does *not* receive $\vec{v}_0$ and recompute the iteration. Instead, the +prover commits **four independent wires** $(s_0, s_1, s_2, s_3)$, and the +relation asks: + +> Does there exist a $\vec{v}_0 \in \mathbb{F}^3$ such that the four committed +> values $(s_0, s_1, s_2, s_3)$, together with the recurrence, are +> consistent — and if so, is that $\vec{v}_0$ unique? + +Under this question, $u_k = \sigma(s_k + c_k)$ for $k = 0, 1, 2, 3$ are +**fixed scalars** computed from the committed wires $s_k$ and the publicly +known round constants. They do not "cascade through $\sigma$" in $\vec{v}_0$, +because the $s_k$ are committed independently rather than derived from +$\vec{v}_0$. With the $u_k$ treated as constants, the constraints + +$$ +\begin{aligned} +s_1 &= D_1 u_0 + \vec{1}^\top \vec{v}_0, \\ +s_2 &= D_1 u_1 + \vec{1}^\top (A \vec{v}_0 + u_0 \vec{1}), \\ +s_3 &= D_1 u_2 + \vec{1}^\top (A^2 \vec{v}_0 + A u_0 \vec{1} + u_1 \vec{1}), +\end{aligned} +$$ + +become a **linear system in $\vec{v}_0$**. This is the system the proof inverts. + +Two consequences of this design choice: + +* **Per-variable degree stays at 5.** Because the $s_k$ are independent + wires, each $u_k$ is a single $\sigma$ applied to one wire — degree 5 in + that wire — rather than a cascade that would push the degree to $5^k$. The + cost is committing three extra wires per row; the benefit is that the + relation polynomial has manageable degree. + +* **The recurrence is checked, not assumed.** What forces the prover to use + the *correct* $s_1, s_2, s_3$ (i.e. 
the values that the honest dynamics + would produce) is precisely the four subrelations $A_0, A_1, A_2, A_3$ + that constrain consistency between the committed $s_k$, the unique + $\vec{v}_0$ recovered from them, and the adjacent row. Row-locally, every + committed lane-0 chain recovers a unique hidden state; globally, a dishonest + chain is rejected when its resulting output fails to match the successor row + constraints. + +With this understanding in place, "the map $\vec{v}_0 \mapsto (s_1, s_2, s_3)$" +in the theorem below means **the linear-in-$\vec{v}_0$ component of the system +above**, with the committed-wire S-box outputs $u_0, u_1, u_2$ treated as +known scalars. This map is genuinely affine in $\vec{v}_0$. + +## The Theorem + +**Theorem (Quad-row soundness).** *Suppose $D_2, D_3, D_4$ are pairwise +distinct in $\mathbb{F}$. Then:* + +**(1) Hidden-lane uniqueness.** *Fix any committed values +$(s_0, s_1, s_2, s_3) \in \mathbb{F}^4$, any round constants +$(c_0, c_1, c_2, c_3) \in \mathbb{F}^4$, and let $u_k := \sigma(s_k + c_k)$ +for $k = 0, 1, 2, 3$ be the resulting fixed scalars. Then there is at most +one $\vec{v}_0 \in \mathbb{F}^3$ for which the recurrence is consistent with the +committed $s_k$. Equivalently, the affine map* + +$$\vec{v}_0 \ \longmapsto\ (s_1,\ s_2,\ s_3) \qquad (\text{with } s_0,\ c_k,\ u_0, u_1, u_2 \text{ all held fixed})$$ + +*is a bijection $\mathbb{F}^3 \to \mathbb{F}^3$. 
Its linear part, after +subtracting the parts of $(s_1, s_2, s_3)$ that depend only on $s_0$, the +round constants, and $(u_0, u_1, u_2)$, is captured (after a change of +basis) by the Vandermonde matrix* + +$$ +V = \begin{bmatrix} 1 & 1 & 1 \\ D_2 & D_3 & D_4 \\ D_2^2 & D_3^2 & D_4^2 \end{bmatrix}, +$$ + +*whose determinant is* + +$$\det(V) = (D_3 - D_2)(D_4 - D_2)(D_4 - D_3),$$ + +*nonzero exactly when the three nodes are pairwise distinct.* + +**(2) Closed-form output.** *Once $\vec{v}_0$ is pinned by (1), the output $\mathrm{out}$ +is a fixed $\mathbb{F}$-linear function of $(s_1, s_2, s_3, u_0, u_1, u_2, u_3)$. +There is a $4 \times 7$ matrix $C$ over $\mathbb{F}$, depending only on +$(D_1, D_2, D_3, D_4)$, such that* + +$$\mathrm{out} = C \cdot (s_1,\ s_2,\ s_3,\ u_0,\ u_1,\ u_2,\ u_3)^\top.$$ + +*Note that $s_0$ does not appear in this input vector — it enters only through +$u_0 = \sigma(s_0 + c_0)$.* + +The matrix $C$ is what `poseidon2_quad_params.hpp` precomputes and what +`poseidon2_quad_closed_form.test.cpp` cross-checks against direct iteration. + +## What the Theorem Buys You in the Circuit + +The relation commits only the lane-0 chain $(s_0, s_1, s_2, s_3)$ on each +compressed row. Part (1) says the prover gets no freedom in $\vec{v}_0$ — it is +forced by the committed values together with the publicly fixed round +constants. Part (2) says the row's output state is a fixed linear function of +seven quantities, four of which are already committed wires and three of +which are S-box outputs of committed wires; this is exactly what the four +interior subrelations check. + +In the circuit, the theorem is used in the forward direction: the current +compressed row determines a unique full four-round output. For an interior +transition, the relation checks that the successor compressed row starts from +that output state: its first lane-0 wire equals the output's lane 0, and its +reconstructed hidden start lanes equal the output's hidden lanes. 
+ +The same Vandermonde inversion is also used at the row-to-row boundary: when +both adjacent rows hide their $\vec{v}$'s, comparing $\vec{v}$'s directly is impossible, +so the relation instead compares their image under $V$. Because $V$ is +bijective, equality of images is equivalent to equality of preimages. + +## Proof of Part (1): Why the Vandermonde Appears + +Recall that $u_0, u_1, u_2, u_3$ are taken as **fixed scalars**, computed +from the committed wires $s_0, s_1, s_2, s_3$ and the public round +constants. With the $u_k$ frozen, the recurrence on the hidden lanes is +linear in $\vec{v}_0$: + +$$\vec{v}_{k+1} = A \vec{v}_k + u_k \cdot \vec{1},$$ + +so + +$$\vec{v}_1 = A \vec{v}_0 + u_0 \vec{1},$$ + +$$\vec{v}_2 = A^2 \vec{v}_0 + (A u_0 + u_1)\, \vec{1},$$ + +$$\vec{v}_3 = A^3 \vec{v}_0 + (A^2 u_0 + A u_1 + u_2)\, \vec{1}.$$ + +The observed-lane recurrence reads + +$$s_{k+1} = D_1 u_k + \vec{1}^\top \vec{v}_k.$$ + +Substituting and collecting all terms that do not involve $\vec{v}_0$ — call this +the *driver-only piece*, since it depends only on $(s_0, c_0, c_1, c_2, c_3, +u_0, u_1, u_2)$, all of which are fixed scalars — gives + +$$s_1 - (\text{driver-only}) = \vec{1}^\top \vec{v}_0,$$ + +$$s_2 - (\text{driver-only}) = \vec{1}^\top A \vec{v}_0,$$ + +$$s_3 - (\text{driver-only}) = \vec{1}^\top A^2 \vec{v}_0.$$ + +In matrix form, the linear part of the map $\vec{v}_0 \mapsto (s_1, s_2, s_3)$ is + +$$ +K = \begin{bmatrix} \vec{1}^\top \\ \vec{1}^\top A \\ \vec{1}^\top A^2 \end{bmatrix} +\quad (\text{a } 3 \times 3 \text{ matrix}). +$$ + +The question reduces to: **when is $K$ invertible?** + +### A is Diagonalizable with Eigenvalues $D_2, D_3, D_4$ + +A short calculation gives the characteristic polynomial of $A$: + +$$\det(A - x I) = (D_2 - x)(D_3 - x)(D_4 - x).$$ + +So the eigenvalues of $A$ are exactly $D_2, D_3, D_4$. 
When they are pairwise
+distinct, $A$ is diagonalizable: there is an invertible $P$ with
+
+$$A = P\, \mathrm{diag}(D_2, D_3, D_4)\, P^{-1}.$$
+
+### $K$ Invertible $\iff$ Vandermonde Invertible
+
+Using $A^k = P\, \mathrm{diag}(D_2^k, D_3^k, D_4^k)\, P^{-1}$,
+
+$$\vec{1}^\top A^k = (P^\top \vec{1})^\top \mathrm{diag}(D_2^k, D_3^k, D_4^k)\, P^{-1}.$$
+
+Set $\vec{w} := P^\top \vec{1} = (w_1, w_2, w_3) \in \mathbb{F}^3$. Stacking the
+three rows for $k = 0, 1, 2$,
+
+$$
+K = \begin{bmatrix} w_1 & w_2 & w_3 \\ w_1 D_2 & w_2 D_3 & w_3 D_4 \\ w_1 D_2^2 & w_2 D_3^2 & w_3 D_4^2 \end{bmatrix} P^{-1}.
+$$
+
+The middle matrix factors as
+
+$$
+\begin{bmatrix} w_1 & w_2 & w_3 \\ w_1 D_2 & w_2 D_3 & w_3 D_4 \\ w_1 D_2^2 & w_2 D_3^2 & w_3 D_4^2 \end{bmatrix} = \begin{bmatrix} 1 & 1 & 1 \\ D_2 & D_3 & D_4 \\ D_2^2 & D_3^2 & D_4^2 \end{bmatrix} \begin{bmatrix} w_1 & 0 & 0 \\ 0 & w_2 & 0 \\ 0 & 0 & w_3 \end{bmatrix} = V \cdot \mathrm{diag}(\vec{w}),
+$$
+
+so $K = V \cdot \mathrm{diag}(\vec{w}) \cdot P^{-1}$ and therefore
+
+$$\det(K) = \det(V) \cdot w_1 w_2 w_3 \cdot \det(P^{-1}).$$
+
+Since $P$ is invertible, $\det(P^{-1}) \ne 0$, so $K$ is invertible iff
+$\det(V) \ne 0$ **and** every $w_i \ne 0$.
+
+* $\det(V) = (D_3 - D_2)(D_4 - D_2)(D_4 - D_3) \ne 0$ iff $D_2, D_3, D_4$ are
+  pairwise distinct. *(This is the static_assert in `poseidon2_quad_params.hpp`.)*
+* $w_i = (P^\top \vec{1})_i$ is the inner product of $\vec{1}$ with the
+  $i$-th right eigenvector of $A$ (the $i$-th column of $P$). For this specific
+  $A = \mathrm{diag}(D) + J - I$,
+  each eigenvector on a distinct eigenvalue has nonzero coordinate sum, so
+  $w_i \ne 0$ automatically.
+
+Therefore $K$ is invertible exactly when $D_2, D_3, D_4$ are pairwise
+distinct.
+
+## Proof of Part (2): The Closed Form $C$
+
+Once $\vec{v}_0$ is determined by part (1), every subsequent $\vec{v}_k$ is determined by
+the recurrence $\vec{v}_{k+1} = A \vec{v}_k + u_k \vec{1}$. 
Iterating four times,
+
+$$\vec{v}_4 = A^4 \vec{v}_0 + (A^3 u_0 + A^2 u_1 + A u_2 + u_3)\, \vec{1},$$
+
+$$s_4 = D_1 u_3 + \vec{1}^\top \vec{v}_3.$$
+
+Substituting $\vec{v}_0 = K^{-1} \cdot (\text{linear in } s_1, s_2, s_3, u_0, u_1, u_2)$
+into both expressions yields
+
+$$\mathrm{out} = C \cdot (s_1, s_2, s_3, u_0, u_1, u_2, u_3)^\top$$
+
+for a fixed $4 \times 7$ matrix $C$ over $\mathbb{F}$. The explicit
+construction of $C$ is in `poseidon2_quad_params.hpp::build_tables`, and the
+random equivalence test in `poseidon2_quad_closed_form.test.cpp` cross-checks
+it against direct four-step iteration.
+
+## What This Depends On
+
+* **$D_2, D_3, D_4$ pairwise distinct.** Asserted at compile time in
+  `poseidon2_quad_params.hpp`.
+* **The all-ones vector has nonzero projection onto every right eigenvector of
+  $A$.** Automatic for $A = \mathrm{diag}(D) + J - I$ when the $D_i$ are
+  distinct, but worth flagging as a side condition; it is what lets
+  "Vandermonde invertible" suffice without a separate runtime check.
+* **$\sigma$ is deterministic.** Used implicitly when we treat $u_k$ as a
+  function of $(s_k, c_k)$. No other property of $\sigma$ matters — in
+  particular the soundness argument is independent of the algebraic degree of
+  the S-box.
+
+That is the entire mathematical content of the soundness proof.
+
+## Generalization: What Other Matrices Would Work?
+
+The proof above used very little structure of $A$. Stripped to its essentials,
+the construction works for any $3 \times 3$ matrix that satisfies a single
+*observability* condition. 
Stated for general dimension $n$ in place of $3$:
+
+### The Abstract Setup
+
+Replace $\mathbb{F}^3$ by $\mathbb{F}^n$, and choose:
+
+* an $n \times n$ matrix $A$ over $\mathbb{F}$ (the hidden-lane update),
+* a column vector $\vec{b} \in \mathbb{F}^n$ (the *input direction*,
+  controlling how the driver $u$ enters the hidden lanes: $\vec{v}_{k+1} = A \vec{v}_k + u_k \vec{b}$),
+* a row vector $\vec{c}^\top \in \mathbb{F}^n$ (the *output direction*,
+  controlling how the hidden lanes feed back to the observed lane:
+  $s_{k+1} = D_1 u_k + \vec{c}^\top \vec{v}_k$).
+
+In Poseidon2's case, $n = 3$ and $\vec{b} = \vec{c} = \vec{1}$, but
+nothing in the proof actually requires $\vec{b} = \vec{c}$, nor that
+either equals the all-ones vector.
+
+### The Key Condition
+
+Running the system $n$ steps, the linear part of the map
+$\vec{v}_0 \mapsto (s_1, \ldots, s_n)$ is the **observability matrix**
+
+$$
+K = \begin{bmatrix} \vec{c}^\top \\ \vec{c}^\top A \\ \vdots \\ \vec{c}^\top A^{n-1} \end{bmatrix}.
+$$
+
+The construction is sound iff $K$ is invertible — equivalently, iff the pair
+$(A, \vec{c}^\top)$ is *observable*. Note that $\vec{b}$ does not appear in
+the condition: the input direction affects the driver-only piece but not
+recoverability of $\vec{v}_0$.
+
+When $A$ is diagonalizable with distinct eigenvalues $\lambda_1, \ldots, \lambda_n$,
+the same factorization as in the Poseidon2 proof shows $K$ is invertible iff:
+
+1. The $\lambda_i$ are pairwise distinct (Vandermonde nondegeneracy), **and**
+2. $\vec{c}$ has nonzero projection onto every right eigenvector of $A$.
+
+So observability of $(A, \vec{c}^\top)$ — a single condition — splits into
+"distinct eigenvalues" + "output not orthogonal to any eigenvector" in the
+diagonalizable case. 
Poseidon2 satisfies both: condition (1) by the static +assertion that $D_2, D_3, D_4$ are distinct, condition (2) automatically +because eigenvectors of $\mathrm{diag}(D) + J - I$ on distinct eigenvalues +have nonzero coordinate sum. + +### Quick Examples + +**Works:** any pure-diagonal $A = \mathrm{diag}(\lambda_1, \ldots, \lambda_n)$ +with distinct $\lambda_i$ and $\vec{c}$ entrywise nonzero; companion matrices +of separable polynomials with $\vec{c} = \vec{e}_1$; generic circulants over a +field containing the relevant roots of unity. + +**Fails:** $A = \lambda I$ (every vector is an eigenvector, $K$ has rank 1); +any $A$ with a repeated eigenvalue when $\vec{c}^\top$ annihilates the +difference of two eigenvectors on that eigenvalue (then $\vec{v}_0$ and a +shift by that difference produce identical lane-0 chains — a free witness +channel for the prover). + +### Takeaway + +The Poseidon2 quad-row layout is one instance of a general phenomenon: any +observable single-input single-output linear system of dimension $n$ admits +an $n$-step compression where only the observed lane is committed and the +hidden state is recovered by inverting the observability matrix. The +Vandermonde appears as a clean sufficient condition when $A$ is diagonalizable; +the underlying requirement is just observability. diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md index 1c32fa69269c..969f651c6f8a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/README.md @@ -1,5 +1,4 @@ - -# stdlib Poseidon2 Hash Implementation +# Stdlib Poseidon2 Hash Implementation Poseidon2 is a **SNARK-friendly cryptographic hash** designed to be efficient inside prime-field arithmetic circuits. It follows the [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf) and refines the original Poseidon hash. 
@@ -7,151 +6,579 @@ It follows the [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf) and refin This implementation includes: - A **sponge construction** over the BN254 scalar field following the (draft) C2SP Poseidon Sponge spec based on the [Duplex Sponge model](https://keccak.team/files/SpongeDuplex.pdf). -- The **Poseidon2 permutation**, i.e.\ the round function used by the sponge. +- The **Poseidon2 permutation**, i.e. the round function used by the sponge. - **Circuit custom gate relations** that enforce the permutation’s correctness. +## Contents + +- [The Sponge Construction](#the-sponge-construction) +- [The Poseidon2 Permutation](#the-poseidon2-permutation) +- [Trace Layouts](#trace-layouts) +- [Initial External Linear Layer](#initial-external-linear-layer) +- [External Round Subrelations](#external-round-subrelations) +- [Mega Internal Compression](#mega-internal-compression) +- [Compressed Block Subrelations](#compressed-block-subrelations) +- [Soundness Argument](#soundness-argument) +- [Witness Materialization](#witness-materialization) +- [Selectors and File Map](#selectors-and-file-map) ## The Sponge Construction -The sponge absorbs input elements into an internal state, applies permutations, and squeezes output elements. +The sponge absorbs input elements into an internal state, applies permutations, and squeezes +output elements. -#### Sponge constants. - - **State size (t)**: 4 field elements - - **Rate (r)**: 3 elements - - **Capacity (c)**: 1 element +| Parameter | Value | +|-----------|-------| +| State size | $t = 4$ field elements | +| Rate | $r = 3$ field elements | +| Capacity | $c = 1$ field element | +| Domain separator | $\mathrm{IV} = \texttt{input\_length} \ll 64$ | +Let the input be: -### Details +$$ +\mathbf{a} = (a_0, a_1, \ldots, a_{N-1}) +$$ -Let the input be -\f[ -\mathbf{a} = (a_0, a_1, \dots, a_{N-1}). 
-\f] -Partition it into blocks of size \f$r=3\f$: -\f[ -B_j = (a_{3j},, a_{3j+1},, a_{3j+2}) \quad\text{(pad missing entries with 0)},\qquad -m = \left\lceil \frac{N}{3}\right\rceil . -\f] +Partition it into rate-sized blocks: -### Padding -In Poseidon paper, the padding scheme for variable input length hashing suggests padding with \f$ 10^\ast\f$. +$$ +B_j = (a_{3j}, a_{3j+1}, a_{3j+2}), \qquad +m = \left\lceil \frac{N}{3}\right\rceil +$$ -"Domain Separation for Poseidon" section (see 4.2 in [Poseidon](https://eprint.iacr.org/2019/458.pdf)) suggests using domain separation IV defined as follows -\f[ - \mathrm{IV} = (\texttt{input_length}^{64}) -\f] -Initialize the state: -\f[ - \mathbf{s}^{(0)} = (0,0,0,\mathrm{IV}). -\f] +Missing entries in the final block are padded with $0$. This is safe for the variable-length +sponge because the input length is part of the domain separator. The initial state is: -Since we only use Poseidon2 sponge with variable length inputs and the length is a part of domain separation, we can pad the inputs with \f$ 0^\ast \f$, which would not lead to collisions (tested \ref StdlibPoseidon2< Builder >::test_padding_collisions "here"). +$$ +\mathbf{s}^{(0)} = (0, 0, 0, \mathrm{IV}) +$$ -Note that we initialize \f$ \mathrm{IV} \f$ as a fixed witness. It ensures that the first invocation of the Poseidon2 permutation leads to a state where all entries are **normalized** witnesses, i.e. they have `multiplicative_constant` equal 1, and `additive_constant` equal 0. +For each block $j = 0, \ldots, m - 1$: -#### Absorb phase +$$ +\mathbf{s}^{(j+1)} = P\left(\mathbf{s}^{(j)} + (B_j, 0)\right) +$$ -For each block \f$j=0,\dots,m-1\f$, -\f[ -\mathbf{s}^{(j+1)} = P\left(\mathbf{s}^{(j)} + (B_j,0)\right), -\f] -where \f$P\f$ is the Poseidon2 permutation and \f$(B_j,0)\f$ is an array of size \f$ 4 \f$ with \f$r\f$ state elements and a \f$0\f$ capacity limb. +where $P$ is the Poseidon2 permutation. 
The single-output squeeze is: -#### Squeeze (single output) +$$ +y_0 = \left(P(\mathbf{s}^{(m)})\right)_0 +$$ -After absorption, produce one output field element via one duplex step: -\f[ -y_0 = \big(P(\mathbf{s}^{(m)})\big)_0. -\f] +The IV is created as a fixed witness so the first permutation starts from normalized stdlib +field values. ## The Poseidon2 Permutation -Each permutation consists of: - -1. **Initial linear layer**: multiply state by external matrix \f$M_E\f$. Corresponds to \ref bb::stdlib::Poseidon2Permutation< Builder >::matrix_multiplication_external "matrix_multiplication_external" method. -2. **4 External rounds (full S-box)**: - - Record the state and the correspoding round constants \f$ c_{0}^{(i)} \f$ into a \ref bb::UltraCircuitBuilder_< FF >::create_poseidon2_external_gate "Poseidon2 External Gate". - - _Natively_ compute the next state. - - Re-write the state with the new witnesses. - - After the final round, \ref bb::stdlib::Poseidon2Permutation< Builder >::record_current_state_into_next_row "record the computed state" in the next row of the Poseidon2 **external** gates block, - as it is required for the custom gate relation. -3. **56 Internal rounds (partial S-box)**: - - Record the state and the correspoding round constants \f$ c_{0}^{(i)} \f$ into a \ref bb::UltraCircuitBuilder_< FF >::create_poseidon2_internal_gate "Poseidon2 Internal Gate". - - _Natively_ compute the next state. - - Re-write the state with the new witnesses. - - After the final round, \ref bb::stdlib::Poseidon2Permutation< Builder >::record_current_state_into_next_row "record the computed state" in the next row of the Poseidon2 **internal** gates block, -4. **Final external rounds** (same as step 2). - -Note that in general, step 1 requires 6 arithmetic gates, the steps 2-4 create total number of rounds + 3 gates. Hence a single invocation of Poseidon2 Permutation results in 73 gates. 
- -### External Matrix -As proposed in Section 5.1 of [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf), we set -\f[ +The mathematical permutation is identical in Ultra and Mega. The difference is only how the +permutation is encoded in the trace. + +```text +input state + | + v +initial external linear layer M_E + | + v +4 external rounds : full S-box on all 4 state entries, then M_E + | + v +56 internal rounds : S-box only on state[0], then M_I + | + v +4 external rounds : full S-box on all 4 state entries, then M_E + | + v +output state +``` + +External matrix: + +$$ M_E = - \begin{bmatrix} - 5 & 7 & 1 & 3 \\ - 4 & 6 & 1 & 1 \\ - 1 & 3 & 5 & 7 \\ - 1 & 1 & 4 & 6 - \end{bmatrix} -\f] +\begin{bmatrix} +5 & 7 & 1 & 3 \\ +4 & 6 & 1 & 1 \\ +1 & 3 & 5 & 7 \\ +1 & 1 & 4 & 6 +\end{bmatrix} +$$ +Internal matrix, written with the actual diagonal entries $D_i$: -### Internal Matrix - -\f[ +$$ M_I = - \begin{bmatrix} - D_1 & 1 & 1 & 1 \\ - 1 & D_2 & 1 & 1 \\ - 1 & 1 & D_3 & 1 \\ - 1 & 1 & 1 & D_4 - \end{bmatrix} -\f] - -**Implementation note:** The code stores `internal_matrix_diagonal_minus_one[i] = D_i - 1` (not the actual diagonal values \f$D_i\f$). -This is because the algorithm computes \f$v_i = (D_i - 1) \cdot u_i + \text{sum}\f$ where \f$\text{sum} = u_1 + u_2 + u_3 + u_4\f$, -which equals \f$D_i \cdot u_i + (\text{sum of other elements})\f$. - -### Constants - -The constants are generated using the sage [script authored by Markus Schofnegger](https://github.com/HorizenLabs/poseidon2/blob/main/poseidon2_rust_params.sage) from Horizen Labs. - -### Security Level -Based on Section 3.2 of [Poseidon2 paper](https://eprint.iacr.org/2023/323.pdf). - -Given \f$ R_P = 56 \f$, \f$ R_F = 8\f$, \f$ d = 5\f$, \f$ \log_2(p) \approx 254 \f$, we get \f$ 128 \f$ bits of security. - -## Custom Gate Relations - -For an external round with state \f$ \mathbf{u}=(u_1,u_2,u_3,u_4) \f$, define \f$ \mathbf{v}=M_E\cdot\mathbf{u}\f$. 
-\ref bb::Poseidon2ExternalRelationImpl< FF_ > "Poseidon2 External Relation" enforces that the permuted values equal the values in the next row (accessed via shifts): -\f[ -v_k = w_{k,\mathrm{shift}} \qquad \text{for } k \in \{1,2,3,4\}. -\f] - -We encode four independent constraints under a selector \f$ q_{\mathrm{poseidon2_external}}\f$ and aggregate them with -independent challenges \f$ \alpha_i = \alpha_{i, Poseidon2_ext}\f$ from `SubrelationSeparators`: -\f[ -q_{\mathrm{poseidon2_external}}\cdot -\Big( -\alpha_0\big(v_1 - w_{1,\mathrm{shift}}\big) + -\alpha_1\big(v_2 - w_{2,\mathrm{shift}}\big) + -\alpha_2\big(v_3 - w_{3,\mathrm{shift}}\big) + -\alpha_3\big(v_4 - w_{4,\mathrm{shift}}\big) -\Big) = 0. -\f] -To ensure that the relation holds point-wise on the hypercube, the equation above is also multiplied by the appropriate -scaling factor arising from \ref bb::GateSeparatorPolynomial< FF > "GateSeparatorPolynomial". - -\ref bb::Poseidon2InternalRelationImpl< FF_ > "Internal rounds" follow the same pattern, using \f$ M_I \f$ and the partial S-box on the first element. - - -## Number of Gates - -Hashing a single field element costs \f$ 73 \f$ gates. As above, let \f$ N > 1\f$ be the input size. Define \f$ m = \lceil N/3 \rceil \f$ and let \f$ N_3 = N\pmod{3} \f$. The number of gates depends on the number of padded fields equal to \f$ N_3 \f$. If \f$ N_3 = 0\f$, we get -\f[ 1 + 73\cdot m + 3\cdot (m - 1) \f] -gates, otherwise we get -\f[ 1 + 73\cdot m + 3\cdot (m - 2) + N_3.\f] - -According to TACEO blog post [Poseidon{2} for Noir](https://core.taceo.io/articles/poseidon2-for-noir/), a single permutation cost for \f$ t = 4 \f$ implemented without Poseidon2 custom gates is \f$ 2313 \f$ gates. +\begin{bmatrix} +D_1 & 1 & 1 & 1 \\ +1 & D_2 & 1 & 1 \\ +1 & 1 & D_3 & 1 \\ +1 & 1 & 1 & D_4 +\end{bmatrix} +$$ + +The parameter table stores `internal_matrix_diagonal_minus_one[i] = D_i - 1`, not $D_i$ +itself. 
This lets the implementation compute the internal matrix product as +`(D_i - 1) * x_i + sum(x)`, which is equal to $D_i x_i + \sum_{j \ne i} x_j$. + +The constants are generated from the Sage script authored by Markus Schofnegger in the Horizen +Labs Poseidon2 parameter tooling. With `R_P = 56`, `R_F = 8`, `d = 5`, and a 254-bit scalar +field, the parameter set targets 128-bit security. + +## Trace Layouts + +Ultra uses the direct layout: one row per internal round, and six arithmetic rows for the +initial external linear layer. + +```text +Ultra permutation rows + +6 arithmetic rows initial M_E +4 poseidon2_external rows first external rounds +1 poseidon2_external propagate +56 poseidon2_internal rows one partial round each +1 poseidon2_internal propagate +4 poseidon2_external rows final external rounds +1 poseidon2_external propagate +-- +73 rows +``` + +Mega keeps the same permutation but uses custom rows for the initial external linear layer and +compresses all 56 internal rounds into K=4 rows. + +```text +Mega permutation rows + +1 poseidon2_external q_poseidon2_external_initial +4 + 1 poseidon2_external first external rounds + propagate +1 poseidon2_quad_internal q_poseidon2_transition_entry +13 poseidon2_quad_internal q_poseidon2_quad_internal +1 poseidon2_quad_internal q_poseidon2_quad_internal_terminal +1 poseidon2_quad_internal selector-unconstrained standard bridge +4 + 1 poseidon2_external final external rounds + propagate +-- +27 rows +``` + +The stdlib hash also has one fixed-witness IV row outside the permutation when it starts from +the sponge IV. + +## Initial External Linear Layer + +The initial external linear layer has no S-boxes. Mega constrains it in one row under +`q_poseidon2_external_initial`, while Ultra emits arithmetic rows for the same matrix product. 
+Given: + +$$ +\mathbf{x} = +\begin{bmatrix} +w_l \\ +w_r \\ +w_o \\ +w_4 +\end{bmatrix}, +\qquad +\mathbf{y} = +M_E\mathbf{x}, +$$ + +the four subrelations constrain the shifted row: + +$$ +\begin{aligned} +A_0 &: y_0 - w_l' = 0, \\ +A_1 &: y_1 - w_r' = 0, \\ +A_2 &: y_2 - w_o' = 0, \\ +A_3 &: y_3 - w_4' = 0. +\end{aligned} +$$ + +## External Round Subrelations + +An external round starts from a standard-encoded row: + +$$ +(w_l, w_r, w_o, w_4) +$$ + +with round constants in `(q_l, q_r, q_o, q_4)`. The relation computes: + +$$ +\begin{aligned} +u_1 &= (w_l + q_l)^5, \\ +u_2 &= (w_r + q_r)^5, \\ +u_3 &= (w_o + q_o)^5, \\ +u_4 &= (w_4 + q_4)^5, +\end{aligned} +$$ + +then applies the external matrix: + +$$ +\begin{bmatrix} v_1 \\ v_2 \\ v_3 \\ v_4 \end{bmatrix} = M_E \begin{bmatrix} u_1 \\ u_2 \\ u_3 \\ u_4 \end{bmatrix}. +$$ + +The four external subrelations constrain the result against the shifted row: + +$$ +\begin{aligned} +A_0 &: v_1 - w_l' = 0, \\ +A_1 &: v_2 - w_r' = 0, \\ +A_2 &: v_3 - w_o' = 0, \\ +A_3 &: v_4 - w_4' = 0. +\end{aligned} +$$ + +## Mega Internal Compression + +Mega uses a K=4 layout: each compressed row commits four consecutive `state[0]` values instead of the full state at every internal round. This is sound because only `state[0]` passes through the internal-round S-box. Once the four S-box outputs are fixed, the update of `state[1..3]` is linear and can be checked through an invertible 3 by 3 linear encoding. + +For a self-contained linear-algebra statement of the underlying soundness theorem — abstracted away from Poseidon2-specific notation, with a proof and a discussion of which other matrices the same construction would work for — see [QUAD_THEOREM.md](QUAD_THEOREM.md). 
+ +For a quad row that starts at internal round `4i`: + +| Wire | Meaning | +|------|---------| +| `w_l` | `state[0]` at round `4i` | +| `w_r` | `state[0]` at round `4i + 1` | +| `w_o` | `state[0]` at round `4i + 2` | +| `w_4` | `state[0]` at round `4i + 3` | + +The row selectors carry the current quad constants and, for interior rows, the next quad's first +three constants: + +| Selector | Value | +|----------|-------| +| `q_l` | `c_{4i}` | +| `q_r` | `c_{4i+1}` | +| `q_o` | `c_{4i+2}` | +| `q_4` | `c_{4i+3}` | +| `q_m` | `c_{4(i+1)}` | +| `q_c` | `c_{4(i+1)+1}` | +| `q_5` | `c_{4(i+1)+2}` | + +The compression picture is: + +```text +standard state before internal rounds + (s0, s1, s2, s3) + | + | q_poseidon2_transition_entry + v +first quad row + (s0^0, s0^1, s0^2, s0^3) + | + | 13 q_poseidon2_quad_internal rows + v +terminal quad row + (s0^52, s0^53, s0^54, s0^55) + | + | q_poseidon2_quad_internal_terminal + v +standard bridge row + (s0^56, s1^56, s2^56, s3^56) + | + v +final external rounds +``` + +## Compressed Block Subrelations + +Every subrelation in the compressed block enforces the Poseidon2 internal-round recurrence in the encoding appropriate for its boundary: + +| Boundary | What's known | What the subrelations enforce | +|---|---|---| +| **Entry** | full standard state at row-start | first three `state[0]` values of the first compressed row | +| **Interior** | `state[0]` chain on this row and the next | four-round output, with the next row's `state[1..3]` checked through the same linear encoding | +| **Terminal** | `state[0]` chain on this row, full standard state on the next bridge row | four-round output matched directly against the bridge row | + +The interior and terminal boundaries share a four-round closed form that we cover first. 
+ +### Closed Form for Four Rounds + +Write the committed quad-row wires as: + +$$ +(w_l, w_r, w_o, w_4) = (s_0^{(0)}, s_0^{(1)}, s_0^{(2)}, s_0^{(3)}) +$$ + +and define the four S-box outputs: + +$$ +u_k = (s_0^{(k)} + c_{4i+k})^5, \qquad k \in \{0, 1, 2, 3\}. +$$ + +The row does not store `state[1..3]`. Instead, the claimed successor values +$w_r = s_0^{(1)}$, $w_o = s_0^{(2)}$, and $w_4 = s_0^{(3)}$ determine three linear +combinations of the hidden start-of-row values +$(s_1^{(0)}, s_2^{(0)}, s_3^{(0)})$. For this reconstruction, use Vandermonde nodes +$\lambda_1 = D_2$, $\lambda_2 = D_3$, and $\lambda_3 = D_4$: + +$$ +\begin{bmatrix} 1 & 1 & 1 \\ \lambda_1 & \lambda_2 & \lambda_3 \\ \lambda_1^2 & \lambda_2^2 & \lambda_3^2 \end{bmatrix} \begin{bmatrix} s_1^{(0)} \\ s_2^{(0)} \\ s_3^{(0)} \end{bmatrix} = \begin{bmatrix} b_1 \\ b_2 \\ b_3 \end{bmatrix}. +$$ + +Solving the internal-round recurrence gives the right-hand sides: + +$$ +\begin{aligned} +b_1 &= w_r - D_1 u_0, \\ +b_2 &= w_o - 2w_r + (2D_1 - 3)u_0 - D_1u_1, \\ +b_3 &= w_4 - w_o - (\Sigma + 2)w_r \\ + &\quad + ((\Sigma + 2)D_1 - \Sigma - 3)u_0 + + (D_1 - 3)u_1 - D_1u_2, \\ +\Sigma &= D_2 + D_3 + D_4. +\end{aligned} +$$ + +The Vandermonde determinant is: + +$$ +(\lambda_2 - \lambda_1)(\lambda_3 - \lambda_1)(\lambda_3 - \lambda_2). +$$ + +`poseidon2_quad_params.hpp` has `static_assert`s that the three nodes are pairwise distinct, +so the hidden start-of-row `state[1..3]` values are uniquely determined by the committed +`state[0]` chain. + +After this reconstruction, iterating four internal rounds expresses the row-end state as a +fixed linear combination of $(w_r, w_o, w_4, u_0, u_1, u_2, u_3)$: + +$$ +\operatorname{out} = +C \cdot +\begin{bmatrix} +w_r \\ +w_o \\ +w_4 \\ +u_0 \\ +u_1 \\ +u_2 \\ +u_3 +\end{bmatrix}, +\qquad +\operatorname{out} = +(\operatorname{out}_0, \operatorname{out}_1, \operatorname{out}_2, \operatorname{out}_3) += (s_0^{(4)}, s_1^{(4)}, s_2^{(4)}, s_3^{(4)}). 
+$$ + +The coefficients of $C$ are precomputed in `poseidon2_quad_params.hpp` and unit-tested against +explicit four-step iteration in `poseidon2_quad_closed_form.test.cpp`. + +`w_l` does not appear in the input vector because it enters only through $u_0 = (w_l + c_{4i})^5$. + +Thus `out` is the predicted Poseidon2 state after the four internal rounds represented by this +quad row. The boundary subrelations check this predicted state against the successor row. For a +terminal row, the successor exposes all four output state entries directly. For an interior row, +the successor again exposes only its `state[0]` chain, so the relation compares `out_0` +directly and compares `out_1..out_3` through the same Vandermonde encoding. + +### Entry: Standard to First Quad Row + +The entry row holds $(s_0^{(0)}, s_1^{(0)}, s_2^{(0)}, s_3^{(0)})$ in standard encoding; the +first compressed row encodes $s_0$ at rounds $0, 1, 2, 3$ as $(w_l', w_r', w_o', w_4')$. The +entry row's `w_l` and the first compressed row's `w_l'` share a witness index, so the +permutation argument enforces that both occurrences carry $s_0^{(0)}$. + +The three subrelations enforce the `state[0]` recurrence at $k = 0, 1, 2$. Because +`state[1..3]` are committed on the standard entry row, this boundary does not need a +Vandermonde reconstruction. With $u_0 = (s_0^{(0)} + c_0)^5$, +$u_1 = (w_r' + c_1)^5$, $u_2 = (w_o' + c_2)^5$, and $\Sigma = D_2 + D_3 + D_4$: + +$$ +\begin{aligned} +\operatorname{entry}_1 &= D_1 u_0 + s_1^{(0)} + s_2^{(0)} + s_3^{(0)}, \\ +\operatorname{entry}_2 &= D_1 u_1 + 3 u_0 + + (D_2 + 2) s_1^{(0)} + (D_3 + 2) s_2^{(0)} + (D_4 + 2) s_3^{(0)}, \\ +\operatorname{entry}_3 &= D_1 u_2 + 3 u_1 + (\Sigma + 6) u_0 \\ + &\quad + (D_2^2 + D_2 + \Sigma + 4) s_1^{(0)} + + (D_3^2 + D_3 + \Sigma + 4) s_2^{(0)} + + (D_4^2 + D_4 + \Sigma + 4) s_3^{(0)}. 
+\end{aligned} +$$ + +The entry subrelations are: + +$$ +\begin{aligned} +A_0 &: \operatorname{entry}_1 - w_r' = 0, \\ +A_1 &: \operatorname{entry}_2 - w_o' = 0, \\ +A_2 &: \operatorname{entry}_3 - w_4' = 0. +\end{aligned} +$$ + +Each later S-box ($u_1, u_2$) consumes an already-committed compressed-row wire instead of +inlining the previous round's S-box, keeping per-variable degree at 5. + +This boundary has no hidden degrees of freedom: after the shared witness index fixes $w_l'$, +the three equations above form a triangular system in the remaining first-compressed-row +variables $(w_r', w_o', w_4')$. `A_0` fixes $w_r'$. Then `A_1` uses that fixed $w_r'$ in +$u_1 = (w_r' + c_1)^5$ and fixes $w_o'$. Then `A_2` uses that fixed $w_o'$ in +$u_2 = (w_o' + c_2)^5$ and fixes $w_4'$. Each subrelation has coefficient $-1$ on the next +wire it solves for, so the first compressed row is uniquely determined by the standard entry +state and the fixed round constants. + +### Terminal: Final Quad Row to Bridge Row + +The terminal row's successor is the standard bridge row carrying $(s_0^{(4)}, s_1^{(4)}, +s_2^{(4)}, s_3^{(4)})$. The four subrelations match the closed-form output directly: + +$$ +\begin{aligned} +A_0 &: \operatorname{out}_0 - w_l' = 0, \\ +A_1 &: \operatorname{out}_1 - w_r' = 0, \\ +A_2 &: \operatorname{out}_2 - w_o' = 0, \\ +A_3 &: \operatorname{out}_3 - w_4' = 0. +\end{aligned} +$$ + +The bridge row's wire indices are reused by the first final-external-round gate, so the same +four witnesses feed the next standard-encoded block. + +This boundary has no hidden degrees of freedom: the successor is a full standard-encoded row, +and each equation has coefficient $-1$ on a distinct shifted bridge wire. Once the current +terminal quad row determines `out`, the four bridge wires $(w_l', w_r', w_o', w_4')$ are +uniquely determined. + +### Interior: Quad Row to Quad Row + +The interior row's successor is another compressed row that commits only $s_0$ at four rounds. 
+The next row's `state[1..3]` values are not committed. Instead, the relation compares their +Vandermonde encoding against the encoding reconstructed from the next row's `state[0]` chain. + +Note that the predicted outputs $(\operatorname{out}_0, \ldots, \operatorname{out}_3)$ are +themselves **not committed** as wires — they are symbolic linear combinations of the current +row's committed wires and S-box outputs, expanded inline by the relation. Only $w_l'$ on the +next row is a fresh witness; the equalities below relate that wire (and the next row's other +committed lane-0 wires, through the Vandermonde encoding) to a polynomial in the current row's +wires. + +This enforces: + +- **$A_0$:** $\operatorname{out}_0 = w_l'$ — first $s_0$ value of the next row matches the + predicted $s_0^{(4)}$. +- **$A_1, A_2, A_3$:** the three Vandermonde combinations of + $(\operatorname{out}_1, \operatorname{out}_2, \operatorname{out}_3)$ match the next row's + reconstructed encoding. + +Concretely, with $b_1'$, $b_2'$, and $b_3'$ reconstructed from the next row's `state[0]` chain +using the same formulas as above: + +$$ +\begin{aligned} +A_0 &: \operatorname{out}_0 - w_l' = 0, \\ +A_1 &: \operatorname{out}_1 + \operatorname{out}_2 + \operatorname{out}_3 - b_1' = 0, \\ +A_2 &: \lambda_1 \operatorname{out}_1 + \lambda_2 \operatorname{out}_2 + + \lambda_3 \operatorname{out}_3 - b_2' = 0, \\ +A_3 &: \lambda_1^2 \operatorname{out}_1 + \lambda_2^2 \operatorname{out}_2 + + \lambda_3^2 \operatorname{out}_3 - b_3' = 0. +\end{aligned} +$$ + +Each subrelation has per-variable degree 5 before multiplying by selector and gate separator, +giving partial length 7. + +This quad-row-to-quad-row transition has no hidden degrees of freedom either. `A_0` fixes the next row's first +`state[0]` value. 
The remaining three equations say that the Vandermonde encoding of +$(\operatorname{out}_1, \operatorname{out}_2, \operatorname{out}_3)$ equals +$(b_1', b_2', b_3')$, the encoding reconstructed from the next row's claimed `state[0]` chain. +Because the Vandermonde matrix is invertible, equality of these three encoded values is +equivalent to equality of the underlying `state[1..3]` values. + +## Soundness Argument + +The proof obligation is: every accepting Mega trace describes the same 56 internal rounds as the +direct standard encoding. + +```text +external output + | + | shared witness indices + entry transition + v +quad row 0 + | + | quad-row-to-quad-row transition + v +quad row 1 + | + | repeated for rows 1..12 + v +quad row 13 + | + | terminal transition + shared witness indices + v +final external input +``` + +For each interior quad-row-to-quad-row transition: + +Steps 1 and 2 are the row-local content of [QUAD_THEOREM.md](QUAD_THEOREM.md); this section +composes that theorem with the entry, interior, terminal, and shared-witness boundary checks. + +1. The row's four `state[0]` values uniquely determine the hidden starting `state[1..3]` values + by the Vandermonde reconstruction. +2. The fixed linear map `C` computes the unique four-round output `out`. +3. `A_0` fixes the successor row's first `state[0]` value to `out_0`. +4. `A_1..A_3` force the successor row's reconstructed `state[1..3]` values to equal + `(out_1, out_2, out_3)`. + +The non-interior transitions close the chain: + +| Transition | Why the prover has no freedom | +|----------|-------------------------------| +| External output -> entry row | Standard state wires share witness indices with the external propagate row. | +| Entry row -> first quad row | The entry transition is triangular in `(w_r', w_o', w_4')`, after shared witness indices fix `w_l'`. | +| Final quad row -> bridge row | The terminal transition directly fixes all four shifted bridge wires. 
| +| Bridge row -> final external rows | Bridge witnesses share witness indices with the final external rows. | + +Thus the compressed block has no independent witness channel: each committed `state[0]` value +is fixed by the previous state, and each uncommitted `state[1..3]` value is fixed implicitly by +an invertible encoding. The terminal bridge then materializes the unique final full state. + +## Witness Materialization + +Interior quad rows materialize only the next `state[0]` witness. The relation reconstructs +`state[1..3]` algebraically when checking the row transition, so those three witnesses are not +created on non-terminal quad rows. + +The terminal row materializes the full four-entry state because the following final external +rounds use the standard encoding. + +```text +non-terminal quad output: create witness for next state[0] only +terminal quad output: create witnesses for state[0], state[1], state[2], state[3] +``` + +This saves 39 witness variables per permutation: 13 non-terminal quad rows times 3 omitted +state entries. + +## Selectors and File Map + +Mega removes `q_poseidon2_internal` and adds the following Poseidon2-specific selectors: + +| Selector | Purpose | +|----------|---------| +| `q_poseidon2_external_initial` | Initial external linear layer | +| `q_poseidon2_transition_entry` | Standard-to-quad boundary | +| `q_poseidon2_quad_internal` | Interior K=4 rows | +| `q_poseidon2_quad_internal_terminal` | Quad-to-standard terminal boundary | +| `q_5` | Non-gate selector for the next quad's third round constant | + +`q_m`, `q_c`, and `q_5` duplicate the next quad row's first three round constants on the current +row. They are carried explicitly because Mega relations currently have shifted wire values but do +not have shifted selector values such as `q_l_shift`, `q_r_shift`, or `q_o_shift`. 
+ +Implementation entry points: + +| File | Purpose | +|------|---------| +| `poseidon2_permutation.cpp` | stdlib permutation trace emission | +| `relations/poseidon2_initial_external_relation.hpp` | Mega initial linear layer relation | +| `relations/poseidon2_external_relation.hpp` | External round relation | +| `relations/poseidon2_transition_entry_relation.hpp` | Entry boundary relation | +| `relations/poseidon2_quad_internal_relation.hpp` | Interior quad relation | +| `relations/poseidon2_quad_internal_terminal_relation.hpp` | Terminal boundary relation | +| `crypto/poseidon2/poseidon2_quad_params.hpp` | Vandermonde constants and static checks | +| `honk/execution_trace/mega_execution_trace.hpp` | Mega trace blocks and selector partitioning | +| `flavor/mega_flavor.hpp` | Mega relation and selector set | diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.quad_internal_soundness.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.quad_internal_soundness.test.cpp new file mode 100644 index 000000000000..c6d370fa6155 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.quad_internal_soundness.test.cpp @@ -0,0 +1,186 @@ +// Regression tests for the Mega Poseidon2 quad-internal layout and boundary soundness. +// +// The Mega Poseidon2 permutation uses a compressed internal block with an entry transition +// row (standard -> compressed) and a terminal row (compressed -> standard). The transition +// rows are tied to the surrounding standard-encoded states via copy constraints and shifted +// wires: +// +// - Entry (q_poseidon2_transition_entry): +// w_r_shift - D_1 (w_l + q_l)^5 - w_r - w_o - w_4 = 0 +// ties the first compressed row's `w_r` (= v_0 = state[0] one round ahead) to the +// standard `s_1` at round `rounds_f_begin`. 
+// +// - Terminal (q_poseidon2_quad_internal_terminal): +// out_k - w_{k,shift} = 0 for k in {0, 1, 2, 3} +// ties the compressed chain's computed state at round `p_end` to the selector-unconstrained +// standard bridge row consumed by the final external rounds via shared witness indices. +// +// CircuitChecker iterates row-major-then-relation-major and short-circuits on the first +// failing relation. This means a corruption that would in principle break multiple relations +// is reported as breaking the first one the checker reaches; the tests below note the +// expected first-detector where it matters. + +#include "barretenberg/circuit_checker/circuit_checker.hpp" +#include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/op_queue/ecc_op_queue.hpp" +#include "barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" + +#include + +using namespace bb; + +namespace { + +class Poseidon2QuadInternalSoundnessTests : public ::testing::Test { + public: + using Builder = MegaCircuitBuilder; + using FF = MegaFlavor::FF; + // Quad-internal block layout produced by stdlib::Poseidon2Permutation on Mega: + // row 0 : entry transition (standard encoding) + // rows 1 .. 13 : interior compressed rows + // row 14 : terminal compressed row + // row 15 : standard transition row (selector-unconstrained, copy-constrained to final external rounds) + static constexpr size_t quad_entry_row = 0; + static constexpr size_t quad_first_interior_row = 1; + + // Build an honest Poseidon2 circuit: hashes a single fixed field element through the + // `Poseidon2Permutation::permutation` call used by the stdlib. 
+ static std::unique_ptr build_honest_permutation(const FF& input_value) + { + auto builder = std::make_unique(std::make_shared(), /*is_write_vk_mode=*/true); + using State = stdlib::Poseidon2Permutation::State; + State input{ + stdlib::field_t(stdlib::witness_t(builder.get(), input_value)), + stdlib::field_t(stdlib::witness_t(builder.get(), FF::zero())), + stdlib::field_t(stdlib::witness_t(builder.get(), FF::zero())), + stdlib::field_t(stdlib::witness_t(builder.get(), FF::zero())), + }; + (void)stdlib::Poseidon2Permutation::permutation(builder.get(), input); + return builder; + } +}; + +TEST_F(Poseidon2QuadInternalSoundnessTests, DoesNotMaterializeUnusedNonTerminalStateLimbs) +{ + auto builder = std::make_unique(std::make_shared(), /*is_write_vk_mode=*/true); + const size_t initial_num_variables = builder->get_num_variables(); + + using State = stdlib::Poseidon2Permutation::State; + State input{ + stdlib::field_t(stdlib::witness_t(builder.get(), FF(uint256_t(0xdeadbeefULL)))), + stdlib::field_t(stdlib::witness_t(builder.get(), FF::zero())), + stdlib::field_t(stdlib::witness_t(builder.get(), FF::zero())), + stdlib::field_t(stdlib::witness_t(builder.get(), FF::zero())), + }; + (void)stdlib::Poseidon2Permutation::permutation(builder.get(), input); + + // Initial input witnesses + initial-linear-layer output + first-half external-round outputs + + // compressed internal witnesses + final-half external-round outputs. + // + // The non-terminal compressed rows only need state[0] at the next row; state[1..3] are derived by + // the relation and are not materialized until the terminal row bridges back to standard encoding. 
+ constexpr size_t input_witnesses = 4; + constexpr size_t initial_external_output_witnesses = 4; + constexpr size_t external_output_witnesses = 8 * 4; + constexpr size_t compressed_intermediate_witnesses = 14 * 3; + constexpr size_t compressed_next_state_zero_witnesses = 14; + constexpr size_t compressed_terminal_standard_limbs = 3; + constexpr size_t expected_num_variables = input_witnesses + initial_external_output_witnesses + + external_output_witnesses + compressed_intermediate_witnesses + + compressed_next_state_zero_witnesses + compressed_terminal_standard_limbs; + + EXPECT_EQ(builder->get_num_variables() - initial_num_variables, expected_num_variables); + EXPECT_TRUE(CircuitChecker::check(*builder)); +} + +// Entry boundary: tampering the first compressed row's `w_r` (= intermediate_s0) breaks the +// entry-transition relation, which enforces +// w_r_shift = D_1 (s_0 + c)^5 + s_1 + s_2 + s_3 +// on the entry row (w_r_shift lives in the first compressed row and is the tampered witness). +TEST_F(Poseidon2QuadInternalSoundnessTests, EntryBoundaryRejectsTamperedIntermediateS0) +{ + auto builder = build_honest_permutation(FF(uint256_t(0x1234ULL))); + ASSERT_TRUE(CircuitChecker::check(*builder)); + + auto& quad = builder->blocks.poseidon2_quad_internal; + // Shift the first interior compressed row's w_r (= intermediate_s0) by a nonzero delta. + const uint32_t w_r_idx = quad.w_r()[quad_first_interior_row]; + builder->set_variable(w_r_idx, builder->get_variable(w_r_idx) + FF(1)); + + EXPECT_FALSE(CircuitChecker::check(*builder)); +} + +// Entry boundary: tampering the entry row's `w_r` (= standard s_1 at round rounds_f_begin) +// breaks the entry-transition relation as well. `w_r` of the entry row is copy-constrained +// to the external block's propagate row; modifying it invalidates both the external chain +// and the entry relation. 
+TEST_F(Poseidon2QuadInternalSoundnessTests, EntryBoundaryRejectsTamperedStateOne) +{ + auto builder = build_honest_permutation(FF(uint256_t(0xabcdULL))); + ASSERT_TRUE(CircuitChecker::check(*builder)); + + auto& quad = builder->blocks.poseidon2_quad_internal; + const uint32_t w_r_idx = quad.w_r()[quad_entry_row]; + builder->set_variable(w_r_idx, builder->get_variable(w_r_idx) + FF(7)); + + EXPECT_FALSE(CircuitChecker::check(*builder)); +} + +// Exit boundary: the standard bridge row (last row of poseidon2_quad_internal) holds `state[1]` +// at round p_end in its `w_r`. Shifting that witness breaks the terminal relation, which enforces +// out_1 (computed by the last compressed row) == w_r_shift (the bridge row's w_r). +TEST_F(Poseidon2QuadInternalSoundnessTests, ExitBoundaryRejectsTamperedStateOne) +{ + auto builder = build_honest_permutation(FF(uint256_t(0xcafebabeULL))); + ASSERT_TRUE(CircuitChecker::check(*builder)); + + auto& quad = builder->blocks.poseidon2_quad_internal; + // Last row of the quad-internal block is the standard transition row holding + // (s_0, s_1, s_2, s_3) at round p_end in standard encoding. + const size_t quad_std_transition_row = quad.size() - 1; + const uint32_t state1_idx = quad.w_r()[quad_std_transition_row]; + builder->set_variable(state1_idx, builder->get_variable(state1_idx) + FF(1)); + + EXPECT_FALSE(CircuitChecker::check(*builder)); +} + +// Interior chain: corrupting any wire on an interior compressed row breaks the chain's +// quad-internal relation locally. +TEST_F(Poseidon2QuadInternalSoundnessTests, InteriorRelationRejectsTamperedWire) +{ + auto builder = build_honest_permutation(FF(uint256_t(0xfeedf00dULL))); + ASSERT_TRUE(CircuitChecker::check(*builder)); + + auto& quad = builder->blocks.poseidon2_quad_internal; + // Pick some middle interior row. 
+ const size_t interior_row = quad_first_interior_row + 5; + const uint32_t w_o_idx = quad.w_o()[interior_row]; + builder->set_variable(w_o_idx, builder->get_variable(w_o_idx) + FF(1)); + + EXPECT_FALSE(CircuitChecker::check(*builder)); +} + +// Cross-row encoding test: the interior subrelations A_1, A_2, A_3 compare row i's predicted +// (out_1, out_2, out_3) against row i+1's reconstructed Vandermonde encoding (b_1', b_2', +// b_3'), where b_k' is built from row i+1's lane-0 chain. Tampering an interior row's wire +// perturbs that reconstruction at the *previous* row without touching the previous row's +// own committed wires — exercising the bijectivity-of-V mechanism that lets the relation +// compare uncommitted hidden lanes. The tampered wire is also row i+1's own committed wire, +// so the tamper would also break row i+1's own relation; CircuitChecker may report either +// site, but both are exercising the same Vandermonde-encoding equality. +TEST_F(Poseidon2QuadInternalSoundnessTests, CrossRowVandermondeEncodingMismatchRejected) +{ + auto builder = build_honest_permutation(FF(uint256_t(0xCAFE1234ULL))); + ASSERT_TRUE(CircuitChecker::check(*builder)); + + auto& quad = builder->blocks.poseidon2_quad_internal; + const size_t row_i_plus_1 = quad_first_interior_row + 6; + const uint32_t idx = quad.w_o()[row_i_plus_1]; + builder->set_variable(idx, builder->get_variable(idx) + FF(1)); + + EXPECT_FALSE(CircuitChecker::check(*builder)); +} + +} // namespace diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp index c5ed32d06871..d4436ccf03e5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2.test.cpp @@ -12,12 +12,8 @@ auto& engine = numeric::get_debug_randomness(); } template class StdlibPoseidon2 : public testing::Test { - using curve = 
stdlib::bn254; - - using byte_array_ct = stdlib::byte_array; using field_ct = stdlib::field_t; using witness_ct = stdlib::witness_t; - using public_witness_ct = stdlib::public_witness_t; using poseidon2 = typename stdlib::poseidon2; using native_poseidon2 = crypto::Poseidon2; @@ -27,16 +23,26 @@ template class StdlibPoseidon2 : public testing::Test { static std::size_t gate_count(std::size_t N) { + // Number of Poseidon2 permutation invocations + size_t P_N = (N + 2) / 3; + // Number of extra additions in squeeze + size_t N_3 = N % 3; + + if constexpr (IsMegaBuilder) { + // Mega uses the K=4 compressed encoding with a custom initial-linear-layer row. + if (P_N == 1) { + return 28; + } + return (N_3 == 0) ? (30 * P_N - 2) : (30 * P_N - 5 + N_3); + } + + // Ultra uses the standard single-round encoding (73 gates). + constexpr size_t P_cost = 73; if (N == 1) { - return 73; + return P_cost; } - const size_t P_cost = 73; const size_t D_full_adds = 3; - // Number of Poseidon2 permutation invocations - size_t P_N = (N + 2) / 3; - // Number of extra additions in sqeeze - size_t N_3 = N % 3; if (N_3 == 0) { return (1 + P_N * P_cost + (P_N - 1) * D_full_adds); } else { @@ -95,8 +101,6 @@ template class StdlibPoseidon2 : public testing::Test { left.set_public(); - info("num gates = ", builder.get_num_finalized_gates_inefficient()); - bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } @@ -136,7 +140,6 @@ template class StdlibPoseidon2 : public testing::Test { } } - native_poseidon2::hash(inputs); EXPECT_THROW_WITH_MESSAGE(poseidon2::hash(witness_inputs), "Sponge inputs should not be stdlib constants"); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp index b79ebb541f57..3ef08c15c7d9 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.cpp @@ -9,76 +9,208 @@ #include "barretenberg/honk/execution_trace/gate_data.hpp" namespace bb::stdlib { +namespace { template -typename Poseidon2Permutation::State Poseidon2Permutation::permutation( - Builder* builder, const typename Poseidon2Permutation::State& input) +void materialize_constants_for_initial_layer(Builder* builder, typename Poseidon2Permutation::State& state) { - State current_state(input); - NativeState current_native_state; - for (size_t i = 0; i < t; ++i) { - current_native_state[i] = current_state[i].get_value(); + // The Mega initial-external custom gate records its four inputs by witness index. A constant field_t has no + // witness index until it is put into the builder's constant table, while the Ultra six-gate computation below can + // use constant field_t values directly. + for (auto& state_limb : state) { + if (state_limb.is_constant()) { + state_limb = + field_t::from_witness_index(builder, builder->put_constant_variable(state_limb.get_value())); + } } +} - // Apply 1st linear layer both natively and in-circuit. 
- NativePermutation::matrix_multiplication_external(current_native_state); - matrix_multiplication_external(current_state); +template +void sync_native_state_from_state(typename Poseidon2Permutation::NativeState& native_state, + const typename Poseidon2Permutation::State& state) +{ + for (size_t i = 0; i < Poseidon2Permutation::t; ++i) { + native_state[i] = state[i].get_value(); + } +} - // First set of external rounds - constexpr size_t rounds_f_beginning = rounds_f / 2; - for (size_t i = 0; i < rounds_f_beginning; ++i) { +template +void apply_external_rounds(Builder* builder, + typename Poseidon2Permutation::State& current_state, + typename Poseidon2Permutation::NativeState& current_native_state, + const size_t begin, + const size_t end) +{ + using Permutation = Poseidon2Permutation; + using FF = typename Permutation::FF; + using Witness = witness_t; + + for (size_t i = begin; i < end; ++i) { poseidon2_external_gate_ in{ current_state[0].get_witness_index(), current_state[1].get_witness_index(), current_state[2].get_witness_index(), current_state[3].get_witness_index(), i }; builder->create_poseidon2_external_gate(in); - // calculate the new witnesses - NativePermutation::add_round_constants(current_native_state, round_constants[i]); - NativePermutation::apply_sbox(current_native_state); - NativePermutation::matrix_multiplication_external(current_native_state); - for (size_t j = 0; j < t; ++j) { - current_state[j] = witness_t(builder, current_native_state[j]); + Permutation::NativePermutation::add_round_constants(current_native_state, Permutation::round_constants[i]); + Permutation::NativePermutation::apply_sbox(current_native_state); + Permutation::NativePermutation::matrix_multiplication_external(current_native_state); + for (size_t j = 0; j < Permutation::t; ++j) { + current_state[j] = Witness(builder, current_native_state[j]); } } +} - propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_external); +template +void 
apply_standard_internal_rounds(Builder* builder, + typename Poseidon2Permutation::State& current_state, + typename Poseidon2Permutation::NativeState& current_native_state, + const size_t rounds_f_beginning, + const size_t p_end) +{ + using Permutation = Poseidon2Permutation; + using Witness = witness_t; - // Internal rounds - const size_t p_end = rounds_f_beginning + rounds_p; for (size_t i = rounds_f_beginning; i < p_end; ++i) { - poseidon2_internal_gate_ in{ current_state[0].get_witness_index(), - current_state[1].get_witness_index(), - current_state[2].get_witness_index(), - current_state[3].get_witness_index(), - i }; + poseidon2_internal_gate_ in{ current_state[0].get_witness_index(), + current_state[1].get_witness_index(), + current_state[2].get_witness_index(), + current_state[3].get_witness_index(), + i }; builder->create_poseidon2_internal_gate(in); - current_native_state[0] += round_constants[i][0]; - NativePermutation::apply_single_sbox(current_native_state[0]); - NativePermutation::matrix_multiplication_internal(current_native_state); - for (size_t j = 0; j < t; ++j) { - current_state[j] = witness_t(builder, current_native_state[j]); + current_native_state[0] += Permutation::round_constants[i][0]; + Permutation::NativePermutation::apply_single_sbox(current_native_state[0]); + Permutation::NativePermutation::matrix_multiplication_internal(current_native_state); + for (size_t j = 0; j < Permutation::t; ++j) { + current_state[j] = Witness(builder, current_native_state[j]); } } + Permutation::propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_internal); +} - propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_internal); +void apply_mega_internal_rounds(MegaCircuitBuilder* builder, + typename Poseidon2Permutation::State& current_state, + typename Poseidon2Permutation::NativeState& current_native_state, + const size_t rounds_f_beginning) +{ + using Permutation = Poseidon2Permutation; + using 
FF = typename Permutation::FF; + using NativeState = typename Permutation::NativeState; + using Witness = witness_t; - // Remaining external rounds - for (size_t i = p_end; i < NUM_ROUNDS; ++i) { - poseidon2_external_gate_ in{ current_state[0].get_witness_index(), - current_state[1].get_witness_index(), - current_state[2].get_witness_index(), - current_state[3].get_witness_index(), - i }; - builder->create_poseidon2_external_gate(in); - // calculate the new witnesses - NativePermutation::add_round_constants(current_native_state, round_constants[i]); - NativePermutation::apply_sbox(current_native_state); - NativePermutation::matrix_multiplication_external(current_native_state); - for (size_t j = 0; j < t; ++j) { - current_state[j] = witness_t(builder, current_native_state[j]); + // K=4 compressed encoding: w_l, w_r, w_o, w_4 = state[0] at rounds 4i+0, 4i+1, 4i+2, 4i+3. + // (s_1, s_2, s_3) at row-start are derived inside the relation via a 3x3 Vandermonde solve. + static_assert(Permutation::rounds_p % 4 == 0); + constexpr size_t num_quad_rows = Permutation::rounds_p / 4; // 14 rows for rounds_p = 56 + + // Entry transition row (standard encoding): its wires share witness indices with the external + // block's propagate row, so they are the true external output. The relation forces the first + // compressed row's (w_r_shift, w_o_shift, w_4_shift) to state[0] at rounds start+1, +2, +3. 
+ { + poseidon2_transition_entry_gate_ in{ + current_state[0].get_witness_index(), + current_state[1].get_witness_index(), + current_state[2].get_witness_index(), + current_state[3].get_witness_index(), + rounds_f_beginning, + }; + builder->create_poseidon2_transition_entry_gate(in); + } + + auto advance_internal_round = [](NativeState& state, const FF& round_constant) { + state[0] += round_constant; + Permutation::NativePermutation::apply_single_sbox(state[0]); + Permutation::NativePermutation::matrix_multiplication_internal(state); + }; + + // Helper: emit one K=4 compressed row (interior or terminal) and advance `current_state` + // by 4 internal rounds. The row wires are state[0] at rounds start, start+1, start+2, start+3. + auto emit_quad_row = [&](size_t quad_idx, bool is_terminal) { + const size_t start = rounds_f_beginning + (4 * quad_idx); + const size_t next_start = start + 4; // ignored on terminal + + NativeState state_after_1 = current_native_state; + advance_internal_round(state_after_1, Permutation::round_constants[start + 0][0]); + auto s0_at_1 = Witness(builder, state_after_1[0]); + + NativeState state_after_2 = state_after_1; + advance_internal_round(state_after_2, Permutation::round_constants[start + 1][0]); + auto s0_at_2 = Witness(builder, state_after_2[0]); + + NativeState state_after_3 = state_after_2; + advance_internal_round(state_after_3, Permutation::round_constants[start + 2][0]); + auto s0_at_3 = Witness(builder, state_after_3[0]); + + poseidon2_quad_internal_gate_ in{ + current_state[0].get_witness_index(), // state[0] at round start + s0_at_1.witness_index, // state[0] at round start+1 + s0_at_2.witness_index, // state[0] at round start+2 + s0_at_3.witness_index, // state[0] at round start+3 + start, + next_start, + is_terminal, + }; + builder->create_poseidon2_quad_internal_gate(in); + + // Advance native state by the 4th round to land on state at round start+4. 
+ current_native_state = state_after_3; + advance_internal_round(current_native_state, Permutation::round_constants[start + 3][0]); + + // The next non-terminal compressed row only consumes state[0] at round start+4. The remaining limbs are + // derived inside the relation and do not need witnesses until the terminal row bridges back to the + // standard encoding consumed by the final external rounds. + current_state[0] = Witness(builder, current_native_state[0]); + if (is_terminal) { + for (size_t j = 1; j < Permutation::t; ++j) { + current_state[j] = Witness(builder, current_native_state[j]); + } } + }; + + // 13 interior compressed rows (covering rounds 0..51 relative) + for (size_t q = 0; q < num_quad_rows - 1; ++q) { + emit_quad_row(q, /*is_terminal=*/false); } + // 1 terminal compressed row (covering rounds 52..55 relative) + emit_quad_row(num_quad_rows - 1, /*is_terminal=*/true); + + // Standard-transition bridge row: selector-unconstrained, holds state at round p_end in standard + // encoding. Shared witness indices with the first final-external gate below. 
+ builder->create_unconstrained_gate(builder->blocks.poseidon2_quad_internal, + current_state[0].get_witness_index(), + current_state[1].get_witness_index(), + current_state[2].get_witness_index(), + current_state[3].get_witness_index()); +} + +} // namespace + +template +typename Poseidon2Permutation::State Poseidon2Permutation::permutation( + Builder* builder, const typename Poseidon2Permutation::State& input) +{ + State current_state(input); + NativeState current_native_state; + + matrix_multiplication_external(current_state); + sync_native_state_from_state(current_native_state, current_state); + + // First set of external rounds + constexpr size_t rounds_f_beginning = rounds_f / 2; + apply_external_rounds(builder, current_state, current_native_state, /*begin=*/0, /*end=*/rounds_f_beginning); + + propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_external); + + // Internal rounds: Mega uses a K=4 compressed block; Ultra keeps the standard one-round layout. + const size_t p_end = rounds_f_beginning + rounds_p; + if constexpr (IsMegaBuilder) { + apply_mega_internal_rounds(builder, current_state, current_native_state, rounds_f_beginning); + } else { + apply_standard_internal_rounds(builder, current_state, current_native_state, rounds_f_beginning, p_end); + } + + // Remaining external rounds + apply_external_rounds(builder, current_state, current_native_state, /*begin=*/p_end, /*end=*/NUM_ROUNDS); propagate_current_state_to_next_row(builder, current_state, builder->blocks.poseidon2_external); @@ -91,7 +223,8 @@ typename Poseidon2Permutation::State Poseidon2Permutation::per * \text{state}[3])^{\top}\f$. Where \f$ M_E \f$ is the external round matrix. See `Poseidon2ExternalRelationImpl`. 
*/ template -void Poseidon2Permutation::matrix_multiplication_external(typename Poseidon2Permutation::State& state) +void Poseidon2Permutation::matrix_multiplication_external(State& state) + requires(!IsMegaBuilder) { const bb::fr two(2); const bb::fr four(4); @@ -115,6 +248,31 @@ void Poseidon2Permutation::matrix_multiplication_external(typename Pose state[2] = state[3] + tmp2; } +template +void Poseidon2Permutation::matrix_multiplication_external(State& state) + requires IsMegaBuilder +{ + Builder* builder = validate_context(state); + BB_ASSERT(builder != nullptr, "Poseidon2 Mega initial external layer needs a builder context"); + + NativeState native_state; + for (size_t i = 0; i < t; ++i) { + native_state[i] = state[i].get_value(); + } + NativePermutation::matrix_multiplication_external(native_state); + + materialize_constants_for_initial_layer(builder, state); + + poseidon2_initial_external_gate_ in{ state[0].get_witness_index(), + state[1].get_witness_index(), + state[2].get_witness_index(), + state[3].get_witness_index() }; + builder->create_poseidon2_initial_external_gate(in); + for (size_t j = 0; j < t; ++j) { + state[j] = witness_t(builder, native_state[j]); + } +} + template class Poseidon2Permutation; template class Poseidon2Permutation; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp index 8114bf7323d9..15c3c9b14964 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/poseidon2/poseidon2_permutation.hpp @@ -10,6 +10,7 @@ #include #include "barretenberg/crypto/poseidon2/poseidon2_permutation.hpp" +#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" namespace bb::stdlib { @@ -19,12 +20,14 @@ namespace bb::stdlib { * @details The permutation consists 
of one initial linear layer, then a set of external rounds, a set of internal * rounds, and a set of external rounds. * - * Note that except for the inital linear layer, we compute the round results natively and record them into Poseidon2 - * custom gates. This allows us to heavily reduce the number of arithmetic gates, that would have been otherwise - * required to perform expensive non-linear S-box operations in-circuit. + * Note that we compute the round results natively and record them into Poseidon2 custom gates. This allows us to + * heavily reduce the number of arithmetic gates that would have been otherwise required to perform expensive + * non-linear S-box operations in-circuit. * - * The external rounds are constrained via `Poseidon2ExternalRelationImpl`. - * The internal rounds are constrained via `Poseidon2InternalRelationImpl`. + * The external rounds are constrained via `Poseidon2ExternalRelationImpl`; Mega also uses + * `Poseidon2InitialExternalRelationImpl` for the initial external linear layer. Mega constrains the internal rounds via + * a K=4 compressed block (`Poseidon2TransitionEntryRelationImpl`, `Poseidon2QuadInternalRelationImpl`, and + * `Poseidon2QuadInternalTerminalRelationImpl`); Ultra uses `Poseidon2InternalRelationImpl`. * */ template class Poseidon2Permutation { @@ -61,10 +64,13 @@ template class Poseidon2Permutation { static State permutation(Builder* builder, const State& input); /** - * @brief In-circuit method to efficiently multiply the inital state by the external matrix \f$ M_E \f$. Uses 6 - * aritmetic gates. + * @brief In-circuit method to efficiently multiply the initial state by the external matrix \f$ M_E \f$. 
*/ - static void matrix_multiplication_external(State& state); + static void matrix_multiplication_external(State& state) + requires IsMegaBuilder; + + static void matrix_multiplication_external(State& state) + requires(!IsMegaBuilder); /** * @brief The result of applying a round of Poseidon2 is stored in the next row and is accessed by Poseidon2 diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp index 209125db27c4..6f57902aa8f4 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp @@ -23,6 +23,8 @@ template class databus { using field_pt = field_t; public: + bus_vector() = default; + bus_vector(const BusId bus_idx) : bus_idx(bus_idx) {}; @@ -59,9 +61,15 @@ template class databus { }; public: - // The columns of the DataBus - bus_vector calldata{ BusId::CALLDATA }; - bus_vector secondary_calldata{ BusId::SECONDARY_CALLDATA }; + // The columns of the DataBus. + bus_vector kernel_calldata{ BusId::KERNEL_CALLDATA }; + std::array app_calldata = []() { + std::array result{}; + for (uint8_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + result[idx] = bus_vector{ static_cast(idx + 1) }; + } + return result; + }(); bus_vector return_data{ BusId::RETURNDATA }; }; @@ -73,13 +81,12 @@ template class databus { * \pi_i, and it has access to [C_i] directly from \pi_i. The consistency checks in circuit (i+1) are thus of the * form \pi_i.public_inputs.[R_{i-1}] = \pi_i.[C_i]. * - * For consistent behavior across kernels, every kernel propagates two return data commitments via its - * public inputs. If one of either the app or kernel return data does not exist, it is populated with a default - * value that will satisfy the consistency check on the next cycle. 
For example, the first kernel has no previous - * kernel to verify and thus neither receives a previous kernel return data commitment nor a calldata input - * corresponding to a previous kernel. The "empty" calldata will be populated with a default value, resulting in a - * default commitment value. We set the same value for the missing return data herein so that the commitments agree - * and the corresponding consistency check will be satisfied in the kernel in which it's performed. + * For consistent behavior across kernels, every kernel propagates `MAX_APPS_PER_KERNEL + 1` return-data commitments + * via its public inputs: one for the previous kernel and one per app slot. If any of these does not exist (e.g., the + * first kernel has no previous kernel; a kernel with fewer than MAX apps leaves the trailing app slots unset), it is + * populated with a default commitment value that will satisfy the consistency check on the next cycle. The "empty" + * calldata column on the next kernel side will commit to the same default value, so the commitments agree and the + * consistency check passes trivially. * * @tparam Builder */ @@ -91,11 +98,15 @@ template class DataBusDepot { using FrNative = typename Curve::ScalarFieldNative; // Storage for the return data commitments to be propagated via the public inputs - Commitment app_return_data_commitment; + std::array app_return_data_commitments; Commitment kernel_return_data_commitment; // Existence flags indicating whether each return data commitment has been set - bool app_return_data_commitment_exists = false; + std::array app_return_data_commitment_exists = []() { + std::array result{}; + result.fill(false); + return result; + }(); bool kernel_return_data_commitment_exists = false; void set_kernel_return_data_commitment(const Commitment& commitment) @@ -104,10 +115,39 @@ template class DataBusDepot { kernel_return_data_commitment_exists = true; } + /** + * @brief Whether all app return-data slots are currently empty. 
+ * @details Used to assert the kernel-boundary invariant: at the start of each kernel completion, every slot must + * have been drained by the prior kernel's get-loop so that `set_app_return_data_commitment` begins filling from + * slot 0. + */ + bool app_return_data_slots_are_empty() const + { + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + if (app_return_data_commitment_exists[idx]) { + return false; + } + } + return true; + } + + /** + * @brief Record an app return-data commitment in the next available slot. + * @details Slot assignment is implicit: the depot fills slot 0 first, then slot 1, etc., as apps are processed in + * the kernel's verification queue. Slots are released by `get_app_return_data_commitment`; each kernel-completion + * pass drains every slot via the get-loop in `Chonk::complete_kernel_circuit_logic`, so the next kernel begins + * filling from slot 0 again. + */ void set_app_return_data_commitment(const Commitment& commitment) { - app_return_data_commitment = commitment; - app_return_data_commitment_exists = true; + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + if (!app_return_data_commitment_exists[idx]) { + app_return_data_commitments[idx] = commitment; + app_return_data_commitment_exists[idx] = true; + return; + } + } + BB_ASSERT(false, "DataBusDepot has no free app return-data slot"); } /** @@ -139,13 +179,14 @@ template class DataBusDepot { * @brief Get the previously set app return data commitment if it exists, else a default one * */ - Commitment get_app_return_data_commitment(Builder& builder) + Commitment get_app_return_data_commitment(Builder& builder, const size_t idx) { - if (!app_return_data_commitment_exists) { + BB_ASSERT_LT(idx, MAX_APPS_PER_KERNEL, "DataBusDepot app return-data index out of bounds"); + if (!app_return_data_commitment_exists[idx]) { return construct_default_commitment(builder); } - app_return_data_commitment_exists = false; // Reset the existence flag after retrieval - return 
app_return_data_commitment; + app_return_data_commitment_exists[idx] = false; // Reset the existence flag after retrieval + return app_return_data_commitments[idx]; } }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.test.cpp index 6a5e11197833..6b7b3482bf5c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.test.cpp @@ -41,7 +41,7 @@ TEST(Databus, CallDataAndReturnData) for (auto& value : raw_calldata_values) { calldata_values.emplace_back(witness_ct(&builder, value)); } - databus.calldata.set_values(calldata_values); + databus.kernel_calldata.set_values(calldata_values); // Populate the return data in the databus std::vector return_data_values; @@ -53,14 +53,14 @@ TEST(Databus, CallDataAndReturnData) // Establish that the first two outputs are simply copied over from the inputs. Each 'copy' requires two read gates. field_ct idx_0(witness_ct(&builder, 0)); field_ct idx_1(witness_ct(&builder, 1)); - databus.calldata[idx_0].assert_equal(databus.return_data[idx_0]); - databus.calldata[idx_1].assert_equal(databus.return_data[idx_1]); + databus.kernel_calldata[idx_0].assert_equal(databus.return_data[idx_0]); + databus.kernel_calldata[idx_1].assert_equal(databus.return_data[idx_1]); // Get the last two entries in calldata and compute their sum field_ct idx_2(witness_ct(&builder, 2)); field_ct idx_3(witness_ct(&builder, 3)); // This line creates an arithmetic gate and two calldata read gates (via operator[]). - field_ct sum = databus.calldata[idx_2] + databus.calldata[idx_3]; + field_ct sum = databus.kernel_calldata[idx_2] + databus.kernel_calldata[idx_3]; // Read the last index of the return data. (Creates a return data read gate via operator[]). 
field_ct idx(witness_ct(&builder, 2)); @@ -124,14 +124,14 @@ TEST(Databus, UnnormalizedEntryAccess) // add the value to itself to make it unnormalized (the multiplicative constant will be 2) returndata_entries.emplace_back(entry_witness + entry_witness); } - databus.calldata.set_values(calldata_entries); + databus.kernel_calldata.set_values(calldata_entries); databus.return_data.set_values(returndata_entries); field_ct idx_0 = witness_ct(&builder, 0); field_ct idx_1 = witness_ct(&builder, 1); field_ct idx_2 = witness_ct(&builder, 2); - databus.return_data[idx_0].assert_equal(databus.calldata[idx_0] + databus.calldata[idx_0]); - databus.return_data[idx_1].assert_equal(databus.calldata[idx_1] + databus.calldata[idx_1]); - databus.return_data[idx_2].assert_equal(databus.calldata[idx_2] + databus.calldata[idx_2]); + databus.return_data[idx_0].assert_equal(databus.kernel_calldata[idx_0] + databus.kernel_calldata[idx_0]); + databus.return_data[idx_1].assert_equal(databus.kernel_calldata[idx_1] + databus.kernel_calldata[idx_1]); + databus.return_data[idx_2].assert_equal(databus.kernel_calldata[idx_2] + databus.kernel_calldata[idx_2]); EXPECT_TRUE(CircuitChecker::check(builder)); } @@ -150,7 +150,7 @@ TEST(Databus, ConstantAndUnnormalizedIndices) for (auto& value : raw_calldata_values) { calldata_values.emplace_back(witness_ct(&builder, value)); } - databus.calldata.set_values(calldata_values); + databus.kernel_calldata.set_values(calldata_values); // Populate the return data in the databus std::vector returndata_values; @@ -164,10 +164,10 @@ TEST(Databus, ConstantAndUnnormalizedIndices) field_ct idx_1(witness_ct(&builder, 1)); // un-normalized index (with multiplicative constant 2) field_ct idx_2 = idx_1 + idx_1; - field_ct sum = databus.calldata[idx_0] + databus.calldata[idx_1] + databus.calldata[idx_2]; + field_ct sum = databus.kernel_calldata[idx_0] + databus.kernel_calldata[idx_1] + databus.kernel_calldata[idx_2]; - 
databus.return_data[idx_0].assert_equal(databus.calldata[idx_0]); - databus.return_data[idx_1].assert_equal(databus.calldata[idx_1]); + databus.return_data[idx_0].assert_equal(databus.kernel_calldata[idx_0]); + databus.return_data[idx_1].assert_equal(databus.kernel_calldata[idx_1]); databus.return_data[idx_2].assert_equal(sum); EXPECT_TRUE(CircuitChecker::check(builder)); @@ -218,7 +218,7 @@ TEST(Databus, BadCopyFailure) // Populate calldata with a single input fr input = 13; - databus.calldata.set_values({ witness_ct(&builder, input) }); + databus.kernel_calldata.set_values({ witness_ct(&builder, input) }); // Populate return data with an output different from the input fr output = input - 1; @@ -227,7 +227,7 @@ TEST(Databus, BadCopyFailure) // Attempt to attest that the calldata has been copied into the return data size_t raw_idx = 0; // read at 0th index field_ct idx(witness_ct(&builder, raw_idx)); - databus.calldata[idx].assert_equal(databus.return_data[idx]); + databus.kernel_calldata[idx].assert_equal(databus.return_data[idx]); // Since the output data is not a copy of the input, the checker should fail EXPECT_FALSE(CircuitChecker::check(builder)); @@ -251,7 +251,7 @@ TEST(Databus, DuplicateRead) for (auto& value : raw_calldata_values) { calldata_values.emplace_back(witness_ct(&builder, value)); } - databus.calldata.set_values(calldata_values); + databus.kernel_calldata.set_values(calldata_values); // Populate the return data in the databus std::vector return_data_values; @@ -264,10 +264,10 @@ TEST(Databus, DuplicateRead) field_ct idx_1(witness_ct(&builder, 1)); field_ct idx_2(witness_ct(&builder, 2)); - databus.calldata[idx_1]; - databus.calldata[idx_1]; - databus.calldata[idx_1]; - databus.calldata[idx_2]; + databus.kernel_calldata[idx_1]; + databus.kernel_calldata[idx_1]; + databus.kernel_calldata[idx_1]; + databus.kernel_calldata[idx_2]; databus.return_data[idx_2]; databus.return_data[idx_2]; diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp index b1d7ad6b01ca..978c7eee75ba 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp @@ -464,10 +464,12 @@ template field_t field_t::pow(const uint32_ * @brief Raise a field_t to a power of an exponent (field_t). Note that the exponent must not exceed 32 bits and is * implicitly range constrained. */ -template field_t field_t::pow(const field_t& exponent) const +template +template +field_t field_t::pow(const field_t& exponent) const { uint256_t exponent_value = exponent.get_value(); - BB_ASSERT_LT(exponent_value.get_msb(), 32U, "Exponent too large in field_t::pow"); + BB_ASSERT_LT(exponent_value.get_msb(), num_bits, "Exponent too large in field_t::pow"); if (is_constant() && exponent.is_constant()) { return field_t(get_value().pow(exponent_value)); @@ -479,14 +481,14 @@ template field_t field_t::pow(const field_t auto* ctx = validate_context(context, exponent.context); - std::array, 32> exponent_bits; + std::array, num_bits> exponent_bits; // Collect individual bits as bool_t's for (size_t i = 0; i < exponent_bits.size(); ++i) { uint256_t value_bit = exponent_value & 1; bool_t bit; bit = bool_t(witness_t(ctx, value_bit.data[0])); bit.set_origin_tag(exponent.tag); - exponent_bits[31 - i] = bit; + exponent_bits[num_bits - 1 - i] = bit; exponent_value >>= 1; } @@ -501,7 +503,7 @@ template field_t field_t::pow(const field_t // Compute the result of exponentiation field_t accumulator(ctx, bb::fr::one()); const field_t one(bb::fr::one()); - for (size_t i = 0; i < 32; ++i) { + for (size_t i = 0; i < num_bits; ++i) { accumulator *= accumulator; // If current bit == 1, multiply by the base, else propagate the accumulator const field_t multiplier = conditional_assign_internal(exponent_bits[i], *this, one); @@ -1346,6 +1348,19 @@ 
std::pair, field_t> field_t::no_wrap_split_at return std::make_pair(lo_wit, hi_wit); } +// Explicit instantiations of pow +template field_t field_t::pow<32UL>( + const field_t&) const; + +template field_t field_t::pow<32UL>( + const field_t&) const; + +template field_t field_t::pow( + const field_t&) const; + +template field_t field_t::pow( + const field_t&) const; + template class field_t; template class field_t; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp index dc2890ee1bcf..a81eec3564f2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp @@ -283,8 +283,8 @@ template class field_t { field_t sqr() const { return operator*(*this); } field_t pow(const uint32_t& exponent) const; - // N.B. we implicitly range-constrain 'exponent' to be a 32-bit integer! - field_t pow(const field_t& exponent) const; + // N.B. we implicitly range-constrain 'exponent' to be a num_bits-bit integer!
+ template field_t pow(const field_t& exponent) const; field_t operator+=(const field_t& other) { diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp index 3e93b25b5342..f6d5e5fd1845 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp @@ -1045,14 +1045,16 @@ template class stdlib_field : public testing::Test { EXPECT_EQ(result, false); } - static void test_pow() + static void test_pow(uint32_t max_exponent_bits) { Builder builder = Builder(); - std::array const_exponent_values{ 0, 1, engine.get_random_uint32() }; - std::array witness_exponent_values{ witness_ct(&builder, 0), - witness_ct(&builder, 1), - witness_ct(&builder, engine.get_random_uint32()) }; + std::array const_exponent_values{ 0, 1, engine.get_random_uint32() % (1 << max_exponent_bits) }; + std::array witness_exponent_values{ + witness_ct(&builder, 0), + witness_ct(&builder, 1), + witness_ct(&builder, engine.get_random_uint32() % (1 << max_exponent_bits)) + }; std::array base_values{ 0, 1, engine.get_random_uint256() }; for (auto& base : base_values) { @@ -1063,7 +1065,13 @@ template class stdlib_field : public testing::Test { EXPECT_EQ(result.get_value(), bb::fr(base).pow(exponent)); // Test witness base && integer exponent cases field_ct witness_base(witness_ct(&builder, base)); - result = witness_base.pow(exponent); + if (max_exponent_bits == 32) { + result = witness_base.pow(exponent); + } else if (max_exponent_bits == CONST_OP_QUEUE_LOG_SIZE + 1) { + result = witness_base.template pow(exponent); + } else { + bb::assert_failure("Invalid max_exponent_bits value in test_pow"); + } if (exponent != 0) { EXPECT_TRUE(!result.is_constant()); @@ -1085,7 +1093,13 @@ template class stdlib_field : public testing::Test { EXPECT_EQ(result.get_value(), bb::fr(base).pow(exponent.get_value())); 
// Test witness base && witness exponent cases field_ct witness_base(witness_ct(&builder, base)); - result = witness_base.pow(exponent); + if (max_exponent_bits == 32) { + result = witness_base.pow(exponent); + } else if (max_exponent_bits == CONST_OP_QUEUE_LOG_SIZE + 1) { + result = witness_base.template pow(exponent); + } else { + bb::assert_failure("Invalid max_exponent_bits value in test_pow"); + } EXPECT_TRUE(!result.is_constant()); EXPECT_EQ(result.get_value(), bb::fr(base).pow(exponent.get_value())); @@ -1095,21 +1109,51 @@ template class stdlib_field : public testing::Test { } } - static void test_pow_exponent_out_of_range() + static void test_pow_witness_exponent_out_of_range(uint32_t max_exponent_bits) { - Builder builder = Builder(); fr base_val(engine.get_random_uint256()); - uint64_t exponent_val = engine.get_random_uint32(); - exponent_val += (uint64_t(1) << 32); + uint64_t exponent_val = engine.get_random_uint32() % (uint64_t(1) << max_exponent_bits); + exponent_val += (uint64_t(1) << max_exponent_bits); + + Builder builder = Builder(); [[maybe_unused]] field_ct base = witness_ct(&builder, base_val); field_ct exponent = witness_ct(&builder, exponent_val); - EXPECT_THROW_WITH_MESSAGE(base.pow(exponent), "Exponent too large in field_t::pow"); + field_ct result; + if (max_exponent_bits == 32) { + result = base.pow(exponent); + } else if (max_exponent_bits == CONST_OP_QUEUE_LOG_SIZE + 1) { + result = base.template pow(exponent); + } else { + bb::assert_failure("Invalid max_exponent_bits value in test_pow"); + } + + EXPECT_FALSE(CircuitChecker::check(builder)); + EXPECT_TRUE(builder.failed()); + EXPECT_EQ(builder.err(), "field_t::pow exponent accumulator incorrect"); + } + + static void test_pow_constant_exponent_out_of_range(uint32_t max_exponent_bits) + + { + fr base_val(engine.get_random_uint256()); + uint64_t exponent_val = engine.get_random_uint32() % (uint64_t(1) << max_exponent_bits); + exponent_val += (uint64_t(1) << max_exponent_bits); - exponent 
= field_ct(exponent_val); - EXPECT_THROW_WITH_MESSAGE(base.pow(exponent), "Exponent too large in field_t::pow"); - }; + Builder builder = Builder(); + + [[maybe_unused]] field_ct base = witness_ct(&builder, base_val); + field_ct exponent = field_ct(exponent_val); + if (max_exponent_bits == 32) { + EXPECT_THROW_WITH_MESSAGE(base.pow(exponent), "Exponent too large in field_t::pow"); + } else if (max_exponent_bits == CONST_OP_QUEUE_LOG_SIZE + 1) { + EXPECT_THROW_WITH_MESSAGE(base.template pow(exponent), + "Exponent too large in field_t::pow"); + } else { + bb::assert_failure("Invalid max_exponent_bits value in test_pow"); + } + } static void test_copy_as_new_witness() { @@ -1701,13 +1745,31 @@ TYPED_TEST(stdlib_field, test_postfix_increment) { TestFixture::test_postfix_increment(); } -TYPED_TEST(stdlib_field, test_pow) +TYPED_TEST(stdlib_field, test_pow_op_queue) +{ + TestFixture::test_pow(/*max_exponent_bits*/ CONST_OP_QUEUE_LOG_SIZE + 1); +} +TYPED_TEST(stdlib_field, test_pow_32) +{ + TestFixture::test_pow(/*max_exponent_bits*/ 32); +} +TYPED_TEST(stdlib_field, test_pow_witness_exponent_out_of_range_op_queue) +{ + BB_DISABLE_ASSERTS(); + TestFixture::test_pow_witness_exponent_out_of_range(/*max_exponent_bits*/ CONST_OP_QUEUE_LOG_SIZE + 1); +} +TYPED_TEST(stdlib_field, test_pow_witness_exponent_out_of_range_32) +{ + BB_DISABLE_ASSERTS(); + TestFixture::test_pow_witness_exponent_out_of_range(/*max_exponent_bits*/ 32); +} +TYPED_TEST(stdlib_field, test_pow_constant_exponent_out_of_range_op_queue) { - TestFixture::test_pow(); + TestFixture::test_pow_constant_exponent_out_of_range(/*max_exponent_bits*/ CONST_OP_QUEUE_LOG_SIZE + 1); } -TYPED_TEST(stdlib_field, test_pow_exponent_out_of_range) +TYPED_TEST(stdlib_field, test_pow_constant_exponent_out_of_range_32) { - TestFixture::test_pow_exponent_out_of_range(); + TestFixture::test_pow_constant_exponent_out_of_range(/*max_exponent_bits*/ 32); } TYPED_TEST(stdlib_field, test_prefix_increment) { diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/primitives/pairing_points.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/pairing_points.hpp index c87964f35620..bc6d43ae5a0e 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/pairing_points.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/pairing_points.hpp @@ -96,7 +96,10 @@ template struct PairingPoints { static PairingPoints aggregate_multiple(std::vector& pairing_points, bool handle_edge_cases = true) { size_t num_points = pairing_points.size(); - BB_ASSERT_GT(num_points, 1UL, "This method should be used only with more than one pairing point."); + BB_ASSERT_GT(num_points, 0UL, "Must provide at least one PairingPoints for aggregation"); + if (num_points == 1) { + return pairing_points[0]; + } std::vector first_components; first_components.reserve(num_points); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp b/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp index da41c21df951..c9d36be8fc9a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp @@ -7,7 +7,7 @@ // Special public inputs designed propagate data between Chonk and Rollup circuits. 
// // These structures are binding several Chonk components: -// - KernelIO: Standard kernel outputs (pairing points, databus, ecc_op_tables, accum hash) +// - KernelIO: Standard kernel outputs (pairing points, databus, ecc_op_hash, accum hash) // - HidingKernelIO: Final kernel outputs (no accum hash since folding terminates) // - AppIO/DefaultIO: App circuit outputs (just pairing points) // - RollupIO: Rollup circuit outputs (pairing points + IPA claim) @@ -55,10 +55,11 @@ std::array::Group, Builder::NUM_WIRES> empty_ecc_op_tabl } /** - * @brief Manages the data that is propagated on the public inputs of a kernel circuit + * @brief Manages the data that is propagated on the public inputs of a kernel circuit. * + * @tparam N Number of app return-data commitments carried by the kernel public inputs. */ -class KernelIO { +template class KernelIO_ { public: using Builder = MegaCircuitBuilder; // kernel builder is always Mega using Curve = stdlib::bn254; // curve is always bn254 @@ -66,19 +67,20 @@ class KernelIO { using FF = Curve::ScalarField; using PairingInputs = stdlib::recursion::PairingPoints; using TableCommitments = std::array; + using AppReturnDataCommitments = std::array; using PublicPoint = stdlib::PublicInputComponent; using PublicPairingPoints = stdlib::PublicInputComponent; using PublicFF = stdlib::PublicInputComponent; - PairingInputs pairing_inputs; // Inputs {P0, P1} to an EC pairing check - G1 kernel_return_data; // Commitment to the return data of a kernel circuit - G1 app_return_data; // Commitment to the return data of an app circuit - TableCommitments ecc_op_tables; // commitments to merged tables obtained from recursive Merge verification - FF output_hn_accum_hash; // hash of the output HN verifier accumulator + PairingInputs pairing_inputs; // Inputs {P0, P1} to an EC pairing check + G1 kernel_return_data; // Commitment to the return data of a kernel circuit + AppReturnDataCommitments app_return_data; // Commitment to each verified app 
circuit's return data + FF ecc_op_hash; // Running Poseidon2 hash over ECC op column commitments + FF output_hn_accum_hash; // hash of the output HN verifier accumulator // Total size of the kernel IO public inputs - static constexpr size_t PUBLIC_INPUTS_SIZE = KERNEL_PUBLIC_INPUTS_SIZE; + static constexpr size_t PUBLIC_INPUTS_SIZE = kernel_public_inputs_size(N); static constexpr bool HasIPA = false; /** @@ -97,12 +99,12 @@ class KernelIO { index += PairingInputs::PUBLIC_INPUTS_SIZE; kernel_return_data = PublicPoint::reconstruct(public_inputs, PublicComponentKey{ index }); index += G1::PUBLIC_INPUTS_SIZE; - app_return_data = PublicPoint::reconstruct(public_inputs, PublicComponentKey{ index }); - index += G1::PUBLIC_INPUTS_SIZE; - for (auto& table_commitment : ecc_op_tables) { - table_commitment = PublicPoint::reconstruct(public_inputs, PublicComponentKey{ index }); + for (auto& app_commitment : app_return_data) { + app_commitment = PublicPoint::reconstruct(public_inputs, PublicComponentKey{ index }); index += G1::PUBLIC_INPUTS_SIZE; } + ecc_op_hash = PublicFF::reconstruct(public_inputs, PublicComponentKey{ index }); + index += FF::PUBLIC_INPUTS_SIZE; output_hn_accum_hash = PublicFF::reconstruct(public_inputs, PublicComponentKey{ index }); index += FF::PUBLIC_INPUTS_SIZE; } @@ -117,10 +119,10 @@ class KernelIO { pairing_inputs.set_public(builder); kernel_return_data.set_public(); - app_return_data.set_public(); - for (auto& table_commitment : ecc_op_tables) { - table_commitment.set_public(); + for (auto& app_commitment : app_return_data) { + app_commitment.set_public(); } + ecc_op_hash.set_public(); output_hn_accum_hash.set_public(); // Finalize the public inputs to ensure no more public inputs can be added hereafter. 
@@ -133,22 +135,21 @@ class KernelIO { */ static void add_default(Builder& builder) { - KernelIO inputs; + KernelIO_ inputs; inputs.pairing_inputs = PairingInputs::construct_default(); inputs.kernel_return_data = DataBusDepot::construct_default_commitment(builder); - inputs.app_return_data = DataBusDepot::construct_default_commitment(builder); - for (auto& table_commitment : inputs.ecc_op_tables) { - table_commitment = G1(typename G1::BaseField(nullptr, uint256_t(DEFAULT_ECC_COMMITMENT.x)), - typename G1::BaseField(nullptr, uint256_t(DEFAULT_ECC_COMMITMENT.y)), - /*assert_on_curve=*/false); - table_commitment.convert_constant_to_fixed_witness(&builder); + for (auto& app_commitment : inputs.app_return_data) { + app_commitment = DataBusDepot::construct_default_commitment(builder); } + inputs.ecc_op_hash = FF::from_witness(&builder, typename FF::native(0)); inputs.output_hn_accum_hash = FF::from_witness(&builder, typename FF::native(0)); inputs.set_public(); } }; +using KernelIO = KernelIO_; + /** * @brief Manages the data that is propagated on the public inputs of an application/function circuit * @@ -282,7 +283,8 @@ template class HidingKernelIO { PairingInputs pairing_inputs; // Inputs {P0, P1} to an EC pairing check G1 kernel_return_data; // Commitment to the return data of the tail kernel circuit - TableCommitments ecc_op_tables; // commitments to merged tables obtained from final Merge verification + TableCommitments ecc_op_tables; // commitments to the full merged op queue tables, obtained from the + // batched merge verification performed inside the hiding kernel // Total size of the IO public inputs static constexpr size_t PUBLIC_INPUTS_SIZE = HIDING_KERNEL_PUBLIC_INPUTS_SIZE; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.test.cpp index 8541f67e6df3..0c489ead75d9 100644 --- 
a/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs.test.cpp @@ -21,16 +21,14 @@ TEST_F(SpecialPublicInputsTests, Basic) using G1Native = Curve::GroupNative::affine_element; using FFNative = Curve::ScalarFieldNative; - static constexpr size_t NUM_WIRES = Builder::NUM_WIRES; - G1Native P0_val = G1Native::random_element(); G1Native P1_val = G1Native::random_element(); G1Native kernel_return_data_val = G1Native::random_element(); - G1Native app_return_data_val = G1Native::random_element(); - std::array ecc_op_tables_val; - for (auto& commitment : ecc_op_tables_val) { - commitment = G1Native::random_element(); + std::array app_return_data_val; + for (auto& value : app_return_data_val) { + value = G1Native::random_element(); } + FFNative ecc_op_hash_val = FFNative::random_element(); FFNative output_hn_accum_hash_val = FFNative::random_element(); // Store the public inputs of the first circuit to be used by the second @@ -45,10 +43,10 @@ TEST_F(SpecialPublicInputsTests, Basic) PairingInputs pairing_inputs{ G1::from_witness(&builder, P0_val), G1::from_witness(&builder, P1_val) }; kernel_output.pairing_inputs = pairing_inputs; kernel_output.kernel_return_data = G1::from_witness(&builder, kernel_return_data_val); - kernel_output.app_return_data = G1::from_witness(&builder, app_return_data_val); - for (auto [table_commitment, table_val] : zip_view(kernel_output.ecc_op_tables, ecc_op_tables_val)) { - table_commitment = G1::from_witness(&builder, table_val); + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + kernel_output.app_return_data[idx] = G1::from_witness(&builder, app_return_data_val[idx]); } + kernel_output.ecc_op_hash = FF::from_witness(&builder, ecc_op_hash_val); kernel_output.output_hn_accum_hash = FF::from_witness(&builder, output_hn_accum_hash_val); // Propagate the kernel output via the public inputs @@ -77,10 +75,10 @@ 
TEST_F(SpecialPublicInputsTests, Basic) EXPECT_EQ(kernel_input.pairing_inputs.P0().get_value(), P0_val); EXPECT_EQ(kernel_input.pairing_inputs.P1().get_value(), P1_val); EXPECT_EQ(kernel_input.kernel_return_data.get_value(), kernel_return_data_val); - EXPECT_EQ(kernel_input.app_return_data.get_value(), app_return_data_val); - for (auto [reconstructed_commitment, commitment] : zip_view(kernel_input.ecc_op_tables, ecc_op_tables_val)) { - EXPECT_EQ(reconstructed_commitment.get_value(), commitment); + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + EXPECT_EQ(kernel_input.app_return_data[idx].get_value(), app_return_data_val[idx]); } + EXPECT_EQ(kernel_input.ecc_op_hash.get_value(), ecc_op_hash_val); EXPECT_EQ(kernel_input.output_hn_accum_hash.get_value(), output_hn_accum_hash_val); } } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs_test_serde.hpp b/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs_test_serde.hpp index b0046fb376b1..c1e2c3bcbab7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs_test_serde.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/special_public_inputs/special_public_inputs_test_serde.hpp @@ -12,22 +12,22 @@ namespace bb::stdlib::recursion::honk { /** * @brief **For test purposes only**: Native representation and serde for KernelIO public inputs * @details Used for testing and verification with native bb::fr vectors. - * Mirrors the structure of stdlib KernelIO but works with native types. + * Mirrors the structure of stdlib KernelIO_ but works with native types. 
*/ -class KernelIOSerde { +template class KernelIOSerde_ { public: using NativeFF = bb::fr; using NativeG1 = curve::BN254::AffineElement; using NativeFq = curve::BN254::BaseField; using NativePairingPoints = bb::PairingPoints; - using NativeTableCommitments = std::array; + using NativeAppReturnDataCommitments = std::array; - static constexpr size_t PUBLIC_INPUTS_SIZE = KERNEL_PUBLIC_INPUTS_SIZE; + static constexpr size_t PUBLIC_INPUTS_SIZE = kernel_public_inputs_size(N); NativePairingPoints pairing_inputs; NativeG1 kernel_return_data; - NativeG1 app_return_data; - NativeTableCommitments ecc_op_tables; + NativeAppReturnDataCommitments app_return_data; + NativeFF ecc_op_hash; NativeFF output_hn_accum_hash; /** @@ -37,9 +37,9 @@ class KernelIOSerde { * @details KernelIO is at the END of the public inputs section, so we start at * offset (num_public_inputs - PUBLIC_INPUTS_SIZE) */ - static KernelIOSerde from_proof(const std::vector& proof, size_t num_public_inputs) + static KernelIOSerde_ from_proof(const std::vector& proof, size_t num_public_inputs) { - KernelIOSerde result; + KernelIOSerde_ result; // KernelIO is at the end of public inputs, which are at the start of the proof size_t idx = num_public_inputs - PUBLIC_INPUTS_SIZE; @@ -54,10 +54,10 @@ class KernelIOSerde { result.pairing_inputs.P0() = deserialize_point(); result.pairing_inputs.P1() = deserialize_point(); result.kernel_return_data = deserialize_point(); - result.app_return_data = deserialize_point(); - for (auto& commitment : result.ecc_op_tables) { - commitment = deserialize_point(); + for (auto& app_commitment : result.app_return_data) { + app_commitment = deserialize_point(); } + result.ecc_op_hash = proof[idx++]; result.output_hn_accum_hash = proof[idx]; return result; @@ -96,14 +96,16 @@ class KernelIOSerde { serialize_point(pairing_inputs.P0()); serialize_point(pairing_inputs.P1()); serialize_point(kernel_return_data); - serialize_point(app_return_data); - for (const auto& commitment : 
ecc_op_tables) { - serialize_point(commitment); + for (const auto& app_commitment : app_return_data) { + serialize_point(app_commitment); } + proof[idx++] = ecc_op_hash; proof[idx] = output_hn_accum_hash; } }; +using KernelIOSerde = KernelIOSerde_; + /** * @brief Native representation and serde for HidingKernelIO public inputs * @details Used for testing and verification with native bb::fr vectors. diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp index 78d2392263ce..b652b40ff24c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.test.cpp @@ -73,13 +73,11 @@ class TranslatorRecursiveTests : public ::testing::Test { // Add the same operations to the ECC op queue; the native computation is performed under the hood. 
auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - add_random_ops(op_queue, InnerBuilder::NUM_RANDOM_OPS_START); - add_mixed_ops(op_queue, circuit_size_parameter / 2); - op_queue->merge(); + // Construct zk columns + op_queue->construct_zk_columns(); add_mixed_ops(op_queue, circuit_size_parameter / 2); add_random_ops(op_queue, InnerBuilder::NUM_RANDOM_OPS_END); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); return InnerBuilder{ batching_challenge_v, evaluation_challenge_x, op_queue }; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/README.md b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/README.md index 6c22d9f0d223..0f5c66171664 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/README.md +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/README.md @@ -79,7 +79,11 @@ builder.assert_equal(idx_a, idx_b); | `q_memory` | RAM/ROM memory operations | | `q_lookup` | Lookup gate | | `q_poseidon2_external` | Poseidon2 external rounds | -| `q_poseidon2_internal` | Poseidon2 internal rounds | +| `q_poseidon2_internal` | Poseidon2 internal rounds (Ultra) | +| `q_poseidon2_external_initial` | Poseidon2 initial external linear layer (Mega) | +| `q_poseidon2_transition_entry` | Poseidon2 standard-to-quad boundary (Mega) | +| `q_poseidon2_quad_internal` | Poseidon2 K=4 internal rows (Mega) | +| `q_poseidon2_quad_internal_terminal` | Poseidon2 quad-to-standard boundary (Mega) | | `q_busread` | Databus read (Mega only) | Note: no selector exists for gates representing deferred ECC operations in the Mega builder. However, there is a selector in the Mega circuit itself for deferred ECC ops: `lagrange_ecc_op`. This is not stored in the builder; the selector is populated later, in `trace_to_polynomials.cpp`, as it can be efficiently derived once the builder phase has completed. 
@@ -110,6 +114,10 @@ The execution trace is constructed in the form of **blocks** (`ExecutionTraceBlo **Mega blocks** (includes all Ultra blocks, plus): - `ecc_op` — Deferred ECC operations for Goblin (must be first in trace) - `busread` — Databus read operations +- `poseidon2_quad_internal` — Poseidon2 K=4 internal-round compression + +Mega does not use the Ultra `poseidon2_internal` block for Poseidon2. The canonical Poseidon2 +circuit layout and soundness argument live in `stdlib/hash/poseidon2/README.md`. Within a given block, the corresponding gate selector is not always non-zero (hence why we have to track its values at all). This is because many gates make use of a shift mechanism that allow the constraint at row `i` to incorporate wire values at row `i+1`. In this case, row `i+1` may or may not be otherwise constrained, i.e. the gate selector at row `i+1` may take value 0. @@ -156,11 +164,11 @@ Beyond basic arithmetic, specialized gates provide efficient constraints for var | **Arithmetic** | `ArithmeticRelation` | Width-4 arithmetic with extended modes (see above) | | **Delta Range** | `DeltaRangeConstraintRelation` | Efficient range constraints | | **Elliptic** | `EllipticRelation` | EC point addition/doubling on Grumpkin | -| **Poseidon2** | `Poseidon2External/InternalRelation` | Optimized hash function rounds | +| **Poseidon2** | `Poseidon2ExternalRelation`, `Poseidon2InternalRelation` (Ultra), `Poseidon2InitialExternalRelation`, `Poseidon2QuadInternalRelation` (Mega) | Optimized hash function rounds | | **Lookup** | `LogDerivLookupRelation` | Table-based lookups (plookup) | | **Memory** | `MemoryRelation` | ROM reads, RAM read/write consistency | | **Non-native field** | `NonNativeFieldRelation` | Arithmetic over non-native fields via limb decomposition | -| **Databus** (Mega) | `DatabusLookupRelation` | Reads from calldata/returndata vectors | +| **Databus** (Mega) | `DatabusLookupRelation` | Reads from kernel/app calldata and return data vectors | | 
**ECC Op Queue** (Mega) | `EccOpQueueRelation` | Deferred ECC operations for Goblin | ## Public Inputs @@ -196,9 +204,9 @@ Mega is used in the context of client-side proving of Aztec transactions and for For large amounts of public data shared between multiple circuits (common in Aztec transactions), MegaCircuitBuilder provides the **DataBus**—a more efficient mechanism where prover cost scales with the number of *reads* rather than total data size. -Mega supports lookup-style reads on three bus vectors: -- `calldata`: Primary input data -- `secondary_calldata`: Additional input data +Mega supports lookup-style reads on five bus vectors: +- `kernel_calldata`: Input from the previous kernel's return data +- `app_calldata[0..2]`: Inputs from up to three app circuits' return data - `returndata`: Output data See `databus.hpp` in this directory for implementation details. diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/databus.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/databus.hpp index 07173d8d7386..9a9bbd4d8821 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/databus.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/databus.hpp @@ -7,16 +7,12 @@ #pragma once #include "barretenberg/common/assert.hpp" +#include "barretenberg/constants.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/public_input_component/public_component_key.hpp" #include namespace bb { -// We assume all kernels have space for two return data commitments on their public inputs -constexpr uint32_t NUM_DATABUS_COMMITMENTS = 2; -constexpr uint32_t PROPAGATED_DATABUS_COMMITMENT_SIZE = 8; -constexpr uint32_t PROPAGATED_DATABUS_COMMITMENTS_SIZE = PROPAGATED_DATABUS_COMMITMENT_SIZE * NUM_DATABUS_COMMITMENTS; - /** * @brief A DataBus column * @@ -73,9 +69,14 @@ struct BusVector { * in-circuit as we would with public inputs). 
* */ -constexpr size_t NUM_BUS_COLUMNS = 3; +constexpr size_t NUM_BUS_COLUMNS = MAX_APPS_PER_KERNEL + /*kernel calldata*/ 1 + /*kernel returndata*/ 1; using DataBus = std::array; -enum class BusId { CALLDATA, SECONDARY_CALLDATA, RETURNDATA }; +enum class BusId : uint8_t { + KERNEL_CALLDATA = 0, + APP_CALLDATA = 1, + RETURNDATA = MAX_APPS_PER_KERNEL + 1, +}; +static_assert(static_cast(BusId::RETURNDATA) == NUM_BUS_COLUMNS - 1, "BusId enum must match DataBus layout"); } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp index 16e46cb11eb2..f47c9d340f0c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.cpp @@ -245,10 +245,94 @@ template void MegaCircuitBuilder_::apply_databus_selectors(con block.q_1().emplace_back(idx == 0 ? 1 : 0); block.q_2().emplace_back(idx == 1 ? 1 : 0); block.q_3().emplace_back(idx == 2 ? 1 : 0); + block.q_4().emplace_back(idx == 3 ? 1 : 0); + block.q_5().emplace_back(0); + block.q_m().emplace_back(idx == 4 ? 1 : 0); + block.q_c().emplace_back(0); + block.set_gate_selector(1); +} + +/** + * @brief Poseidon2 initial linear layer gate, activates the q_poseidon2_external_initial selector and relation. + * @details Constrains the whole initial linear layer with a bespoke row. 
+ */ +template +void MegaCircuitBuilder_::create_poseidon2_initial_external_gate(const poseidon2_initial_external_gate_& in) +{ + auto& block = this->blocks.poseidon2_external; + block.populate_wires(in.a, in.b, in.c, in.d); + block.q_m().emplace_back(0); + block.q_1().emplace_back(0); + block.q_2().emplace_back(0); + block.q_3().emplace_back(0); + block.q_c().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); + block.set_initial_gate_selector(1); + this->check_selector_length_consistency(); + this->increment_num_gates(); +} + +/** + * @brief Poseidon2 K=4 compressed internal-round gate: processes FOUR consecutive internal rounds per row. + * @details + * Wires: a, b, c, d = state[0] at rounds 4i+0, 4i+1, 4i+2, 4i+3 + * Selectors: q_1, q_2, q_3, q_4 = c_{4i}, c_{4i+1}, c_{4i+2}, c_{4i+3} (this quad's 4 constants) + * q_m, q_c, q_5 = c_{4(i+1)}, c_{4(i+1)+1}, c_{4(i+1)+2} (next quad's first 3, for + * the shifted Vandermonde check) + * Terminal rows use q_poseidon2_quad_internal_terminal and set q_m, q_c, q_5 = 0 (no next quad). 
+ */ +template +void MegaCircuitBuilder_::create_poseidon2_quad_internal_gate(const poseidon2_quad_internal_gate_& in) +{ + auto& block = this->blocks.poseidon2_quad_internal; + block.populate_wires(in.a, in.b, in.c, in.d); + const auto& rc = crypto::Poseidon2Bn254ScalarFieldParams::round_constants; + block.q_1().emplace_back(rc[in.round_idx_start + 0][0]); + block.q_2().emplace_back(rc[in.round_idx_start + 1][0]); + block.q_3().emplace_back(rc[in.round_idx_start + 2][0]); + block.q_4().emplace_back(rc[in.round_idx_start + 3][0]); + if (in.is_terminal) { + block.q_m().emplace_back(0); + block.q_c().emplace_back(0); + block.q_5().emplace_back(0); + block.set_terminal_gate_selector(1); + } else { + block.q_m().emplace_back(rc[in.next_pair_start + 0][0]); + block.q_c().emplace_back(rc[in.next_pair_start + 1][0]); + block.q_5().emplace_back(rc[in.next_pair_start + 2][0]); + block.set_gate_selector(1); + } + this->check_selector_length_consistency(); + this->increment_num_gates(); +} + +/** + * @brief Poseidon2 transition-entry gate: standard → K=4 compressed encoding boundary. + * @details Placed immediately before the first compressed row. + * Wires: a, b, c, d = (s_0, s_1, s_2, s_3) at round `round_idx_start` (standard encoding) + * Selectors: q_1, q_2, q_3 = c_{start}, c_{start+1}, c_{start+2} + * q_4, q_m, q_c, q_5 = 0 (unused) + * + * Enforces the successor's (w_r_shift, w_o_shift, w_4_shift) equal state[0] at rounds + * `start+1, start+2, start+3` respectively, via 3 degree-7 subrelations. 
+ */ +template +void MegaCircuitBuilder_::create_poseidon2_transition_entry_gate(const poseidon2_transition_entry_gate_& in) +{ + auto& block = this->blocks.poseidon2_quad_internal; + block.populate_wires(in.a, in.b, in.c, in.d); + const auto& rc = crypto::Poseidon2Bn254ScalarFieldParams::round_constants; block.q_m().emplace_back(0); + block.q_1().emplace_back(rc[in.round_idx_start + 0][0]); + block.q_2().emplace_back(rc[in.round_idx_start + 1][0]); + block.q_3().emplace_back(rc[in.round_idx_start + 2][0]); + block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(0); - block.set_gate_selector(1); + block.set_entry_gate_selector(1); + this->check_selector_length_consistency(); + this->increment_num_gates(); } template class MegaCircuitBuilder_; diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp index 16c5c4dbbfb4..7c8eb2271310 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp @@ -122,23 +122,17 @@ template class MegaCircuitBuilder_ : public UltraCircuitBuilder_& in); + void create_poseidon2_quad_internal_gate(const poseidon2_quad_internal_gate_& in); + void create_poseidon2_transition_entry_gate(const poseidon2_transition_entry_gate_& in); - /** - * @brief Add a witness variable to the public calldata. - * - */ - void add_public_calldata(const uint32_t& in) { return append_to_bus_vector(BusId::CALLDATA, in); } + size_t get_num_constant_gates() const override { return 0; } /** - * @brief Add a witness variable to secondary_calldata. - * @details In practice this is used in aztec by the kernel circuit to recieve output from a function circuit + * @brief Add a witness variable to the specified calldata bus. 
* */ - void add_public_secondary_calldata(const uint32_t& in) - { - return append_to_bus_vector(BusId::SECONDARY_CALLDATA, in); - } + void add_public_calldata(BusId bus_idx, const uint32_t& in) { return append_to_bus_vector(bus_idx, in); } /** * @brief Add a witness variable to the public return_data. @@ -149,25 +143,12 @@ template class MegaCircuitBuilder_ : public UltraCircuitBuilder_ class MegaCircuitBuilder_ : public UltraCircuitBuilder_(bus_idx)].append(witness_idx); } - const BusVector& get_calldata() const { return databus[static_cast(BusId::CALLDATA)]; } - const BusVector& get_secondary_calldata() const { return databus[static_cast(BusId::SECONDARY_CALLDATA)]; } + const BusVector& get_calldata(BusId idx) const { return databus[static_cast(idx)]; } const BusVector& get_return_data() const { return databus[static_cast(BusId::RETURNDATA)]; } // Indexed access to the databus columns; enables NUM_BUS_COLUMNS-driven iteration over bus vectors. const BusVector& get_bus_vector(size_t bus_idx) const { return databus[bus_idx]; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp index 7afb7a7537b0..a123531888de 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp @@ -128,6 +128,7 @@ void UltraCircuitBuilder_::create_big_mul_add_gate(const mul_qua blocks.arithmetic.q_3().emplace_back(in.c_scaling); blocks.arithmetic.q_c().emplace_back(in.const_scaling); blocks.arithmetic.q_4().emplace_back(in.d_scaling); + blocks.arithmetic.q_5().emplace_back(0); blocks.arithmetic.set_gate_selector(include_next_gate_w_4 ? 
2 : 1); check_selector_length_consistency(); this->increment_num_gates(); @@ -153,6 +154,7 @@ void UltraCircuitBuilder_::create_big_add_gate(const add_quad_increment_num_gates(); @@ -175,6 +177,7 @@ void UltraCircuitBuilder_::create_bool_gate(const uint32_t varia blocks.arithmetic.q_3().emplace_back(0); blocks.arithmetic.q_c().emplace_back(0); blocks.arithmetic.q_4().emplace_back(0); + blocks.arithmetic.q_5().emplace_back(0); blocks.arithmetic.set_gate_selector(1); check_selector_length_consistency(); this->increment_num_gates(); @@ -198,6 +201,7 @@ void UltraCircuitBuilder_::create_arithmetic_gate(const arithmet blocks.arithmetic.q_3().emplace_back(in.q_o); blocks.arithmetic.q_c().emplace_back(in.q_c); blocks.arithmetic.q_4().emplace_back(0); + blocks.arithmetic.q_5().emplace_back(0); blocks.arithmetic.set_gate_selector(1); check_selector_length_consistency(); this->increment_num_gates(); @@ -243,6 +247,7 @@ void UltraCircuitBuilder_::create_ecc_add_gate(const ecc_add_gat block.populate_wires(this->zero_idx(), in.x1, in.y1, this->zero_idx()); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_1().emplace_back(q_sign); block.q_2().emplace_back(0); @@ -297,6 +302,7 @@ void UltraCircuitBuilder_::create_ecc_dbl_gate(const ecc_dbl_gat block.q_3().emplace_back(0); block.q_c().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.set_gate_selector(1); check_selector_length_consistency(); this->increment_num_gates(); @@ -326,6 +332,7 @@ void UltraCircuitBuilder_::fix_witness(const uint32_t witness_in blocks.arithmetic.q_3().emplace_back(0); blocks.arithmetic.q_c().emplace_back(-witness_value); blocks.arithmetic.q_4().emplace_back(0); + blocks.arithmetic.q_5().emplace_back(0); blocks.arithmetic.set_gate_selector(1); check_selector_length_consistency(); this->increment_num_gates(); @@ -397,6 +404,7 @@ void UltraCircuitBuilder_::create_lookup_gate(const uint32_t key 
blocks.lookup.q_c().emplace_back(column_3_step_size); blocks.lookup.q_1().emplace_back(0); blocks.lookup.q_4().emplace_back(0); + blocks.lookup.q_5().emplace_back(0); check_selector_length_consistency(); this->increment_num_gates(); @@ -778,6 +786,7 @@ void UltraCircuitBuilder_::enforce_small_deltas(const std::vecto blocks.delta_range.q_3().emplace_back(0); blocks.delta_range.q_c().emplace_back(0); blocks.delta_range.q_4().emplace_back(0); + blocks.delta_range.q_5().emplace_back(0); blocks.delta_range.set_gate_selector(1); check_selector_length_consistency(); } @@ -838,6 +847,7 @@ void UltraCircuitBuilder_::create_sort_constraint_with_edges( block.q_3().emplace_back(0); block.q_c().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.set_gate_selector(1); check_selector_length_consistency(); } @@ -888,6 +898,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(1); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -903,6 +914,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(0); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -915,6 +927,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(0); block.q_3().emplace_back(0); block.q_4().emplace_back(1); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -928,6 +941,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(0); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(1); // validate record witness is 
correctly computed block.q_c().emplace_back(0); // read/write flag stored in q_c check_selector_length_consistency(); @@ -941,6 +955,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(0); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(1); // validate record witness is correctly computed block.q_c().emplace_back(0); // read/write flag stored in q_c check_selector_length_consistency(); @@ -954,6 +969,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(0); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(1); // validate record witness is correctly computed block.q_c().emplace_back(1); // read/write flag stored in q_c check_selector_length_consistency(); @@ -964,6 +980,7 @@ void UltraCircuitBuilder_::apply_memory_selectors(const MEMORY_S block.q_2().emplace_back(0); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1006,6 +1023,7 @@ void UltraCircuitBuilder_::apply_nnf_selectors(const NNF_SELECTO block.q_2().emplace_back(0); block.q_3().emplace_back(1); block.q_4().emplace_back(1); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1016,6 +1034,7 @@ void UltraCircuitBuilder_::apply_nnf_selectors(const NNF_SELECTO block.q_2().emplace_back(0); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(1); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1026,6 +1045,7 @@ void UltraCircuitBuilder_::apply_nnf_selectors(const NNF_SELECTO block.q_2().emplace_back(1); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); 
block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1036,6 +1056,7 @@ void UltraCircuitBuilder_::apply_nnf_selectors(const NNF_SELECTO block.q_2().emplace_back(1); block.q_3().emplace_back(0); block.q_4().emplace_back(1); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1046,6 +1067,7 @@ void UltraCircuitBuilder_::apply_nnf_selectors(const NNF_SELECTO block.q_2().emplace_back(1); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(1); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1056,6 +1078,7 @@ void UltraCircuitBuilder_::apply_nnf_selectors(const NNF_SELECTO block.q_2().emplace_back(0); block.q_3().emplace_back(0); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_m().emplace_back(0); block.q_c().emplace_back(0); check_selector_length_consistency(); @@ -1502,6 +1525,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(-x_mulconst0 * linear_term_scale_factor); block.q_3().emplace_back(-y_mulconst0 * linear_term_scale_factor); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst0 * linear_term_scale_factor); block.set_gate_selector(3); @@ -1510,6 +1534,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(-x_mulconst1); block.q_3().emplace_back(-y_mulconst1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst1); block.set_gate_selector(2); @@ -1518,6 +1543,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(-y_mulconst2); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst2); block.set_gate_selector(1); @@ -1526,6 +1552,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ 
block.q_2().emplace_back(-y_mulconst3); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst3); block.set_gate_selector(1); @@ -1627,6 +1654,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(-x_mulconst0 * linear_term_scale_factor); block.q_3().emplace_back(y_mulconst0 * linear_term_scale_factor); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst0 * linear_term_scale_factor); block.set_gate_selector(3); @@ -1635,6 +1663,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(-x_mulconst1); block.q_3().emplace_back(y_mulconst1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst1); block.set_gate_selector(2); @@ -1643,6 +1672,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(y_mulconst2); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst2); block.set_gate_selector(1); @@ -1651,6 +1681,7 @@ std::array UltraCircuitBuilder_::evaluate_non_nativ block.q_2().emplace_back(y_mulconst3); block.q_3().emplace_back(1); block.q_4().emplace_back(0); + block.q_5().emplace_back(0); block.q_c().emplace_back(-addconst3); block.set_gate_selector(1); @@ -1800,28 +1831,35 @@ void UltraCircuitBuilder_::create_poseidon2_external_gate(const poseidon2_ex block.q_3().emplace_back(crypto::Poseidon2Bn254ScalarFieldParams::round_constants[in.round_idx][2]); block.q_c().emplace_back(0); block.q_4().emplace_back(crypto::Poseidon2Bn254ScalarFieldParams::round_constants[in.round_idx][3]); + block.q_5().emplace_back(0); block.set_gate_selector(1); this->check_selector_length_consistency(); this->increment_num_gates(); } /** - * @brief Poseidon2 internal round gate, activates the q_poseidon2_internal selector and relation + * @brief Poseidon2 internal round gate, activates the 
q_poseidon2_internal selector and relation. + * Ultra-only: Mega covers all internal rounds via the compressed quad-internal block. */ template void UltraCircuitBuilder_::create_poseidon2_internal_gate(const poseidon2_internal_gate_& in) { - auto& block = this->blocks.poseidon2_internal; - block.populate_wires(in.a, in.b, in.c, in.d); - block.q_m().emplace_back(0); - block.q_1().emplace_back(crypto::Poseidon2Bn254ScalarFieldParams::round_constants[in.round_idx][0]); - block.q_2().emplace_back(0); - block.q_3().emplace_back(0); - block.q_c().emplace_back(0); - block.q_4().emplace_back(0); - block.set_gate_selector(1); - this->check_selector_length_consistency(); - this->increment_num_gates(); + if constexpr (requires { this->blocks.poseidon2_internal; }) { + auto& block = this->blocks.poseidon2_internal; + block.populate_wires(in.a, in.b, in.c, in.d); + block.q_m().emplace_back(0); + block.q_1().emplace_back(crypto::Poseidon2Bn254ScalarFieldParams::round_constants[in.round_idx][0]); + block.q_2().emplace_back(0); + block.q_3().emplace_back(0); + block.q_c().emplace_back(0); + block.q_4().emplace_back(0); + block.q_5().emplace_back(0); + block.set_gate_selector(1); + this->check_selector_length_consistency(); + this->increment_num_gates(); + } else { + throw_or_abort("create_poseidon2_internal_gate is Ultra-only (Mega uses the compressed block)"); + } } /** diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp index 92bf603ef8c8..999d1b4a742b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp @@ -488,6 +488,7 @@ class UltraCircuitBuilder_ : public CircuitBuilderBase namespace bb { @@ -874,7 +875,11 @@ template class SumcheckVerifier { // For other flavors, we perform the sumcheck univariate consistency 
check bool verified = true; - ClaimedEvaluations purported_evaluations; + // Heap-allocate ClaimedEvaluations (AllValues) to keep the sumcheck-verify stack frame small. + // For recursive flavors with many columns (e.g. AVM), holding this inline on the stack can exceed the 8 MB + // stack limit once nested inside the inner-Mega AVM recursive verifier chain + auto purported_evaluations_storage = std::make_unique(); + ClaimedEvaluations& purported_evaluations = *purported_evaluations_storage; for (size_t round_idx = 0; round_idx < virtual_log_n; round_idx++) { round.process_round(transcript, multivariate_challenge, gate_separators, round_idx); verified = verified && !round.round_failed; @@ -895,15 +900,16 @@ template class SumcheckVerifier { if constexpr (IsTranslatorFlavor) { // Translator path: receive full-circuit evaluations, set them, and complete // (computable precomputed selectors + L_0 scaling of minicircuit wires already placed above) - auto get_full_circuit_evaluations = + auto get_full_circuit_evaluations = std::make_unique>( transcript->template receive_from_prover>( - "Sumcheck:evaluations"); + "Sumcheck:evaluations")); Flavor::complete_full_circuit_evaluations( - purported_evaluations, get_full_circuit_evaluations, std::span(multivariate_challenge)); + purported_evaluations, *get_full_circuit_evaluations, std::span(multivariate_challenge)); } else { - auto transcript_evaluations = - transcript->template receive_from_prover>("Sumcheck:evaluations"); - for (auto [eval, transcript_eval] : zip_view(purported_evaluations.get_all(), transcript_evaluations)) { + // Heap-allocate transcript_evaluations for the same reason as purported_evaluations above. 
+ auto transcript_evaluations = std::make_unique>( + transcript->template receive_from_prover>("Sumcheck:evaluations")); + for (auto [eval, transcript_eval] : zip_view(purported_evaluations.get_all(), *transcript_evaluations)) { eval = transcript_eval; } } @@ -932,7 +938,7 @@ template class SumcheckVerifier { // For ZK Flavors: the evaluations of Libra univariates are included in the Sumcheck Output return SumcheckOutput{ .challenge = multivariate_challenge, - .claimed_evaluations = purported_evaluations, + .claimed_evaluations = std::move(purported_evaluations), .verified = verified, .claimed_libra_evaluation = zk_correction_handler.get_libra_evaluation(), .round_univariate_commitments = round.get_round_univariate_commitments(), diff --git a/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp b/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp index 85ba07682b47..98e86abc86b1 100644 --- a/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp +++ b/barretenberg/cpp/src/barretenberg/trace_to_polynomials/trace_to_polynomials.cpp @@ -6,6 +6,7 @@ #include "trace_to_polynomials.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/thread.hpp" #include "barretenberg/constants.hpp" #include "barretenberg/ext/starknet/flavor/ultra_starknet_flavor.hpp" #include "barretenberg/ext/starknet/flavor/ultra_starknet_zk_flavor.hpp" @@ -52,45 +53,90 @@ std::vector TraceToPolynomials::populate_wires_and_se RefArray wires = polynomials.get_wires(); auto selectors = polynomials.get_selectors(); - // For each block in the trace, populate wire polys, copy cycles and selector polys - for (auto& block : builder.blocks.get()) { - const uint32_t offset = block.trace_offset(); - const uint32_t block_size = static_cast(block.size()); + // Two-phase parallelisation. 
Phase 1 fans out over blocks to populate wires and emit copy-cycle + // nodes; phase 2 fans out over a flattened (block, selector) task list to fill selectors. + auto blocks_array = builder.blocks.get(); + const size_t num_blocks = blocks_array.size(); - // Update wire polynomials and copy cycles - // NB: The order of row/column loops is arbitrary but needs to be row/column to match old copy_cycle code - { - BB_BENCH_NAME("populating wires and copy_cycles"); + // Pre-pass: count copy-cycle sizes per real-variable index so each copy_cycles[i] can be + // reserve()d once before the serial concat in phase 1.5, avoiding repeated reallocations. + { + BB_BENCH_NAME("counting copy_cycles"); + std::vector cycle_counts(builder.real_variable_index.size(), 0); + for (auto& block : blocks_array) { + const uint32_t block_size = static_cast(block.size()); + for (uint32_t block_row_idx = 0; block_row_idx < block_size; ++block_row_idx) { + for (uint32_t wire_idx = 0; wire_idx < NUM_WIRES; ++wire_idx) { + uint32_t var_idx = block.wires[wire_idx][block_row_idx]; + // var_idx may be untrusted (e.g. from ACIR) so use .at() to catch OOB. This validates real_var_idx + // as an in-range index for both cycle_counts and copy_cycles (same size), which is why phase 1.5 + // below can index copy_cycles[real_var_idx] without .at(). + ++cycle_counts.at(builder.real_variable_index.at(var_idx)); + } + } + } + for (size_t i = 0; i < copy_cycles.size(); ++i) { + copy_cycles[i].reserve(cycle_counts[i]); + } + } + // Phase 1: per-block parallel pass over wires and emit copy-cycle nodes. 
+ std::vector>> per_block_nodes(num_blocks); + { + BB_BENCH_NAME("populate_wires_and_emit_cycles"); + parallel_for(num_blocks, [&](size_t block_idx) { + auto& block = blocks_array[block_idx]; + const uint32_t offset = block.trace_offset(); + const uint32_t block_size = static_cast(block.size()); + auto& local_nodes = per_block_nodes[block_idx]; + local_nodes.reserve(static_cast(block_size) * NUM_WIRES); + + // NB: The order of row/column loops is arbitrary but needs to be row/column to match old copy_cycle code. for (uint32_t block_row_idx = 0; block_row_idx < block_size; ++block_row_idx) { for (uint32_t wire_idx = 0; wire_idx < NUM_WIRES; ++wire_idx) { uint32_t var_idx = block.wires[wire_idx][block_row_idx]; // an index into the variables array - // Use .at() for bounds checking - fuzzer found OOB with malformed ACIR + // Use .at() so out-of-range var_idx is caught instead of producing a silent OOB read. uint32_t real_var_idx = builder.real_variable_index.at(var_idx); uint32_t trace_row_idx = block_row_idx + offset; // Insert the real witness values from this block into the wire polys at the correct offset wires[wire_idx].at(trace_row_idx) = builder.get_variable(var_idx); - // Add the address of the witness value to its corresponding copy cycle - // Note that the copy_cycles are indexed by real_variable_indices. - copy_cycles.at(real_var_idx).emplace_back(cycle_node{ wire_idx, trace_row_idx }); + local_nodes.emplace_back(real_var_idx, cycle_node{ wire_idx, trace_row_idx }); } } + }); + } + + // Phase 1.5: Serial concat in block order to preserve cycle-node ordering within each variable's cycle list. 
+ { + BB_BENCH_NAME("fill_copy_cycles"); + for (const auto& block_nodes : per_block_nodes) { + for (const auto& [real_var_idx, node] : block_nodes) { + copy_cycles[real_var_idx].emplace_back(node); + } } + } - { - BB_BENCH_NAME("populating selectors"); - RefVector> block_selectors = block.get_selectors(); - // Insert the selector values for this block into the selector polynomials at the correct offset - // TODO(https://github.com/AztecProtocol/barretenberg/issues/398): implicit arithmetization/flavor - // consistency - for (size_t selector_idx = 0; selector_idx < block_selectors.size(); selector_idx++) { - auto& selector = block_selectors[selector_idx]; - for (size_t row_idx = 0; row_idx < block_size; ++row_idx) { - size_t trace_row_idx = row_idx + offset; - selectors[selector_idx].set_if_valid_index(trace_row_idx, selector[row_idx]); - } + // Phase 2: parallel selector filling across a flattened (block_idx, selector_idx) task list. + { + BB_BENCH_NAME("populate_selectors"); + std::vector> selector_tasks; + for (size_t block_idx = 0; block_idx < num_blocks; ++block_idx) { + const size_t num_selectors = blocks_array[block_idx].get_selectors().size(); + for (size_t selector_idx = 0; selector_idx < num_selectors; ++selector_idx) { + selector_tasks.emplace_back(block_idx, selector_idx); } } + parallel_for(selector_tasks.size(), [&](size_t task_idx) { + const auto [block_idx, selector_idx] = selector_tasks[task_idx]; + auto& block = blocks_array[block_idx]; + const size_t offset = block.trace_offset(); + const size_t block_size = block.size(); + RefVector> block_selectors = block.get_selectors(); + auto& selector = block_selectors[selector_idx]; + for (size_t row_idx = 0; row_idx < block_size; ++row_idx) { + selectors[selector_idx].set_if_valid_index(row_idx + offset, selector[row_idx]); + } + }); } return copy_cycles; diff --git a/barretenberg/cpp/src/barretenberg/transcript/README.md b/barretenberg/cpp/src/barretenberg/transcript/README.md index 
b707f9e65e10..d69d6eff24bb 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/README.md +++ b/barretenberg/cpp/src/barretenberg/transcript/README.md @@ -280,9 +280,9 @@ Origin tags prevent these violations by **tainting** each value with metadata tr ```cpp struct OriginTag { - size_t transcript_index; // Which transcript instance created this value - numeric::uint256_t round_provenance; // Which protocol rounds contributed to this value - bool instant_death; // Poison flag - abort on any arithmetic + size_t transcript_index; // Which transcript instance created this value + numeric::uint512_t round_provenance; // Which protocol rounds contributed to this value + bool instant_death; // Poison flag - abort on any arithmetic }; ``` @@ -303,42 +303,42 @@ transcript_index = unique_transcript_index.fetch_add(1); #### round_provenance - Round Tracking Bitmask -A 256-bit value split into two 128-bit halves: +A 512-bit value split into two 256-bit halves: ``` -[Upper 128 bits: Challenges] [Lower 128 bits: Submitted Values] +[Upper 256 bits: Challenges] [Lower 256 bits: Submitted Values] ``` -- **Bit `i` in lower 128 bits**: This value uses data submitted in round `i` -- **Bit `i` in upper 128 bits**: This value uses a challenge generated in round `i` +- **Bit `i` in lower 256 bits**: This value uses data submitted in round `i` +- **Bit `i` in upper 256 bits**: This value uses a challenge generated in round `i` **Construction**: ```cpp // SINGLE BIT EXAMPLES - Values from one round only // Submitted value in round 3 -round_provenance = (1 << 3); // 0x0000...0008 (bit 3 in lower 128 bits) +round_provenance = (1 << 3); // 0x0000...0008 (bit 3 in lower 256 bits) // Challenge from round 5 -round_provenance = (1 << (5 + 128)); // 0x0020...0000 (bit 5 in upper 128 bits) +round_provenance = (1 << (5 + 256)); // 0x0020...0000 (bit 5 in upper 256 bits) // MULTIPLE BITS EXAMPLES - Values combined from multiple rounds // Value depending on submitted data from BOTH round 0 AND 
round 2 -round_provenance = (1 << 0) | (1 << 2); // 0x0000...0005 (bits 0 and 2 in lower 128) +round_provenance = (1 << 0) | (1 << 2); // 0x0000...0005 (bits 0 and 2 in lower 256) // Meaning: "This value incorporates data submitted in rounds 0 AND 2" // Value depending on challenges from BOTH round 1 AND round 3 -round_provenance = (1 << (1 + 128)) | (1 << (3 + 128)); // 0x000A...0000 (bits 1 and 3 in upper 128) +round_provenance = (1 << (1 + 256)) | (1 << (3 + 256)); // 0x000A...0000 (bits 1 and 3 in upper 256) // Meaning: "This value incorporates challenges from rounds 1 AND 3" // Value depending on submitted data (round 0) AND challenge (round 0) -round_provenance = (1 << 0) | (1 << (0 + 128)); // 0x0001...0001 (bit 0 in both halves) +round_provenance = (1 << 0) | (1 << (0 + 256)); // 0x0001...0001 (bit 0 in both halves) // Meaning: "This value uses both the data submitted in round 0 AND the challenge from round 0" // Complex example: submitted data from rounds 0,1 and challenges from rounds 0,2 -round_provenance = (1 << 0) | (1 << 1) | (1 << (0 + 128)) | (1 << (2 + 128)); +round_provenance = (1 << 0) | (1 << 1) | (1 << (0 + 256)) | (1 << (2 + 256)); // 0x0005...0003 // Meaning: "This value's computation involved: // - Submitted values from rounds 0 and 1 @@ -397,7 +397,7 @@ OriginTag::OriginTag(const OriginTag& tag_a, const OriginTag& tag_b) } // 5. Check cross-round contamination (the critical security check) - check_round_provenances(tag_a.round_provenance, tag_b.round_provenance); + check_round_provenance(tag_a.round_provenance, tag_b.round_provenance); // 6. 
Merge the tags transcript_index = tag_a.transcript_index; @@ -422,23 +422,23 @@ In recursive verification (in-circuit mode), values receive origin tags when the // Recursive verifier receives wire commitment from proof (round 0) auto comm = transcript->receive_from_prover("wire_comm"); // comm.get_origin_tag() = OriginTag(transcript_id, round=0, is_submitted=true) -// → round_provenance = 0x0000...0001 (bit 0 set in lower 128 bits) +// → round_provenance = 0x0000...0001 (bit 0 set in lower 256 bits) // Verifier generates challenge after round 0 data auto beta = transcript->get_challenge("beta"); // beta.get_origin_tag() = OriginTag(transcript_id, round=0, is_submitted=false) -// → round_provenance = 0x0001...0000 (bit 0 set in upper 128 bits) +// → round_provenance = 0x0001...0000 (bit 0 set in upper 256 bits) ``` **Tag construction** (in `origin_tag.hpp:OriginTag` constructor): ```cpp OriginTag(size_t parent_index, size_t child_index, bool is_submitted = true) : transcript_index(parent_index) - , round_provenance((static_cast(1) << (child_index + (is_submitted ? 0 : 128)))) + , round_provenance((static_cast(1) << (child_index + (is_submitted ? 0 : 256)))) ``` -- Submitted values: bit shifted by `child_index` (lower 128 bits) -- Challenges: bit shifted by `child_index + 128` (upper 128 bits) +- Submitted values: bit shifted by `child_index` (lower 256 bits) +- Challenges: bit shifted by `child_index + 256` (upper 256 bits) #### 2. Tag Merging Example @@ -458,21 +458,26 @@ This merged tag now carries the full provenance: it depends on data submitted in #### 3. 
The Cross-Round Check -The critical security check in `check_round_provenances()`: +The critical security check in `check_round_provenance()`: ```cpp -void check_round_provenances(const uint256_t& tag_a, const uint256_t& tag_b) +void check_round_provenance(const uint512_t& provenance_a, const uint512_t& provenance_b) { - const uint128_t* challenges_a = (const uint128_t*)(&tag_a.data[2]); // Upper 128 bits - const uint128_t* submitted_a = (const uint128_t*)(&tag_a.data[0]); // Lower 128 bits + // Lower 256 bits = submitted rounds, Upper 256 bits = challenge rounds + const uint256_t& submitted_a = provenance_a.lo; + const uint256_t& submitted_b = provenance_b.lo; - // Similar for tag_b... + // Nothing to check if either has no submitted data or both are from the same round(s) + if (submitted_a == 0 || submitted_b == 0 || submitted_a == submitted_b) { + return; + } - // VIOLATION: Two submitted values from different rounds mixing without challenges - if (*challenges_a == 0 && *challenges_b == 0 && // Neither has challenge bits set - *submitted_a != 0 && *submitted_b != 0 && // Both have submitted bits set - *submitted_a != *submitted_b) { // From different rounds - throw_or_abort("Submitted values from 2 different rounds are mixing without challenges"); + // VIOLATION: max challenge round must be >= max submitted round, otherwise the submitted + // values from different rounds were combined without a challenge that binds the later round. 
+ const int max_challenge_round = highest_set_bit_256(provenance_a.hi | provenance_b.hi); + const int max_submitted_round = highest_set_bit_256(submitted_a | submitted_b); + if (max_challenge_round < max_submitted_round) { + throw_or_abort("Round provenance check failed: max challenge round < max submitted round"); } } ``` @@ -587,27 +592,29 @@ ROUND 2 - LOG DERIVATIVE INVERSE (challenge_generation_phase=true → false, rou │ receive_from_prover("lookup_inverses") │──► Origin tag: OriginTag(42, 2, true) └──────────────────────────────────────────┘ round_provenance = 0x0000...0004 (bit 2 lower) +┌──────────────────────────────────┐ +│ get_challenge("delta") │──► Tag: OriginTag(42, 2, false) +└──────────────────────────────────┘ round_provenance = 0x0004...0000 (bit 2 upper) + ** Example: Tag merging with cross-round values ** - combined = eta * lookup_inverses + combined = delta * lookup_inverses │ │ Tag Merging: - │ eta.tag = OriginTag(42, 0, false) → 0x0001...0000 - │ lookup_inv.tag = OriginTag(42, 2, true) → 0x0000...0004 - │ combined.tag = OriginTag(42, parent) → 0x0001...0004 (OR'd) + │ delta.tag = OriginTag(42, 2, false) → 0x0004...0000 + │ lookup_inv.tag = OriginTag(42, 2, true) → 0x0000...0004 + │ combined.tag = OriginTag(42, parent) → 0x0004...0004 (OR'd) │ ▼ ┌─────────────────────────────────────────────┐ - │ check_round_provenances(eta.tag, lookup_inv.tag) │ + │ check_round_provenance(delta.tag, lookup_inv.tag) │ └─────────────────────────────────────────────┘ │ - │ CHECK: Submitted values from different rounds? - │ challenges_eta = 0x0001...0000 (non-zero) ✓ - │ submitted_eta = 0x0000...0000 (zero) - │ challenges_inv = 0x0000...0000 (zero) - │ submitted_inv = 0x0000...0004 (non-zero) + │ CHECK: max challenge round >= max submitted round? + │ max challenge round = 2 (delta is round-2 challenge) + │ max submitted round = 2 (lookup_inverses submitted in round 2) │ - │ ✓ PASS: eta has challenge bits, prevents cross-round violation + │ ✓ PASS: 2 >= 2. 
Round-2 challenge correctly binds round-2 submitted data. │ ▼ combined (FF witness with merged tag) @@ -618,35 +625,34 @@ ORIGIN TAG VIOLATION EXAMPLE (Cross-Round Contamination) ┌──────────────────────────────────┐ │ Round 0: w_l (wire commitment) │ tag = OriginTag(42, 0, true) - │ round_provenance = 0x...0001 │ (bit 0 in lower 128 bits) + │ round_provenance = 0x...0001 │ (bit 0 in lower 256 bits) └──────────────────────────────────┘ │ - │ (eta challenges generated) + │ (eta challenges generated → tag has bit 0 in upper 256 bits) │ ┌──────────────────────────────────┐ │ Round 1: w_4 (4th wire) │ tag = OriginTag(42, 1, true) - │ round_provenance = 0x...0002 │ (bit 1 in lower 128 bits) + │ round_provenance = 0x...0002 │ (bit 1 in lower 256 bits) └──────────────────────────────────┘ │ ▼ - ❌ VIOLATION: Direct mixing without challenges - result = w_l + w_4 + ❌ VIOLATION: round-0 challenge cannot bind round-1 submission + result = eta * w_l + w_4 │ - check_round_provenances() detects: - challenges_w_l = 0 (no challenges involved) - challenges_w_4 = 0 (no challenges involved) - submitted_w_l = 0x...0001 (from round 0) - submitted_w_4 = 0x...0002 (from round 1) - submitted_w_l != submitted_w_4 ← DIFFERENT ROUNDS! + check_round_provenance() detects: + max challenge round = 0 (only eta involved, from round 0) + max submitted round = 1 (w_4 was submitted in round 1) + 0 < 1 ← challenge does not reach the latest submitted round! 
- → throw_or_abort("Submitted values from 2 different - rounds are mixing without challenges") + → throw_or_abort("Round provenance check failed: + max challenge round < max submitted round") - ✅ CORRECT: Must use challenge to combine cross-round values - result = eta * w_l + w_4 + ✅ CORRECT: Use a challenge from round >= max submitted round + auto beta = transcript->get_challenge("beta"); // round-1 challenge + result = beta * w_l + w_4 │ - │ eta.tag has challenge bits from round 0 - │ → Properly binds the two rounds together + │ beta.tag has challenge bits from round 1 + │ max challenge round = 1, max submitted round = 1 → PASS ▼ OK (merged tag tracks both rounds AND challenge) diff --git a/barretenberg/cpp/src/barretenberg/transcript/origin_tag.cpp b/barretenberg/cpp/src/barretenberg/transcript/origin_tag.cpp index e9d60e3a82fe..d8cc514ef3e1 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/origin_tag.cpp +++ b/barretenberg/cpp/src/barretenberg/transcript/origin_tag.cpp @@ -7,7 +7,6 @@ #include "barretenberg/transcript/origin_tag.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/numeric/uint256/uint256.hpp" -#include #include namespace bb { @@ -16,32 +15,21 @@ using namespace numeric; namespace { /** - * @brief Find the position of the highest set bit in a uint128_t - * @return -1 if no bits are set, otherwise the bit position (0-127) + * @brief Find the position of the highest set bit in a uint256_t + * @return -1 if no bits are set, otherwise the bit position (0-255) */ -inline int highest_set_bit_128(uint128_t value) +inline int highest_set_bit_256(uint256_t value) { if (value == 0) { return -1; } - // Check high 64 bits first - auto high = static_cast(value >> 64); - if (high != 0) { - return 127 - __builtin_clzll(high); + for (int idx = 0; idx < 4; idx++) { + auto chunk = static_cast(value >> (64 * (3 - idx))); + if (chunk != 0) { + return 255 - (idx * 64) - __builtin_clzll(chunk); + } } - // Check low 64 bits - auto low 
= static_cast(value); - return 63 - __builtin_clzll(low); -} - -/** - * @brief Safely extract uint128_t from uint256_t data array using memcpy to avoid strict aliasing issues - */ -inline uint128_t extract_uint128(const uint64_t* data) -{ - uint128_t result = 0; - std::memcpy(&result, data, sizeof(uint128_t)); - return result; + return -1; } } // namespace @@ -60,11 +48,11 @@ inline uint128_t extract_uint128(const uint64_t* data) * @param provenance_a Round provenance of first element * @param provenance_b Round provenance of second element */ -void check_round_provenance(const uint256_t& provenance_a, const uint256_t& provenance_b) +void check_round_provenance(const uint512_t& provenance_a, const uint512_t& provenance_b) { - // Lower 128 bits = submitted rounds, Upper 128 bits = challenge rounds - const auto submitted_a = extract_uint128(&provenance_a.data[0]); - const auto submitted_b = extract_uint128(&provenance_b.data[0]); + // Lower 256 bits = submitted rounds, Upper 256 bits = challenge rounds + const uint256_t& submitted_a = provenance_a.lo; + const uint256_t& submitted_b = provenance_b.lo; // Nothing to check if either has no submitted data or both are from the same round(s) if (submitted_a == 0 || submitted_b == 0 || submitted_a == submitted_b) { @@ -72,10 +60,8 @@ void check_round_provenance(const uint256_t& provenance_a, const uint256_t& prov } // Ensure that values from different rounds are not mixing without max challenge round >= max submitted round - const auto challenges_a = extract_uint128(&provenance_a.data[2]); - const auto challenges_b = extract_uint128(&provenance_b.data[2]); - const int max_challenge_round = highest_set_bit_128(challenges_a | challenges_b); - const int max_submitted_round = highest_set_bit_128(submitted_a | submitted_b); + const int max_challenge_round = highest_set_bit_256(provenance_a.hi | provenance_b.hi); + const int max_submitted_round = highest_set_bit_256(submitted_a | submitted_b); if (max_challenge_round < 
max_submitted_round) { throw_or_abort("Round provenance check failed: max challenge round (" + std::to_string(max_challenge_round) + diff --git a/barretenberg/cpp/src/barretenberg/transcript/origin_tag.hpp b/barretenberg/cpp/src/barretenberg/transcript/origin_tag.hpp index 634dc8c3ce02..44873fecf2a8 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/origin_tag.hpp +++ b/barretenberg/cpp/src/barretenberg/transcript/origin_tag.hpp @@ -16,7 +16,7 @@ #include "barretenberg/common/assert.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/numeric/uint256/uint256.hpp" -#include +#include "barretenberg/numeric/uintx/uintx.hpp" #include #include #include @@ -65,7 +65,7 @@ template constexpr bool is_iterable_v = is_iterable::value; namespace bb { -void check_round_provenance(const uint256_t& provenance_a, const uint256_t& provenance_b); +void check_round_provenance(const uint512_t& provenance_a, const uint512_t& provenance_b); #ifndef AZTEC_NO_ORIGIN_TAGS struct OriginTag { @@ -87,10 +87,10 @@ struct OriginTag { size_t transcript_index = FREE_WITNESS; // round_provenance specifies which submitted values and challenges have been used to generate this element - // The lower 128 bits represent using a submitted value from a corresponding round (the shift represents the - // round) The higher 128 bits represent using a challenge value from an corresponding round (the shift + // The lower 256 bits represent using a submitted value from a corresponding round (the shift represents the + // round) The higher 256 bits represent using a challenge value from an corresponding round (the shift // represents the round) - numeric::uint256_t round_provenance = numeric::uint256_t(0); + numeric::uint512_t round_provenance = numeric::uint512_t(0); // Instant death is used for poisoning values we should never use in arithmetic bool instant_death = false; @@ -117,9 +117,9 @@ struct OriginTag { */ OriginTag(size_t transcript_idx, size_t round_number, bool 
is_submitted = true) : transcript_index(transcript_idx) - , round_provenance((static_cast(1) << (round_number + (is_submitted ? 0 : 128)))) + , round_provenance((static_cast(1) << (round_number + (is_submitted ? 0 : 256)))) { - BB_ASSERT_LT(round_number, 128U); + BB_ASSERT_LT(round_number, 256U); } /** @@ -166,19 +166,19 @@ struct OriginTag { void set_free_witness() { transcript_index = FREE_WITNESS; - round_provenance = 0; + round_provenance = numeric::uint512_t(0); } void unset_free_witness() { transcript_index = CONSTANT; - round_provenance = numeric::uint256_t(0); + round_provenance = numeric::uint512_t(0); } bool is_constant() const { return transcript_index == CONSTANT && !instant_death; } void set_constant() { transcript_index = CONSTANT; - round_provenance = numeric::uint256_t(0); + round_provenance = numeric::uint512_t(0); } // Static factory methods for cleaner syntax @@ -206,7 +206,7 @@ struct OriginTag { /** * @brief Clear the round_provenance to address round provenance false positives. 
*/ - void clear_round_provenance() { round_provenance = numeric::uint256_t(0); } + void clear_round_provenance() { round_provenance = numeric::uint512_t(0); } }; inline std::ostream& operator<<(std::ostream& os, OriginTag const& v) { diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp index 26673dbbb3bc..e79da1cd25e1 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp @@ -454,10 +454,7 @@ TEST_F(TranslatorRelationCorrectnessTests, NonNative) auto& engine = numeric::get_debug_randomness(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); // Generate random EccOpQueue actions @@ -490,7 +487,7 @@ TEST_F(TranslatorRelationCorrectnessTests, NonNative) } op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); const auto batching_challenge_v = BF::random_element(&engine); const auto evaluation_input_x = BF::random_element(&engine); diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp index 9bbdf7efc014..591d32629ccb 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/relation_failure.test.cpp @@ -125,12 +125,9 @@ ValidTranslatorState build_valid_accumulator_transfer_state() auto& engine = numeric::get_debug_randomness(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); + op_queue->construct_zk_columns(); - // Add 
random start ops, then mixed ops, merge, more mixed ops, random end ops, final merge - for (size_t i = 0; i < Flavor::CircuitBuilder::NUM_RANDOM_OPS_START; i++) { - op_queue->random_op_ultra_only(); - } + // Add mixed ops, merge, more mixed ops, random end ops, final merge for (size_t i = 0; i < 50; i++) { op_queue->add_accumulate(GroupElement::random_element(&engine)); op_queue->mul_accumulate(GroupElement::random_element(&engine), FF::random_element(&engine)); @@ -145,7 +142,7 @@ ValidTranslatorState build_valid_accumulator_transfer_state() for (size_t i = 0; i < Flavor::CircuitBuilder::NUM_RANDOM_OPS_END; i++) { op_queue->random_op_ultra_only(); } - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); const auto batching_challenge_v = BF::random_element(&engine); const auto evaluation_input_x = BF::random_element(&engine); diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp index 5c8654e80448..6187dd7706b1 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator.test.cpp @@ -189,15 +189,14 @@ class TranslatorTests : public ::testing::Test { const size_t circuit_size_parameter = 500) { - // Add the same operations to the ECC op queue; the native computation is performed under the hood. 
- auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - add_random_ops(op_queue, CircuitBuilder::NUM_RANDOM_OPS_START); + auto op_queue = std::make_shared(); + // Construct zk_columns + op_queue->construct_zk_columns(); + // Table with correct final structure for translator add_mixed_ops(op_queue, circuit_size_parameter / 2); - op_queue->merge(); - add_mixed_ops(op_queue, circuit_size_parameter / 2); - add_random_ops(op_queue, CircuitBuilder::NUM_RANDOM_OPS_END); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + add_random_ops(op_queue, TranslatorCircuitBuilder::NUM_RANDOM_OPS_END); + // Merge with fixed append + op_queue->merge_fixed_append(op_queue->get_append_offset()); return CircuitBuilder{ batching_challenge_v, evaluation_challenge_x, op_queue }; } diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp index e36eb88eae9a..0e1683e333e2 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.cpp @@ -427,7 +427,11 @@ void TranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit(const std::shared_ { BB_BENCH_NAME("TranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit"); using Fq = bb::fq; - const auto& ultra_ops = ecc_op_queue->get_ultra_ops(); + // If in AVM mode, we use the non-zk reconstructed ultra ops as the structure required by Translator is added by the + // GoblinAVM constructor. In Chonk, this structure is given by the zk columns, so we use the zk reconstructed + // ultra ops. + const auto& ultra_ops = + avm_mode ? 
ecc_op_queue->get_no_zk_reconstructed_ultra_ops() : ecc_op_queue->get_zk_reconstructed_ultra_ops(); std::vector accumulator_trace; Fq current_accumulator(0); if (ultra_ops.empty()) { diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp index ac6804b56b2d..7487cf2dff55 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp @@ -246,6 +246,7 @@ class TranslatorCircuitBuilder : public CircuitBuilderBase { // Number of random ops at the beginning of Translator trace static constexpr size_t NUM_RANDOM_OPS_START = 3; static_assert(NUM_RANDOM_OPS_START == 3); + static_assert(NUM_RANDOM_OPS_START == ECC_NUM_RANDOM_OPS_START); // Number of random ops at the end of Translator trace static constexpr size_t NUM_RANDOM_OPS_END = 2; diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp index ccf5c0c9867b..0fcf673c8c6e 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.test.cpp @@ -13,7 +13,10 @@ auto& engine = numeric::get_debug_randomness(); /** * @brief Helper function to compute the expected accumulator result manually */ -fq compute_expected_result(const std::shared_ptr& op_queue, const fq& batching_challenge, const fq& x) +fq compute_expected_result(const std::shared_ptr& op_queue, + const fq& batching_challenge, + const fq& x, + bool include_zk = true) { using Fq = fq; Fq x_inv = x.invert(); @@ -24,7 +27,8 @@ fq compute_expected_result(const std::shared_ptr& op_queue, const fq Fq z_2_accumulator = Fq(0); Fq x_pow = Fq(1); - const auto& ultra_ops = op_queue->get_ultra_ops(); + const 
auto& ultra_ops = + include_zk ? op_queue->get_zk_reconstructed_ultra_ops() : op_queue->get_no_zk_reconstructed_ultra_ops(); for (const auto& ultra_op : ultra_ops) { if (ultra_op.op_code.is_random_op || ultra_op.op_code.value() == 0) { continue; @@ -68,10 +72,7 @@ TEST(TranslatorCircuitBuilder, SeveralOperationCorrectness) // Add the same operations to the ECC op queue; the native computation is performed under the hood. auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->add_accumulate(P1); op_queue->mul_accumulate(P2, z); op_queue->eq_and_reset(); @@ -86,7 +87,7 @@ TEST(TranslatorCircuitBuilder, SeveralOperationCorrectness) // Placeholder for randomness op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -106,15 +107,12 @@ TEST(TranslatorCircuitBuilder, MinimalOperations) using Fq = fq; auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->eq_and_reset(); op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -133,10 +131,7 @@ TEST(TranslatorCircuitBuilder, OnlyAddOperations) auto P2 = point::random_element(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - 
op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->add_accumulate(P1); op_queue->add_accumulate(P2); op_queue->add_accumulate(P1); @@ -144,7 +139,7 @@ TEST(TranslatorCircuitBuilder, OnlyAddOperations) op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -169,17 +164,14 @@ TEST(TranslatorCircuitBuilder, OnlyMulOperations) auto z2 = scalar::random_element(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->mul_accumulate(P, z1); op_queue->mul_accumulate(P, z2); op_queue->eq_and_reset(); op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -201,17 +193,14 @@ TEST(TranslatorCircuitBuilder, InterspersedNoOps) auto P = point::random_element(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->add_accumulate(P); op_queue->add_accumulate(P); op_queue->eq_and_reset(); op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -233,16 +222,13 @@ TEST(TranslatorCircuitBuilder, 
PointAtInfinity) auto P_infinity = point::infinity(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->add_accumulate(P_infinity); op_queue->eq_and_reset(); op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -266,16 +252,13 @@ TEST(TranslatorCircuitBuilder, ZeroScalar) auto zero = scalar::zero(); auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); op_queue->mul_accumulate(P, zero); op_queue->eq_and_reset(); op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = Fq::random_element(); @@ -296,10 +279,7 @@ TEST(TranslatorCircuitBuilder, ManyOperations) using Fq = fq; auto op_queue = std::make_shared(); - op_queue->no_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); - op_queue->random_op_ultra_only(); + op_queue->construct_zk_columns(); // Add many operations for (size_t i = 0; i < 20; ++i) { @@ -313,7 +293,7 @@ TEST(TranslatorCircuitBuilder, ManyOperations) op_queue->merge(); op_queue->random_op_ultra_only(); op_queue->random_op_ultra_only(); - op_queue->merge(MergeSettings::APPEND, op_queue->get_append_offset()); + op_queue->merge_fixed_append(op_queue->get_append_offset()); Fq batching_challenge = Fq::random_element(); Fq x = 
Fq::random_element(); @@ -340,37 +320,41 @@ TEST(TranslatorCircuitBuilder, Determinism) // Build first circuit auto op_queue1 = std::make_shared(); - op_queue1->no_op_ultra_only(); - op_queue1->random_op_ultra_only(); - op_queue1->random_op_ultra_only(); - op_queue1->random_op_ultra_only(); + op_queue1->construct_zk_columns(); op_queue1->add_accumulate(P); op_queue1->mul_accumulate(P, z); op_queue1->eq_and_reset(); op_queue1->merge(); op_queue1->random_op_ultra_only(); op_queue1->random_op_ultra_only(); - op_queue1->merge(MergeSettings::APPEND, op_queue1->get_append_offset()); + op_queue1->merge_fixed_append(op_queue1->get_append_offset()); auto circuit_builder1 = TranslatorCircuitBuilder(batching_challenge, x, op_queue1); auto result1 = CircuitChecker::get_computation_result(circuit_builder1); // Build second circuit with same operations auto op_queue2 = std::make_shared(); - op_queue2->no_op_ultra_only(); - op_queue2->random_op_ultra_only(); - op_queue2->random_op_ultra_only(); - op_queue2->random_op_ultra_only(); + op_queue2->construct_zk_columns(); op_queue2->add_accumulate(P); op_queue2->mul_accumulate(P, z); op_queue2->eq_and_reset(); op_queue2->merge(); op_queue2->random_op_ultra_only(); op_queue2->random_op_ultra_only(); - op_queue2->merge(MergeSettings::APPEND, op_queue2->get_append_offset()); + op_queue2->merge_fixed_append(op_queue2->get_append_offset()); auto circuit_builder2 = TranslatorCircuitBuilder(batching_challenge, x, op_queue2); auto result2 = CircuitChecker::get_computation_result(circuit_builder2); - EXPECT_EQ(result1, result2); + // Compute contributions + Fq op_queue_1_with_randomness = compute_expected_result(op_queue1, batching_challenge, x, /*include_zk=*/true); + Fq op_queue_2_with_randomness = compute_expected_result(op_queue2, batching_challenge, x, /*include_zk=*/true); + Fq op_queue_1_without_randomness = compute_expected_result(op_queue1, batching_challenge, x, /*include_zk=*/false); + Fq op_queue_2_without_randomness = 
compute_expected_result(op_queue2, batching_challenge, x, /*include_zk=*/false); + Fq op_queue_1_randomness = + op_queue_1_with_randomness - op_queue_1_without_randomness * x.invert().pow(UltraEccOpsTable::ZK_ULTRA_OPS); + Fq op_queue_2_randomness = + op_queue_2_with_randomness - op_queue_2_without_randomness * x.invert().pow(UltraEccOpsTable::ZK_ULTRA_OPS); + + EXPECT_EQ(result1 - result2, op_queue_1_randomness - op_queue_2_randomness); } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/README.md b/barretenberg/cpp/src/barretenberg/ultra_honk/README.md index d25571a12e1e..1a8453c825ce 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/README.md +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/README.md @@ -128,7 +128,7 @@ Not all subrelations are enforced pointwise at every row. Some identities are de | # | Relation | Selector | Subrelations | Max Partial Length | Description | |---|---|---|---|---|---| | 10 | `EccOpQueueRelation` | `q_busread` | 8 | 3 | ECC operation queue wire consistency (Goblin) | -| 11 | `DatabusLookupRelation` | (structural) | 9 | 5 | Log-derivative databus reads (calldata, return data, secondary calldata; 3 subrelations each) | +| 11 | `DatabusLookupRelation` | (structural) | 15 | 5 | Log-derivative databus reads (kernel calldata, 3 app calldata columns, return data; 3 subrelations each) | See also: [LogUp README](../relations/LOGUP_README.md), [Permutation Argument README](../relations/PERMUTATION_ARGUMENT_README.md), [Generic LogUp README](../relations/generic_lookup/GENERIC_LOGUP_README.md), [Generic Permutation README](../relations/generic_permutation/GENERIC_PERMUTATION_README.md) diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/databus.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/databus.test.cpp index 2bf5e816818f..9177d7b31433 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/databus.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/databus.test.cpp @@ -1,3 +1,4 @@ +#include 
"barretenberg/stdlib_circuit_builders/databus.hpp" #include #include #include @@ -61,10 +62,7 @@ template class DataBusTests : public ::testing::Test { * @param read_bus_data Method for reading from a given bus column * @return Builder */ - static Builder construct_circuit_with_databus_reads( - Builder& builder, - const std::function& add_bus_data, - const std::function& read_bus_data) + static Builder construct_circuit_with_databus_reads(Builder& builder, const BusId& bus_idx) { const uint32_t NUM_BUS_ENTRIES = 5; // number of entries in the bus column @@ -74,49 +72,18 @@ template class DataBusTests : public ::testing::Test { for (size_t i = 0; i < NUM_BUS_ENTRIES; ++i) { FF val = FF::random_element(); uint32_t val_witness_idx = builder.add_variable(val); - add_bus_data(builder, val_witness_idx); + builder.add_public_calldata(bus_idx, val_witness_idx); } // Read from the bus at some random indices for (size_t i = 0; i < NUM_READS; ++i) { uint32_t read_idx = engine.get_random_uint32() % NUM_BUS_ENTRIES; uint32_t read_idx_witness_idx = builder.add_variable(FF(read_idx)); - read_bus_data(builder, read_idx_witness_idx); + builder.read_calldata(bus_idx, read_idx_witness_idx); } return builder; } - - static Builder construct_circuit_with_calldata_reads(Builder& builder) - { - // Define interfaces for the add and read methods for databus calldata - auto add_method = [](Builder& builder, uint32_t witness_idx) { builder.add_public_calldata(witness_idx); }; - auto read_method = [](Builder& builder, uint32_t witness_idx) { return builder.read_calldata(witness_idx); }; - - return construct_circuit_with_databus_reads(builder, add_method, read_method); - } - - static Builder construct_circuit_with_secondary_calldata_reads(Builder& builder) - { - // Define interfaces for the add and read methods for databus secondary_calldata - auto add_method = [](Builder& builder, uint32_t witness_idx) { - builder.add_public_secondary_calldata(witness_idx); - }; - auto read_method = 
[](Builder& builder, uint32_t witness_idx) { - return builder.read_secondary_calldata(witness_idx); - }; - - return construct_circuit_with_databus_reads(builder, add_method, read_method); - } - - static Builder construct_circuit_with_return_data_reads(Builder& builder) - { - // Define interfaces for the add and read methods for databus return data - auto add_method = [](Builder& builder, uint32_t witness_idx) { builder.add_public_return_data(witness_idx); }; - auto read_method = [](Builder& builder, uint32_t witness_idx) { return builder.read_return_data(witness_idx); }; - - return construct_circuit_with_databus_reads(builder, add_method, read_method); - } }; TYPED_TEST_SUITE(DataBusTests, FlavorTypes); @@ -125,24 +92,27 @@ TYPED_TEST_SUITE(DataBusTests, FlavorTypes); * @brief Test proof construction/verification for a circuit with calldata lookup gates * */ -TYPED_TEST(DataBusTests, CallDataRead) +TYPED_TEST(DataBusTests, KernelCallDataRead) { typename TypeParam::CircuitBuilder builder = this->construct_test_builder(); - this->construct_circuit_with_calldata_reads(builder); + this->construct_circuit_with_databus_reads(builder, BusId::KERNEL_CALLDATA); EXPECT_TRUE(CircuitChecker::check(builder)); EXPECT_TRUE(this->construct_and_verify_proof(builder)); } /** - * @brief Test proof construction/verification for a circuit with secondary_calldata lookup gates + * @brief Test proof construction/verification for circuits with app calldata lookup gates * */ -TYPED_TEST(DataBusTests, CallData2Read) +TYPED_TEST(DataBusTests, AppCallDataRead) { - typename TypeParam::CircuitBuilder builder = this->construct_test_builder(); - this->construct_circuit_with_secondary_calldata_reads(builder); + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + typename TypeParam::CircuitBuilder builder = this->construct_test_builder(); + this->construct_circuit_with_databus_reads(builder, static_cast(idx + 1)); - EXPECT_TRUE(this->construct_and_verify_proof(builder)); + 
EXPECT_TRUE(CircuitChecker::check(builder)) << "Circuit check failed for app calldata bus with index " << idx; + EXPECT_TRUE(this->construct_and_verify_proof(builder)) << "Failed for app calldata bus with index " << idx; + } } /** @@ -152,8 +122,9 @@ TYPED_TEST(DataBusTests, CallData2Read) TYPED_TEST(DataBusTests, ReturnDataRead) { typename TypeParam::CircuitBuilder builder = this->construct_test_builder(); - this->construct_circuit_with_return_data_reads(builder); + this->construct_circuit_with_databus_reads(builder, BusId::RETURNDATA); + EXPECT_TRUE(CircuitChecker::check(builder)); EXPECT_TRUE(this->construct_and_verify_proof(builder)); } @@ -164,10 +135,13 @@ TYPED_TEST(DataBusTests, ReturnDataRead) TYPED_TEST(DataBusTests, ReadAll) { typename TypeParam::CircuitBuilder builder = this->construct_test_builder(); - this->construct_circuit_with_calldata_reads(builder); - this->construct_circuit_with_secondary_calldata_reads(builder); - this->construct_circuit_with_return_data_reads(builder); + this->construct_circuit_with_databus_reads(builder, BusId::KERNEL_CALLDATA); + for (size_t idx = 0; idx < MAX_APPS_PER_KERNEL; ++idx) { + this->construct_circuit_with_databus_reads(builder, static_cast(idx + 1)); + } + this->construct_circuit_with_databus_reads(builder, BusId::RETURNDATA); + EXPECT_TRUE(CircuitChecker::check(builder)); EXPECT_TRUE(this->construct_and_verify_proof(builder)); } @@ -186,7 +160,7 @@ TYPED_TEST(DataBusTests, CallDataDuplicateRead) std::vector calldata_values = { 7, 10, 3, 12, 1 }; for (auto& val : calldata_values) { - builder.add_public_calldata(builder.add_variable(val)); + builder.add_public_calldata(BusId::KERNEL_CALLDATA, builder.add_variable(val)); } // Define some read indices with a duplicate @@ -198,7 +172,7 @@ TYPED_TEST(DataBusTests, CallDataDuplicateRead) // Create a variable corresponding to the index at which we want to read into calldata uint32_t read_idx_witness_idx = builder.add_variable(FF(read_idx)); - auto value_witness_idx = 
builder.read_calldata(read_idx_witness_idx); + auto value_witness_idx = builder.read_calldata(BusId::KERNEL_CALLDATA, read_idx_witness_idx); result_witness_indices.emplace_back(value_witness_idx); } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/honk_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/honk_transcript.test.cpp index d3b8a054ed94..b9ca7b6557f2 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/honk_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/honk_transcript.test.cpp @@ -106,10 +106,14 @@ template class HonkTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "ECC_OP_WIRE_2", data_types_per_G); manifest_expected.add_entry(round, "ECC_OP_WIRE_3", data_types_per_G); manifest_expected.add_entry(round, "ECC_OP_WIRE_4", data_types_per_G); - manifest_expected.add_entry(round, "CALLDATA", data_types_per_G); - manifest_expected.add_entry(round, "CALLDATA_READ_COUNTS", data_types_per_G); - manifest_expected.add_entry(round, "SECONDARY_CALLDATA", data_types_per_G); - manifest_expected.add_entry(round, "SECONDARY_CALLDATA_READ_COUNTS", data_types_per_G); + manifest_expected.add_entry(round, "KERNEL_CALLDATA", data_types_per_G); + manifest_expected.add_entry(round, "KERNEL_CALLDATA_READ_COUNTS", data_types_per_G); + manifest_expected.add_entry(round, "FIRST_APP_CALLDATA", data_types_per_G); + manifest_expected.add_entry(round, "FIRST_APP_CALLDATA_READ_COUNTS", data_types_per_G); + manifest_expected.add_entry(round, "SECOND_APP_CALLDATA", data_types_per_G); + manifest_expected.add_entry(round, "SECOND_APP_CALLDATA_READ_COUNTS", data_types_per_G); + manifest_expected.add_entry(round, "THIRD_APP_CALLDATA", data_types_per_G); + manifest_expected.add_entry(round, "THIRD_APP_CALLDATA_READ_COUNTS", data_types_per_G); manifest_expected.add_entry(round, "RETURN_DATA", data_types_per_G); manifest_expected.add_entry(round, "RETURN_DATA_READ_COUNTS", data_types_per_G); } @@ -126,8 +130,10 @@ 
template class HonkTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "LOOKUP_INVERSES", data_types_per_G); // Mega-specific databus inverse commitments if constexpr (IsMegaFlavor) { - manifest_expected.add_entry(round, "CALLDATA_INVERSES", data_types_per_G); - manifest_expected.add_entry(round, "SECONDARY_CALLDATA_INVERSES", data_types_per_G); + manifest_expected.add_entry(round, "KERNEL_CALLDATA_INVERSES", data_types_per_G); + manifest_expected.add_entry(round, "FIRST_APP_CALLDATA_INVERSES", data_types_per_G); + manifest_expected.add_entry(round, "SECOND_APP_CALLDATA_INVERSES", data_types_per_G); + manifest_expected.add_entry(round, "THIRD_APP_CALLDATA_INVERSES", data_types_per_G); manifest_expected.add_entry(round, "RETURN_DATA_INVERSES", data_types_per_G); } manifest_expected.add_entry(round, "Z_PERM", data_types_per_G); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp index 7feb27ae1174..3d6737aeaf03 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp @@ -308,6 +308,12 @@ TYPED_TEST(MegaHonkTests, WitnessPolynomialsMasked) EXPECT_TRUE(has_masking) << label << " should be masked"; }; + auto check_unmasked = [](const auto& poly, const std::string& label) { + for (size_t j = 0; j < NUM_MASKED_ROWS; j++) { + EXPECT_TRUE(poly[NUM_ZERO_ROWS + j].is_zero()) << label << " should not be masked"; + } + }; + auto& polys = prover_instance->polynomials; check_masked(polys.w_l, "w_l"); check_masked(polys.w_r, "w_r"); @@ -317,11 +323,18 @@ TYPED_TEST(MegaHonkTests, WitnessPolynomialsMasked) check_masked(polys.lookup_read_counts, "lookup_read_counts"); check_masked(polys.lookup_read_tags, "lookup_read_tags"); check_masked(polys.lookup_inverses, "lookup_inverses"); - check_masked(polys.calldata_read_counts, "calldata_read_counts"); - 
check_masked(polys.calldata_inverses, "calldata_inverses"); - check_masked(polys.secondary_calldata, "secondary_calldata"); - check_masked(polys.secondary_calldata_read_counts, "secondary_calldata_read_counts"); - check_masked(polys.secondary_calldata_inverses, "secondary_calldata_inverses"); + check_unmasked(polys.kernel_calldata, "kernel_calldata"); + check_masked(polys.kernel_calldata_read_counts, "kernel_calldata_read_counts"); + check_masked(polys.kernel_calldata_inverses, "kernel_calldata_inverses"); + check_masked(polys.first_app_calldata, "first_app_calldata"); + check_masked(polys.first_app_calldata_read_counts, "first_app_calldata_read_counts"); + check_masked(polys.first_app_calldata_inverses, "first_app_calldata_inverses"); + check_masked(polys.second_app_calldata, "second_app_calldata"); + check_masked(polys.second_app_calldata_read_counts, "second_app_calldata_read_counts"); + check_masked(polys.second_app_calldata_inverses, "second_app_calldata_inverses"); + check_masked(polys.third_app_calldata, "third_app_calldata"); + check_masked(polys.third_app_calldata_read_counts, "third_app_calldata_read_counts"); + check_masked(polys.third_app_calldata_inverses, "third_app_calldata_inverses"); check_masked(polys.return_data, "return_data"); check_masked(polys.return_data_read_counts, "return_data_read_counts"); check_masked(polys.return_data_inverses, "return_data_inverses"); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp index 32494b09caea..d2157c11fe73 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp @@ -186,9 +186,10 @@ template void OinkProver::commit_to_z_perm() template void OinkProver::commit_to_masking_poly() { if constexpr (flavor_has_gemini_masking()) { - // Gemini masking poly only needs to cover the actual polynomial extent, not full dyadic size - const size_t 
polynomial_size = prover_instance->polynomials.max_end_index(); - prover_instance->polynomials.gemini_masking_poly = Polynomial::random(polynomial_size); + // virtual_size = dyadic_size matches every other witness poly, so sumcheck's pairwise read + // past end_index lands in the virtual-zero region. + prover_instance->polynomials.gemini_masking_poly = Polynomial::random( + prover_instance->polynomials.max_end_index(), prover_instance->dyadic_size(), /*start_index=*/0); // Commit to the masking polynomial and send to transcript auto masking_commitment = commitment_key.commit(prover_instance->polynomials.gemini_masking_poly); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.cpp index fe42a5e54451..a43e408dc808 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.cpp @@ -154,12 +154,14 @@ template void ProverInstance_::allocate_permutation_ar { BB_BENCH_NAME("allocate_permutation_argument_polynomials"); - // Sigma and ID polynomials are zero outside the active trace range + // Sigma and ID polynomials are zero outside the active trace range. Inside the active range, + // compute_honk_style_permutation_lagrange_polynomials_from_mapping writes every cell, so the + // backing memory can be left uninitialized. 
for (auto& sigma : polynomials.get_sigmas()) { - sigma = Polynomial::shiftable(trace_active_range_size(), dyadic_size()); + sigma = Polynomial::shiftable(trace_active_range_size(), dyadic_size(), Polynomial::DontZeroMemory::FLAG); } for (auto& id : polynomials.get_ids()) { - id = Polynomial::shiftable(trace_active_range_size(), dyadic_size()); + id = Polynomial::shiftable(trace_active_range_size(), dyadic_size(), Polynomial::DontZeroMemory::FLAG); } polynomials.z_perm = Polynomial::shiftable(trace_active_range_size(), dyadic_size(), Flavor::HasZK); @@ -248,7 +250,7 @@ void ProverInstance_::allocate_databus_polynomials(const Circuit& circui // Databus data uses NUM_DISABLED_ROWS_IN_SUMCHECK as its offset rather than Flavor::TRACE_OFFSET so that // commitments match across the IVC boundary (a non-ZK kernel's return_data is copy-constrained to a MegaZK - // hiding kernel's calldata). MegaZK additionally requires this offset to clear the masking region + // hiding kernel's kernel_calldata). MegaZK additionally requires this offset to clear the masking region // [1, NUM_DISABLED_ROWS_IN_SUMCHECK); non-ZK Mega mirrors the layout even though it has no masking. const auto offset_size = [](size_t content) -> size_t { return NUM_DISABLED_ROWS_IN_SUMCHECK + content; }; @@ -272,7 +274,7 @@ void ProverInstance_::allocate_databus_polynomials(const Circuit& circui inverse_ref[0] = Polynomial(std::max(offset_size(bus_size), q_busread_end), dyadic_size()); if constexpr (Flavor::HasZK) { - // Mask databus witness polynomials. The calldata values column (bus_idx == 0) is NOT + // Mask databus witness polynomials. The kernel_calldata values column (bus_idx == 0) is NOT // masked; its read_counts column is. 
auto& values_poly = entities[0]; auto& read_counts_poly = entities[1]; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp index 46d4af703c5f..8b0858953704 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp @@ -180,10 +180,13 @@ void create_some_databus_gates(auto& builder) { using FF = typename Flavor::FF; auto val = builder.add_variable(FF::random_element()); - builder.add_public_calldata(val); - builder.read_calldata(builder.add_variable(FF(0))); - builder.add_public_secondary_calldata(val); - builder.read_secondary_calldata(builder.add_variable(FF(0))); + builder.add_public_calldata(BusId::KERNEL_CALLDATA, val); + builder.read_calldata(BusId::KERNEL_CALLDATA, builder.add_variable(FF(0))); + for (size_t app_idx = 0; app_idx < MAX_APPS_PER_KERNEL; ++app_idx) { + auto bus_id = static_cast(static_cast(BusId::APP_CALLDATA) + app_idx); + builder.add_public_calldata(bus_id, val); + builder.read_calldata(bus_id, builder.add_variable(FF(0))); + } builder.add_public_return_data(val); builder.read_return_data(builder.add_variable(FF(0))); } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp index 5ff2ed46805c..7caa9d3e1ab6 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp @@ -201,12 +201,12 @@ template class UltraVerifier_ { } /** - * @brief Get calldata commitment (MegaFlavor only) + * @brief Get kernel calldata commitment (MegaFlavor only) */ - const Commitment& get_calldata_commitment() const + const Commitment& get_kernel_calldata_commitment() const requires IsMegaFlavor { - return verifier_instance->witness_commitments.calldata; + return 
verifier_instance->witness_commitments.kernel_calldata; } /** diff --git a/barretenberg/cpp/src/barretenberg/vm2/AGENTS.md b/barretenberg/cpp/src/barretenberg/vm2/AGENTS.md new file mode 120000 index 000000000000..681311eb9cf4 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/vm2/AGENTS.md @@ -0,0 +1 @@ +CLAUDE.md \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/vm2/api_avm.cpp b/barretenberg/cpp/src/barretenberg/vm2/api_avm.cpp index 2252ee3731ba..c7d92325fca7 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/api_avm.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/api_avm.cpp @@ -2,6 +2,7 @@ #include +#include "barretenberg/api/api_avm.hpp" #include "barretenberg/api/file_io.hpp" #include "barretenberg/common/map.hpp" #include "barretenberg/vm2/avm_api.hpp" @@ -94,4 +95,39 @@ void avm_write_verification_key(const std::filesystem::path& output_path) write_file(output_path / "vk", vk); } +AvmProveResult avm_prove_from_bytes(std::vector inputs) +{ + avm2::AvmAPI avm; + auto proving_inputs = avm2::AvmAPI::ProvingInputs::from(inputs); + auto proof = avm.prove(proving_inputs); + + print_avm_stats(); + + return AvmProveResult{ .proof = std::move(proof) }; +} + +bool avm_verify_from_bytes(std::vector proof, std::vector public_inputs) +{ + auto pi = avm2::PublicInputs::from(public_inputs); + + avm2::AvmAPI avm; + bool res = avm.verify(proof, pi); + info("verification: ", res ? "success" : "failure"); + + print_avm_stats(); + return res; +} + +bool avm_check_circuit_from_bytes(std::vector inputs) +{ + avm2::AvmAPI avm; + auto proving_inputs = avm2::AvmAPI::ProvingInputs::from(inputs); + + bool res = avm.check_circuit(proving_inputs); + info("circuit check: ", res ? 
"success" : "failure"); + + print_avm_stats(); + return res; +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/vm2/constraining/avm_fixed_vk.hpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/avm_fixed_vk.hpp index 52906d44e2c2..c060244f6965 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/avm_fixed_vk.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/avm_fixed_vk.hpp @@ -17,7 +17,7 @@ class AvmHardCodedVKAndHash { using FF = bb::curve::BN254::ScalarField; // Precomputed VK hash (hash of all commitments below). - static FF vk_hash() { return FF(uint256_t("0x183783fd7c3f269b595307224be99cf3773b781f8d1d59a305ce711101d43a60")); } + static FF vk_hash() { return FF(uint256_t("0x23a03c6f87c465dbecc386b091e8123a8936597b5b0749f276d042a8964bd390")); } static constexpr std::array get_all() { @@ -232,6 +232,18 @@ class AvmHardCodedVKAndHash { uint256_t("0x0752e216f6398f2dc16b86cd762f9bd9f961964f9c6a354530c45b04920f06ab"), uint256_t( "0x062522db0dc283ad1d328147904f0fdc0e44add870aa0b099cf16c3d73352a9e")), // precomputed_sel_addressing_gas + Commitment( + uint256_t("0x095419f3dc475e499012c5d001c266643669a19173217b51fd5f2a86b3e1a8b2"), + uint256_t( + "0x0f9bf4c4f62da52213998f25ab3eca754175cf4580e070f1abb251e2d8a8e64a")), // precomputed_sel_append_l2_l1_msg + Commitment( + uint256_t("0x2932e8961b4b905fe11c2f93092e57d7e541a9bb00aca69af2a6d213577670ea"), + uint256_t( + "0x20b57b640b0186c53727c6f4724dc71b51387a34d8db51af06b1f9ad3a92d467")), // precomputed_sel_append_note_hash + Commitment( + uint256_t("0x1eba8da14083ce2c1b307a5493006a232c89e55fa657f9c193f5654990f06544"), + uint256_t( + "0x07b64d628ee70ee71e89aaabb91abd9005096a24e8c4cd3543cbf1b9344e108f")), // precomputed_sel_append_nullifier Commitment( uint256_t("0x2059be69211e5ea9bb365ab69c1132eb7b7c6814925453953f62bf731e5e42f9"), uint256_t( @@ -280,18 +292,6 @@ class AvmHardCodedVKAndHash { uint256_t("0x089cdab4e8e8381977b093cb267a1b7c8c60f4466c39a99af1247e37fe56ebfe"), 
uint256_t( "0x1144347d2bfe5c1f4a6d44418562facb9a5c9c7bf2b6b463424e8b0915254710")), // precomputed_sel_mem_tag_out_of_range - Commitment( - uint256_t("0x020ad6e43ccd48a6a39e43897cc85187bd364919be8a3b82d4809715cfe489db"), - uint256_t( - "0x21a79ebae2ea3d92b49c521407d2600ac061146f2c188c6c6a33c598179e4543")), // precomputed_sel_non_revertible_append_l2_l1_msg - Commitment( - uint256_t("0x2d360628289ff943ff6bd1a87bbe4e62abe7fb61ba83effd266f22bdcf31e6f9"), - uint256_t( - "0x26b92a79e563c3f48252cce7feeca2f0f8d33dcb4ef7b0643bf07bd405700aaa")), // precomputed_sel_non_revertible_append_note_hash - Commitment( - uint256_t("0x0000000000000000000000000000000000000000000000000000000000000001"), - uint256_t( - "0x0000000000000000000000000000000000000000000000000000000000000002")), // precomputed_sel_non_revertible_append_nullifier Commitment( uint256_t("0x0bf1970c2e92fee577ba15d063fa78fdd17752cafd19261ff0f176a1d3348769"), uint256_t( @@ -404,18 +404,6 @@ class AvmHardCodedVKAndHash { uint256_t("0x2e51e57417ece86800e7afa2ac53cfffcf35343cfb4bad1f6016a5b657fc3bfe"), uint256_t( "0x2c8617a36d1bbb5e7bf06c192e8ffc9aa90c714d222f8c8c29ed6a8a7e5eb717")), // precomputed_sel_range_8 - Commitment( - uint256_t("0x262d212add82bcbcf96d0773c59926e1b8e68e45c662f9348f2e4f64770595b3"), - uint256_t( - "0x2fe4de705da2b7bfb03cb3baa199ed4cc97e6ce620d0e939b603493223e88703")), // precomputed_sel_revertible_append_l2_l1_msg - Commitment( - uint256_t("0x041008987db8f55ded689b589133da9860150ed8c97b6bb5e87f0a31f78582b8"), - uint256_t( - "0x113ecb4f4d07b4efb19a22b59e5634d58e5f1d5a433b08a32f1ac2bdd0e7c01a")), // precomputed_sel_revertible_append_note_hash - Commitment( - uint256_t("0x2a56ce41f6b0be13b9c26747621b821eee81b23a887f299049b14c11e98460d6"), - uint256_t( - "0x1aa98f2de3ddda547d8f6de4e725ded5827d6338c78656c0d12ca1aea6ef2c7c")), // precomputed_sel_revertible_append_nullifier Commitment( uint256_t("0x2db8d548af3efd182047c9081ce2870f3c2e7a96b4a6469aca26167209285d9b"), uint256_t( diff --git 
a/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp index d6d6723a6da8..ab7410838ff5 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/prover.cpp @@ -180,7 +180,7 @@ void AvmProver::execute_relation_check_rounds() /** * @brief Run the PCS to prove that the claimed evaluations are correct. * - * @details To optimize the usage of the ECCVM, we batch the polynomials using short scalars before executing Shplemini. + * @details To optimize performance time, we batch the polynomials using short scalars before executing Shplemini. * The batching proceeds in two phases (note that the unshifted polynomials contain copies of the shifted polynomials * that have not been shifted yet; this allows us to save some work by batching the shifted polynomials in their * to_be_shifted form and later shift them): @@ -196,7 +196,7 @@ void AvmProver::execute_pcs_rounds() using PolynomialBatcher = GeminiProver_::PolynomialBatcher; using Challenges = Flavor::AllEntities; - // Batch polynomials using short scalars to reduce ECCVM circuit size + // Batch polynomials using short scalars auto unshifted_polys = prover_polynomials.get_unshifted(); auto shifted_polys = prover_polynomials.get_to_be_shifted(); @@ -216,18 +216,49 @@ void AvmProver::execute_pcs_rounds() return static_cast(std::distance(polys.begin(), it)); }; + auto add_scaled_batched = + [](Polynomial& dst, const std::span& sources, const std::span& scalars, const size_t skip_idx) { + const size_t num_slots = bb::get_num_cpus(); + std::vector batched_polys(num_slots); + for (auto& poly : batched_polys) { + poly = Polynomial(dst.size(), dst.virtual_size(), dst.start_index()); + } + + // Chunks are consumed dynamically via an atomic counter: faster threads naturally pick up + // more chunks while the slot they write to stays fixed for the life of their outer task. 
+ std::atomic next_poly(0); + + // Accumulate polynomials: each thread picks up the next available polynomial + parallel_for(num_slots, [&](size_t slot_id) { + while (true) { + const size_t poly_id = next_poly.fetch_add(1, std::memory_order_relaxed); + if (poly_id >= sources.size()) { + break; + } + if (poly_id == skip_idx) { + continue; + } + + const size_t start_idx = sources[poly_id].start_index(); + const size_t end_idx = sources[poly_id].end_index(); + for (size_t idx = start_idx; idx < end_idx; idx++) { + batched_polys[slot_id].at(idx) += scalars[poly_id] * sources[poly_id][idx]; + } + } + }); + + for (const auto& poly : batched_polys) { + dst += poly; + } + }; + // Batch to be shifted polys in their to_be_shifted form // Search for poly with largest end index to avoid allocating a zero polynomial of circuit size size_t max_idx = index_of_max_end_index(shifted_polys); Polynomial batched_shifted = std::move(shifted_polys[max_idx]); batched_shifted *= shifted_challenges[max_idx]; - for (size_t idx = 0; const auto [poly, challenge] : zip_view(shifted_polys, shifted_challenges)) { - if (idx != max_idx) { - batched_shifted.add_scaled(poly, challenge); - } - idx++; - } + add_scaled_batched(batched_shifted, shifted_polys, shifted_challenges, max_idx); // Batch unshifted polys (to avoid allocating a zero polynomial of circuit size, we initialize the batched // polynomial with the polynomial of the largest size) @@ -236,15 +267,15 @@ void AvmProver::execute_pcs_rounds() Polynomial batched_unshifted = std::move(unshifted_polys[max_idx]); batched_unshifted *= unshifted_challenges[max_idx]; batched_unshifted += batched_shifted; - for (size_t idx = 0; const auto [poly, challenge] : zip_view(unshifted_polys, unshifted_challenges)) { - // Only operate in the range of not to be shifted polys, as the contribution for those has already been added - if (idx < WIRES_TO_BE_SHIFTED_START_IDX || idx >= WIRES_TO_BE_SHIFTED_END_IDX) { - if (idx != max_idx) { - 
batched_unshifted.add_scaled(poly, challenge); - } - } - idx++; - } + add_scaled_batched(batched_unshifted, + unshifted_polys.subspan(0, WIRES_TO_BE_SHIFTED_START_IDX), + unshifted_challenges.subspan(0, WIRES_TO_BE_SHIFTED_START_IDX), + max_idx); + add_scaled_batched(batched_unshifted, + unshifted_polys.subspan(WIRES_TO_BE_SHIFTED_END_IDX), + unshifted_challenges.subspan(WIRES_TO_BE_SHIFTED_END_IDX), + max_idx > WIRES_TO_BE_SHIFTED_END_IDX ? max_idx - WIRES_TO_BE_SHIFTED_END_IDX + : unshifted_polys.size()); const size_t circuit_dyadic_size = numeric::round_up_power_2(batched_unshifted.end_index()); @@ -289,5 +320,4 @@ HonkProof AvmProver::construct_proof() return export_proof(); } - } // namespace bb::avm2 diff --git a/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/bc_retrieval.test.cpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/bc_retrieval.test.cpp index f0296d21eb02..2614c9590ee1 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/bc_retrieval.test.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/bc_retrieval.test.cpp @@ -104,7 +104,7 @@ TEST_F(BytecodeRetrievalConstrainingTest, SuccessfulRetrieval) ClassIdDerivationTraceBuilder class_id_builder; IndexedTreeCheckTraceBuilder indexed_tree_check_builder; - FF nullifier_root = FF::random_element(); + FF nullifier_tree_root = FF::random_element(); FF public_data_tree_root = FF::random_element(); ContractInstance instance = random_contract_instance(); @@ -123,7 +123,7 @@ TEST_F(BytecodeRetrievalConstrainingTest, SuccessfulRetrieval) contract_instance_retrieval_builder.process({ { .address = instance.deployer, .contract_instance = { instance }, - .nullifier_tree_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .exists = true, } }, @@ -176,7 +176,7 @@ TEST_F(BytecodeRetrievalConstrainingTest, SuccessfulRetrieval) .address = instance.deployer, .current_class_id = 
instance.current_contract_class_id, .contract_class = klass, - .nullifier_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .retrieved_bytecodes_snapshot_before = snapshot_before, .retrieved_bytecodes_snapshot_after = snapshot_after, @@ -197,7 +197,7 @@ TEST_F(BytecodeRetrievalConstrainingTest, TooManyBytecodes) TestTraceContainer trace = init_trace(); BytecodeTraceBuilder builder; - FF nullifier_root = FF::random_element(); + FF nullifier_tree_root = FF::random_element(); FF public_data_tree_root = FF::random_element(); ContractInstance instance = random_contract_instance(); @@ -221,7 +221,7 @@ TEST_F(BytecodeRetrievalConstrainingTest, TooManyBytecodes) .bytecode_id = 0, // bytecode_id equals commitment .address = instance.deployer, .current_class_id = instance.current_contract_class_id, - .nullifier_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .retrieved_bytecodes_snapshot_before = snapshot_before, .retrieved_bytecodes_snapshot_after = snapshot_after, @@ -328,7 +328,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRetrievalFewerMock { TestTraceContainer trace = init_trace(); - FF nullifier_root = FF::random_element(); + FF nullifier_tree_root = FF::random_element(); FF public_data_tree_root = FF::random_element(); ContractInstance instance = random_contract_instance(); @@ -356,7 +356,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRetrievalFewerMock contract_instance_retrieval_builder.process({ { .address = instance.deployer, .contract_instance = { instance }, - .nullifier_tree_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .exists = true, } }, @@ -386,7 +386,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRetrievalFewerMock .address = instance.deployer, .current_class_id = 
instance.current_contract_class_id, .contract_class = klass, - .nullifier_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .retrieved_bytecodes_snapshot_before = snapshot_before, .retrieved_bytecodes_snapshot_after = snapshot_after, @@ -412,7 +412,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRepeatedRetrievalF { TestTraceContainer trace = init_trace(); - FF nullifier_root = FF::random_element(); + FF nullifier_tree_root = FF::random_element(); FF public_data_tree_root = FF::random_element(); ContractInstance instance = random_contract_instance(); @@ -441,7 +441,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRepeatedRetrievalF contract_instance_retrieval_builder.process({ { .address = instance.deployer, .contract_instance = { instance }, - .nullifier_tree_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .exists = true, } }, @@ -478,7 +478,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRepeatedRetrievalF .address = instance.deployer, .current_class_id = instance.current_contract_class_id, .contract_class = klass, - .nullifier_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .retrieved_bytecodes_snapshot_before = snapshot_before, .retrieved_bytecodes_snapshot_after = snapshot_after, @@ -489,7 +489,7 @@ TEST_F(BytecodeRetrievalConstrainingTestFewerMocks, SuccessfulRepeatedRetrievalF .address = instance.deployer, .current_class_id = instance.current_contract_class_id, .contract_class = klass, - .nullifier_root = nullifier_root, + .nullifier_tree_root = nullifier_tree_root, .public_data_tree_root = public_data_tree_root, .retrieved_bytecodes_snapshot_before = snapshot_after, .retrieved_bytecodes_snapshot_after = snapshot_after, diff --git 
a/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/tx.test.cpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/tx.test.cpp index 23fc4ba1a774..2330f2d13d62 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/tx.test.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/relations/tx.test.cpp @@ -95,7 +95,7 @@ class TxExecutionConstrainingTestHelper : public ::testing::Test { { C::tx_phase_value, static_cast(TransactionPhase::NR_NULLIFIER_INSERTION) }, { C::tx_is_padded, 1 }, { C::tx_is_tree_insert_phase, 1 }, - { C::tx_sel_non_revertible_append_nullifier, 1 }, + { C::tx_sel_append_nullifier, 1 }, { C::tx_read_pi_start_offset, AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX }, @@ -114,7 +114,7 @@ class TxExecutionConstrainingTestHelper : public ::testing::Test { { C::tx_phase_value, static_cast(TransactionPhase::NR_NOTE_INSERTION) }, { C::tx_is_padded, 1 }, { C::tx_is_tree_insert_phase, 1 }, - { C::tx_sel_non_revertible_append_note_hash, 1 }, + { C::tx_sel_append_note_hash, 1 }, { C::tx_read_pi_start_offset, AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX }, { C::tx_read_pi_offset, @@ -132,7 +132,7 @@ class TxExecutionConstrainingTestHelper : public ::testing::Test { { C::tx_sel, 1 }, { C::tx_phase_value, static_cast(TransactionPhase::NR_L2_TO_L1_MESSAGE) }, { C::tx_is_padded, 1 }, - { C::tx_sel_non_revertible_append_l2_l1_msg, 1 }, + { C::tx_sel_append_l2_l1_msg, 1 }, { C::tx_read_pi_start_offset, AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX }, @@ -187,7 +187,7 @@ class TxExecutionConstrainingTestHelper : public ::testing::Test { { C::tx_phase_value, static_cast(TransactionPhase::R_NULLIFIER_INSERTION) }, { C::tx_is_padded, 1 }, { C::tx_is_tree_insert_phase, 1 }, - { C::tx_sel_revertible_append_nullifier, 1 }, + { C::tx_sel_append_nullifier, 1 }, { C::tx_is_revertible, 1 }, { C::tx_read_pi_start_offset, 
AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX }, @@ -206,7 +206,7 @@ class TxExecutionConstrainingTestHelper : public ::testing::Test { { C::tx_phase_value, static_cast(TransactionPhase::R_NOTE_INSERTION) }, { C::tx_is_padded, 1 }, { C::tx_is_tree_insert_phase, 1 }, - { C::tx_sel_revertible_append_note_hash, 1 }, + { C::tx_sel_append_note_hash, 1 }, { C::tx_is_revertible, 1 }, { C::tx_read_pi_start_offset, AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX }, @@ -224,7 +224,7 @@ class TxExecutionConstrainingTestHelper : public ::testing::Test { { C::tx_sel, 1 }, { C::tx_phase_value, static_cast(TransactionPhase::R_L2_TO_L1_MESSAGE) }, { C::tx_is_padded, 1 }, - { C::tx_sel_revertible_append_l2_l1_msg, 1 }, + { C::tx_sel_append_l2_l1_msg, 1 }, { C::tx_is_revertible, 1 }, { C::tx_read_pi_start_offset, AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX }, @@ -423,7 +423,7 @@ TEST_F(TxExecutionConstrainingTestHelper, JumpOnRevert) trace.set(7, { { { C::tx_is_padded, 0 }, - { C::tx_sel_revertible_append_l2_l1_msg, 0 }, // switch off for testing + { C::tx_sel_append_l2_l1_msg, 0 }, // switch off for testing { C::tx_remaining_phase_counter, 1 }, { C::tx_remaining_phase_inv, 1 }, { C::tx_is_revertible, 1 }, @@ -488,7 +488,7 @@ TEST(TxExecutionConstrainingTest, WriteTreeValue) { C::tx_read_pi_offset, AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX }, { C::tx_write_pi_offset, AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX }, - { C::tx_sel_non_revertible_append_l2_l1_msg, 1 }, + { C::tx_sel_append_l2_l1_msg, 1 }, { C::tx_l2_l1_msg_content, test_public_inputs.previous_non_revertible_accumulated_data.l2_to_l1_msgs[0].message.content }, { C::tx_l2_l1_msg_recipient, @@ -546,7 +546,7 @@ TEST(TxExecutionConstrainingTest, WriteTreeValue) { C::tx_read_pi_offset, AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX }, { 
C::tx_write_pi_offset, AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX + 1 }, - { C::tx_sel_revertible_append_l2_l1_msg, 1 }, + { C::tx_sel_append_l2_l1_msg, 1 }, { C::tx_l2_l1_msg_content, test_public_inputs.previous_revertible_accumulated_data.l2_to_l1_msgs[0].message.content }, { C::tx_l2_l1_msg_recipient, diff --git a/barretenberg/cpp/src/barretenberg/vm2/generated/columns.hpp b/barretenberg/cpp/src/barretenberg/vm2/generated/columns.hpp index c5f65f828e6a..3d1bfac00e40 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/generated/columns.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/generated/columns.hpp @@ -7,8 +7,8 @@ namespace bb::avm2 { // clang-format off -#define AVM2_PRECOMPUTED_ENTITIES_E(e) e precomputed_addressing_gas, e precomputed_bitwise_input_a, e precomputed_bitwise_input_b, e precomputed_bitwise_output_and, e precomputed_bitwise_output_or, e precomputed_bitwise_output_xor, e precomputed_dyn_gas_id, e precomputed_envvar_pi_row_idx, e precomputed_exec_opcode, e precomputed_exec_opcode_base_da_gas, e precomputed_exec_opcode_dynamic_da_gas, e precomputed_exec_opcode_dynamic_l2_gas, e precomputed_exec_opcode_opcode_gas, e precomputed_expected_tag_reg_0_, e precomputed_expected_tag_reg_1_, e precomputed_expected_tag_reg_2_, e precomputed_expected_tag_reg_3_, e precomputed_expected_tag_reg_4_, e precomputed_expected_tag_reg_5_, e precomputed_first_row, e precomputed_idx, e precomputed_instr_size, e precomputed_invalid_envvar_enum, e precomputed_is_address, e precomputed_is_class_id, e precomputed_is_cleanup, e precomputed_is_collect_fee, e precomputed_is_dagasleft, e precomputed_is_deployer, e precomputed_is_init_hash, e precomputed_is_isstaticcall, e precomputed_is_l2gasleft, e precomputed_is_public_call_request, e precomputed_is_revertible, e precomputed_is_sender, e precomputed_is_teardown, e precomputed_is_transactionfee, e precomputed_is_tree_padding, e precomputed_is_valid_member_enum, e precomputed_keccak_round_constant, e 
precomputed_next_phase_on_revert, e precomputed_opcode_out_of_range, e precomputed_out_tag, e precomputed_p_decomposition_limb, e precomputed_p_decomposition_limb_index, e precomputed_p_decomposition_radix, e precomputed_power_of_2, e precomputed_read_pi_length_offset, e precomputed_read_pi_start_offset, e precomputed_rw_reg_0_, e precomputed_rw_reg_1_, e precomputed_rw_reg_2_, e precomputed_rw_reg_3_, e precomputed_rw_reg_4_, e precomputed_rw_reg_5_, e precomputed_sel_addressing_gas, e precomputed_sel_envvar_pi_lookup_col0, e precomputed_sel_envvar_pi_lookup_col1, e precomputed_sel_exec_spec, e precomputed_sel_has_tag, e precomputed_sel_keccak, e precomputed_sel_mem_op_reg_0_, e precomputed_sel_mem_op_reg_1_, e precomputed_sel_mem_op_reg_2_, e precomputed_sel_mem_op_reg_3_, e precomputed_sel_mem_op_reg_4_, e precomputed_sel_mem_op_reg_5_, e precomputed_sel_mem_tag_out_of_range, e precomputed_sel_non_revertible_append_l2_l1_msg, e precomputed_sel_non_revertible_append_note_hash, e precomputed_sel_non_revertible_append_nullifier, e precomputed_sel_op_dc_0, e precomputed_sel_op_dc_1, e precomputed_sel_op_dc_10, e precomputed_sel_op_dc_11, e precomputed_sel_op_dc_12, e precomputed_sel_op_dc_13, e precomputed_sel_op_dc_14, e precomputed_sel_op_dc_15, e precomputed_sel_op_dc_16, e precomputed_sel_op_dc_2, e precomputed_sel_op_dc_3, e precomputed_sel_op_dc_4, e precomputed_sel_op_dc_5, e precomputed_sel_op_dc_6, e precomputed_sel_op_dc_7, e precomputed_sel_op_dc_8, e precomputed_sel_op_dc_9, e precomputed_sel_op_is_address_0_, e precomputed_sel_op_is_address_1_, e precomputed_sel_op_is_address_2_, e precomputed_sel_op_is_address_3_, e precomputed_sel_op_is_address_4_, e precomputed_sel_op_is_address_5_, e precomputed_sel_op_is_address_6_, e precomputed_sel_p_decomposition, e precomputed_sel_phase, e precomputed_sel_range_16, e precomputed_sel_range_8, e precomputed_sel_revertible_append_l2_l1_msg, e precomputed_sel_revertible_append_note_hash, e 
precomputed_sel_revertible_append_nullifier, e precomputed_sel_sha256_compression, e precomputed_sel_tag_check_reg_0_, e precomputed_sel_tag_check_reg_1_, e precomputed_sel_tag_check_reg_2_, e precomputed_sel_tag_check_reg_3_, e precomputed_sel_tag_check_reg_4_, e precomputed_sel_tag_check_reg_5_, e precomputed_sel_tag_is_op2, e precomputed_sel_tag_parameters, e precomputed_sel_to_radix_p_limb_counts, e precomputed_sha256_compression_round_constant, e precomputed_subtrace_id, e precomputed_subtrace_operation_id, e precomputed_tag_byte_length, e precomputed_tag_max_bits, e precomputed_tag_max_value, e precomputed_to_radix_num_limbs_for_p, e precomputed_to_radix_safe_limbs, e precomputed_zero, e public_inputs_sel -#define AVM2_WIRE_ENTITIES_E(e) e public_inputs_cols_0_, e public_inputs_cols_1_, e public_inputs_cols_2_, e public_inputs_cols_3_, e address_derivation_address, e address_derivation_address_y, e address_derivation_class_id, e address_derivation_const_four, e address_derivation_const_thirteen, e address_derivation_const_three, e address_derivation_const_two, e address_derivation_deployer_addr, e address_derivation_g1_x, e address_derivation_g1_y, e address_derivation_incoming_viewing_key_x, e address_derivation_incoming_viewing_key_y, e address_derivation_init_hash, e address_derivation_nullifier_key_x, e address_derivation_nullifier_key_y, e address_derivation_outgoing_viewing_key_x, e address_derivation_outgoing_viewing_key_y, e address_derivation_partial_address, e address_derivation_partial_address_domain_separator, e address_derivation_preaddress, e address_derivation_preaddress_domain_separator, e address_derivation_preaddress_public_key_x, e address_derivation_preaddress_public_key_y, e address_derivation_public_keys_hash, e address_derivation_public_keys_hash_domain_separator, e address_derivation_salt, e address_derivation_salted_init_hash, e address_derivation_salted_init_hash_domain_separator, e address_derivation_sel, e 
address_derivation_tagging_key_x, e address_derivation_tagging_key_y, e alu_a_hi, e alu_a_hi_bits, e alu_a_lo, e alu_a_lo_bits, e alu_ab_diff_inv, e alu_ab_tags_diff_inv, e alu_b_hi, e alu_b_inv, e alu_b_lo, e alu_c_hi, e alu_cf, e alu_constant_64, e alu_gt_input_a, e alu_gt_input_b, e alu_gt_result_c, e alu_helper1, e alu_ia, e alu_ia_tag, e alu_ib, e alu_ib_tag, e alu_ic, e alu_ic_tag, e alu_max_bits, e alu_max_value, e alu_mid, e alu_mid_bits, e alu_op_id, e alu_sel, e alu_sel_ab_tag_mismatch, e alu_sel_decompose_a, e alu_sel_div_0_err, e alu_sel_div_no_err, e alu_sel_err, e alu_sel_ff_gt, e alu_sel_int_gt, e alu_sel_is_ff, e alu_sel_is_u128, e alu_sel_mul_div_u128, e alu_sel_mul_no_err_non_ff, e alu_sel_op_add, e alu_sel_op_div, e alu_sel_op_eq, e alu_sel_op_fdiv, e alu_sel_op_lt, e alu_sel_op_lte, e alu_sel_op_mul, e alu_sel_op_not, e alu_sel_op_shl, e alu_sel_op_shr, e alu_sel_op_sub, e alu_sel_op_truncate, e alu_sel_shift_ops_no_overflow, e alu_sel_tag_err, e alu_sel_trunc_gte_128, e alu_sel_trunc_lt_128, e alu_sel_trunc_non_trivial, e alu_sel_trunc_trivial, e alu_shift_lo_bits, e alu_tag_ff_diff_inv, e alu_tag_u128_diff_inv, e alu_two_pow_shift_lo_bits, e bc_decomposition_bytes_pc_plus_36, e bc_decomposition_bytes_rem_inv, e bc_decomposition_bytes_rem_min_one_inv, e bc_decomposition_bytes_to_read, e bc_decomposition_last_of_contract, e bc_decomposition_next_packed_pc_min_pc_inv, e bc_decomposition_packed_field, e bc_decomposition_sel_packed, e bc_decomposition_sel_packed_read_0_, e bc_decomposition_sel_packed_read_1_, e bc_decomposition_sel_packed_read_2_, e bc_decomposition_sel_windows_eq_remaining, e bc_decomposition_windows_min_remaining_inv, e bc_hashing_end, e bc_hashing_input_len, e bc_hashing_packed_fields_0, e bc_hashing_packed_fields_1, e bc_hashing_packed_fields_2, e bc_hashing_pc_index, e bc_hashing_pc_index_2, e bc_hashing_sel_not_padding_1, e bc_hashing_sel_not_padding_2, e bc_hashing_size_in_bytes, e bc_retrieval_address, e 
bc_retrieval_artifact_hash, e bc_retrieval_bytecode_id, e bc_retrieval_current_class_id, e bc_retrieval_error, e bc_retrieval_instance_exists, e bc_retrieval_is_new_class, e bc_retrieval_next_retrieved_bytecodes_tree_root, e bc_retrieval_next_retrieved_bytecodes_tree_size, e bc_retrieval_no_remaining_bytecodes, e bc_retrieval_nullifier_tree_root, e bc_retrieval_prev_retrieved_bytecodes_tree_root, e bc_retrieval_prev_retrieved_bytecodes_tree_size, e bc_retrieval_private_functions_root, e bc_retrieval_public_data_tree_root, e bc_retrieval_remaining_bytecodes_inv, e bc_retrieval_retrieved_bytecodes_merkle_separator, e bc_retrieval_retrieved_bytecodes_tree_height, e bc_retrieval_sel, e bc_retrieval_should_retrieve, e bitwise_ctr_min_one_inv, e bitwise_end, e bitwise_err, e bitwise_ia_byte, e bitwise_ib_byte, e bitwise_ic_byte, e bitwise_output_and, e bitwise_output_or, e bitwise_output_xor, e bitwise_sel_and, e bitwise_sel_compute, e bitwise_sel_get_ctr, e bitwise_sel_or, e bitwise_sel_tag_ff_err, e bitwise_sel_tag_mismatch_err, e bitwise_sel_xor, e bitwise_start_keccak, e bitwise_start_sha256, e bitwise_tag_a, e bitwise_tag_a_inv, e bitwise_tag_ab_diff_inv, e bitwise_tag_b, e bitwise_tag_c, e calldata_end, e calldata_hashing_end, e calldata_hashing_index_1_, e calldata_hashing_index_2_, e calldata_hashing_input_0_, e calldata_hashing_input_1_, e calldata_hashing_input_2_, e calldata_hashing_input_len, e calldata_hashing_sel_end_not_empty, e calldata_hashing_sel_not_padding_1, e calldata_hashing_sel_not_padding_2, e calldata_hashing_sel_not_start, e calldata_value, e class_id_derivation_artifact_hash, e class_id_derivation_class_id, e class_id_derivation_const_four, e class_id_derivation_gen_index_contract_class_id, e class_id_derivation_private_functions_root, e class_id_derivation_public_bytecode_commitment, e class_id_derivation_sel, e context_stack_bytecode_id, e context_stack_context_id, e context_stack_contract_address, e context_stack_entered_context_id, e 
context_stack_internal_call_id, e context_stack_internal_call_return_id, e context_stack_is_static, e context_stack_msg_sender, e context_stack_next_internal_call_id, e context_stack_next_pc, e context_stack_note_hash_tree_root, e context_stack_note_hash_tree_size, e context_stack_nullifier_tree_root, e context_stack_nullifier_tree_size, e context_stack_num_l2_to_l1_messages, e context_stack_num_note_hashes_emitted, e context_stack_num_nullifiers_emitted, e context_stack_num_public_log_fields, e context_stack_parent_calldata_addr, e context_stack_parent_calldata_size, e context_stack_parent_da_gas_limit, e context_stack_parent_da_gas_used, e context_stack_parent_id, e context_stack_parent_l2_gas_limit, e context_stack_parent_l2_gas_used, e context_stack_public_data_tree_root, e context_stack_public_data_tree_size, e context_stack_sel, e context_stack_written_public_data_slots_tree_root, e context_stack_written_public_data_slots_tree_size, e contract_instance_retrieval_address, e contract_instance_retrieval_address_sub_one, e contract_instance_retrieval_current_class_id, e contract_instance_retrieval_deployer_addr, e contract_instance_retrieval_deployer_protocol_contract_address, e contract_instance_retrieval_derived_address, e contract_instance_retrieval_derived_address_pi_index, e contract_instance_retrieval_exists, e contract_instance_retrieval_incoming_viewing_key_x, e contract_instance_retrieval_incoming_viewing_key_y, e contract_instance_retrieval_init_hash, e contract_instance_retrieval_is_protocol_contract, e contract_instance_retrieval_max_protocol_contracts, e contract_instance_retrieval_nullifier_key_x, e contract_instance_retrieval_nullifier_key_y, e contract_instance_retrieval_nullifier_merkle_separator, e contract_instance_retrieval_nullifier_tree_height, e contract_instance_retrieval_nullifier_tree_root, e contract_instance_retrieval_original_class_id, e contract_instance_retrieval_outgoing_viewing_key_x, e 
contract_instance_retrieval_outgoing_viewing_key_y, e contract_instance_retrieval_protocol_contract_derived_address_inv, e contract_instance_retrieval_public_data_tree_root, e contract_instance_retrieval_salt, e contract_instance_retrieval_sel, e contract_instance_retrieval_should_check_for_update, e contract_instance_retrieval_should_check_nullifier, e contract_instance_retrieval_siloing_separator, e contract_instance_retrieval_tagging_key_x, e contract_instance_retrieval_tagging_key_y, e data_copy_cd_copy_col_read, e data_copy_clamped_read_index_upper_bound, e data_copy_dst_out_of_range_err, e data_copy_end, e data_copy_is_top_level, e data_copy_mem_size, e data_copy_offset, e data_copy_offset_plus_size, e data_copy_offset_plus_size_is_gt, e data_copy_parent_id_inv, e data_copy_read_addr_plus_one, e data_copy_read_addr_upper_bound, e data_copy_reads_left_inv, e data_copy_sel_cd_copy_start, e data_copy_sel_has_reads, e data_copy_sel_mem_read, e data_copy_sel_mem_write, e data_copy_sel_rd_copy_start, e data_copy_sel_write_count_is_zero, e data_copy_src_addr, e data_copy_src_data_size, e data_copy_src_reads_exceed_mem, e data_copy_start_no_err, e data_copy_tag, e data_copy_value, e data_copy_write_addr_upper_bound, e data_copy_write_count_minus_one_inv, e data_copy_write_count_zero_inv, e ecc_add_mem_dst_addr_0_, e ecc_add_mem_dst_addr_1_, e ecc_add_mem_dst_addr_2_, e ecc_add_mem_err, e ecc_add_mem_execution_clk, e ecc_add_mem_max_mem_addr, e ecc_add_mem_p_is_inf, e ecc_add_mem_p_is_on_curve_eqn, e ecc_add_mem_p_is_on_curve_eqn_inv, e ecc_add_mem_p_x, e ecc_add_mem_p_x_n, e ecc_add_mem_p_y, e ecc_add_mem_p_y_n, e ecc_add_mem_q_is_inf, e ecc_add_mem_q_is_on_curve_eqn, e ecc_add_mem_q_is_on_curve_eqn_inv, e ecc_add_mem_q_x, e ecc_add_mem_q_x_n, e ecc_add_mem_q_y, e ecc_add_mem_q_y_n, e ecc_add_mem_res_is_inf, e ecc_add_mem_res_x, e ecc_add_mem_res_y, e ecc_add_mem_sel, e ecc_add_mem_sel_dst_out_of_range_err, e ecc_add_mem_sel_p_not_on_curve_err, e 
ecc_add_mem_sel_q_not_on_curve_err, e ecc_add_mem_sel_should_exec, e ecc_add_mem_space_id, e ecc_add_op, e ecc_double_op, e ecc_inv_2_p_y, e ecc_inv_x_diff, e ecc_inv_y_diff, e ecc_lambda, e ecc_p_is_inf, e ecc_p_x, e ecc_p_y, e ecc_q_is_inf, e ecc_q_x, e ecc_q_y, e ecc_r_is_inf, e ecc_r_x, e ecc_r_y, e ecc_result_infinity, e ecc_sel, e ecc_use_computed_result, e ecc_x_match, e ecc_y_match, e emit_public_log_discard, e emit_public_log_end, e emit_public_log_end_log_address_upper_bound, e emit_public_log_error, e emit_public_log_error_too_many_log_fields, e emit_public_log_expected_next_log_fields, e emit_public_log_is_static, e emit_public_log_log_size, e emit_public_log_max_mem_size, e emit_public_log_max_public_logs_payload_length, e emit_public_log_next_num_public_log_fields, e emit_public_log_prev_num_public_log_fields, e emit_public_log_public_inputs_value, e emit_public_log_remaining_rows_inv, e emit_public_log_sel_read_memory, e emit_public_log_tag, e emit_public_log_tag_inv, e emit_public_log_value, e execution_addressing_error_collection_inv, e execution_addressing_gas, e execution_addressing_mode, e execution_base_address_tag, e execution_base_address_tag_diff_inv, e execution_base_address_val, e execution_base_da_gas, e execution_batched_tags_diff_inv, e execution_batched_tags_diff_inv_reg, e execution_da_gas_left, e execution_da_gas_used, e execution_dying_context_diff_inv, e execution_dying_context_id_inv, e execution_dyn_gas_id, e execution_dynamic_da_gas, e execution_dynamic_da_gas_factor, e execution_dynamic_l2_gas, e execution_dynamic_l2_gas_factor, e execution_enqueued_call_end, e execution_envvar_pi_row_idx, e execution_exec_opcode, e execution_expected_tag_reg_0_, e execution_expected_tag_reg_1_, e execution_expected_tag_reg_2_, e execution_expected_tag_reg_3_, e execution_expected_tag_reg_4_, e execution_expected_tag_reg_5_, e execution_has_parent_ctx, e execution_highest_address, e execution_instr_size, e execution_internal_call_return_id_inv, 
e execution_is_address, e execution_is_da_gas_left_gt_allocated, e execution_is_dagasleft, e execution_is_dying_context, e execution_is_isstaticcall, e execution_is_l2_gas_left_gt_allocated, e execution_is_l2gasleft, e execution_is_parent_id_inv, e execution_is_sender, e execution_is_transactionfee, e execution_l1_to_l2_msg_leaf_in_range, e execution_l1_to_l2_msg_tree_leaf_count, e execution_l2_gas_left, e execution_l2_gas_used, e execution_max_data_writes_reached, e execution_max_eth_address_value, e execution_mem_tag_reg_0_, e execution_mem_tag_reg_1_, e execution_mem_tag_reg_2_, e execution_mem_tag_reg_3_, e execution_mem_tag_reg_4_, e execution_mem_tag_reg_5_, e execution_nested_failure, e execution_nested_return, e execution_next_pc, e execution_note_hash_leaf_in_range, e execution_note_hash_tree_leaf_count, e execution_note_hash_tree_root, e execution_note_hash_tree_size, e execution_nullifier_merkle_separator, e execution_nullifier_pi_offset, e execution_nullifier_siloing_separator, e execution_nullifier_tree_height, e execution_nullifier_tree_root, e execution_nullifier_tree_size, e execution_num_l2_to_l1_messages, e execution_num_note_hashes_emitted, e execution_num_nullifiers_emitted, e execution_num_p_limbs, e execution_num_public_log_fields, e execution_num_relative_operands_inv, e execution_op_0_, e execution_op_1_, e execution_op_2_, e execution_op_3_, e execution_op_4_, e execution_op_5_, e execution_op_6_, e execution_op_after_relative_0_, e execution_op_after_relative_1_, e execution_op_after_relative_2_, e execution_op_after_relative_3_, e execution_op_after_relative_4_, e execution_op_after_relative_5_, e execution_op_after_relative_6_, e execution_opcode_gas, e execution_out_of_gas_da, e execution_out_of_gas_l2, e execution_public_data_tree_root, e execution_public_data_tree_size, e execution_public_inputs_index, e execution_register_0_, e execution_register_1_, e execution_register_2_, e execution_register_3_, e execution_register_4_, e 
execution_register_5_, e execution_remaining_data_writes_inv, e execution_remaining_l2_to_l1_msgs_inv, e execution_remaining_note_hashes_inv, e execution_remaining_nullifiers_inv, e execution_retrieved_bytecodes_tree_root, e execution_retrieved_bytecodes_tree_size, e execution_rop_0_, e execution_rop_1_, e execution_rop_2_, e execution_rop_3_, e execution_rop_4_, e execution_rop_5_, e execution_rop_6_, e execution_rop_tag_0_, e execution_rop_tag_1_, e execution_rop_tag_2_, e execution_rop_tag_3_, e execution_rop_tag_4_, e execution_rop_tag_5_, e execution_rop_tag_6_, e execution_rw_reg_0_, e execution_rw_reg_1_, e execution_rw_reg_2_, e execution_rw_reg_3_, e execution_rw_reg_4_, e execution_rw_reg_5_, e execution_sel_addressing_error, e execution_sel_apply_indirection_0_, e execution_sel_apply_indirection_1_, e execution_sel_apply_indirection_2_, e execution_sel_apply_indirection_3_, e execution_sel_apply_indirection_4_, e execution_sel_apply_indirection_5_, e execution_sel_apply_indirection_6_, e execution_sel_base_address_failure, e execution_sel_bytecode_retrieval_failure, e execution_sel_bytecode_retrieval_success, e execution_sel_check_gas, e execution_sel_do_base_check, e execution_sel_enter_call, e execution_sel_envvar_pi_lookup_col0, e execution_sel_envvar_pi_lookup_col1, e execution_sel_error, e execution_sel_exec_dispatch_alu, e execution_sel_exec_dispatch_bitwise, e execution_sel_exec_dispatch_calldata_copy, e execution_sel_exec_dispatch_cast, e execution_sel_exec_dispatch_ecc_add, e execution_sel_exec_dispatch_emit_public_log, e execution_sel_exec_dispatch_execution, e execution_sel_exec_dispatch_get_contract_instance, e execution_sel_exec_dispatch_keccakf1600, e execution_sel_exec_dispatch_poseidon2_perm, e execution_sel_exec_dispatch_returndata_copy, e execution_sel_exec_dispatch_set, e execution_sel_exec_dispatch_sha256_compression, e execution_sel_exec_dispatch_to_radix, e execution_sel_execute_call, e execution_sel_execute_debug_log, e 
execution_sel_execute_emit_notehash, e execution_sel_execute_emit_nullifier, e execution_sel_execute_get_env_var, e execution_sel_execute_internal_call, e execution_sel_execute_internal_return, e execution_sel_execute_jump, e execution_sel_execute_jumpi, e execution_sel_execute_l1_to_l2_message_exists, e execution_sel_execute_mov, e execution_sel_execute_notehash_exists, e execution_sel_execute_nullifier_exists, e execution_sel_execute_opcode, e execution_sel_execute_return, e execution_sel_execute_returndata_size, e execution_sel_execute_revert, e execution_sel_execute_send_l2_to_l1_msg, e execution_sel_execute_sload, e execution_sel_execute_sstore, e execution_sel_execute_static_call, e execution_sel_execute_success_copy, e execution_sel_exit_call, e execution_sel_failure, e execution_sel_gas_bitwise, e execution_sel_gas_calldata_copy, e execution_sel_gas_emit_public_log, e execution_sel_gas_returndata_copy, e execution_sel_gas_sstore, e execution_sel_gas_to_radix, e execution_sel_instruction_fetching_failure, e execution_sel_instruction_fetching_success, e execution_sel_l2_to_l1_msg_limit_error, e execution_sel_lookup_num_p_limbs, e execution_sel_mem_op_reg_0_, e execution_sel_mem_op_reg_1_, e execution_sel_mem_op_reg_2_, e execution_sel_mem_op_reg_3_, e execution_sel_mem_op_reg_4_, e execution_sel_mem_op_reg_5_, e execution_sel_op_do_overflow_check_0_, e execution_sel_op_do_overflow_check_1_, e execution_sel_op_do_overflow_check_2_, e execution_sel_op_do_overflow_check_3_, e execution_sel_op_do_overflow_check_4_, e execution_sel_op_do_overflow_check_5_, e execution_sel_op_do_overflow_check_6_, e execution_sel_op_is_address_0_, e execution_sel_op_is_address_1_, e execution_sel_op_is_address_2_, e execution_sel_op_is_address_3_, e execution_sel_op_is_address_4_, e execution_sel_op_is_address_5_, e execution_sel_op_is_address_6_, e execution_sel_op_is_indirect_wire_0_, e execution_sel_op_is_indirect_wire_1_, e execution_sel_op_is_indirect_wire_2_, e 
execution_sel_op_is_indirect_wire_3_, e execution_sel_op_is_indirect_wire_4_, e execution_sel_op_is_indirect_wire_5_, e execution_sel_op_is_indirect_wire_6_, e execution_sel_op_is_indirect_wire_7_, e execution_sel_op_is_relative_wire_0_, e execution_sel_op_is_relative_wire_1_, e execution_sel_op_is_relative_wire_2_, e execution_sel_op_is_relative_wire_3_, e execution_sel_op_is_relative_wire_4_, e execution_sel_op_is_relative_wire_5_, e execution_sel_op_is_relative_wire_6_, e execution_sel_op_is_relative_wire_7_, e execution_sel_op_reg_effective_0_, e execution_sel_op_reg_effective_1_, e execution_sel_op_reg_effective_2_, e execution_sel_op_reg_effective_3_, e execution_sel_op_reg_effective_4_, e execution_sel_op_reg_effective_5_, e execution_sel_opcode_error, e execution_sel_out_of_gas, e execution_sel_radix_gt_256, e execution_sel_reached_max_note_hashes, e execution_sel_reached_max_nullifiers, e execution_sel_read_registers, e execution_sel_read_unwind_call_stack, e execution_sel_register_read_error, e execution_sel_relative_overflow_0_, e execution_sel_relative_overflow_1_, e execution_sel_relative_overflow_2_, e execution_sel_relative_overflow_3_, e execution_sel_relative_overflow_4_, e execution_sel_relative_overflow_5_, e execution_sel_relative_overflow_6_, e execution_sel_some_final_check_failed, e execution_sel_tag_check_reg_0_, e execution_sel_tag_check_reg_1_, e execution_sel_tag_check_reg_2_, e execution_sel_tag_check_reg_3_, e execution_sel_tag_check_reg_4_, e execution_sel_tag_check_reg_5_, e execution_sel_too_large_recipient_error, e execution_sel_use_num_limbs, e execution_sel_write_l2_to_l1_msg, e execution_sel_write_note_hash, e execution_sel_write_nullifier, e execution_sel_write_public_data, e execution_sel_write_registers, e execution_subtrace_id, e execution_subtrace_operation_id, e execution_total_gas_da, e execution_total_gas_l2, e execution_two_five_six, e execution_value_from_pi, e execution_written_public_data_slots_tree_root, e 
execution_written_public_data_slots_tree_size, e execution_written_slots_merkle_separator, e execution_written_slots_tree_height, e execution_written_slots_tree_siloing_separator, e ff_gt_a, e ff_gt_b, e ff_gt_borrow, e ff_gt_constant_128, e ff_gt_end, e ff_gt_p_a_borrow, e ff_gt_p_b_borrow, e ff_gt_res_hi, e ff_gt_res_lo, e ff_gt_result, e get_contract_instance_clk, e get_contract_instance_contract_address, e get_contract_instance_dst_offset, e get_contract_instance_dst_offset_diff_max_inv, e get_contract_instance_exists_tag, e get_contract_instance_instance_exists, e get_contract_instance_is_class_id, e get_contract_instance_is_deployer, e get_contract_instance_is_init_hash, e get_contract_instance_is_valid_member_enum, e get_contract_instance_is_valid_writes_in_bounds, e get_contract_instance_member_enum, e get_contract_instance_member_tag, e get_contract_instance_member_write_offset, e get_contract_instance_nullifier_tree_root, e get_contract_instance_public_data_tree_root, e get_contract_instance_retrieved_class_id, e get_contract_instance_retrieved_deployer_addr, e get_contract_instance_retrieved_init_hash, e get_contract_instance_sel, e get_contract_instance_sel_error, e get_contract_instance_selected_member, e get_contract_instance_space_id, e gt_abs_diff, e gt_input_a, e gt_input_b, e gt_num_bits, e gt_res, e gt_sel, e gt_sel_addressing, e gt_sel_alu, e gt_sel_gas, e gt_sel_others, e gt_sel_sha256, e indexed_tree_check_address, e indexed_tree_check_const_three, e indexed_tree_check_discard, e indexed_tree_check_exists, e indexed_tree_check_intermediate_root, e indexed_tree_check_low_leaf_hash, e indexed_tree_check_low_leaf_index, e indexed_tree_check_low_leaf_next_index, e indexed_tree_check_low_leaf_next_value, e indexed_tree_check_low_leaf_value, e indexed_tree_check_merkle_hash_separator, e indexed_tree_check_new_leaf_hash, e indexed_tree_check_next_value_inv, e indexed_tree_check_next_value_is_nonzero, e indexed_tree_check_not_exists, e 
indexed_tree_check_public_inputs_index, e indexed_tree_check_root, e indexed_tree_check_sel, e indexed_tree_check_sel_insert, e indexed_tree_check_sel_silo, e indexed_tree_check_sel_write_to_public_inputs, e indexed_tree_check_siloed_value, e indexed_tree_check_siloing_separator, e indexed_tree_check_tree_height, e indexed_tree_check_tree_size_after_write, e indexed_tree_check_tree_size_before_write, e indexed_tree_check_updated_low_leaf_hash, e indexed_tree_check_updated_low_leaf_next_index, e indexed_tree_check_updated_low_leaf_next_value, e indexed_tree_check_value, e indexed_tree_check_value_low_leaf_value_diff_inv, e indexed_tree_check_write, e indexed_tree_check_write_root, e instr_fetching_addressing_mode, e instr_fetching_bd0, e instr_fetching_bd1, e instr_fetching_bd10, e instr_fetching_bd11, e instr_fetching_bd12, e instr_fetching_bd13, e instr_fetching_bd14, e instr_fetching_bd15, e instr_fetching_bd16, e instr_fetching_bd17, e instr_fetching_bd18, e instr_fetching_bd19, e instr_fetching_bd2, e instr_fetching_bd20, e instr_fetching_bd21, e instr_fetching_bd22, e instr_fetching_bd23, e instr_fetching_bd24, e instr_fetching_bd25, e instr_fetching_bd26, e instr_fetching_bd27, e instr_fetching_bd28, e instr_fetching_bd29, e instr_fetching_bd3, e instr_fetching_bd30, e instr_fetching_bd31, e instr_fetching_bd32, e instr_fetching_bd33, e instr_fetching_bd34, e instr_fetching_bd35, e instr_fetching_bd36, e instr_fetching_bd4, e instr_fetching_bd5, e instr_fetching_bd6, e instr_fetching_bd7, e instr_fetching_bd8, e instr_fetching_bd9, e instr_fetching_bytecode_id, e instr_fetching_bytecode_size, e instr_fetching_bytes_to_read, e instr_fetching_exec_opcode, e instr_fetching_instr_abs_diff, e instr_fetching_instr_out_of_range, e instr_fetching_instr_size, e instr_fetching_op1, e instr_fetching_op2, e instr_fetching_op3, e instr_fetching_op4, e instr_fetching_op5, e instr_fetching_op6, e instr_fetching_op7, e instr_fetching_opcode_out_of_range, e instr_fetching_pc, 
e instr_fetching_pc_abs_diff, e instr_fetching_pc_out_of_range, e instr_fetching_pc_size_in_bits, e instr_fetching_sel, e instr_fetching_sel_has_tag, e instr_fetching_sel_op_dc_0, e instr_fetching_sel_op_dc_1, e instr_fetching_sel_op_dc_10, e instr_fetching_sel_op_dc_11, e instr_fetching_sel_op_dc_12, e instr_fetching_sel_op_dc_13, e instr_fetching_sel_op_dc_14, e instr_fetching_sel_op_dc_15, e instr_fetching_sel_op_dc_16, e instr_fetching_sel_op_dc_2, e instr_fetching_sel_op_dc_3, e instr_fetching_sel_op_dc_4, e instr_fetching_sel_op_dc_5, e instr_fetching_sel_op_dc_6, e instr_fetching_sel_op_dc_7, e instr_fetching_sel_op_dc_8, e instr_fetching_sel_op_dc_9, e instr_fetching_sel_parsing_err, e instr_fetching_sel_pc_in_range, e instr_fetching_sel_tag_is_op2, e instr_fetching_tag_out_of_range, e instr_fetching_tag_value, e internal_call_stack_call_id, e internal_call_stack_context_id, e internal_call_stack_entered_call_id, e internal_call_stack_return_call_id, e internal_call_stack_return_pc, e internal_call_stack_sel, e keccak_memory_ctr_end, e keccak_memory_end, e keccak_memory_single_tag_error, e keccak_memory_state_size_min_ctr_inv, e keccak_memory_tag, e keccak_memory_tag_min_u64_inv, e keccak_memory_val_24_, e keccakf1600_bitwise_and_op_id, e keccakf1600_bitwise_xor_op_id, e keccakf1600_dst_out_of_range_error, e keccakf1600_end, e keccakf1600_error, e keccakf1600_highest_slice_address, e keccakf1600_rot_64_min_len_01, e keccakf1600_rot_64_min_len_03, e keccakf1600_rot_64_min_len_11, e keccakf1600_rot_64_min_len_13, e keccakf1600_rot_64_min_len_20, e keccakf1600_rot_64_min_len_22, e keccakf1600_rot_64_min_len_24, e keccakf1600_rot_64_min_len_31, e keccakf1600_rot_64_min_len_34, e keccakf1600_rot_64_min_len_42, e keccakf1600_rot_len_02, e keccakf1600_rot_len_04, e keccakf1600_rot_len_10, e keccakf1600_rot_len_12, e keccakf1600_rot_len_14, e keccakf1600_rot_len_21, e keccakf1600_rot_len_23, e keccakf1600_rot_len_30, e keccakf1600_rot_len_32, e 
keccakf1600_rot_len_33, e keccakf1600_rot_len_40, e keccakf1600_rot_len_41, e keccakf1600_rot_len_43, e keccakf1600_rot_len_44, e keccakf1600_round_cst, e keccakf1600_sel_slice_read, e keccakf1600_sel_slice_write, e keccakf1600_src_addr, e keccakf1600_src_out_of_range_error, e keccakf1600_state_chi_00, e keccakf1600_state_chi_01, e keccakf1600_state_chi_02, e keccakf1600_state_chi_03, e keccakf1600_state_chi_04, e keccakf1600_state_chi_10, e keccakf1600_state_chi_11, e keccakf1600_state_chi_12, e keccakf1600_state_chi_13, e keccakf1600_state_chi_14, e keccakf1600_state_chi_20, e keccakf1600_state_chi_21, e keccakf1600_state_chi_22, e keccakf1600_state_chi_23, e keccakf1600_state_chi_24, e keccakf1600_state_chi_30, e keccakf1600_state_chi_31, e keccakf1600_state_chi_32, e keccakf1600_state_chi_33, e keccakf1600_state_chi_34, e keccakf1600_state_chi_40, e keccakf1600_state_chi_41, e keccakf1600_state_chi_42, e keccakf1600_state_chi_43, e keccakf1600_state_chi_44, e keccakf1600_state_iota_00, e keccakf1600_state_pi_and_00, e keccakf1600_state_pi_and_01, e keccakf1600_state_pi_and_02, e keccakf1600_state_pi_and_03, e keccakf1600_state_pi_and_04, e keccakf1600_state_pi_and_10, e keccakf1600_state_pi_and_11, e keccakf1600_state_pi_and_12, e keccakf1600_state_pi_and_13, e keccakf1600_state_pi_and_14, e keccakf1600_state_pi_and_20, e keccakf1600_state_pi_and_21, e keccakf1600_state_pi_and_22, e keccakf1600_state_pi_and_23, e keccakf1600_state_pi_and_24, e keccakf1600_state_pi_and_30, e keccakf1600_state_pi_and_31, e keccakf1600_state_pi_and_32, e keccakf1600_state_pi_and_33, e keccakf1600_state_pi_and_34, e keccakf1600_state_pi_and_40, e keccakf1600_state_pi_and_41, e keccakf1600_state_pi_and_42, e keccakf1600_state_pi_and_43, e keccakf1600_state_pi_and_44, e keccakf1600_state_pi_not_00, e keccakf1600_state_pi_not_01, e keccakf1600_state_pi_not_02, e keccakf1600_state_pi_not_03, e keccakf1600_state_pi_not_04, e keccakf1600_state_pi_not_10, e keccakf1600_state_pi_not_11, e 
keccakf1600_state_pi_not_12, e keccakf1600_state_pi_not_13, e keccakf1600_state_pi_not_14, e keccakf1600_state_pi_not_20, e keccakf1600_state_pi_not_21, e keccakf1600_state_pi_not_22, e keccakf1600_state_pi_not_23, e keccakf1600_state_pi_not_24, e keccakf1600_state_pi_not_30, e keccakf1600_state_pi_not_31, e keccakf1600_state_pi_not_32, e keccakf1600_state_pi_not_33, e keccakf1600_state_pi_not_34, e keccakf1600_state_pi_not_40, e keccakf1600_state_pi_not_41, e keccakf1600_state_pi_not_42, e keccakf1600_state_pi_not_43, e keccakf1600_state_pi_not_44, e keccakf1600_state_rho_01, e keccakf1600_state_rho_02, e keccakf1600_state_rho_03, e keccakf1600_state_rho_04, e keccakf1600_state_rho_10, e keccakf1600_state_rho_11, e keccakf1600_state_rho_12, e keccakf1600_state_rho_13, e keccakf1600_state_rho_14, e keccakf1600_state_rho_20, e keccakf1600_state_rho_21, e keccakf1600_state_rho_22, e keccakf1600_state_rho_23, e keccakf1600_state_rho_24, e keccakf1600_state_rho_30, e keccakf1600_state_rho_31, e keccakf1600_state_rho_32, e keccakf1600_state_rho_33, e keccakf1600_state_rho_34, e keccakf1600_state_rho_40, e keccakf1600_state_rho_41, e keccakf1600_state_rho_42, e keccakf1600_state_rho_43, e keccakf1600_state_rho_44, e keccakf1600_state_theta_00, e keccakf1600_state_theta_01, e keccakf1600_state_theta_02, e keccakf1600_state_theta_03, e keccakf1600_state_theta_04, e keccakf1600_state_theta_10, e keccakf1600_state_theta_11, e keccakf1600_state_theta_12, e keccakf1600_state_theta_13, e keccakf1600_state_theta_14, e keccakf1600_state_theta_20, e keccakf1600_state_theta_21, e keccakf1600_state_theta_22, e keccakf1600_state_theta_23, e keccakf1600_state_theta_24, e keccakf1600_state_theta_30, e keccakf1600_state_theta_31, e keccakf1600_state_theta_32, e keccakf1600_state_theta_33, e keccakf1600_state_theta_34, e keccakf1600_state_theta_40, e keccakf1600_state_theta_41, e keccakf1600_state_theta_42, e keccakf1600_state_theta_43, e keccakf1600_state_theta_44, e 
keccakf1600_state_theta_hi_02, e keccakf1600_state_theta_hi_04, e keccakf1600_state_theta_hi_10, e keccakf1600_state_theta_hi_12, e keccakf1600_state_theta_hi_14, e keccakf1600_state_theta_hi_21, e keccakf1600_state_theta_hi_23, e keccakf1600_state_theta_hi_30, e keccakf1600_state_theta_hi_32, e keccakf1600_state_theta_hi_33, e keccakf1600_state_theta_hi_40, e keccakf1600_state_theta_hi_41, e keccakf1600_state_theta_hi_43, e keccakf1600_state_theta_hi_44, e keccakf1600_state_theta_low_01, e keccakf1600_state_theta_low_03, e keccakf1600_state_theta_low_11, e keccakf1600_state_theta_low_13, e keccakf1600_state_theta_low_20, e keccakf1600_state_theta_low_22, e keccakf1600_state_theta_low_24, e keccakf1600_state_theta_low_31, e keccakf1600_state_theta_low_34, e keccakf1600_state_theta_low_42, e keccakf1600_tag_error, e keccakf1600_tag_u64, e keccakf1600_theta_combined_xor_0, e keccakf1600_theta_combined_xor_1, e keccakf1600_theta_combined_xor_2, e keccakf1600_theta_combined_xor_3, e keccakf1600_theta_combined_xor_4, e keccakf1600_theta_xor_01, e keccakf1600_theta_xor_02, e keccakf1600_theta_xor_03, e keccakf1600_theta_xor_11, e keccakf1600_theta_xor_12, e keccakf1600_theta_xor_13, e keccakf1600_theta_xor_21, e keccakf1600_theta_xor_22, e keccakf1600_theta_xor_23, e keccakf1600_theta_xor_31, e keccakf1600_theta_xor_32, e keccakf1600_theta_xor_33, e keccakf1600_theta_xor_41, e keccakf1600_theta_xor_42, e keccakf1600_theta_xor_43, e keccakf1600_theta_xor_row_0, e keccakf1600_theta_xor_row_1, e keccakf1600_theta_xor_row_2, e keccakf1600_theta_xor_row_3, e keccakf1600_theta_xor_row_4, e keccakf1600_theta_xor_row_msb_0, e keccakf1600_theta_xor_row_msb_1, e keccakf1600_theta_xor_row_msb_2, e keccakf1600_theta_xor_row_msb_3, e keccakf1600_theta_xor_row_msb_4, e keccakf1600_theta_xor_row_rotl1_0, e keccakf1600_theta_xor_row_rotl1_1, e keccakf1600_theta_xor_row_rotl1_2, e keccakf1600_theta_xor_row_rotl1_3, e keccakf1600_theta_xor_row_rotl1_4, e 
l1_to_l2_message_tree_check_exists, e l1_to_l2_message_tree_check_l1_to_l2_message_tree_height, e l1_to_l2_message_tree_check_leaf_index, e l1_to_l2_message_tree_check_leaf_value, e l1_to_l2_message_tree_check_leaf_value_msg_hash_diff_inv, e l1_to_l2_message_tree_check_merkle_hash_separator, e l1_to_l2_message_tree_check_msg_hash, e l1_to_l2_message_tree_check_root, e l1_to_l2_message_tree_check_sel, e memory_diff, e memory_glob_addr_diff_inv, e memory_last_access, e memory_limb_0_, e memory_limb_1_, e memory_limb_2_, e memory_max_bits, e memory_sel_addressing_base, e memory_sel_addressing_indirect_0_, e memory_sel_addressing_indirect_1_, e memory_sel_addressing_indirect_2_, e memory_sel_addressing_indirect_3_, e memory_sel_addressing_indirect_4_, e memory_sel_addressing_indirect_5_, e memory_sel_addressing_indirect_6_, e memory_sel_data_copy_read, e memory_sel_data_copy_write, e memory_sel_ecc_write_0_, e memory_sel_ecc_write_1_, e memory_sel_ecc_write_2_, e memory_sel_get_contract_instance_exists_write, e memory_sel_get_contract_instance_member_write, e memory_sel_keccak, e memory_sel_poseidon2_read_0_, e memory_sel_poseidon2_read_1_, e memory_sel_poseidon2_read_2_, e memory_sel_poseidon2_read_3_, e memory_sel_poseidon2_write_0_, e memory_sel_poseidon2_write_1_, e memory_sel_poseidon2_write_2_, e memory_sel_poseidon2_write_3_, e memory_sel_public_log_read, e memory_sel_register_op_0_, e memory_sel_register_op_1_, e memory_sel_register_op_2_, e memory_sel_register_op_3_, e memory_sel_register_op_4_, e memory_sel_register_op_5_, e memory_sel_rng_chk, e memory_sel_rng_write, e memory_sel_sha256_op_0_, e memory_sel_sha256_op_1_, e memory_sel_sha256_op_2_, e memory_sel_sha256_op_3_, e memory_sel_sha256_op_4_, e memory_sel_sha256_op_5_, e memory_sel_sha256_op_6_, e memory_sel_sha256_op_7_, e memory_sel_sha256_read, e memory_sel_tag_is_ff, e memory_sel_to_radix_write, e memory_tag_ff_diff_inv, e merkle_check_const_three, e merkle_check_end, e merkle_check_index_is_even, 
e merkle_check_path_len_min_one_inv, e merkle_check_read_left_node, e merkle_check_read_output_hash, e merkle_check_read_right_node, e merkle_check_sibling, e merkle_check_write_left_node, e merkle_check_write_output_hash, e merkle_check_write_right_node, e note_hash_tree_check_address, e note_hash_tree_check_const_three, e note_hash_tree_check_discard, e note_hash_tree_check_exists, e note_hash_tree_check_first_nullifier, e note_hash_tree_check_first_nullifier_pi_index, e note_hash_tree_check_leaf_index, e note_hash_tree_check_merkle_hash_separator, e note_hash_tree_check_next_leaf_value, e note_hash_tree_check_next_root, e note_hash_tree_check_nonce, e note_hash_tree_check_nonce_separator, e note_hash_tree_check_note_hash, e note_hash_tree_check_note_hash_index, e note_hash_tree_check_note_hash_tree_height, e note_hash_tree_check_prev_leaf_value, e note_hash_tree_check_prev_leaf_value_unique_note_hash_diff_inv, e note_hash_tree_check_prev_root, e note_hash_tree_check_public_inputs_index, e note_hash_tree_check_sel, e note_hash_tree_check_sel_silo, e note_hash_tree_check_sel_unique, e note_hash_tree_check_sel_write_to_public_inputs, e note_hash_tree_check_siloed_note_hash, e note_hash_tree_check_siloing_separator, e note_hash_tree_check_unique_note_hash, e note_hash_tree_check_unique_note_hash_separator, e note_hash_tree_check_write, e poseidon2_hash_b_0, e poseidon2_hash_b_1, e poseidon2_hash_b_2, e poseidon2_hash_b_3, e poseidon2_hash_end, e poseidon2_hash_input_len, e poseidon2_hash_num_perm_rounds_rem_min_one_inv, e poseidon2_hash_padding, e poseidon2_perm_B_10_0, e poseidon2_perm_B_10_1, e poseidon2_perm_B_10_2, e poseidon2_perm_B_10_3, e poseidon2_perm_B_11_0, e poseidon2_perm_B_11_1, e poseidon2_perm_B_11_2, e poseidon2_perm_B_11_3, e poseidon2_perm_B_12_0, e poseidon2_perm_B_12_1, e poseidon2_perm_B_12_2, e poseidon2_perm_B_12_3, e poseidon2_perm_B_13_0, e poseidon2_perm_B_13_1, e poseidon2_perm_B_13_2, e poseidon2_perm_B_13_3, e poseidon2_perm_B_14_0, e 
poseidon2_perm_B_14_1, e poseidon2_perm_B_14_2, e poseidon2_perm_B_14_3, e poseidon2_perm_B_15_0, e poseidon2_perm_B_15_1, e poseidon2_perm_B_15_2, e poseidon2_perm_B_15_3, e poseidon2_perm_B_16_0, e poseidon2_perm_B_16_1, e poseidon2_perm_B_16_2, e poseidon2_perm_B_16_3, e poseidon2_perm_B_17_0, e poseidon2_perm_B_17_1, e poseidon2_perm_B_17_2, e poseidon2_perm_B_17_3, e poseidon2_perm_B_18_0, e poseidon2_perm_B_18_1, e poseidon2_perm_B_18_2, e poseidon2_perm_B_18_3, e poseidon2_perm_B_19_0, e poseidon2_perm_B_19_1, e poseidon2_perm_B_19_2, e poseidon2_perm_B_19_3, e poseidon2_perm_B_20_0, e poseidon2_perm_B_20_1, e poseidon2_perm_B_20_2, e poseidon2_perm_B_20_3, e poseidon2_perm_B_21_0, e poseidon2_perm_B_21_1, e poseidon2_perm_B_21_2, e poseidon2_perm_B_21_3, e poseidon2_perm_B_22_0, e poseidon2_perm_B_22_1, e poseidon2_perm_B_22_2, e poseidon2_perm_B_22_3, e poseidon2_perm_B_23_0, e poseidon2_perm_B_23_1, e poseidon2_perm_B_23_2, e poseidon2_perm_B_23_3, e poseidon2_perm_B_24_0, e poseidon2_perm_B_24_1, e poseidon2_perm_B_24_2, e poseidon2_perm_B_24_3, e poseidon2_perm_B_25_0, e poseidon2_perm_B_25_1, e poseidon2_perm_B_25_2, e poseidon2_perm_B_25_3, e poseidon2_perm_B_26_0, e poseidon2_perm_B_26_1, e poseidon2_perm_B_26_2, e poseidon2_perm_B_26_3, e poseidon2_perm_B_27_0, e poseidon2_perm_B_27_1, e poseidon2_perm_B_27_2, e poseidon2_perm_B_27_3, e poseidon2_perm_B_28_0, e poseidon2_perm_B_28_1, e poseidon2_perm_B_28_2, e poseidon2_perm_B_28_3, e poseidon2_perm_B_29_0, e poseidon2_perm_B_29_1, e poseidon2_perm_B_29_2, e poseidon2_perm_B_29_3, e poseidon2_perm_B_30_0, e poseidon2_perm_B_30_1, e poseidon2_perm_B_30_2, e poseidon2_perm_B_30_3, e poseidon2_perm_B_31_0, e poseidon2_perm_B_31_1, e poseidon2_perm_B_31_2, e poseidon2_perm_B_31_3, e poseidon2_perm_B_32_0, e poseidon2_perm_B_32_1, e poseidon2_perm_B_32_2, e poseidon2_perm_B_32_3, e poseidon2_perm_B_33_0, e poseidon2_perm_B_33_1, e poseidon2_perm_B_33_2, e poseidon2_perm_B_33_3, e poseidon2_perm_B_34_0, e 
poseidon2_perm_B_34_1, e poseidon2_perm_B_34_2, e poseidon2_perm_B_34_3, e poseidon2_perm_B_35_0, e poseidon2_perm_B_35_1, e poseidon2_perm_B_35_2, e poseidon2_perm_B_35_3, e poseidon2_perm_B_36_0, e poseidon2_perm_B_36_1, e poseidon2_perm_B_36_2, e poseidon2_perm_B_36_3, e poseidon2_perm_B_37_0, e poseidon2_perm_B_37_1, e poseidon2_perm_B_37_2, e poseidon2_perm_B_37_3, e poseidon2_perm_B_38_0, e poseidon2_perm_B_38_1, e poseidon2_perm_B_38_2, e poseidon2_perm_B_38_3, e poseidon2_perm_B_39_0, e poseidon2_perm_B_39_1, e poseidon2_perm_B_39_2, e poseidon2_perm_B_39_3, e poseidon2_perm_B_40_0, e poseidon2_perm_B_40_1, e poseidon2_perm_B_40_2, e poseidon2_perm_B_40_3, e poseidon2_perm_B_41_0, e poseidon2_perm_B_41_1, e poseidon2_perm_B_41_2, e poseidon2_perm_B_41_3, e poseidon2_perm_B_42_0, e poseidon2_perm_B_42_1, e poseidon2_perm_B_42_2, e poseidon2_perm_B_42_3, e poseidon2_perm_B_43_0, e poseidon2_perm_B_43_1, e poseidon2_perm_B_43_2, e poseidon2_perm_B_43_3, e poseidon2_perm_B_44_0, e poseidon2_perm_B_44_1, e poseidon2_perm_B_44_2, e poseidon2_perm_B_44_3, e poseidon2_perm_B_45_0, e poseidon2_perm_B_45_1, e poseidon2_perm_B_45_2, e poseidon2_perm_B_45_3, e poseidon2_perm_B_46_0, e poseidon2_perm_B_46_1, e poseidon2_perm_B_46_2, e poseidon2_perm_B_46_3, e poseidon2_perm_B_47_0, e poseidon2_perm_B_47_1, e poseidon2_perm_B_47_2, e poseidon2_perm_B_47_3, e poseidon2_perm_B_48_0, e poseidon2_perm_B_48_1, e poseidon2_perm_B_48_2, e poseidon2_perm_B_48_3, e poseidon2_perm_B_49_0, e poseidon2_perm_B_49_1, e poseidon2_perm_B_49_2, e poseidon2_perm_B_49_3, e poseidon2_perm_B_4_0, e poseidon2_perm_B_4_1, e poseidon2_perm_B_4_2, e poseidon2_perm_B_4_3, e poseidon2_perm_B_50_0, e poseidon2_perm_B_50_1, e poseidon2_perm_B_50_2, e poseidon2_perm_B_50_3, e poseidon2_perm_B_51_0, e poseidon2_perm_B_51_1, e poseidon2_perm_B_51_2, e poseidon2_perm_B_51_3, e poseidon2_perm_B_52_0, e poseidon2_perm_B_52_1, e poseidon2_perm_B_52_2, e poseidon2_perm_B_52_3, e poseidon2_perm_B_53_0, e 
poseidon2_perm_B_53_1, e poseidon2_perm_B_53_2, e poseidon2_perm_B_53_3, e poseidon2_perm_B_54_0, e poseidon2_perm_B_54_1, e poseidon2_perm_B_54_2, e poseidon2_perm_B_54_3, e poseidon2_perm_B_55_0, e poseidon2_perm_B_55_1, e poseidon2_perm_B_55_2, e poseidon2_perm_B_55_3, e poseidon2_perm_B_56_0, e poseidon2_perm_B_56_1, e poseidon2_perm_B_56_2, e poseidon2_perm_B_56_3, e poseidon2_perm_B_57_0, e poseidon2_perm_B_57_1, e poseidon2_perm_B_57_2, e poseidon2_perm_B_57_3, e poseidon2_perm_B_58_0, e poseidon2_perm_B_58_1, e poseidon2_perm_B_58_2, e poseidon2_perm_B_58_3, e poseidon2_perm_B_59_0, e poseidon2_perm_B_59_1, e poseidon2_perm_B_59_2, e poseidon2_perm_B_59_3, e poseidon2_perm_B_5_0, e poseidon2_perm_B_5_1, e poseidon2_perm_B_5_2, e poseidon2_perm_B_5_3, e poseidon2_perm_B_6_0, e poseidon2_perm_B_6_1, e poseidon2_perm_B_6_2, e poseidon2_perm_B_6_3, e poseidon2_perm_B_7_0, e poseidon2_perm_B_7_1, e poseidon2_perm_B_7_2, e poseidon2_perm_B_7_3, e poseidon2_perm_B_8_0, e poseidon2_perm_B_8_1, e poseidon2_perm_B_8_2, e poseidon2_perm_B_8_3, e poseidon2_perm_B_9_0, e poseidon2_perm_B_9_1, e poseidon2_perm_B_9_2, e poseidon2_perm_B_9_3, e poseidon2_perm_EXT_LAYER_4, e poseidon2_perm_EXT_LAYER_5, e poseidon2_perm_EXT_LAYER_6, e poseidon2_perm_EXT_LAYER_7, e poseidon2_perm_T_0_4, e poseidon2_perm_T_0_5, e poseidon2_perm_T_0_6, e poseidon2_perm_T_0_7, e poseidon2_perm_T_1_4, e poseidon2_perm_T_1_5, e poseidon2_perm_T_1_6, e poseidon2_perm_T_1_7, e poseidon2_perm_T_2_4, e poseidon2_perm_T_2_5, e poseidon2_perm_T_2_6, e poseidon2_perm_T_2_7, e poseidon2_perm_T_3_4, e poseidon2_perm_T_3_5, e poseidon2_perm_T_3_6, e poseidon2_perm_T_3_7, e poseidon2_perm_T_60_4, e poseidon2_perm_T_60_5, e poseidon2_perm_T_60_6, e poseidon2_perm_T_60_7, e poseidon2_perm_T_61_4, e poseidon2_perm_T_61_5, e poseidon2_perm_T_61_6, e poseidon2_perm_T_61_7, e poseidon2_perm_T_62_4, e poseidon2_perm_T_62_5, e poseidon2_perm_T_62_6, e poseidon2_perm_T_62_7, e poseidon2_perm_T_63_4, e 
poseidon2_perm_T_63_5, e poseidon2_perm_T_63_6, e poseidon2_perm_T_63_7, e poseidon2_perm_a_0, e poseidon2_perm_a_1, e poseidon2_perm_a_2, e poseidon2_perm_a_3, e poseidon2_perm_b_0, e poseidon2_perm_b_1, e poseidon2_perm_b_2, e poseidon2_perm_b_3, e poseidon2_perm_mem_batch_tag_inv, e poseidon2_perm_mem_err, e poseidon2_perm_mem_execution_clk, e poseidon2_perm_mem_input_0_, e poseidon2_perm_mem_input_1_, e poseidon2_perm_mem_input_2_, e poseidon2_perm_mem_input_3_, e poseidon2_perm_mem_input_tag_0_, e poseidon2_perm_mem_input_tag_1_, e poseidon2_perm_mem_input_tag_2_, e poseidon2_perm_mem_input_tag_3_, e poseidon2_perm_mem_max_mem_addr, e poseidon2_perm_mem_output_0_, e poseidon2_perm_mem_output_1_, e poseidon2_perm_mem_output_2_, e poseidon2_perm_mem_output_3_, e poseidon2_perm_mem_read_address_0_, e poseidon2_perm_mem_read_address_1_, e poseidon2_perm_mem_read_address_2_, e poseidon2_perm_mem_read_address_3_, e poseidon2_perm_mem_sel, e poseidon2_perm_mem_sel_dst_out_of_range_err, e poseidon2_perm_mem_sel_invalid_tag_err, e poseidon2_perm_mem_sel_should_exec, e poseidon2_perm_mem_sel_should_read_mem, e poseidon2_perm_mem_sel_src_out_of_range_err, e poseidon2_perm_mem_space_id, e poseidon2_perm_mem_write_address_0_, e poseidon2_perm_mem_write_address_1_, e poseidon2_perm_mem_write_address_2_, e poseidon2_perm_mem_write_address_3_, e poseidon2_perm_sel, e public_data_check_address, e public_data_check_clk_diff_hi, e public_data_check_clk_diff_lo, e public_data_check_const_four, e public_data_check_const_three, e public_data_check_discard, e public_data_check_end, e public_data_check_final_value, e public_data_check_intermediate_root, e public_data_check_leaf_not_exists, e public_data_check_leaf_slot, e public_data_check_leaf_slot_low_leaf_slot_diff_inv, e public_data_check_length_pi_idx, e public_data_check_low_leaf_hash, e public_data_check_low_leaf_index, e public_data_check_low_leaf_next_index, e public_data_check_low_leaf_next_slot, e 
public_data_check_low_leaf_slot, e public_data_check_low_leaf_value, e public_data_check_merkle_hash_separator, e public_data_check_new_leaf_hash, e public_data_check_next_slot_inv, e public_data_check_next_slot_is_nonzero, e public_data_check_non_discarded_write, e public_data_check_non_protocol_write, e public_data_check_not_end, e public_data_check_protocol_write, e public_data_check_public_data_writes_length, e public_data_check_root, e public_data_check_sel_write_to_public_inputs, e public_data_check_should_insert, e public_data_check_siloing_separator, e public_data_check_slot, e public_data_check_tree_height, e public_data_check_tree_size_after_write, e public_data_check_tree_size_before_write, e public_data_check_updated_low_leaf_hash, e public_data_check_updated_low_leaf_next_index, e public_data_check_updated_low_leaf_next_slot, e public_data_check_updated_low_leaf_value, e public_data_check_value, e public_data_check_write, e public_data_check_write_root, e public_data_squash_check_clock, e public_data_squash_clk_diff_hi, e public_data_squash_clk_diff_lo, e public_data_squash_leaf_slot_increase, e public_data_squash_value, e range_check_dyn_diff, e range_check_dyn_rng_chk_bits, e range_check_dyn_rng_chk_pow_2, e range_check_is_lte_u112, e range_check_is_lte_u128, e range_check_is_lte_u16, e range_check_is_lte_u32, e range_check_is_lte_u48, e range_check_is_lte_u64, e range_check_is_lte_u80, e range_check_is_lte_u96, e range_check_rng_chk_bits, e range_check_sel, e range_check_sel_alu, e range_check_sel_gt, e range_check_sel_keccak, e range_check_sel_memory, e range_check_sel_r0_16_bit_rng_lookup, e range_check_sel_r1_16_bit_rng_lookup, e range_check_sel_r2_16_bit_rng_lookup, e range_check_sel_r3_16_bit_rng_lookup, e range_check_sel_r4_16_bit_rng_lookup, e range_check_sel_r5_16_bit_rng_lookup, e range_check_sel_r6_16_bit_rng_lookup, e range_check_u16_r0, e range_check_u16_r1, e range_check_u16_r2, e range_check_u16_r3, e range_check_u16_r4, e 
range_check_u16_r5, e range_check_u16_r6, e range_check_u16_r7, e range_check_value, e scalar_mul_bit, e scalar_mul_const_two, e scalar_mul_end, e scalar_mul_sel_not_end, e scalar_mul_should_add, e sha256_a_and_b, e sha256_a_and_b_xor_a_and_c, e sha256_a_and_c, e sha256_a_rotr_13, e sha256_a_rotr_2, e sha256_a_rotr_22, e sha256_a_rotr_2_xor_a_rotr_13, e sha256_and_op_id, e sha256_b_and_c, e sha256_batch_tag_inv, e sha256_ch, e sha256_computed_w_lhs, e sha256_computed_w_rhs, e sha256_e_and_f, e sha256_e_rotr_11, e sha256_e_rotr_25, e sha256_e_rotr_6, e sha256_e_rotr_6_xor_e_rotr_11, e sha256_end, e sha256_err, e sha256_input, e sha256_input_rounds_rem_inv, e sha256_input_tag, e sha256_input_tag_diff_inv, e sha256_last, e sha256_lhs_w_10, e sha256_lhs_w_3, e sha256_maj, e sha256_max_input_addr, e sha256_max_mem_addr, e sha256_max_output_addr, e sha256_max_state_addr, e sha256_mem_out_of_range_err, e sha256_memory_address_0_, e sha256_memory_address_1_, e sha256_memory_address_2_, e sha256_memory_address_3_, e sha256_memory_address_4_, e sha256_memory_address_5_, e sha256_memory_address_6_, e sha256_memory_address_7_, e sha256_memory_register_0_, e sha256_memory_register_1_, e sha256_memory_register_2_, e sha256_memory_register_3_, e sha256_memory_register_4_, e sha256_memory_register_5_, e sha256_memory_register_6_, e sha256_memory_register_7_, e sha256_memory_tag_0_, e sha256_memory_tag_1_, e sha256_memory_tag_2_, e sha256_memory_tag_3_, e sha256_memory_tag_4_, e sha256_memory_tag_5_, e sha256_memory_tag_6_, e sha256_memory_tag_7_, e sha256_next_a_lhs, e sha256_next_a_rhs, e sha256_next_e_lhs, e sha256_next_e_rhs, e sha256_not_e, e sha256_not_e_and_g, e sha256_output_a_lhs, e sha256_output_a_rhs, e sha256_output_b_lhs, e sha256_output_b_rhs, e sha256_output_c_lhs, e sha256_output_c_rhs, e sha256_output_d_lhs, e sha256_output_d_rhs, e sha256_output_e_lhs, e sha256_output_e_rhs, e sha256_output_f_lhs, e sha256_output_f_rhs, e sha256_output_g_lhs, e 
sha256_output_g_rhs, e sha256_output_h_lhs, e sha256_output_h_rhs, e sha256_perform_round, e sha256_rhs_a_13, e sha256_rhs_a_2, e sha256_rhs_a_22, e sha256_rhs_e_11, e sha256_rhs_e_25, e sha256_rhs_e_6, e sha256_rhs_w_10, e sha256_rhs_w_17, e sha256_rhs_w_18, e sha256_rhs_w_19, e sha256_rhs_w_3, e sha256_rhs_w_7, e sha256_round_constant, e sha256_round_count, e sha256_rounds_remaining_inv, e sha256_rw, e sha256_s_0, e sha256_s_1, e sha256_sel_compute_w, e sha256_sel_input_out_of_range_err, e sha256_sel_invalid_input_row_tag_err, e sha256_sel_invalid_state_tag_err, e sha256_sel_is_input_round, e sha256_sel_mem_state_or_output, e sha256_sel_output_out_of_range_err, e sha256_sel_read_input_from_memory, e sha256_sel_state_out_of_range_err, e sha256_state_addr, e sha256_two_pow_10, e sha256_two_pow_11, e sha256_two_pow_13, e sha256_two_pow_17, e sha256_two_pow_18, e sha256_two_pow_19, e sha256_two_pow_2, e sha256_two_pow_22, e sha256_two_pow_25, e sha256_two_pow_3, e sha256_two_pow_32, e sha256_two_pow_6, e sha256_two_pow_7, e sha256_u32_tag, e sha256_w, e sha256_w_15_rotr_18, e sha256_w_15_rotr_7, e sha256_w_15_rotr_7_xor_w_15_rotr_18, e sha256_w_2_rotr_17, e sha256_w_2_rotr_17_xor_w_2_rotr_19, e sha256_w_2_rotr_19, e sha256_w_s_0, e sha256_w_s_1, e sha256_xor_op_id, e to_radix_end, e to_radix_found, e to_radix_is_unsafe_limb, e to_radix_limb_p_diff, e to_radix_limb_radix_diff, e to_radix_mem_err, e to_radix_mem_input_validation_error, e to_radix_mem_last, e to_radix_mem_limb_index_to_lookup, e to_radix_mem_limb_value, e to_radix_mem_max_mem_size, e to_radix_mem_num_limbs_inv, e to_radix_mem_num_limbs_minus_one_inv, e to_radix_mem_output_tag, e to_radix_mem_radix_min_two_inv, e to_radix_mem_sel_dst_out_of_range_err, e to_radix_mem_sel_invalid_bitwise_radix, e to_radix_mem_sel_num_limbs_is_zero, e to_radix_mem_sel_radix_eq_2, e to_radix_mem_sel_radix_gt_256_err, e to_radix_mem_sel_radix_lt_2_err, e to_radix_mem_sel_value_is_zero, e to_radix_mem_two, e 
to_radix_mem_two_five_six, e to_radix_mem_value_found, e to_radix_mem_value_inv, e to_radix_mem_write_addr_upper_bound, e to_radix_p_limb, e to_radix_rem_inverse, e to_radix_safety_diff_inverse, e tx_array_length_l2_to_l1_messages_pi_offset, e tx_array_length_note_hashes_pi_offset, e tx_array_length_nullifiers_pi_offset, e tx_calldata_hash, e tx_calldata_size, e tx_const_three, e tx_contract_addr, e tx_dom_sep_public_storage_map_slot, e tx_effective_fee_per_da_gas, e tx_effective_fee_per_l2_gas, e tx_end_phase, e tx_fee_juice_balance_slot, e tx_fee_juice_balances_slot_constant, e tx_fee_juice_contract_address, e tx_fee_payer, e tx_fee_payer_balance, e tx_fee_payer_new_balance, e tx_fee_payer_pi_offset, e tx_fields_length_public_logs_pi_offset, e tx_gas_limit_pi_offset, e tx_gas_used_pi_offset, e tx_is_cleanup, e tx_is_collect_fee, e tx_is_padded, e tx_is_public_call_request, e tx_is_static, e tx_is_tree_insert_phase, e tx_is_tree_padding, e tx_l1_l2_pi_offset, e tx_l2_l1_msg_content, e tx_l2_l1_msg_contract_address, e tx_l2_l1_msg_recipient, e tx_leaf_value, e tx_msg_sender, e tx_next_da_gas_used, e tx_next_da_gas_used_sent_to_enqueued_call, e tx_next_l2_gas_used, e tx_next_l2_gas_used_sent_to_enqueued_call, e tx_next_note_hash_tree_root, e tx_next_note_hash_tree_size, e tx_next_nullifier_tree_root, e tx_next_nullifier_tree_size, e tx_next_num_l2_to_l1_messages, e tx_next_num_note_hashes_emitted, e tx_next_num_nullifiers_emitted, e tx_next_num_public_log_fields, e tx_next_phase_on_revert, e tx_next_public_data_tree_root, e tx_next_public_data_tree_size, e tx_next_retrieved_bytecodes_tree_root, e tx_next_retrieved_bytecodes_tree_size, e tx_next_written_public_data_slots_tree_root, e tx_next_written_public_data_slots_tree_size, e tx_note_hash_pi_offset, e tx_nullifier_limit_error, e tx_nullifier_merkle_separator, e tx_nullifier_pi_offset, e tx_nullifier_tree_height, e tx_prev_da_gas_used_sent_to_enqueued_call, e tx_prev_l2_gas_used_sent_to_enqueued_call, e 
tx_public_data_pi_offset, e tx_read_pi_length_offset, e tx_read_pi_start_offset, e tx_remaining_phase_inv, e tx_remaining_phase_minus_one_inv, e tx_remaining_side_effects_inv, e tx_reverted_pi_offset, e tx_sel_l2_l1_msg_append, e tx_sel_non_revertible_append_l2_l1_msg, e tx_sel_non_revertible_append_note_hash, e tx_sel_non_revertible_append_nullifier, e tx_sel_note_hash_append, e tx_sel_nullifier_append, e tx_sel_process_call_request, e tx_sel_read_phase_length, e tx_sel_read_trees_and_gas_used, e tx_sel_revertible_append_l2_l1_msg, e tx_sel_revertible_append_note_hash, e tx_sel_revertible_append_nullifier, e tx_sel_try_l2_l1_msg_append, e tx_sel_try_note_hash_append, e tx_sel_try_nullifier_append, e tx_setup_phase_value, e tx_should_read_gas_limit, e tx_uint32_max, e tx_write_nullifier_pi_offset, e tx_write_pi_offset, e update_check_address, e update_check_const_three, e update_check_contract_instance_registry_address, e update_check_current_class_id, e update_check_delayed_public_mutable_hash_slot, e update_check_delayed_public_mutable_slot, e update_check_dom_sep_public_storage_map_slot, e update_check_hash_not_zero, e update_check_original_class_id, e update_check_public_data_tree_root, e update_check_sel, e update_check_timestamp, e update_check_timestamp_is_lt_timestamp_of_change, e update_check_timestamp_of_change, e update_check_timestamp_of_change_bit_size, e update_check_timestamp_pi_offset, e update_check_update_hash, e update_check_update_hash_inv, e update_check_update_hi_metadata, e update_check_update_hi_metadata_bit_size, e update_check_update_post_class_id_is_zero, e update_check_update_post_class_inv, e update_check_update_pre_class_id_is_zero, e update_check_update_pre_class_inv, e update_check_update_preimage_metadata, e update_check_update_preimage_post_class_id, e update_check_update_preimage_pre_class_id, e update_check_updated_class_ids_slot, e lookup_range_check_dyn_rng_chk_pow_2_counts, e lookup_range_check_dyn_diff_is_u16_counts, e 
lookup_range_check_r0_is_u16_counts, e lookup_range_check_r1_is_u16_counts, e lookup_range_check_r2_is_u16_counts, e lookup_range_check_r3_is_u16_counts, e lookup_range_check_r4_is_u16_counts, e lookup_range_check_r5_is_u16_counts, e lookup_range_check_r6_is_u16_counts, e lookup_range_check_r7_is_u16_counts, e lookup_ff_gt_a_lo_range_counts, e lookup_ff_gt_a_hi_range_counts, e lookup_gt_gt_range_counts, e lookup_alu_tag_max_bits_value_counts, e lookup_alu_range_check_decomposition_a_lo_counts, e lookup_alu_range_check_decomposition_a_hi_counts, e lookup_alu_range_check_decomposition_b_lo_counts, e lookup_alu_range_check_decomposition_b_hi_counts, e lookup_alu_range_check_mul_c_hi_counts, e lookup_alu_range_check_div_remainder_counts, e lookup_alu_ff_gt_counts, e lookup_alu_int_gt_counts, e lookup_alu_shifts_two_pow_counts, e lookup_alu_large_trunc_canonical_dec_counts, e lookup_alu_range_check_trunc_mid_counts, e lookup_bitwise_integral_tag_length_counts, e lookup_bitwise_byte_operations_counts, e lookup_memory_range_check_limb_0_counts, e lookup_memory_range_check_limb_1_counts, e lookup_memory_range_check_limb_2_counts, e lookup_memory_tag_max_bits_counts, e lookup_memory_range_check_write_tagged_value_counts, e lookup_data_copy_offset_plus_size_is_gt_data_size_counts, e lookup_data_copy_check_src_addr_in_range_counts, e lookup_data_copy_check_dst_addr_in_range_counts, e lookup_data_copy_sel_has_reads_counts, e lookup_data_copy_col_read_counts, e lookup_ecc_mem_check_dst_addr_in_range_counts, e lookup_ecc_mem_input_output_ecc_add_counts, e lookup_keccakf1600_theta_xor_01_counts, e lookup_keccakf1600_theta_xor_02_counts, e lookup_keccakf1600_theta_xor_03_counts, e lookup_keccakf1600_theta_xor_row_0_counts, e lookup_keccakf1600_theta_xor_11_counts, e lookup_keccakf1600_theta_xor_12_counts, e lookup_keccakf1600_theta_xor_13_counts, e lookup_keccakf1600_theta_xor_row_1_counts, e lookup_keccakf1600_theta_xor_21_counts, e lookup_keccakf1600_theta_xor_22_counts, e 
lookup_keccakf1600_theta_xor_23_counts, e lookup_keccakf1600_theta_xor_row_2_counts, e lookup_keccakf1600_theta_xor_31_counts, e lookup_keccakf1600_theta_xor_32_counts, e lookup_keccakf1600_theta_xor_33_counts, e lookup_keccakf1600_theta_xor_row_3_counts, e lookup_keccakf1600_theta_xor_41_counts, e lookup_keccakf1600_theta_xor_42_counts, e lookup_keccakf1600_theta_xor_43_counts, e lookup_keccakf1600_theta_xor_row_4_counts, e lookup_keccakf1600_theta_combined_xor_0_counts, e lookup_keccakf1600_theta_combined_xor_1_counts, e lookup_keccakf1600_theta_combined_xor_2_counts, e lookup_keccakf1600_theta_combined_xor_3_counts, e lookup_keccakf1600_theta_combined_xor_4_counts, e lookup_keccakf1600_state_theta_00_counts, e lookup_keccakf1600_state_theta_01_counts, e lookup_keccakf1600_state_theta_02_counts, e lookup_keccakf1600_state_theta_03_counts, e lookup_keccakf1600_state_theta_04_counts, e lookup_keccakf1600_state_theta_10_counts, e lookup_keccakf1600_state_theta_11_counts, e lookup_keccakf1600_state_theta_12_counts, e lookup_keccakf1600_state_theta_13_counts, e lookup_keccakf1600_state_theta_14_counts, e lookup_keccakf1600_state_theta_20_counts, e lookup_keccakf1600_state_theta_21_counts, e lookup_keccakf1600_state_theta_22_counts, e lookup_keccakf1600_state_theta_23_counts, e lookup_keccakf1600_state_theta_24_counts, e lookup_keccakf1600_state_theta_30_counts, e lookup_keccakf1600_state_theta_31_counts, e lookup_keccakf1600_state_theta_32_counts, e lookup_keccakf1600_state_theta_33_counts, e lookup_keccakf1600_state_theta_34_counts, e lookup_keccakf1600_state_theta_40_counts, e lookup_keccakf1600_state_theta_41_counts, e lookup_keccakf1600_state_theta_42_counts, e lookup_keccakf1600_state_theta_43_counts, e lookup_keccakf1600_state_theta_44_counts, e lookup_keccakf1600_theta_limb_02_range_counts, e lookup_keccakf1600_theta_limb_04_range_counts, e lookup_keccakf1600_theta_limb_10_range_counts, e lookup_keccakf1600_theta_limb_12_range_counts, e 
lookup_keccakf1600_theta_limb_14_range_counts, e lookup_keccakf1600_theta_limb_21_range_counts, e lookup_keccakf1600_theta_limb_23_range_counts, e lookup_keccakf1600_theta_limb_30_range_counts, e lookup_keccakf1600_theta_limb_32_range_counts, e lookup_keccakf1600_theta_limb_33_range_counts, e lookup_keccakf1600_theta_limb_40_range_counts, e lookup_keccakf1600_theta_limb_41_range_counts, e lookup_keccakf1600_theta_limb_43_range_counts, e lookup_keccakf1600_theta_limb_44_range_counts, e lookup_keccakf1600_theta_limb_01_range_counts, e lookup_keccakf1600_theta_limb_03_range_counts, e lookup_keccakf1600_theta_limb_11_range_counts, e lookup_keccakf1600_theta_limb_13_range_counts, e lookup_keccakf1600_theta_limb_20_range_counts, e lookup_keccakf1600_theta_limb_22_range_counts, e lookup_keccakf1600_theta_limb_24_range_counts, e lookup_keccakf1600_theta_limb_31_range_counts, e lookup_keccakf1600_theta_limb_34_range_counts, e lookup_keccakf1600_theta_limb_42_range_counts, e lookup_keccakf1600_state_pi_and_00_counts, e lookup_keccakf1600_state_pi_and_01_counts, e lookup_keccakf1600_state_pi_and_02_counts, e lookup_keccakf1600_state_pi_and_03_counts, e lookup_keccakf1600_state_pi_and_04_counts, e lookup_keccakf1600_state_pi_and_10_counts, e lookup_keccakf1600_state_pi_and_11_counts, e lookup_keccakf1600_state_pi_and_12_counts, e lookup_keccakf1600_state_pi_and_13_counts, e lookup_keccakf1600_state_pi_and_14_counts, e lookup_keccakf1600_state_pi_and_20_counts, e lookup_keccakf1600_state_pi_and_21_counts, e lookup_keccakf1600_state_pi_and_22_counts, e lookup_keccakf1600_state_pi_and_23_counts, e lookup_keccakf1600_state_pi_and_24_counts, e lookup_keccakf1600_state_pi_and_30_counts, e lookup_keccakf1600_state_pi_and_31_counts, e lookup_keccakf1600_state_pi_and_32_counts, e lookup_keccakf1600_state_pi_and_33_counts, e lookup_keccakf1600_state_pi_and_34_counts, e lookup_keccakf1600_state_pi_and_40_counts, e lookup_keccakf1600_state_pi_and_41_counts, e 
lookup_keccakf1600_state_pi_and_42_counts, e lookup_keccakf1600_state_pi_and_43_counts, e lookup_keccakf1600_state_pi_and_44_counts, e lookup_keccakf1600_state_chi_00_counts, e lookup_keccakf1600_state_chi_01_counts, e lookup_keccakf1600_state_chi_02_counts, e lookup_keccakf1600_state_chi_03_counts, e lookup_keccakf1600_state_chi_04_counts, e lookup_keccakf1600_state_chi_10_counts, e lookup_keccakf1600_state_chi_11_counts, e lookup_keccakf1600_state_chi_12_counts, e lookup_keccakf1600_state_chi_13_counts, e lookup_keccakf1600_state_chi_14_counts, e lookup_keccakf1600_state_chi_20_counts, e lookup_keccakf1600_state_chi_21_counts, e lookup_keccakf1600_state_chi_22_counts, e lookup_keccakf1600_state_chi_23_counts, e lookup_keccakf1600_state_chi_24_counts, e lookup_keccakf1600_state_chi_30_counts, e lookup_keccakf1600_state_chi_31_counts, e lookup_keccakf1600_state_chi_32_counts, e lookup_keccakf1600_state_chi_33_counts, e lookup_keccakf1600_state_chi_34_counts, e lookup_keccakf1600_state_chi_40_counts, e lookup_keccakf1600_state_chi_41_counts, e lookup_keccakf1600_state_chi_42_counts, e lookup_keccakf1600_state_chi_43_counts, e lookup_keccakf1600_state_chi_44_counts, e lookup_keccakf1600_round_cst_counts, e lookup_keccakf1600_state_iota_00_counts, e lookup_keccakf1600_src_out_of_range_toggle_counts, e lookup_keccakf1600_dst_out_of_range_toggle_counts, e lookup_poseidon2_mem_check_src_addr_in_range_counts, e lookup_poseidon2_mem_check_dst_addr_in_range_counts, e lookup_poseidon2_mem_input_output_poseidon2_perm_counts, e lookup_to_radix_limb_range_counts, e lookup_to_radix_limb_less_than_radix_range_counts, e lookup_to_radix_fetch_safe_limbs_counts, e lookup_to_radix_fetch_p_limb_counts, e lookup_to_radix_limb_p_diff_range_counts, e lookup_scalar_mul_to_radix_counts, e lookup_scalar_mul_double_counts, e lookup_scalar_mul_add_counts, e lookup_sha256_range_comp_w_lhs_counts, e lookup_sha256_range_comp_w_rhs_counts, e lookup_sha256_range_rhs_w_7_counts, e 
lookup_sha256_range_rhs_w_18_counts, e lookup_sha256_range_rhs_w_3_counts, e lookup_sha256_w_s_0_xor_0_counts, e lookup_sha256_w_s_0_xor_1_counts, e lookup_sha256_range_rhs_w_17_counts, e lookup_sha256_range_rhs_w_19_counts, e lookup_sha256_range_rhs_w_10_counts, e lookup_sha256_w_s_1_xor_0_counts, e lookup_sha256_w_s_1_xor_1_counts, e lookup_sha256_range_rhs_e_6_counts, e lookup_sha256_range_rhs_e_11_counts, e lookup_sha256_range_rhs_e_25_counts, e lookup_sha256_s_1_xor_0_counts, e lookup_sha256_s_1_xor_1_counts, e lookup_sha256_ch_and_0_counts, e lookup_sha256_ch_and_1_counts, e lookup_sha256_ch_xor_counts, e lookup_sha256_round_constant_counts, e lookup_sha256_range_rhs_a_2_counts, e lookup_sha256_range_rhs_a_13_counts, e lookup_sha256_range_rhs_a_22_counts, e lookup_sha256_s_0_xor_0_counts, e lookup_sha256_s_0_xor_1_counts, e lookup_sha256_maj_and_0_counts, e lookup_sha256_maj_and_1_counts, e lookup_sha256_maj_and_2_counts, e lookup_sha256_maj_xor_0_counts, e lookup_sha256_maj_xor_1_counts, e lookup_sha256_range_comp_next_a_lhs_counts, e lookup_sha256_range_comp_next_a_rhs_counts, e lookup_sha256_range_comp_next_e_lhs_counts, e lookup_sha256_range_comp_next_e_rhs_counts, e lookup_sha256_range_comp_a_rhs_counts, e lookup_sha256_range_comp_b_rhs_counts, e lookup_sha256_range_comp_c_rhs_counts, e lookup_sha256_range_comp_d_rhs_counts, e lookup_sha256_range_comp_e_rhs_counts, e lookup_sha256_range_comp_f_rhs_counts, e lookup_sha256_range_comp_g_rhs_counts, e lookup_sha256_range_comp_h_rhs_counts, e lookup_sha256_mem_check_state_addr_in_range_counts, e lookup_sha256_mem_check_input_addr_in_range_counts, e lookup_sha256_mem_check_output_addr_in_range_counts, e lookup_to_radix_mem_check_dst_addr_in_range_counts, e lookup_to_radix_mem_check_radix_lt_2_counts, e lookup_to_radix_mem_check_radix_gt_256_counts, e lookup_to_radix_mem_input_output_to_radix_counts, e lookup_poseidon2_hash_poseidon2_perm_counts, e 
lookup_address_derivation_salted_initialization_hash_poseidon2_0_counts, e lookup_address_derivation_salted_initialization_hash_poseidon2_1_counts, e lookup_address_derivation_partial_address_poseidon2_counts, e lookup_address_derivation_public_keys_hash_poseidon2_0_counts, e lookup_address_derivation_public_keys_hash_poseidon2_1_counts, e lookup_address_derivation_public_keys_hash_poseidon2_2_counts, e lookup_address_derivation_public_keys_hash_poseidon2_3_counts, e lookup_address_derivation_public_keys_hash_poseidon2_4_counts, e lookup_address_derivation_preaddress_poseidon2_counts, e lookup_address_derivation_preaddress_scalar_mul_counts, e lookup_address_derivation_address_ecadd_counts, e lookup_bc_decomposition_bytes_are_bytes_counts, e lookup_bc_hashing_poseidon2_hash_counts, e lookup_merkle_check_merkle_poseidon2_read_counts, e lookup_merkle_check_merkle_poseidon2_write_counts, e lookup_indexed_tree_check_silo_poseidon2_counts, e lookup_indexed_tree_check_low_leaf_value_validation_counts, e lookup_indexed_tree_check_low_leaf_next_value_validation_counts, e lookup_indexed_tree_check_low_leaf_poseidon2_counts, e lookup_indexed_tree_check_updated_low_leaf_poseidon2_counts, e lookup_indexed_tree_check_low_leaf_merkle_check_counts, e lookup_indexed_tree_check_new_leaf_poseidon2_counts, e lookup_indexed_tree_check_new_leaf_merkle_check_counts, e lookup_indexed_tree_check_write_value_to_public_inputs_counts, e lookup_public_data_squash_leaf_slot_increase_ff_gt_counts, e lookup_public_data_squash_clk_diff_range_lo_counts, e lookup_public_data_squash_clk_diff_range_hi_counts, e lookup_public_data_check_clk_diff_range_lo_counts, e lookup_public_data_check_clk_diff_range_hi_counts, e lookup_public_data_check_silo_poseidon2_counts, e lookup_public_data_check_low_leaf_slot_validation_counts, e lookup_public_data_check_low_leaf_next_slot_validation_counts, e lookup_public_data_check_low_leaf_poseidon2_0_counts, e lookup_public_data_check_low_leaf_poseidon2_1_counts, e 
lookup_public_data_check_updated_low_leaf_poseidon2_0_counts, e lookup_public_data_check_updated_low_leaf_poseidon2_1_counts, e lookup_public_data_check_low_leaf_merkle_check_counts, e lookup_public_data_check_new_leaf_poseidon2_0_counts, e lookup_public_data_check_new_leaf_poseidon2_1_counts, e lookup_public_data_check_new_leaf_merkle_check_counts, e lookup_public_data_check_write_public_data_to_public_inputs_counts, e lookup_public_data_check_write_writes_length_to_public_inputs_counts, e lookup_update_check_timestamp_from_public_inputs_counts, e lookup_update_check_delayed_public_mutable_slot_poseidon2_counts, e lookup_update_check_update_hash_public_data_read_counts, e lookup_update_check_update_hash_poseidon2_counts, e lookup_update_check_update_hi_metadata_range_counts, e lookup_update_check_update_lo_metadata_range_counts, e lookup_update_check_timestamp_is_lt_timestamp_of_change_counts, e lookup_contract_instance_retrieval_check_protocol_address_range_counts, e lookup_contract_instance_retrieval_read_derived_address_from_public_inputs_counts, e lookup_contract_instance_retrieval_deployment_nullifier_read_counts, e lookup_contract_instance_retrieval_address_derivation_counts, e lookup_contract_instance_retrieval_update_check_counts, e lookup_class_id_derivation_class_id_poseidon2_0_counts, e lookup_class_id_derivation_class_id_poseidon2_1_counts, e lookup_bc_retrieval_contract_instance_retrieval_counts, e lookup_bc_retrieval_class_id_derivation_counts, e lookup_bc_retrieval_is_new_class_check_counts, e lookup_bc_retrieval_retrieved_bytecodes_insertion_counts, e lookup_instr_fetching_pc_abs_diff_positive_counts, e lookup_instr_fetching_instr_abs_diff_positive_counts, e lookup_instr_fetching_tag_value_validation_counts, e lookup_instr_fetching_bytecode_size_from_bc_dec_counts, e lookup_instr_fetching_bytes_from_bc_dec_counts, e lookup_instr_fetching_wire_instruction_info_counts, e lookup_emit_public_log_check_memory_out_of_bounds_counts, e 
lookup_emit_public_log_check_log_fields_count_counts, e lookup_emit_public_log_write_data_to_public_inputs_counts, e lookup_get_contract_instance_precomputed_info_counts, e lookup_get_contract_instance_contract_instance_retrieval_counts, e lookup_l1_to_l2_message_tree_check_merkle_check_counts, e lookup_internal_call_unwind_call_stack_counts, e lookup_context_ctx_stack_rollback_counts, e lookup_context_ctx_stack_return_counts, e lookup_addressing_relative_overflow_result_0_counts, e lookup_addressing_relative_overflow_result_1_counts, e lookup_addressing_relative_overflow_result_2_counts, e lookup_addressing_relative_overflow_result_3_counts, e lookup_addressing_relative_overflow_result_4_counts, e lookup_addressing_relative_overflow_result_5_counts, e lookup_addressing_relative_overflow_result_6_counts, e lookup_gas_addressing_gas_read_counts, e lookup_gas_is_out_of_gas_l2_counts, e lookup_gas_is_out_of_gas_da_counts, e lookup_note_hash_tree_check_silo_poseidon2_counts, e lookup_note_hash_tree_check_read_first_nullifier_counts, e lookup_note_hash_tree_check_nonce_computation_poseidon2_counts, e lookup_note_hash_tree_check_unique_note_hash_poseidon2_counts, e lookup_note_hash_tree_check_merkle_check_counts, e lookup_note_hash_tree_check_write_note_hash_to_public_inputs_counts, e lookup_emit_notehash_notehash_tree_write_counts, e lookup_emit_nullifier_write_nullifier_counts, e lookup_external_call_is_l2_gas_left_gt_allocated_counts, e lookup_external_call_is_da_gas_left_gt_allocated_counts, e lookup_get_env_var_precomputed_info_counts, e lookup_get_env_var_read_from_public_inputs_col0_counts, e lookup_get_env_var_read_from_public_inputs_col1_counts, e lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_counts, e lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_counts, e lookup_notehash_exists_note_hash_leaf_index_in_range_counts, e lookup_notehash_exists_note_hash_read_counts, e lookup_nullifier_exists_nullifier_exists_check_counts, e 
lookup_send_l2_to_l1_msg_recipient_check_counts, e lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_counts, e lookup_sload_storage_read_counts, e lookup_sstore_record_written_storage_slot_counts, e lookup_execution_bytecode_retrieval_result_counts, e lookup_execution_instruction_fetching_result_counts, e lookup_execution_instruction_fetching_body_counts, e lookup_execution_exec_spec_read_counts, e lookup_execution_dyn_l2_factor_bitwise_counts, e lookup_execution_check_radix_gt_256_counts, e lookup_execution_get_p_limbs_counts, e lookup_execution_get_max_limbs_counts, e lookup_execution_check_written_storage_slot_counts, e lookup_execution_dispatch_to_alu_counts, e lookup_execution_dispatch_to_bitwise_counts, e lookup_execution_dispatch_to_cast_counts, e lookup_execution_dispatch_to_set_counts, e lookup_calldata_hashing_get_calldata_field_0_counts, e lookup_calldata_hashing_get_calldata_field_1_counts, e lookup_calldata_hashing_get_calldata_field_2_counts, e lookup_calldata_hashing_poseidon2_hash_counts, e lookup_tx_context_public_inputs_note_hash_tree_counts, e lookup_tx_context_public_inputs_nullifier_tree_counts, e lookup_tx_context_public_inputs_public_data_tree_counts, e lookup_tx_context_public_inputs_l1_l2_tree_counts, e lookup_tx_context_public_inputs_gas_used_counts, e lookup_tx_context_public_inputs_read_gas_limit_counts, e lookup_tx_context_public_inputs_read_reverted_counts, e lookup_tx_context_restore_state_on_revert_counts, e lookup_tx_context_public_inputs_write_note_hash_count_counts, e lookup_tx_context_public_inputs_write_nullifier_count_counts, e lookup_tx_context_public_inputs_write_l2_to_l1_message_count_counts, e lookup_tx_context_public_inputs_write_public_log_count_counts, e lookup_tx_read_phase_spec_counts, e lookup_tx_read_phase_length_counts, e lookup_tx_read_public_call_request_phase_counts, e lookup_tx_read_tree_insert_value_counts, e lookup_tx_note_hash_append_counts, e lookup_tx_nullifier_append_counts, e lookup_tx_read_l2_l1_msg_counts, e 
lookup_tx_write_l2_l1_msg_counts, e lookup_tx_read_effective_fee_public_inputs_counts, e lookup_tx_read_fee_payer_public_inputs_counts, e lookup_tx_balance_slot_poseidon2_counts, e lookup_tx_balance_read_counts, e lookup_tx_balance_validation_counts, e lookup_tx_write_fee_public_inputs_counts, e bc_decomposition_bytes, e bc_decomposition_bytes_pc_plus_1, e bc_decomposition_bytes_pc_plus_10, e bc_decomposition_bytes_pc_plus_11, e bc_decomposition_bytes_pc_plus_12, e bc_decomposition_bytes_pc_plus_13, e bc_decomposition_bytes_pc_plus_14, e bc_decomposition_bytes_pc_plus_15, e bc_decomposition_bytes_pc_plus_16, e bc_decomposition_bytes_pc_plus_17, e bc_decomposition_bytes_pc_plus_18, e bc_decomposition_bytes_pc_plus_19, e bc_decomposition_bytes_pc_plus_2, e bc_decomposition_bytes_pc_plus_20, e bc_decomposition_bytes_pc_plus_21, e bc_decomposition_bytes_pc_plus_22, e bc_decomposition_bytes_pc_plus_23, e bc_decomposition_bytes_pc_plus_24, e bc_decomposition_bytes_pc_plus_25, e bc_decomposition_bytes_pc_plus_26, e bc_decomposition_bytes_pc_plus_27, e bc_decomposition_bytes_pc_plus_28, e bc_decomposition_bytes_pc_plus_29, e bc_decomposition_bytes_pc_plus_3, e bc_decomposition_bytes_pc_plus_30, e bc_decomposition_bytes_pc_plus_31, e bc_decomposition_bytes_pc_plus_32, e bc_decomposition_bytes_pc_plus_33, e bc_decomposition_bytes_pc_plus_34, e bc_decomposition_bytes_pc_plus_35, e bc_decomposition_bytes_pc_plus_4, e bc_decomposition_bytes_pc_plus_5, e bc_decomposition_bytes_pc_plus_6, e bc_decomposition_bytes_pc_plus_7, e bc_decomposition_bytes_pc_plus_8, e bc_decomposition_bytes_pc_plus_9, e bc_decomposition_bytes_remaining, e bc_decomposition_id, e bc_decomposition_next_packed_pc, e bc_decomposition_pc, e bc_decomposition_sel, e bc_decomposition_sel_windows_gt_remaining, e bc_decomposition_start, e bc_hashing_bytecode_id, e bc_hashing_padding, e bc_hashing_pc_index_1, e bc_hashing_rounds_rem, e bc_hashing_sel, e bc_hashing_sel_not_start, e bc_hashing_start, e 
bitwise_acc_ia, e bitwise_acc_ib, e bitwise_acc_ic, e bitwise_ctr, e bitwise_op_id, e bitwise_sel, e bitwise_start, e calldata_context_id, e calldata_hashing_calldata_size, e calldata_hashing_context_id, e calldata_hashing_index_0_, e calldata_hashing_output_hash, e calldata_hashing_rounds_rem, e calldata_hashing_sel, e calldata_hashing_start, e calldata_index, e calldata_sel, e data_copy_clk, e data_copy_copy_size, e data_copy_dst_addr, e data_copy_dst_context_id, e data_copy_padding, e data_copy_read_addr, e data_copy_reads_left, e data_copy_sel, e data_copy_sel_cd_copy, e data_copy_src_context_id, e data_copy_start, e emit_public_log_contract_address, e emit_public_log_correct_tag, e emit_public_log_error_out_of_bounds, e emit_public_log_error_tag_mismatch, e emit_public_log_execution_clk, e emit_public_log_is_write_contract_address, e emit_public_log_is_write_memory_value, e emit_public_log_log_address, e emit_public_log_public_inputs_index, e emit_public_log_remaining_rows, e emit_public_log_seen_wrong_tag, e emit_public_log_sel, e emit_public_log_sel_write_to_public_inputs, e emit_public_log_space_id, e emit_public_log_start, e execution_bytecode_id, e execution_clk, e execution_context_id, e execution_contract_address, e execution_da_gas_limit, e execution_discard, e execution_dying_context_id, e execution_enqueued_call_start, e execution_internal_call_id, e execution_internal_call_return_id, e execution_is_static, e execution_l1_l2_tree_root, e execution_l2_gas_limit, e execution_last_child_id, e execution_last_child_returndata_addr, e execution_last_child_returndata_size, e execution_last_child_success, e execution_msg_sender, e execution_next_context_id, e execution_next_internal_call_id, e execution_parent_calldata_addr, e execution_parent_calldata_size, e execution_parent_da_gas_limit, e execution_parent_da_gas_used, e execution_parent_id, e execution_parent_l2_gas_limit, e execution_parent_l2_gas_used, e execution_pc, e execution_prev_da_gas_used, e 
execution_prev_l2_gas_used, e execution_prev_note_hash_tree_root, e execution_prev_note_hash_tree_size, e execution_prev_nullifier_tree_root, e execution_prev_nullifier_tree_size, e execution_prev_num_l2_to_l1_messages, e execution_prev_num_note_hashes_emitted, e execution_prev_num_nullifiers_emitted, e execution_prev_num_public_log_fields, e execution_prev_public_data_tree_root, e execution_prev_public_data_tree_size, e execution_prev_retrieved_bytecodes_tree_root, e execution_prev_retrieved_bytecodes_tree_size, e execution_prev_written_public_data_slots_tree_root, e execution_prev_written_public_data_slots_tree_size, e execution_sel, e execution_sel_first_row_in_context, e execution_transaction_fee, e ff_gt_a_hi, e ff_gt_a_lo, e ff_gt_b_hi, e ff_gt_b_lo, e ff_gt_cmp_rng_ctr, e ff_gt_p_sub_a_hi, e ff_gt_p_sub_a_lo, e ff_gt_p_sub_b_hi, e ff_gt_p_sub_b_lo, e ff_gt_sel, e ff_gt_sel_dec, e ff_gt_sel_gt, e keccak_memory_addr, e keccak_memory_clk, e keccak_memory_ctr, e keccak_memory_rw, e keccak_memory_sel, e keccak_memory_space_id, e keccak_memory_start_read, e keccak_memory_start_write, e keccak_memory_tag_error, e keccak_memory_val_0_, e keccak_memory_val_10_, e keccak_memory_val_11_, e keccak_memory_val_12_, e keccak_memory_val_13_, e keccak_memory_val_14_, e keccak_memory_val_15_, e keccak_memory_val_16_, e keccak_memory_val_17_, e keccak_memory_val_18_, e keccak_memory_val_19_, e keccak_memory_val_1_, e keccak_memory_val_20_, e keccak_memory_val_21_, e keccak_memory_val_22_, e keccak_memory_val_23_, e keccak_memory_val_2_, e keccak_memory_val_3_, e keccak_memory_val_4_, e keccak_memory_val_5_, e keccak_memory_val_6_, e keccak_memory_val_7_, e keccak_memory_val_8_, e keccak_memory_val_9_, e keccakf1600_clk, e keccakf1600_dst_addr, e keccakf1600_round, e keccakf1600_sel, e keccakf1600_sel_no_error, e keccakf1600_space_id, e keccakf1600_start, e keccakf1600_state_in_00, e keccakf1600_state_in_01, e keccakf1600_state_in_02, e keccakf1600_state_in_03, e 
keccakf1600_state_in_04, e keccakf1600_state_in_10, e keccakf1600_state_in_11, e keccakf1600_state_in_12, e keccakf1600_state_in_13, e keccakf1600_state_in_14, e keccakf1600_state_in_20, e keccakf1600_state_in_21, e keccakf1600_state_in_22, e keccakf1600_state_in_23, e keccakf1600_state_in_24, e keccakf1600_state_in_30, e keccakf1600_state_in_31, e keccakf1600_state_in_32, e keccakf1600_state_in_33, e keccakf1600_state_in_34, e keccakf1600_state_in_40, e keccakf1600_state_in_41, e keccakf1600_state_in_42, e keccakf1600_state_in_43, e keccakf1600_state_in_44, e memory_address, e memory_clk, e memory_rw, e memory_sel, e memory_space_id, e memory_tag, e memory_value, e merkle_check_index, e merkle_check_merkle_hash_separator, e merkle_check_path_len, e merkle_check_read_node, e merkle_check_read_root, e merkle_check_sel, e merkle_check_start, e merkle_check_write, e merkle_check_write_node, e merkle_check_write_root, e poseidon2_hash_a_0, e poseidon2_hash_a_1, e poseidon2_hash_a_2, e poseidon2_hash_a_3, e poseidon2_hash_input_0, e poseidon2_hash_input_1, e poseidon2_hash_input_2, e poseidon2_hash_num_perm_rounds_rem, e poseidon2_hash_output, e poseidon2_hash_sel, e poseidon2_hash_start, e public_data_check_clk, e public_data_check_sel, e public_data_check_write_idx, e public_data_squash_clk, e public_data_squash_final_value, e public_data_squash_leaf_slot, e public_data_squash_sel, e public_data_squash_write_to_public_inputs, e scalar_mul_bit_idx, e scalar_mul_point_inf, e scalar_mul_point_x, e scalar_mul_point_y, e scalar_mul_res_inf, e scalar_mul_res_x, e scalar_mul_res_y, e scalar_mul_scalar, e scalar_mul_sel, e scalar_mul_start, e scalar_mul_temp_inf, e scalar_mul_temp_x, e scalar_mul_temp_y, e sha256_a, e sha256_b, e sha256_c, e sha256_d, e sha256_e, e sha256_execution_clk, e sha256_f, e sha256_g, e sha256_h, e sha256_helper_w0, e sha256_helper_w1, e sha256_helper_w10, e sha256_helper_w11, e sha256_helper_w12, e sha256_helper_w13, e sha256_helper_w14, e 
sha256_helper_w15, e sha256_helper_w2, e sha256_helper_w3, e sha256_helper_w4, e sha256_helper_w5, e sha256_helper_w6, e sha256_helper_w7, e sha256_helper_w8, e sha256_helper_w9, e sha256_init_a, e sha256_init_b, e sha256_init_c, e sha256_init_d, e sha256_init_e, e sha256_init_f, e sha256_init_g, e sha256_init_h, e sha256_input_addr, e sha256_input_rounds_rem, e sha256_output_addr, e sha256_rounds_remaining, e sha256_sel, e sha256_sel_invalid_input_tag_err, e sha256_space_id, e sha256_start, e to_radix_acc, e to_radix_acc_under_p, e to_radix_limb, e to_radix_limb_eq_p, e to_radix_limb_index, e to_radix_limb_lt_p, e to_radix_mem_dst_addr, e to_radix_mem_execution_clk, e to_radix_mem_is_output_bits, e to_radix_mem_num_limbs, e to_radix_mem_radix, e to_radix_mem_sel, e to_radix_mem_sel_should_decompose, e to_radix_mem_sel_should_write_mem, e to_radix_mem_space_id, e to_radix_mem_start, e to_radix_mem_value_to_decompose, e to_radix_not_padding_limb, e to_radix_power, e to_radix_radix, e to_radix_safe_limbs, e to_radix_sel, e to_radix_start, e to_radix_value, e tx_da_gas_limit, e tx_discard, e tx_fee, e tx_is_revertible, e tx_is_teardown, e tx_l1_l2_tree_root, e tx_l1_l2_tree_size, e tx_l2_gas_limit, e tx_next_context_id, e tx_phase_value, e tx_prev_da_gas_used, e tx_prev_l2_gas_used, e tx_prev_note_hash_tree_root, e tx_prev_note_hash_tree_size, e tx_prev_nullifier_tree_root, e tx_prev_nullifier_tree_size, e tx_prev_num_l2_to_l1_messages, e tx_prev_num_note_hashes_emitted, e tx_prev_num_nullifiers_emitted, e tx_prev_num_public_log_fields, e tx_prev_public_data_tree_root, e tx_prev_public_data_tree_size, e tx_prev_retrieved_bytecodes_tree_root, e tx_prev_retrieved_bytecodes_tree_size, e tx_prev_written_public_data_slots_tree_root, e tx_prev_written_public_data_slots_tree_size, e tx_read_pi_offset, e tx_remaining_phase_counter, e tx_reverted, e tx_sel, e tx_start_phase, e tx_start_tx, e tx_tx_reverted +#define AVM2_PRECOMPUTED_ENTITIES_E(e) e precomputed_addressing_gas, e 
precomputed_bitwise_input_a, e precomputed_bitwise_input_b, e precomputed_bitwise_output_and, e precomputed_bitwise_output_or, e precomputed_bitwise_output_xor, e precomputed_dyn_gas_id, e precomputed_envvar_pi_row_idx, e precomputed_exec_opcode, e precomputed_exec_opcode_base_da_gas, e precomputed_exec_opcode_dynamic_da_gas, e precomputed_exec_opcode_dynamic_l2_gas, e precomputed_exec_opcode_opcode_gas, e precomputed_expected_tag_reg_0_, e precomputed_expected_tag_reg_1_, e precomputed_expected_tag_reg_2_, e precomputed_expected_tag_reg_3_, e precomputed_expected_tag_reg_4_, e precomputed_expected_tag_reg_5_, e precomputed_first_row, e precomputed_idx, e precomputed_instr_size, e precomputed_invalid_envvar_enum, e precomputed_is_address, e precomputed_is_class_id, e precomputed_is_cleanup, e precomputed_is_collect_fee, e precomputed_is_dagasleft, e precomputed_is_deployer, e precomputed_is_init_hash, e precomputed_is_isstaticcall, e precomputed_is_l2gasleft, e precomputed_is_public_call_request, e precomputed_is_revertible, e precomputed_is_sender, e precomputed_is_teardown, e precomputed_is_transactionfee, e precomputed_is_tree_padding, e precomputed_is_valid_member_enum, e precomputed_keccak_round_constant, e precomputed_next_phase_on_revert, e precomputed_opcode_out_of_range, e precomputed_out_tag, e precomputed_p_decomposition_limb, e precomputed_p_decomposition_limb_index, e precomputed_p_decomposition_radix, e precomputed_power_of_2, e precomputed_read_pi_length_offset, e precomputed_read_pi_start_offset, e precomputed_rw_reg_0_, e precomputed_rw_reg_1_, e precomputed_rw_reg_2_, e precomputed_rw_reg_3_, e precomputed_rw_reg_4_, e precomputed_rw_reg_5_, e precomputed_sel_addressing_gas, e precomputed_sel_append_l2_l1_msg, e precomputed_sel_append_note_hash, e precomputed_sel_append_nullifier, e precomputed_sel_envvar_pi_lookup_col0, e precomputed_sel_envvar_pi_lookup_col1, e precomputed_sel_exec_spec, e precomputed_sel_has_tag, e precomputed_sel_keccak, e 
precomputed_sel_mem_op_reg_0_, e precomputed_sel_mem_op_reg_1_, e precomputed_sel_mem_op_reg_2_, e precomputed_sel_mem_op_reg_3_, e precomputed_sel_mem_op_reg_4_, e precomputed_sel_mem_op_reg_5_, e precomputed_sel_mem_tag_out_of_range, e precomputed_sel_op_dc_0, e precomputed_sel_op_dc_1, e precomputed_sel_op_dc_10, e precomputed_sel_op_dc_11, e precomputed_sel_op_dc_12, e precomputed_sel_op_dc_13, e precomputed_sel_op_dc_14, e precomputed_sel_op_dc_15, e precomputed_sel_op_dc_16, e precomputed_sel_op_dc_2, e precomputed_sel_op_dc_3, e precomputed_sel_op_dc_4, e precomputed_sel_op_dc_5, e precomputed_sel_op_dc_6, e precomputed_sel_op_dc_7, e precomputed_sel_op_dc_8, e precomputed_sel_op_dc_9, e precomputed_sel_op_is_address_0_, e precomputed_sel_op_is_address_1_, e precomputed_sel_op_is_address_2_, e precomputed_sel_op_is_address_3_, e precomputed_sel_op_is_address_4_, e precomputed_sel_op_is_address_5_, e precomputed_sel_op_is_address_6_, e precomputed_sel_p_decomposition, e precomputed_sel_phase, e precomputed_sel_range_16, e precomputed_sel_range_8, e precomputed_sel_sha256_compression, e precomputed_sel_tag_check_reg_0_, e precomputed_sel_tag_check_reg_1_, e precomputed_sel_tag_check_reg_2_, e precomputed_sel_tag_check_reg_3_, e precomputed_sel_tag_check_reg_4_, e precomputed_sel_tag_check_reg_5_, e precomputed_sel_tag_is_op2, e precomputed_sel_tag_parameters, e precomputed_sel_to_radix_p_limb_counts, e precomputed_sha256_compression_round_constant, e precomputed_subtrace_id, e precomputed_subtrace_operation_id, e precomputed_tag_byte_length, e precomputed_tag_max_bits, e precomputed_tag_max_value, e precomputed_to_radix_num_limbs_for_p, e precomputed_to_radix_safe_limbs, e precomputed_zero, e public_inputs_sel +#define AVM2_WIRE_ENTITIES_E(e) e public_inputs_cols_0_, e public_inputs_cols_1_, e public_inputs_cols_2_, e public_inputs_cols_3_, e address_derivation_address, e address_derivation_address_y, e address_derivation_class_id, e 
address_derivation_const_four, e address_derivation_const_thirteen, e address_derivation_const_three, e address_derivation_const_two, e address_derivation_deployer_addr, e address_derivation_g1_x, e address_derivation_g1_y, e address_derivation_incoming_viewing_key_x, e address_derivation_incoming_viewing_key_y, e address_derivation_init_hash, e address_derivation_nullifier_key_x, e address_derivation_nullifier_key_y, e address_derivation_outgoing_viewing_key_x, e address_derivation_outgoing_viewing_key_y, e address_derivation_partial_address, e address_derivation_partial_address_domain_separator, e address_derivation_preaddress, e address_derivation_preaddress_domain_separator, e address_derivation_preaddress_public_key_x, e address_derivation_preaddress_public_key_y, e address_derivation_public_keys_hash, e address_derivation_public_keys_hash_domain_separator, e address_derivation_salt, e address_derivation_salted_init_hash, e address_derivation_salted_init_hash_domain_separator, e address_derivation_sel, e address_derivation_tagging_key_x, e address_derivation_tagging_key_y, e alu_a_hi, e alu_a_hi_bits, e alu_a_lo, e alu_a_lo_bits, e alu_ab_diff_inv, e alu_ab_tags_diff_inv, e alu_b_hi, e alu_b_inv, e alu_b_lo, e alu_c_hi, e alu_cf, e alu_constant_64, e alu_gt_input_a, e alu_gt_input_b, e alu_gt_result_c, e alu_helper1, e alu_ia, e alu_ia_tag, e alu_ib, e alu_ib_tag, e alu_ic, e alu_ic_tag, e alu_max_bits, e alu_max_value, e alu_mid, e alu_mid_bits, e alu_op_id, e alu_sel, e alu_sel_ab_tag_mismatch, e alu_sel_decompose_a, e alu_sel_div_0_err, e alu_sel_div_no_err, e alu_sel_err, e alu_sel_ff_gt, e alu_sel_int_gt, e alu_sel_is_ff, e alu_sel_is_u128, e alu_sel_mul_div_u128, e alu_sel_mul_no_err_non_ff, e alu_sel_op_add, e alu_sel_op_div, e alu_sel_op_eq, e alu_sel_op_fdiv, e alu_sel_op_lt, e alu_sel_op_lte, e alu_sel_op_mul, e alu_sel_op_not, e alu_sel_op_shl, e alu_sel_op_shr, e alu_sel_op_sub, e alu_sel_op_truncate, e alu_sel_shift_ops_no_overflow, e 
alu_sel_tag_err, e alu_sel_trunc_gte_128, e alu_sel_trunc_lt_128, e alu_sel_trunc_non_trivial, e alu_sel_trunc_trivial, e alu_shift_lo_bits, e alu_tag_ff_diff_inv, e alu_tag_u128_diff_inv, e alu_two_pow_shift_lo_bits, e bc_decomposition_bytes_pc_plus_36, e bc_decomposition_bytes_rem_inv, e bc_decomposition_bytes_rem_min_one_inv, e bc_decomposition_bytes_to_read, e bc_decomposition_last_of_contract, e bc_decomposition_next_packed_pc_min_pc_inv, e bc_decomposition_packed_field, e bc_decomposition_sel_packed, e bc_decomposition_sel_packed_read_0_, e bc_decomposition_sel_packed_read_1_, e bc_decomposition_sel_packed_read_2_, e bc_decomposition_sel_windows_eq_remaining, e bc_decomposition_windows_min_remaining_inv, e bc_hashing_end, e bc_hashing_input_len, e bc_hashing_packed_fields_0, e bc_hashing_packed_fields_1, e bc_hashing_packed_fields_2, e bc_hashing_pc_index, e bc_hashing_pc_index_2, e bc_hashing_sel_not_padding_1, e bc_hashing_sel_not_padding_2, e bc_hashing_size_in_bytes, e bc_retrieval_address, e bc_retrieval_artifact_hash, e bc_retrieval_bytecode_id, e bc_retrieval_current_class_id, e bc_retrieval_error, e bc_retrieval_instance_exists, e bc_retrieval_is_new_class, e bc_retrieval_next_retrieved_bytecodes_tree_root, e bc_retrieval_next_retrieved_bytecodes_tree_size, e bc_retrieval_no_remaining_bytecodes, e bc_retrieval_nullifier_tree_root, e bc_retrieval_prev_retrieved_bytecodes_tree_root, e bc_retrieval_prev_retrieved_bytecodes_tree_size, e bc_retrieval_private_functions_root, e bc_retrieval_public_data_tree_root, e bc_retrieval_remaining_bytecodes_inv, e bc_retrieval_retrieved_bytecodes_merkle_separator, e bc_retrieval_retrieved_bytecodes_tree_height, e bc_retrieval_sel, e bc_retrieval_should_retrieve, e bitwise_ctr_min_one_inv, e bitwise_end, e bitwise_err, e bitwise_ia_byte, e bitwise_ib_byte, e bitwise_ic_byte, e bitwise_output_and, e bitwise_output_or, e bitwise_output_xor, e bitwise_sel_and, e bitwise_sel_compute, e bitwise_sel_get_ctr, e 
bitwise_sel_or, e bitwise_sel_tag_ff_err, e bitwise_sel_tag_mismatch_err, e bitwise_sel_xor, e bitwise_start_keccak, e bitwise_start_sha256, e bitwise_tag_a, e bitwise_tag_a_inv, e bitwise_tag_ab_diff_inv, e bitwise_tag_b, e bitwise_tag_c, e calldata_end, e calldata_hashing_end, e calldata_hashing_index_1_, e calldata_hashing_index_2_, e calldata_hashing_input_0_, e calldata_hashing_input_1_, e calldata_hashing_input_2_, e calldata_hashing_input_len, e calldata_hashing_sel_end_not_empty, e calldata_hashing_sel_not_padding_1, e calldata_hashing_sel_not_padding_2, e calldata_hashing_sel_not_start, e calldata_value, e class_id_derivation_artifact_hash, e class_id_derivation_class_id, e class_id_derivation_const_four, e class_id_derivation_gen_index_contract_class_id, e class_id_derivation_private_functions_root, e class_id_derivation_public_bytecode_commitment, e class_id_derivation_sel, e context_stack_bytecode_id, e context_stack_context_id, e context_stack_contract_address, e context_stack_entered_context_id, e context_stack_internal_call_id, e context_stack_internal_call_return_id, e context_stack_is_static, e context_stack_msg_sender, e context_stack_next_internal_call_id, e context_stack_next_pc, e context_stack_note_hash_tree_root, e context_stack_note_hash_tree_size, e context_stack_nullifier_tree_root, e context_stack_nullifier_tree_size, e context_stack_num_l2_to_l1_messages, e context_stack_num_note_hashes_emitted, e context_stack_num_nullifiers_emitted, e context_stack_num_public_log_fields, e context_stack_parent_calldata_addr, e context_stack_parent_calldata_size, e context_stack_parent_da_gas_limit, e context_stack_parent_da_gas_used, e context_stack_parent_id, e context_stack_parent_l2_gas_limit, e context_stack_parent_l2_gas_used, e context_stack_public_data_tree_root, e context_stack_public_data_tree_size, e context_stack_sel, e context_stack_written_public_data_slots_tree_root, e context_stack_written_public_data_slots_tree_size, e 
contract_instance_retrieval_address, e contract_instance_retrieval_address_sub_one, e contract_instance_retrieval_current_class_id, e contract_instance_retrieval_deployer_addr, e contract_instance_retrieval_deployer_protocol_contract_address, e contract_instance_retrieval_derived_address, e contract_instance_retrieval_derived_address_pi_index, e contract_instance_retrieval_exists, e contract_instance_retrieval_incoming_viewing_key_x, e contract_instance_retrieval_incoming_viewing_key_y, e contract_instance_retrieval_init_hash, e contract_instance_retrieval_is_protocol_contract, e contract_instance_retrieval_max_protocol_contracts, e contract_instance_retrieval_nullifier_key_x, e contract_instance_retrieval_nullifier_key_y, e contract_instance_retrieval_nullifier_merkle_separator, e contract_instance_retrieval_nullifier_tree_height, e contract_instance_retrieval_nullifier_tree_root, e contract_instance_retrieval_original_class_id, e contract_instance_retrieval_outgoing_viewing_key_x, e contract_instance_retrieval_outgoing_viewing_key_y, e contract_instance_retrieval_protocol_contract_derived_address_inv, e contract_instance_retrieval_public_data_tree_root, e contract_instance_retrieval_salt, e contract_instance_retrieval_sel, e contract_instance_retrieval_should_check_for_update, e contract_instance_retrieval_should_check_nullifier, e contract_instance_retrieval_siloing_separator, e contract_instance_retrieval_tagging_key_x, e contract_instance_retrieval_tagging_key_y, e data_copy_cd_copy_col_read, e data_copy_clamped_read_index_upper_bound, e data_copy_dst_out_of_range_err, e data_copy_end, e data_copy_is_top_level, e data_copy_mem_size, e data_copy_offset, e data_copy_offset_plus_size, e data_copy_offset_plus_size_is_gt, e data_copy_parent_id_inv, e data_copy_read_addr_plus_one, e data_copy_read_addr_upper_bound, e data_copy_reads_left_inv, e data_copy_sel_cd_copy_start, e data_copy_sel_has_reads, e data_copy_sel_mem_read, e data_copy_sel_mem_write, e 
data_copy_sel_rd_copy_start, e data_copy_sel_write_count_is_zero, e data_copy_src_addr, e data_copy_src_data_size, e data_copy_src_reads_exceed_mem, e data_copy_start_no_err, e data_copy_tag, e data_copy_value, e data_copy_write_addr_upper_bound, e data_copy_write_count_minus_one_inv, e data_copy_write_count_zero_inv, e ecc_add_mem_dst_addr_0_, e ecc_add_mem_dst_addr_1_, e ecc_add_mem_dst_addr_2_, e ecc_add_mem_err, e ecc_add_mem_execution_clk, e ecc_add_mem_max_mem_addr, e ecc_add_mem_p_is_inf, e ecc_add_mem_p_is_on_curve_eqn, e ecc_add_mem_p_is_on_curve_eqn_inv, e ecc_add_mem_p_x, e ecc_add_mem_p_x_n, e ecc_add_mem_p_y, e ecc_add_mem_p_y_n, e ecc_add_mem_q_is_inf, e ecc_add_mem_q_is_on_curve_eqn, e ecc_add_mem_q_is_on_curve_eqn_inv, e ecc_add_mem_q_x, e ecc_add_mem_q_x_n, e ecc_add_mem_q_y, e ecc_add_mem_q_y_n, e ecc_add_mem_res_is_inf, e ecc_add_mem_res_x, e ecc_add_mem_res_y, e ecc_add_mem_sel, e ecc_add_mem_sel_dst_out_of_range_err, e ecc_add_mem_sel_p_not_on_curve_err, e ecc_add_mem_sel_q_not_on_curve_err, e ecc_add_mem_sel_should_exec, e ecc_add_mem_space_id, e ecc_add_op, e ecc_double_op, e ecc_inv_2_p_y, e ecc_inv_x_diff, e ecc_inv_y_diff, e ecc_lambda, e ecc_p_is_inf, e ecc_p_x, e ecc_p_y, e ecc_q_is_inf, e ecc_q_x, e ecc_q_y, e ecc_r_is_inf, e ecc_r_x, e ecc_r_y, e ecc_result_infinity, e ecc_sel, e ecc_use_computed_result, e ecc_x_match, e ecc_y_match, e emit_public_log_discard, e emit_public_log_end, e emit_public_log_end_log_address_upper_bound, e emit_public_log_error, e emit_public_log_error_too_many_log_fields, e emit_public_log_expected_next_log_fields, e emit_public_log_is_static, e emit_public_log_log_size, e emit_public_log_max_mem_size, e emit_public_log_max_public_logs_payload_length, e emit_public_log_next_num_public_log_fields, e emit_public_log_prev_num_public_log_fields, e emit_public_log_public_inputs_value, e emit_public_log_remaining_rows_inv, e emit_public_log_sel_read_memory, e emit_public_log_tag, e emit_public_log_tag_inv, e 
emit_public_log_value, e execution_addressing_error_collection_inv, e execution_addressing_gas, e execution_addressing_mode, e execution_base_address_tag, e execution_base_address_tag_diff_inv, e execution_base_address_val, e execution_base_da_gas, e execution_batched_tags_diff_inv, e execution_batched_tags_diff_inv_reg, e execution_da_gas_left, e execution_da_gas_used, e execution_dying_context_diff_inv, e execution_dying_context_id_inv, e execution_dyn_gas_id, e execution_dynamic_da_gas, e execution_dynamic_da_gas_factor, e execution_dynamic_l2_gas, e execution_dynamic_l2_gas_factor, e execution_enqueued_call_end, e execution_envvar_pi_row_idx, e execution_exec_opcode, e execution_expected_tag_reg_0_, e execution_expected_tag_reg_1_, e execution_expected_tag_reg_2_, e execution_expected_tag_reg_3_, e execution_expected_tag_reg_4_, e execution_expected_tag_reg_5_, e execution_has_parent_ctx, e execution_highest_address, e execution_instr_size, e execution_internal_call_return_id_inv, e execution_is_address, e execution_is_da_gas_left_gt_allocated, e execution_is_dagasleft, e execution_is_dying_context, e execution_is_isstaticcall, e execution_is_l2_gas_left_gt_allocated, e execution_is_l2gasleft, e execution_is_parent_id_inv, e execution_is_sender, e execution_is_transactionfee, e execution_l1_to_l2_msg_leaf_in_range, e execution_l1_to_l2_msg_tree_leaf_count, e execution_l2_gas_left, e execution_l2_gas_used, e execution_max_data_writes_reached, e execution_max_eth_address_value, e execution_mem_tag_reg_0_, e execution_mem_tag_reg_1_, e execution_mem_tag_reg_2_, e execution_mem_tag_reg_3_, e execution_mem_tag_reg_4_, e execution_mem_tag_reg_5_, e execution_nested_failure, e execution_nested_return, e execution_next_pc, e execution_note_hash_leaf_in_range, e execution_note_hash_tree_leaf_count, e execution_note_hash_tree_root, e execution_note_hash_tree_size, e execution_nullifier_merkle_separator, e execution_nullifier_pi_offset, e 
execution_nullifier_siloing_separator, e execution_nullifier_tree_height, e execution_nullifier_tree_root, e execution_nullifier_tree_size, e execution_num_l2_to_l1_messages, e execution_num_note_hashes_emitted, e execution_num_nullifiers_emitted, e execution_num_p_limbs, e execution_num_public_log_fields, e execution_num_relative_operands_inv, e execution_op_0_, e execution_op_1_, e execution_op_2_, e execution_op_3_, e execution_op_4_, e execution_op_5_, e execution_op_6_, e execution_op_after_relative_0_, e execution_op_after_relative_1_, e execution_op_after_relative_2_, e execution_op_after_relative_3_, e execution_op_after_relative_4_, e execution_op_after_relative_5_, e execution_op_after_relative_6_, e execution_opcode_gas, e execution_out_of_gas_da, e execution_out_of_gas_l2, e execution_public_data_tree_root, e execution_public_data_tree_size, e execution_public_inputs_index, e execution_register_0_, e execution_register_1_, e execution_register_2_, e execution_register_3_, e execution_register_4_, e execution_register_5_, e execution_remaining_data_writes_inv, e execution_remaining_l2_to_l1_msgs_inv, e execution_remaining_note_hashes_inv, e execution_remaining_nullifiers_inv, e execution_retrieved_bytecodes_tree_root, e execution_retrieved_bytecodes_tree_size, e execution_rop_0_, e execution_rop_1_, e execution_rop_2_, e execution_rop_3_, e execution_rop_4_, e execution_rop_5_, e execution_rop_6_, e execution_rop_tag_0_, e execution_rop_tag_1_, e execution_rop_tag_2_, e execution_rop_tag_3_, e execution_rop_tag_4_, e execution_rop_tag_5_, e execution_rop_tag_6_, e execution_rw_reg_0_, e execution_rw_reg_1_, e execution_rw_reg_2_, e execution_rw_reg_3_, e execution_rw_reg_4_, e execution_rw_reg_5_, e execution_sel_addressing_error, e execution_sel_apply_indirection_0_, e execution_sel_apply_indirection_1_, e execution_sel_apply_indirection_2_, e execution_sel_apply_indirection_3_, e execution_sel_apply_indirection_4_, e execution_sel_apply_indirection_5_, 
e execution_sel_apply_indirection_6_, e execution_sel_base_address_failure, e execution_sel_bytecode_retrieval_failure, e execution_sel_bytecode_retrieval_success, e execution_sel_check_gas, e execution_sel_do_base_check, e execution_sel_enter_call, e execution_sel_envvar_pi_lookup_col0, e execution_sel_envvar_pi_lookup_col1, e execution_sel_error, e execution_sel_exec_dispatch_alu, e execution_sel_exec_dispatch_bitwise, e execution_sel_exec_dispatch_calldata_copy, e execution_sel_exec_dispatch_cast, e execution_sel_exec_dispatch_ecc_add, e execution_sel_exec_dispatch_emit_public_log, e execution_sel_exec_dispatch_execution, e execution_sel_exec_dispatch_get_contract_instance, e execution_sel_exec_dispatch_keccakf1600, e execution_sel_exec_dispatch_poseidon2_perm, e execution_sel_exec_dispatch_returndata_copy, e execution_sel_exec_dispatch_set, e execution_sel_exec_dispatch_sha256_compression, e execution_sel_exec_dispatch_to_radix, e execution_sel_execute_call, e execution_sel_execute_debug_log, e execution_sel_execute_emit_notehash, e execution_sel_execute_emit_nullifier, e execution_sel_execute_get_env_var, e execution_sel_execute_internal_call, e execution_sel_execute_internal_return, e execution_sel_execute_jump, e execution_sel_execute_jumpi, e execution_sel_execute_l1_to_l2_message_exists, e execution_sel_execute_mov, e execution_sel_execute_notehash_exists, e execution_sel_execute_nullifier_exists, e execution_sel_execute_opcode, e execution_sel_execute_return, e execution_sel_execute_returndata_size, e execution_sel_execute_revert, e execution_sel_execute_send_l2_to_l1_msg, e execution_sel_execute_sload, e execution_sel_execute_sstore, e execution_sel_execute_static_call, e execution_sel_execute_success_copy, e execution_sel_exit_call, e execution_sel_failure, e execution_sel_gas_bitwise, e execution_sel_gas_calldata_copy, e execution_sel_gas_emit_public_log, e execution_sel_gas_returndata_copy, e execution_sel_gas_sstore, e execution_sel_gas_to_radix, e 
execution_sel_instruction_fetching_failure, e execution_sel_instruction_fetching_success, e execution_sel_l2_to_l1_msg_limit_error, e execution_sel_lookup_num_p_limbs, e execution_sel_mem_op_reg_0_, e execution_sel_mem_op_reg_1_, e execution_sel_mem_op_reg_2_, e execution_sel_mem_op_reg_3_, e execution_sel_mem_op_reg_4_, e execution_sel_mem_op_reg_5_, e execution_sel_op_do_overflow_check_0_, e execution_sel_op_do_overflow_check_1_, e execution_sel_op_do_overflow_check_2_, e execution_sel_op_do_overflow_check_3_, e execution_sel_op_do_overflow_check_4_, e execution_sel_op_do_overflow_check_5_, e execution_sel_op_do_overflow_check_6_, e execution_sel_op_is_address_0_, e execution_sel_op_is_address_1_, e execution_sel_op_is_address_2_, e execution_sel_op_is_address_3_, e execution_sel_op_is_address_4_, e execution_sel_op_is_address_5_, e execution_sel_op_is_address_6_, e execution_sel_op_is_indirect_wire_0_, e execution_sel_op_is_indirect_wire_1_, e execution_sel_op_is_indirect_wire_2_, e execution_sel_op_is_indirect_wire_3_, e execution_sel_op_is_indirect_wire_4_, e execution_sel_op_is_indirect_wire_5_, e execution_sel_op_is_indirect_wire_6_, e execution_sel_op_is_indirect_wire_7_, e execution_sel_op_is_relative_wire_0_, e execution_sel_op_is_relative_wire_1_, e execution_sel_op_is_relative_wire_2_, e execution_sel_op_is_relative_wire_3_, e execution_sel_op_is_relative_wire_4_, e execution_sel_op_is_relative_wire_5_, e execution_sel_op_is_relative_wire_6_, e execution_sel_op_is_relative_wire_7_, e execution_sel_op_reg_effective_0_, e execution_sel_op_reg_effective_1_, e execution_sel_op_reg_effective_2_, e execution_sel_op_reg_effective_3_, e execution_sel_op_reg_effective_4_, e execution_sel_op_reg_effective_5_, e execution_sel_opcode_error, e execution_sel_out_of_gas, e execution_sel_radix_gt_256, e execution_sel_reached_max_note_hashes, e execution_sel_reached_max_nullifiers, e execution_sel_read_registers, e execution_sel_read_unwind_call_stack, e 
execution_sel_register_read_error, e execution_sel_relative_overflow_0_, e execution_sel_relative_overflow_1_, e execution_sel_relative_overflow_2_, e execution_sel_relative_overflow_3_, e execution_sel_relative_overflow_4_, e execution_sel_relative_overflow_5_, e execution_sel_relative_overflow_6_, e execution_sel_some_final_check_failed, e execution_sel_tag_check_reg_0_, e execution_sel_tag_check_reg_1_, e execution_sel_tag_check_reg_2_, e execution_sel_tag_check_reg_3_, e execution_sel_tag_check_reg_4_, e execution_sel_tag_check_reg_5_, e execution_sel_too_large_recipient_error, e execution_sel_use_num_limbs, e execution_sel_write_l2_to_l1_msg, e execution_sel_write_note_hash, e execution_sel_write_nullifier, e execution_sel_write_public_data, e execution_sel_write_registers, e execution_subtrace_id, e execution_subtrace_operation_id, e execution_total_gas_da, e execution_total_gas_l2, e execution_two_five_six, e execution_value_from_pi, e execution_written_public_data_slots_tree_root, e execution_written_public_data_slots_tree_size, e execution_written_slots_merkle_separator, e execution_written_slots_tree_height, e execution_written_slots_tree_siloing_separator, e ff_gt_a, e ff_gt_b, e ff_gt_borrow, e ff_gt_constant_128, e ff_gt_end, e ff_gt_p_a_borrow, e ff_gt_p_b_borrow, e ff_gt_res_hi, e ff_gt_res_lo, e ff_gt_result, e get_contract_instance_clk, e get_contract_instance_contract_address, e get_contract_instance_dst_offset, e get_contract_instance_dst_offset_diff_max_inv, e get_contract_instance_exists_tag, e get_contract_instance_instance_exists, e get_contract_instance_is_class_id, e get_contract_instance_is_deployer, e get_contract_instance_is_init_hash, e get_contract_instance_is_valid_member_enum, e get_contract_instance_is_valid_writes_in_bounds, e get_contract_instance_member_enum, e get_contract_instance_member_tag, e get_contract_instance_member_write_offset, e get_contract_instance_nullifier_tree_root, e get_contract_instance_public_data_tree_root, 
e get_contract_instance_retrieved_class_id, e get_contract_instance_retrieved_deployer_addr, e get_contract_instance_retrieved_init_hash, e get_contract_instance_sel, e get_contract_instance_sel_error, e get_contract_instance_selected_member, e get_contract_instance_space_id, e gt_abs_diff, e gt_input_a, e gt_input_b, e gt_num_bits, e gt_res, e gt_sel, e gt_sel_addressing, e gt_sel_alu, e gt_sel_gas, e gt_sel_others, e gt_sel_sha256, e indexed_tree_check_address, e indexed_tree_check_const_three, e indexed_tree_check_discard, e indexed_tree_check_exists, e indexed_tree_check_intermediate_root, e indexed_tree_check_low_leaf_hash, e indexed_tree_check_low_leaf_index, e indexed_tree_check_low_leaf_next_index, e indexed_tree_check_low_leaf_next_value, e indexed_tree_check_low_leaf_value, e indexed_tree_check_merkle_hash_separator, e indexed_tree_check_new_leaf_hash, e indexed_tree_check_next_value_inv, e indexed_tree_check_next_value_is_nonzero, e indexed_tree_check_not_exists, e indexed_tree_check_public_inputs_index, e indexed_tree_check_root, e indexed_tree_check_sel, e indexed_tree_check_sel_insert, e indexed_tree_check_sel_silo, e indexed_tree_check_sel_write_to_public_inputs, e indexed_tree_check_siloed_value, e indexed_tree_check_siloing_separator, e indexed_tree_check_tree_height, e indexed_tree_check_tree_size_after_write, e indexed_tree_check_tree_size_before_write, e indexed_tree_check_updated_low_leaf_hash, e indexed_tree_check_updated_low_leaf_next_index, e indexed_tree_check_updated_low_leaf_next_value, e indexed_tree_check_value, e indexed_tree_check_value_low_leaf_value_diff_inv, e indexed_tree_check_write, e indexed_tree_check_write_root, e instr_fetching_addressing_mode, e instr_fetching_bd0, e instr_fetching_bd1, e instr_fetching_bd10, e instr_fetching_bd11, e instr_fetching_bd12, e instr_fetching_bd13, e instr_fetching_bd14, e instr_fetching_bd15, e instr_fetching_bd16, e instr_fetching_bd17, e instr_fetching_bd18, e instr_fetching_bd19, e 
instr_fetching_bd2, e instr_fetching_bd20, e instr_fetching_bd21, e instr_fetching_bd22, e instr_fetching_bd23, e instr_fetching_bd24, e instr_fetching_bd25, e instr_fetching_bd26, e instr_fetching_bd27, e instr_fetching_bd28, e instr_fetching_bd29, e instr_fetching_bd3, e instr_fetching_bd30, e instr_fetching_bd31, e instr_fetching_bd32, e instr_fetching_bd33, e instr_fetching_bd34, e instr_fetching_bd35, e instr_fetching_bd36, e instr_fetching_bd4, e instr_fetching_bd5, e instr_fetching_bd6, e instr_fetching_bd7, e instr_fetching_bd8, e instr_fetching_bd9, e instr_fetching_bytecode_id, e instr_fetching_bytecode_size, e instr_fetching_bytes_to_read, e instr_fetching_exec_opcode, e instr_fetching_instr_abs_diff, e instr_fetching_instr_out_of_range, e instr_fetching_instr_size, e instr_fetching_op1, e instr_fetching_op2, e instr_fetching_op3, e instr_fetching_op4, e instr_fetching_op5, e instr_fetching_op6, e instr_fetching_op7, e instr_fetching_opcode_out_of_range, e instr_fetching_pc, e instr_fetching_pc_abs_diff, e instr_fetching_pc_out_of_range, e instr_fetching_pc_size_in_bits, e instr_fetching_sel, e instr_fetching_sel_has_tag, e instr_fetching_sel_op_dc_0, e instr_fetching_sel_op_dc_1, e instr_fetching_sel_op_dc_10, e instr_fetching_sel_op_dc_11, e instr_fetching_sel_op_dc_12, e instr_fetching_sel_op_dc_13, e instr_fetching_sel_op_dc_14, e instr_fetching_sel_op_dc_15, e instr_fetching_sel_op_dc_16, e instr_fetching_sel_op_dc_2, e instr_fetching_sel_op_dc_3, e instr_fetching_sel_op_dc_4, e instr_fetching_sel_op_dc_5, e instr_fetching_sel_op_dc_6, e instr_fetching_sel_op_dc_7, e instr_fetching_sel_op_dc_8, e instr_fetching_sel_op_dc_9, e instr_fetching_sel_parsing_err, e instr_fetching_sel_pc_in_range, e instr_fetching_sel_tag_is_op2, e instr_fetching_tag_out_of_range, e instr_fetching_tag_value, e internal_call_stack_call_id, e internal_call_stack_context_id, e internal_call_stack_entered_call_id, e internal_call_stack_return_call_id, e 
internal_call_stack_return_pc, e internal_call_stack_sel, e keccak_memory_ctr_end, e keccak_memory_end, e keccak_memory_single_tag_error, e keccak_memory_state_size_min_ctr_inv, e keccak_memory_tag, e keccak_memory_tag_min_u64_inv, e keccak_memory_val_24_, e keccakf1600_bitwise_and_op_id, e keccakf1600_bitwise_xor_op_id, e keccakf1600_dst_out_of_range_error, e keccakf1600_end, e keccakf1600_error, e keccakf1600_highest_slice_address, e keccakf1600_rot_64_min_len_01, e keccakf1600_rot_64_min_len_03, e keccakf1600_rot_64_min_len_11, e keccakf1600_rot_64_min_len_13, e keccakf1600_rot_64_min_len_20, e keccakf1600_rot_64_min_len_22, e keccakf1600_rot_64_min_len_24, e keccakf1600_rot_64_min_len_31, e keccakf1600_rot_64_min_len_34, e keccakf1600_rot_64_min_len_42, e keccakf1600_rot_len_02, e keccakf1600_rot_len_04, e keccakf1600_rot_len_10, e keccakf1600_rot_len_12, e keccakf1600_rot_len_14, e keccakf1600_rot_len_21, e keccakf1600_rot_len_23, e keccakf1600_rot_len_30, e keccakf1600_rot_len_32, e keccakf1600_rot_len_33, e keccakf1600_rot_len_40, e keccakf1600_rot_len_41, e keccakf1600_rot_len_43, e keccakf1600_rot_len_44, e keccakf1600_round_cst, e keccakf1600_sel_slice_read, e keccakf1600_sel_slice_write, e keccakf1600_src_addr, e keccakf1600_src_out_of_range_error, e keccakf1600_state_chi_00, e keccakf1600_state_chi_01, e keccakf1600_state_chi_02, e keccakf1600_state_chi_03, e keccakf1600_state_chi_04, e keccakf1600_state_chi_10, e keccakf1600_state_chi_11, e keccakf1600_state_chi_12, e keccakf1600_state_chi_13, e keccakf1600_state_chi_14, e keccakf1600_state_chi_20, e keccakf1600_state_chi_21, e keccakf1600_state_chi_22, e keccakf1600_state_chi_23, e keccakf1600_state_chi_24, e keccakf1600_state_chi_30, e keccakf1600_state_chi_31, e keccakf1600_state_chi_32, e keccakf1600_state_chi_33, e keccakf1600_state_chi_34, e keccakf1600_state_chi_40, e keccakf1600_state_chi_41, e keccakf1600_state_chi_42, e keccakf1600_state_chi_43, e keccakf1600_state_chi_44, e 
keccakf1600_state_iota_00, e keccakf1600_state_pi_and_00, e keccakf1600_state_pi_and_01, e keccakf1600_state_pi_and_02, e keccakf1600_state_pi_and_03, e keccakf1600_state_pi_and_04, e keccakf1600_state_pi_and_10, e keccakf1600_state_pi_and_11, e keccakf1600_state_pi_and_12, e keccakf1600_state_pi_and_13, e keccakf1600_state_pi_and_14, e keccakf1600_state_pi_and_20, e keccakf1600_state_pi_and_21, e keccakf1600_state_pi_and_22, e keccakf1600_state_pi_and_23, e keccakf1600_state_pi_and_24, e keccakf1600_state_pi_and_30, e keccakf1600_state_pi_and_31, e keccakf1600_state_pi_and_32, e keccakf1600_state_pi_and_33, e keccakf1600_state_pi_and_34, e keccakf1600_state_pi_and_40, e keccakf1600_state_pi_and_41, e keccakf1600_state_pi_and_42, e keccakf1600_state_pi_and_43, e keccakf1600_state_pi_and_44, e keccakf1600_state_pi_not_00, e keccakf1600_state_pi_not_01, e keccakf1600_state_pi_not_02, e keccakf1600_state_pi_not_03, e keccakf1600_state_pi_not_04, e keccakf1600_state_pi_not_10, e keccakf1600_state_pi_not_11, e keccakf1600_state_pi_not_12, e keccakf1600_state_pi_not_13, e keccakf1600_state_pi_not_14, e keccakf1600_state_pi_not_20, e keccakf1600_state_pi_not_21, e keccakf1600_state_pi_not_22, e keccakf1600_state_pi_not_23, e keccakf1600_state_pi_not_24, e keccakf1600_state_pi_not_30, e keccakf1600_state_pi_not_31, e keccakf1600_state_pi_not_32, e keccakf1600_state_pi_not_33, e keccakf1600_state_pi_not_34, e keccakf1600_state_pi_not_40, e keccakf1600_state_pi_not_41, e keccakf1600_state_pi_not_42, e keccakf1600_state_pi_not_43, e keccakf1600_state_pi_not_44, e keccakf1600_state_rho_01, e keccakf1600_state_rho_02, e keccakf1600_state_rho_03, e keccakf1600_state_rho_04, e keccakf1600_state_rho_10, e keccakf1600_state_rho_11, e keccakf1600_state_rho_12, e keccakf1600_state_rho_13, e keccakf1600_state_rho_14, e keccakf1600_state_rho_20, e keccakf1600_state_rho_21, e keccakf1600_state_rho_22, e keccakf1600_state_rho_23, e keccakf1600_state_rho_24, e keccakf1600_state_rho_30, e 
keccakf1600_state_rho_31, e keccakf1600_state_rho_32, e keccakf1600_state_rho_33, e keccakf1600_state_rho_34, e keccakf1600_state_rho_40, e keccakf1600_state_rho_41, e keccakf1600_state_rho_42, e keccakf1600_state_rho_43, e keccakf1600_state_rho_44, e keccakf1600_state_theta_00, e keccakf1600_state_theta_01, e keccakf1600_state_theta_02, e keccakf1600_state_theta_03, e keccakf1600_state_theta_04, e keccakf1600_state_theta_10, e keccakf1600_state_theta_11, e keccakf1600_state_theta_12, e keccakf1600_state_theta_13, e keccakf1600_state_theta_14, e keccakf1600_state_theta_20, e keccakf1600_state_theta_21, e keccakf1600_state_theta_22, e keccakf1600_state_theta_23, e keccakf1600_state_theta_24, e keccakf1600_state_theta_30, e keccakf1600_state_theta_31, e keccakf1600_state_theta_32, e keccakf1600_state_theta_33, e keccakf1600_state_theta_34, e keccakf1600_state_theta_40, e keccakf1600_state_theta_41, e keccakf1600_state_theta_42, e keccakf1600_state_theta_43, e keccakf1600_state_theta_44, e keccakf1600_state_theta_hi_02, e keccakf1600_state_theta_hi_04, e keccakf1600_state_theta_hi_10, e keccakf1600_state_theta_hi_12, e keccakf1600_state_theta_hi_14, e keccakf1600_state_theta_hi_21, e keccakf1600_state_theta_hi_23, e keccakf1600_state_theta_hi_30, e keccakf1600_state_theta_hi_32, e keccakf1600_state_theta_hi_33, e keccakf1600_state_theta_hi_40, e keccakf1600_state_theta_hi_41, e keccakf1600_state_theta_hi_43, e keccakf1600_state_theta_hi_44, e keccakf1600_state_theta_low_01, e keccakf1600_state_theta_low_03, e keccakf1600_state_theta_low_11, e keccakf1600_state_theta_low_13, e keccakf1600_state_theta_low_20, e keccakf1600_state_theta_low_22, e keccakf1600_state_theta_low_24, e keccakf1600_state_theta_low_31, e keccakf1600_state_theta_low_34, e keccakf1600_state_theta_low_42, e keccakf1600_tag_error, e keccakf1600_tag_u64, e keccakf1600_theta_combined_xor_0, e keccakf1600_theta_combined_xor_1, e keccakf1600_theta_combined_xor_2, e keccakf1600_theta_combined_xor_3, e 
keccakf1600_theta_combined_xor_4, e keccakf1600_theta_xor_01, e keccakf1600_theta_xor_02, e keccakf1600_theta_xor_03, e keccakf1600_theta_xor_11, e keccakf1600_theta_xor_12, e keccakf1600_theta_xor_13, e keccakf1600_theta_xor_21, e keccakf1600_theta_xor_22, e keccakf1600_theta_xor_23, e keccakf1600_theta_xor_31, e keccakf1600_theta_xor_32, e keccakf1600_theta_xor_33, e keccakf1600_theta_xor_41, e keccakf1600_theta_xor_42, e keccakf1600_theta_xor_43, e keccakf1600_theta_xor_row_0, e keccakf1600_theta_xor_row_1, e keccakf1600_theta_xor_row_2, e keccakf1600_theta_xor_row_3, e keccakf1600_theta_xor_row_4, e keccakf1600_theta_xor_row_msb_0, e keccakf1600_theta_xor_row_msb_1, e keccakf1600_theta_xor_row_msb_2, e keccakf1600_theta_xor_row_msb_3, e keccakf1600_theta_xor_row_msb_4, e keccakf1600_theta_xor_row_rotl1_0, e keccakf1600_theta_xor_row_rotl1_1, e keccakf1600_theta_xor_row_rotl1_2, e keccakf1600_theta_xor_row_rotl1_3, e keccakf1600_theta_xor_row_rotl1_4, e l1_to_l2_message_tree_check_exists, e l1_to_l2_message_tree_check_l1_to_l2_message_tree_height, e l1_to_l2_message_tree_check_leaf_index, e l1_to_l2_message_tree_check_leaf_value, e l1_to_l2_message_tree_check_leaf_value_msg_hash_diff_inv, e l1_to_l2_message_tree_check_merkle_hash_separator, e l1_to_l2_message_tree_check_msg_hash, e l1_to_l2_message_tree_check_root, e l1_to_l2_message_tree_check_sel, e memory_diff, e memory_glob_addr_diff_inv, e memory_last_access, e memory_limb_0_, e memory_limb_1_, e memory_limb_2_, e memory_max_bits, e memory_sel_addressing_base, e memory_sel_addressing_indirect_0_, e memory_sel_addressing_indirect_1_, e memory_sel_addressing_indirect_2_, e memory_sel_addressing_indirect_3_, e memory_sel_addressing_indirect_4_, e memory_sel_addressing_indirect_5_, e memory_sel_addressing_indirect_6_, e memory_sel_data_copy_read, e memory_sel_data_copy_write, e memory_sel_ecc_write_0_, e memory_sel_ecc_write_1_, e memory_sel_ecc_write_2_, e memory_sel_get_contract_instance_exists_write, e 
memory_sel_get_contract_instance_member_write, e memory_sel_keccak, e memory_sel_poseidon2_read_0_, e memory_sel_poseidon2_read_1_, e memory_sel_poseidon2_read_2_, e memory_sel_poseidon2_read_3_, e memory_sel_poseidon2_write_0_, e memory_sel_poseidon2_write_1_, e memory_sel_poseidon2_write_2_, e memory_sel_poseidon2_write_3_, e memory_sel_public_log_read, e memory_sel_register_op_0_, e memory_sel_register_op_1_, e memory_sel_register_op_2_, e memory_sel_register_op_3_, e memory_sel_register_op_4_, e memory_sel_register_op_5_, e memory_sel_rng_chk, e memory_sel_rng_write, e memory_sel_sha256_op_0_, e memory_sel_sha256_op_1_, e memory_sel_sha256_op_2_, e memory_sel_sha256_op_3_, e memory_sel_sha256_op_4_, e memory_sel_sha256_op_5_, e memory_sel_sha256_op_6_, e memory_sel_sha256_op_7_, e memory_sel_sha256_read, e memory_sel_tag_is_ff, e memory_sel_to_radix_write, e memory_tag_ff_diff_inv, e merkle_check_const_three, e merkle_check_end, e merkle_check_index_is_even, e merkle_check_path_len_min_one_inv, e merkle_check_read_left_node, e merkle_check_read_output_hash, e merkle_check_read_right_node, e merkle_check_sibling, e merkle_check_write_left_node, e merkle_check_write_output_hash, e merkle_check_write_right_node, e note_hash_tree_check_address, e note_hash_tree_check_const_three, e note_hash_tree_check_discard, e note_hash_tree_check_exists, e note_hash_tree_check_first_nullifier, e note_hash_tree_check_first_nullifier_pi_index, e note_hash_tree_check_leaf_index, e note_hash_tree_check_merkle_hash_separator, e note_hash_tree_check_next_leaf_value, e note_hash_tree_check_next_root, e note_hash_tree_check_nonce, e note_hash_tree_check_nonce_separator, e note_hash_tree_check_note_hash, e note_hash_tree_check_note_hash_index, e note_hash_tree_check_note_hash_tree_height, e note_hash_tree_check_prev_leaf_value, e note_hash_tree_check_prev_leaf_value_unique_note_hash_diff_inv, e note_hash_tree_check_prev_root, e note_hash_tree_check_public_inputs_index, e 
note_hash_tree_check_sel, e note_hash_tree_check_sel_silo, e note_hash_tree_check_sel_unique, e note_hash_tree_check_sel_write_to_public_inputs, e note_hash_tree_check_siloed_note_hash, e note_hash_tree_check_siloing_separator, e note_hash_tree_check_unique_note_hash, e note_hash_tree_check_unique_note_hash_separator, e note_hash_tree_check_write, e poseidon2_hash_b_0, e poseidon2_hash_b_1, e poseidon2_hash_b_2, e poseidon2_hash_b_3, e poseidon2_hash_end, e poseidon2_hash_input_len, e poseidon2_hash_num_perm_rounds_rem_min_one_inv, e poseidon2_hash_padding, e poseidon2_perm_B_10_0, e poseidon2_perm_B_10_1, e poseidon2_perm_B_10_2, e poseidon2_perm_B_10_3, e poseidon2_perm_B_11_0, e poseidon2_perm_B_11_1, e poseidon2_perm_B_11_2, e poseidon2_perm_B_11_3, e poseidon2_perm_B_12_0, e poseidon2_perm_B_12_1, e poseidon2_perm_B_12_2, e poseidon2_perm_B_12_3, e poseidon2_perm_B_13_0, e poseidon2_perm_B_13_1, e poseidon2_perm_B_13_2, e poseidon2_perm_B_13_3, e poseidon2_perm_B_14_0, e poseidon2_perm_B_14_1, e poseidon2_perm_B_14_2, e poseidon2_perm_B_14_3, e poseidon2_perm_B_15_0, e poseidon2_perm_B_15_1, e poseidon2_perm_B_15_2, e poseidon2_perm_B_15_3, e poseidon2_perm_B_16_0, e poseidon2_perm_B_16_1, e poseidon2_perm_B_16_2, e poseidon2_perm_B_16_3, e poseidon2_perm_B_17_0, e poseidon2_perm_B_17_1, e poseidon2_perm_B_17_2, e poseidon2_perm_B_17_3, e poseidon2_perm_B_18_0, e poseidon2_perm_B_18_1, e poseidon2_perm_B_18_2, e poseidon2_perm_B_18_3, e poseidon2_perm_B_19_0, e poseidon2_perm_B_19_1, e poseidon2_perm_B_19_2, e poseidon2_perm_B_19_3, e poseidon2_perm_B_20_0, e poseidon2_perm_B_20_1, e poseidon2_perm_B_20_2, e poseidon2_perm_B_20_3, e poseidon2_perm_B_21_0, e poseidon2_perm_B_21_1, e poseidon2_perm_B_21_2, e poseidon2_perm_B_21_3, e poseidon2_perm_B_22_0, e poseidon2_perm_B_22_1, e poseidon2_perm_B_22_2, e poseidon2_perm_B_22_3, e poseidon2_perm_B_23_0, e poseidon2_perm_B_23_1, e poseidon2_perm_B_23_2, e poseidon2_perm_B_23_3, e poseidon2_perm_B_24_0, e 
poseidon2_perm_B_24_1, e poseidon2_perm_B_24_2, e poseidon2_perm_B_24_3, e poseidon2_perm_B_25_0, e poseidon2_perm_B_25_1, e poseidon2_perm_B_25_2, e poseidon2_perm_B_25_3, e poseidon2_perm_B_26_0, e poseidon2_perm_B_26_1, e poseidon2_perm_B_26_2, e poseidon2_perm_B_26_3, e poseidon2_perm_B_27_0, e poseidon2_perm_B_27_1, e poseidon2_perm_B_27_2, e poseidon2_perm_B_27_3, e poseidon2_perm_B_28_0, e poseidon2_perm_B_28_1, e poseidon2_perm_B_28_2, e poseidon2_perm_B_28_3, e poseidon2_perm_B_29_0, e poseidon2_perm_B_29_1, e poseidon2_perm_B_29_2, e poseidon2_perm_B_29_3, e poseidon2_perm_B_30_0, e poseidon2_perm_B_30_1, e poseidon2_perm_B_30_2, e poseidon2_perm_B_30_3, e poseidon2_perm_B_31_0, e poseidon2_perm_B_31_1, e poseidon2_perm_B_31_2, e poseidon2_perm_B_31_3, e poseidon2_perm_B_32_0, e poseidon2_perm_B_32_1, e poseidon2_perm_B_32_2, e poseidon2_perm_B_32_3, e poseidon2_perm_B_33_0, e poseidon2_perm_B_33_1, e poseidon2_perm_B_33_2, e poseidon2_perm_B_33_3, e poseidon2_perm_B_34_0, e poseidon2_perm_B_34_1, e poseidon2_perm_B_34_2, e poseidon2_perm_B_34_3, e poseidon2_perm_B_35_0, e poseidon2_perm_B_35_1, e poseidon2_perm_B_35_2, e poseidon2_perm_B_35_3, e poseidon2_perm_B_36_0, e poseidon2_perm_B_36_1, e poseidon2_perm_B_36_2, e poseidon2_perm_B_36_3, e poseidon2_perm_B_37_0, e poseidon2_perm_B_37_1, e poseidon2_perm_B_37_2, e poseidon2_perm_B_37_3, e poseidon2_perm_B_38_0, e poseidon2_perm_B_38_1, e poseidon2_perm_B_38_2, e poseidon2_perm_B_38_3, e poseidon2_perm_B_39_0, e poseidon2_perm_B_39_1, e poseidon2_perm_B_39_2, e poseidon2_perm_B_39_3, e poseidon2_perm_B_40_0, e poseidon2_perm_B_40_1, e poseidon2_perm_B_40_2, e poseidon2_perm_B_40_3, e poseidon2_perm_B_41_0, e poseidon2_perm_B_41_1, e poseidon2_perm_B_41_2, e poseidon2_perm_B_41_3, e poseidon2_perm_B_42_0, e poseidon2_perm_B_42_1, e poseidon2_perm_B_42_2, e poseidon2_perm_B_42_3, e poseidon2_perm_B_43_0, e poseidon2_perm_B_43_1, e poseidon2_perm_B_43_2, e poseidon2_perm_B_43_3, e poseidon2_perm_B_44_0, e 
poseidon2_perm_B_44_1, e poseidon2_perm_B_44_2, e poseidon2_perm_B_44_3, e poseidon2_perm_B_45_0, e poseidon2_perm_B_45_1, e poseidon2_perm_B_45_2, e poseidon2_perm_B_45_3, e poseidon2_perm_B_46_0, e poseidon2_perm_B_46_1, e poseidon2_perm_B_46_2, e poseidon2_perm_B_46_3, e poseidon2_perm_B_47_0, e poseidon2_perm_B_47_1, e poseidon2_perm_B_47_2, e poseidon2_perm_B_47_3, e poseidon2_perm_B_48_0, e poseidon2_perm_B_48_1, e poseidon2_perm_B_48_2, e poseidon2_perm_B_48_3, e poseidon2_perm_B_49_0, e poseidon2_perm_B_49_1, e poseidon2_perm_B_49_2, e poseidon2_perm_B_49_3, e poseidon2_perm_B_4_0, e poseidon2_perm_B_4_1, e poseidon2_perm_B_4_2, e poseidon2_perm_B_4_3, e poseidon2_perm_B_50_0, e poseidon2_perm_B_50_1, e poseidon2_perm_B_50_2, e poseidon2_perm_B_50_3, e poseidon2_perm_B_51_0, e poseidon2_perm_B_51_1, e poseidon2_perm_B_51_2, e poseidon2_perm_B_51_3, e poseidon2_perm_B_52_0, e poseidon2_perm_B_52_1, e poseidon2_perm_B_52_2, e poseidon2_perm_B_52_3, e poseidon2_perm_B_53_0, e poseidon2_perm_B_53_1, e poseidon2_perm_B_53_2, e poseidon2_perm_B_53_3, e poseidon2_perm_B_54_0, e poseidon2_perm_B_54_1, e poseidon2_perm_B_54_2, e poseidon2_perm_B_54_3, e poseidon2_perm_B_55_0, e poseidon2_perm_B_55_1, e poseidon2_perm_B_55_2, e poseidon2_perm_B_55_3, e poseidon2_perm_B_56_0, e poseidon2_perm_B_56_1, e poseidon2_perm_B_56_2, e poseidon2_perm_B_56_3, e poseidon2_perm_B_57_0, e poseidon2_perm_B_57_1, e poseidon2_perm_B_57_2, e poseidon2_perm_B_57_3, e poseidon2_perm_B_58_0, e poseidon2_perm_B_58_1, e poseidon2_perm_B_58_2, e poseidon2_perm_B_58_3, e poseidon2_perm_B_59_0, e poseidon2_perm_B_59_1, e poseidon2_perm_B_59_2, e poseidon2_perm_B_59_3, e poseidon2_perm_B_5_0, e poseidon2_perm_B_5_1, e poseidon2_perm_B_5_2, e poseidon2_perm_B_5_3, e poseidon2_perm_B_6_0, e poseidon2_perm_B_6_1, e poseidon2_perm_B_6_2, e poseidon2_perm_B_6_3, e poseidon2_perm_B_7_0, e poseidon2_perm_B_7_1, e poseidon2_perm_B_7_2, e poseidon2_perm_B_7_3, e poseidon2_perm_B_8_0, e 
poseidon2_perm_B_8_1, e poseidon2_perm_B_8_2, e poseidon2_perm_B_8_3, e poseidon2_perm_B_9_0, e poseidon2_perm_B_9_1, e poseidon2_perm_B_9_2, e poseidon2_perm_B_9_3, e poseidon2_perm_EXT_LAYER_4, e poseidon2_perm_EXT_LAYER_5, e poseidon2_perm_EXT_LAYER_6, e poseidon2_perm_EXT_LAYER_7, e poseidon2_perm_T_0_4, e poseidon2_perm_T_0_5, e poseidon2_perm_T_0_6, e poseidon2_perm_T_0_7, e poseidon2_perm_T_1_4, e poseidon2_perm_T_1_5, e poseidon2_perm_T_1_6, e poseidon2_perm_T_1_7, e poseidon2_perm_T_2_4, e poseidon2_perm_T_2_5, e poseidon2_perm_T_2_6, e poseidon2_perm_T_2_7, e poseidon2_perm_T_3_4, e poseidon2_perm_T_3_5, e poseidon2_perm_T_3_6, e poseidon2_perm_T_3_7, e poseidon2_perm_T_60_4, e poseidon2_perm_T_60_5, e poseidon2_perm_T_60_6, e poseidon2_perm_T_60_7, e poseidon2_perm_T_61_4, e poseidon2_perm_T_61_5, e poseidon2_perm_T_61_6, e poseidon2_perm_T_61_7, e poseidon2_perm_T_62_4, e poseidon2_perm_T_62_5, e poseidon2_perm_T_62_6, e poseidon2_perm_T_62_7, e poseidon2_perm_T_63_4, e poseidon2_perm_T_63_5, e poseidon2_perm_T_63_6, e poseidon2_perm_T_63_7, e poseidon2_perm_a_0, e poseidon2_perm_a_1, e poseidon2_perm_a_2, e poseidon2_perm_a_3, e poseidon2_perm_b_0, e poseidon2_perm_b_1, e poseidon2_perm_b_2, e poseidon2_perm_b_3, e poseidon2_perm_mem_batch_tag_inv, e poseidon2_perm_mem_err, e poseidon2_perm_mem_execution_clk, e poseidon2_perm_mem_input_0_, e poseidon2_perm_mem_input_1_, e poseidon2_perm_mem_input_2_, e poseidon2_perm_mem_input_3_, e poseidon2_perm_mem_input_tag_0_, e poseidon2_perm_mem_input_tag_1_, e poseidon2_perm_mem_input_tag_2_, e poseidon2_perm_mem_input_tag_3_, e poseidon2_perm_mem_max_mem_addr, e poseidon2_perm_mem_output_0_, e poseidon2_perm_mem_output_1_, e poseidon2_perm_mem_output_2_, e poseidon2_perm_mem_output_3_, e poseidon2_perm_mem_read_address_0_, e poseidon2_perm_mem_read_address_1_, e poseidon2_perm_mem_read_address_2_, e poseidon2_perm_mem_read_address_3_, e poseidon2_perm_mem_sel, e poseidon2_perm_mem_sel_dst_out_of_range_err, e 
poseidon2_perm_mem_sel_invalid_tag_err, e poseidon2_perm_mem_sel_should_exec, e poseidon2_perm_mem_sel_should_read_mem, e poseidon2_perm_mem_sel_src_out_of_range_err, e poseidon2_perm_mem_space_id, e poseidon2_perm_mem_write_address_0_, e poseidon2_perm_mem_write_address_1_, e poseidon2_perm_mem_write_address_2_, e poseidon2_perm_mem_write_address_3_, e poseidon2_perm_sel, e public_data_check_address, e public_data_check_clk_diff_hi, e public_data_check_clk_diff_lo, e public_data_check_const_four, e public_data_check_const_three, e public_data_check_discard, e public_data_check_end, e public_data_check_final_value, e public_data_check_intermediate_root, e public_data_check_leaf_not_exists, e public_data_check_leaf_slot, e public_data_check_leaf_slot_low_leaf_slot_diff_inv, e public_data_check_length_pi_idx, e public_data_check_low_leaf_hash, e public_data_check_low_leaf_index, e public_data_check_low_leaf_next_index, e public_data_check_low_leaf_next_slot, e public_data_check_low_leaf_slot, e public_data_check_low_leaf_value, e public_data_check_merkle_hash_separator, e public_data_check_new_leaf_hash, e public_data_check_next_slot_inv, e public_data_check_next_slot_is_nonzero, e public_data_check_non_discarded_write, e public_data_check_non_protocol_write, e public_data_check_not_end, e public_data_check_protocol_write, e public_data_check_public_data_writes_length, e public_data_check_root, e public_data_check_sel_write_to_public_inputs, e public_data_check_should_insert, e public_data_check_siloing_separator, e public_data_check_slot, e public_data_check_tree_height, e public_data_check_tree_size_after_write, e public_data_check_tree_size_before_write, e public_data_check_updated_low_leaf_hash, e public_data_check_updated_low_leaf_next_index, e public_data_check_updated_low_leaf_next_slot, e public_data_check_updated_low_leaf_value, e public_data_check_value, e public_data_check_write, e public_data_check_write_root, e public_data_squash_check_clock, e 
public_data_squash_clk_diff_hi, e public_data_squash_clk_diff_lo, e public_data_squash_leaf_slot_increase, e public_data_squash_value, e range_check_dyn_diff, e range_check_dyn_rng_chk_bits, e range_check_dyn_rng_chk_pow_2, e range_check_is_lte_u112, e range_check_is_lte_u128, e range_check_is_lte_u16, e range_check_is_lte_u32, e range_check_is_lte_u48, e range_check_is_lte_u64, e range_check_is_lte_u80, e range_check_is_lte_u96, e range_check_rng_chk_bits, e range_check_sel, e range_check_sel_alu, e range_check_sel_gt, e range_check_sel_keccak, e range_check_sel_memory, e range_check_sel_r0_16_bit_rng_lookup, e range_check_sel_r1_16_bit_rng_lookup, e range_check_sel_r2_16_bit_rng_lookup, e range_check_sel_r3_16_bit_rng_lookup, e range_check_sel_r4_16_bit_rng_lookup, e range_check_sel_r5_16_bit_rng_lookup, e range_check_sel_r6_16_bit_rng_lookup, e range_check_u16_r0, e range_check_u16_r1, e range_check_u16_r2, e range_check_u16_r3, e range_check_u16_r4, e range_check_u16_r5, e range_check_u16_r6, e range_check_u16_r7, e range_check_value, e scalar_mul_bit, e scalar_mul_const_two, e scalar_mul_end, e scalar_mul_sel_not_end, e scalar_mul_should_add, e sha256_a_and_b, e sha256_a_and_b_xor_a_and_c, e sha256_a_and_c, e sha256_a_rotr_13, e sha256_a_rotr_2, e sha256_a_rotr_22, e sha256_a_rotr_2_xor_a_rotr_13, e sha256_and_op_id, e sha256_b_and_c, e sha256_batch_tag_inv, e sha256_ch, e sha256_computed_w_lhs, e sha256_computed_w_rhs, e sha256_e_and_f, e sha256_e_rotr_11, e sha256_e_rotr_25, e sha256_e_rotr_6, e sha256_e_rotr_6_xor_e_rotr_11, e sha256_end, e sha256_err, e sha256_input, e sha256_input_rounds_rem_inv, e sha256_input_tag, e sha256_input_tag_diff_inv, e sha256_last, e sha256_lhs_w_10, e sha256_lhs_w_3, e sha256_maj, e sha256_max_input_addr, e sha256_max_mem_addr, e sha256_max_output_addr, e sha256_max_state_addr, e sha256_mem_out_of_range_err, e sha256_memory_address_0_, e sha256_memory_address_1_, e sha256_memory_address_2_, e sha256_memory_address_3_, e 
sha256_memory_address_4_, e sha256_memory_address_5_, e sha256_memory_address_6_, e sha256_memory_address_7_, e sha256_memory_register_0_, e sha256_memory_register_1_, e sha256_memory_register_2_, e sha256_memory_register_3_, e sha256_memory_register_4_, e sha256_memory_register_5_, e sha256_memory_register_6_, e sha256_memory_register_7_, e sha256_memory_tag_0_, e sha256_memory_tag_1_, e sha256_memory_tag_2_, e sha256_memory_tag_3_, e sha256_memory_tag_4_, e sha256_memory_tag_5_, e sha256_memory_tag_6_, e sha256_memory_tag_7_, e sha256_next_a_lhs, e sha256_next_a_rhs, e sha256_next_e_lhs, e sha256_next_e_rhs, e sha256_not_e, e sha256_not_e_and_g, e sha256_output_a_lhs, e sha256_output_a_rhs, e sha256_output_b_lhs, e sha256_output_b_rhs, e sha256_output_c_lhs, e sha256_output_c_rhs, e sha256_output_d_lhs, e sha256_output_d_rhs, e sha256_output_e_lhs, e sha256_output_e_rhs, e sha256_output_f_lhs, e sha256_output_f_rhs, e sha256_output_g_lhs, e sha256_output_g_rhs, e sha256_output_h_lhs, e sha256_output_h_rhs, e sha256_perform_round, e sha256_rhs_a_13, e sha256_rhs_a_2, e sha256_rhs_a_22, e sha256_rhs_e_11, e sha256_rhs_e_25, e sha256_rhs_e_6, e sha256_rhs_w_10, e sha256_rhs_w_17, e sha256_rhs_w_18, e sha256_rhs_w_19, e sha256_rhs_w_3, e sha256_rhs_w_7, e sha256_round_constant, e sha256_round_count, e sha256_rounds_remaining_inv, e sha256_rw, e sha256_s_0, e sha256_s_1, e sha256_sel_compute_w, e sha256_sel_input_out_of_range_err, e sha256_sel_invalid_input_row_tag_err, e sha256_sel_invalid_state_tag_err, e sha256_sel_is_input_round, e sha256_sel_mem_state_or_output, e sha256_sel_output_out_of_range_err, e sha256_sel_read_input_from_memory, e sha256_sel_state_out_of_range_err, e sha256_state_addr, e sha256_two_pow_10, e sha256_two_pow_11, e sha256_two_pow_13, e sha256_two_pow_17, e sha256_two_pow_18, e sha256_two_pow_19, e sha256_two_pow_2, e sha256_two_pow_22, e sha256_two_pow_25, e sha256_two_pow_3, e sha256_two_pow_32, e sha256_two_pow_6, e sha256_two_pow_7, e 
sha256_u32_tag, e sha256_w, e sha256_w_15_rotr_18, e sha256_w_15_rotr_7, e sha256_w_15_rotr_7_xor_w_15_rotr_18, e sha256_w_2_rotr_17, e sha256_w_2_rotr_17_xor_w_2_rotr_19, e sha256_w_2_rotr_19, e sha256_w_s_0, e sha256_w_s_1, e sha256_xor_op_id, e to_radix_end, e to_radix_found, e to_radix_is_unsafe_limb, e to_radix_limb_p_diff, e to_radix_limb_radix_diff, e to_radix_mem_err, e to_radix_mem_input_validation_error, e to_radix_mem_last, e to_radix_mem_limb_index_to_lookup, e to_radix_mem_limb_value, e to_radix_mem_max_mem_size, e to_radix_mem_num_limbs_inv, e to_radix_mem_num_limbs_minus_one_inv, e to_radix_mem_output_tag, e to_radix_mem_radix_min_two_inv, e to_radix_mem_sel_dst_out_of_range_err, e to_radix_mem_sel_invalid_bitwise_radix, e to_radix_mem_sel_num_limbs_is_zero, e to_radix_mem_sel_radix_eq_2, e to_radix_mem_sel_radix_gt_256_err, e to_radix_mem_sel_radix_lt_2_err, e to_radix_mem_sel_value_is_zero, e to_radix_mem_two, e to_radix_mem_two_five_six, e to_radix_mem_value_found, e to_radix_mem_value_inv, e to_radix_mem_write_addr_upper_bound, e to_radix_p_limb, e to_radix_rem_inverse, e to_radix_safety_diff_inverse, e tx_array_length_l2_to_l1_messages_pi_offset, e tx_array_length_note_hashes_pi_offset, e tx_array_length_nullifiers_pi_offset, e tx_calldata_hash, e tx_calldata_size, e tx_const_three, e tx_contract_addr, e tx_dom_sep_public_storage_map_slot, e tx_effective_fee_per_da_gas, e tx_effective_fee_per_l2_gas, e tx_end_phase, e tx_fee_juice_balance_slot, e tx_fee_juice_balances_slot_constant, e tx_fee_juice_contract_address, e tx_fee_payer, e tx_fee_payer_balance, e tx_fee_payer_new_balance, e tx_fee_payer_pi_offset, e tx_fields_length_public_logs_pi_offset, e tx_gas_limit_pi_offset, e tx_gas_used_pi_offset, e tx_is_cleanup, e tx_is_collect_fee, e tx_is_padded, e tx_is_public_call_request, e tx_is_static, e tx_is_tree_insert_phase, e tx_is_tree_padding, e tx_l1_l2_pi_offset, e tx_l2_l1_msg_content, e tx_l2_l1_msg_contract_address, e 
tx_l2_l1_msg_recipient, e tx_leaf_value, e tx_msg_sender, e tx_next_da_gas_used, e tx_next_da_gas_used_sent_to_enqueued_call, e tx_next_l2_gas_used, e tx_next_l2_gas_used_sent_to_enqueued_call, e tx_next_note_hash_tree_root, e tx_next_note_hash_tree_size, e tx_next_nullifier_tree_root, e tx_next_nullifier_tree_size, e tx_next_num_l2_to_l1_messages, e tx_next_num_note_hashes_emitted, e tx_next_num_nullifiers_emitted, e tx_next_num_public_log_fields, e tx_next_phase_on_revert, e tx_next_public_data_tree_root, e tx_next_public_data_tree_size, e tx_next_retrieved_bytecodes_tree_root, e tx_next_retrieved_bytecodes_tree_size, e tx_next_written_public_data_slots_tree_root, e tx_next_written_public_data_slots_tree_size, e tx_note_hash_pi_offset, e tx_nullifier_limit_error, e tx_nullifier_merkle_separator, e tx_nullifier_pi_offset, e tx_nullifier_tree_height, e tx_prev_da_gas_used_sent_to_enqueued_call, e tx_prev_l2_gas_used_sent_to_enqueued_call, e tx_public_data_pi_offset, e tx_read_pi_length_offset, e tx_read_pi_start_offset, e tx_remaining_phase_inv, e tx_remaining_phase_minus_one_inv, e tx_remaining_side_effects_inv, e tx_reverted_pi_offset, e tx_sel_append_l2_l1_msg, e tx_sel_append_note_hash, e tx_sel_append_nullifier, e tx_sel_l2_l1_msg_append, e tx_sel_note_hash_append, e tx_sel_nullifier_append, e tx_sel_process_call_request, e tx_sel_read_phase_length, e tx_sel_read_trees_and_gas_used, e tx_sel_try_l2_l1_msg_append, e tx_sel_try_note_hash_append, e tx_sel_try_nullifier_append, e tx_setup_phase_value, e tx_should_read_gas_limit, e tx_uint32_max, e tx_write_nullifier_pi_offset, e tx_write_pi_offset, e update_check_address, e update_check_const_three, e update_check_contract_instance_registry_address, e update_check_current_class_id, e update_check_delayed_public_mutable_hash_slot, e update_check_delayed_public_mutable_slot, e update_check_dom_sep_public_storage_map_slot, e update_check_hash_not_zero, e update_check_original_class_id, e 
update_check_public_data_tree_root, e update_check_sel, e update_check_timestamp, e update_check_timestamp_is_lt_timestamp_of_change, e update_check_timestamp_of_change, e update_check_timestamp_of_change_bit_size, e update_check_timestamp_pi_offset, e update_check_update_hash, e update_check_update_hash_inv, e update_check_update_hi_metadata, e update_check_update_hi_metadata_bit_size, e update_check_update_post_class_id_is_zero, e update_check_update_post_class_inv, e update_check_update_pre_class_id_is_zero, e update_check_update_pre_class_inv, e update_check_update_preimage_metadata, e update_check_update_preimage_post_class_id, e update_check_update_preimage_pre_class_id, e update_check_updated_class_ids_slot, e lookup_range_check_dyn_rng_chk_pow_2_counts, e lookup_range_check_dyn_diff_is_u16_counts, e lookup_range_check_r0_is_u16_counts, e lookup_range_check_r1_is_u16_counts, e lookup_range_check_r2_is_u16_counts, e lookup_range_check_r3_is_u16_counts, e lookup_range_check_r4_is_u16_counts, e lookup_range_check_r5_is_u16_counts, e lookup_range_check_r6_is_u16_counts, e lookup_range_check_r7_is_u16_counts, e lookup_ff_gt_a_lo_range_counts, e lookup_ff_gt_a_hi_range_counts, e lookup_gt_gt_range_counts, e lookup_alu_tag_max_bits_value_counts, e lookup_alu_range_check_decomposition_a_lo_counts, e lookup_alu_range_check_decomposition_a_hi_counts, e lookup_alu_range_check_decomposition_b_lo_counts, e lookup_alu_range_check_decomposition_b_hi_counts, e lookup_alu_range_check_mul_c_hi_counts, e lookup_alu_range_check_div_remainder_counts, e lookup_alu_ff_gt_counts, e lookup_alu_int_gt_counts, e lookup_alu_shifts_two_pow_counts, e lookup_alu_large_trunc_canonical_dec_counts, e lookup_alu_range_check_trunc_mid_counts, e lookup_bitwise_integral_tag_length_counts, e lookup_bitwise_byte_operations_counts, e lookup_memory_range_check_limb_0_counts, e lookup_memory_range_check_limb_1_counts, e lookup_memory_range_check_limb_2_counts, e lookup_memory_tag_max_bits_counts, e 
lookup_memory_range_check_write_tagged_value_counts, e lookup_data_copy_offset_plus_size_is_gt_data_size_counts, e lookup_data_copy_check_src_addr_in_range_counts, e lookup_data_copy_check_dst_addr_in_range_counts, e lookup_data_copy_sel_has_reads_counts, e lookup_data_copy_col_read_counts, e lookup_ecc_mem_check_dst_addr_in_range_counts, e lookup_ecc_mem_input_output_ecc_add_counts, e lookup_keccakf1600_theta_xor_01_counts, e lookup_keccakf1600_theta_xor_02_counts, e lookup_keccakf1600_theta_xor_03_counts, e lookup_keccakf1600_theta_xor_row_0_counts, e lookup_keccakf1600_theta_xor_11_counts, e lookup_keccakf1600_theta_xor_12_counts, e lookup_keccakf1600_theta_xor_13_counts, e lookup_keccakf1600_theta_xor_row_1_counts, e lookup_keccakf1600_theta_xor_21_counts, e lookup_keccakf1600_theta_xor_22_counts, e lookup_keccakf1600_theta_xor_23_counts, e lookup_keccakf1600_theta_xor_row_2_counts, e lookup_keccakf1600_theta_xor_31_counts, e lookup_keccakf1600_theta_xor_32_counts, e lookup_keccakf1600_theta_xor_33_counts, e lookup_keccakf1600_theta_xor_row_3_counts, e lookup_keccakf1600_theta_xor_41_counts, e lookup_keccakf1600_theta_xor_42_counts, e lookup_keccakf1600_theta_xor_43_counts, e lookup_keccakf1600_theta_xor_row_4_counts, e lookup_keccakf1600_theta_combined_xor_0_counts, e lookup_keccakf1600_theta_combined_xor_1_counts, e lookup_keccakf1600_theta_combined_xor_2_counts, e lookup_keccakf1600_theta_combined_xor_3_counts, e lookup_keccakf1600_theta_combined_xor_4_counts, e lookup_keccakf1600_state_theta_00_counts, e lookup_keccakf1600_state_theta_01_counts, e lookup_keccakf1600_state_theta_02_counts, e lookup_keccakf1600_state_theta_03_counts, e lookup_keccakf1600_state_theta_04_counts, e lookup_keccakf1600_state_theta_10_counts, e lookup_keccakf1600_state_theta_11_counts, e lookup_keccakf1600_state_theta_12_counts, e lookup_keccakf1600_state_theta_13_counts, e lookup_keccakf1600_state_theta_14_counts, e lookup_keccakf1600_state_theta_20_counts, e 
lookup_keccakf1600_state_theta_21_counts, e lookup_keccakf1600_state_theta_22_counts, e lookup_keccakf1600_state_theta_23_counts, e lookup_keccakf1600_state_theta_24_counts, e lookup_keccakf1600_state_theta_30_counts, e lookup_keccakf1600_state_theta_31_counts, e lookup_keccakf1600_state_theta_32_counts, e lookup_keccakf1600_state_theta_33_counts, e lookup_keccakf1600_state_theta_34_counts, e lookup_keccakf1600_state_theta_40_counts, e lookup_keccakf1600_state_theta_41_counts, e lookup_keccakf1600_state_theta_42_counts, e lookup_keccakf1600_state_theta_43_counts, e lookup_keccakf1600_state_theta_44_counts, e lookup_keccakf1600_theta_limb_02_range_counts, e lookup_keccakf1600_theta_limb_04_range_counts, e lookup_keccakf1600_theta_limb_10_range_counts, e lookup_keccakf1600_theta_limb_12_range_counts, e lookup_keccakf1600_theta_limb_14_range_counts, e lookup_keccakf1600_theta_limb_21_range_counts, e lookup_keccakf1600_theta_limb_23_range_counts, e lookup_keccakf1600_theta_limb_30_range_counts, e lookup_keccakf1600_theta_limb_32_range_counts, e lookup_keccakf1600_theta_limb_33_range_counts, e lookup_keccakf1600_theta_limb_40_range_counts, e lookup_keccakf1600_theta_limb_41_range_counts, e lookup_keccakf1600_theta_limb_43_range_counts, e lookup_keccakf1600_theta_limb_44_range_counts, e lookup_keccakf1600_theta_limb_01_range_counts, e lookup_keccakf1600_theta_limb_03_range_counts, e lookup_keccakf1600_theta_limb_11_range_counts, e lookup_keccakf1600_theta_limb_13_range_counts, e lookup_keccakf1600_theta_limb_20_range_counts, e lookup_keccakf1600_theta_limb_22_range_counts, e lookup_keccakf1600_theta_limb_24_range_counts, e lookup_keccakf1600_theta_limb_31_range_counts, e lookup_keccakf1600_theta_limb_34_range_counts, e lookup_keccakf1600_theta_limb_42_range_counts, e lookup_keccakf1600_state_pi_and_00_counts, e lookup_keccakf1600_state_pi_and_01_counts, e lookup_keccakf1600_state_pi_and_02_counts, e lookup_keccakf1600_state_pi_and_03_counts, e 
lookup_keccakf1600_state_pi_and_04_counts, e lookup_keccakf1600_state_pi_and_10_counts, e lookup_keccakf1600_state_pi_and_11_counts, e lookup_keccakf1600_state_pi_and_12_counts, e lookup_keccakf1600_state_pi_and_13_counts, e lookup_keccakf1600_state_pi_and_14_counts, e lookup_keccakf1600_state_pi_and_20_counts, e lookup_keccakf1600_state_pi_and_21_counts, e lookup_keccakf1600_state_pi_and_22_counts, e lookup_keccakf1600_state_pi_and_23_counts, e lookup_keccakf1600_state_pi_and_24_counts, e lookup_keccakf1600_state_pi_and_30_counts, e lookup_keccakf1600_state_pi_and_31_counts, e lookup_keccakf1600_state_pi_and_32_counts, e lookup_keccakf1600_state_pi_and_33_counts, e lookup_keccakf1600_state_pi_and_34_counts, e lookup_keccakf1600_state_pi_and_40_counts, e lookup_keccakf1600_state_pi_and_41_counts, e lookup_keccakf1600_state_pi_and_42_counts, e lookup_keccakf1600_state_pi_and_43_counts, e lookup_keccakf1600_state_pi_and_44_counts, e lookup_keccakf1600_state_chi_00_counts, e lookup_keccakf1600_state_chi_01_counts, e lookup_keccakf1600_state_chi_02_counts, e lookup_keccakf1600_state_chi_03_counts, e lookup_keccakf1600_state_chi_04_counts, e lookup_keccakf1600_state_chi_10_counts, e lookup_keccakf1600_state_chi_11_counts, e lookup_keccakf1600_state_chi_12_counts, e lookup_keccakf1600_state_chi_13_counts, e lookup_keccakf1600_state_chi_14_counts, e lookup_keccakf1600_state_chi_20_counts, e lookup_keccakf1600_state_chi_21_counts, e lookup_keccakf1600_state_chi_22_counts, e lookup_keccakf1600_state_chi_23_counts, e lookup_keccakf1600_state_chi_24_counts, e lookup_keccakf1600_state_chi_30_counts, e lookup_keccakf1600_state_chi_31_counts, e lookup_keccakf1600_state_chi_32_counts, e lookup_keccakf1600_state_chi_33_counts, e lookup_keccakf1600_state_chi_34_counts, e lookup_keccakf1600_state_chi_40_counts, e lookup_keccakf1600_state_chi_41_counts, e lookup_keccakf1600_state_chi_42_counts, e lookup_keccakf1600_state_chi_43_counts, e lookup_keccakf1600_state_chi_44_counts, e 
lookup_keccakf1600_round_cst_counts, e lookup_keccakf1600_state_iota_00_counts, e lookup_keccakf1600_src_out_of_range_toggle_counts, e lookup_keccakf1600_dst_out_of_range_toggle_counts, e lookup_poseidon2_mem_check_src_addr_in_range_counts, e lookup_poseidon2_mem_check_dst_addr_in_range_counts, e lookup_poseidon2_mem_input_output_poseidon2_perm_counts, e lookup_to_radix_limb_range_counts, e lookup_to_radix_limb_less_than_radix_range_counts, e lookup_to_radix_fetch_safe_limbs_counts, e lookup_to_radix_fetch_p_limb_counts, e lookup_to_radix_limb_p_diff_range_counts, e lookup_scalar_mul_to_radix_counts, e lookup_scalar_mul_double_counts, e lookup_scalar_mul_add_counts, e lookup_sha256_range_comp_w_lhs_counts, e lookup_sha256_range_comp_w_rhs_counts, e lookup_sha256_range_rhs_w_7_counts, e lookup_sha256_range_rhs_w_18_counts, e lookup_sha256_range_rhs_w_3_counts, e lookup_sha256_w_s_0_xor_0_counts, e lookup_sha256_w_s_0_xor_1_counts, e lookup_sha256_range_rhs_w_17_counts, e lookup_sha256_range_rhs_w_19_counts, e lookup_sha256_range_rhs_w_10_counts, e lookup_sha256_w_s_1_xor_0_counts, e lookup_sha256_w_s_1_xor_1_counts, e lookup_sha256_range_rhs_e_6_counts, e lookup_sha256_range_rhs_e_11_counts, e lookup_sha256_range_rhs_e_25_counts, e lookup_sha256_s_1_xor_0_counts, e lookup_sha256_s_1_xor_1_counts, e lookup_sha256_ch_and_0_counts, e lookup_sha256_ch_and_1_counts, e lookup_sha256_ch_xor_counts, e lookup_sha256_round_constant_counts, e lookup_sha256_range_rhs_a_2_counts, e lookup_sha256_range_rhs_a_13_counts, e lookup_sha256_range_rhs_a_22_counts, e lookup_sha256_s_0_xor_0_counts, e lookup_sha256_s_0_xor_1_counts, e lookup_sha256_maj_and_0_counts, e lookup_sha256_maj_and_1_counts, e lookup_sha256_maj_and_2_counts, e lookup_sha256_maj_xor_0_counts, e lookup_sha256_maj_xor_1_counts, e lookup_sha256_range_comp_next_a_lhs_counts, e lookup_sha256_range_comp_next_a_rhs_counts, e lookup_sha256_range_comp_next_e_lhs_counts, e lookup_sha256_range_comp_next_e_rhs_counts, e 
lookup_sha256_range_comp_a_rhs_counts, e lookup_sha256_range_comp_b_rhs_counts, e lookup_sha256_range_comp_c_rhs_counts, e lookup_sha256_range_comp_d_rhs_counts, e lookup_sha256_range_comp_e_rhs_counts, e lookup_sha256_range_comp_f_rhs_counts, e lookup_sha256_range_comp_g_rhs_counts, e lookup_sha256_range_comp_h_rhs_counts, e lookup_sha256_mem_check_state_addr_in_range_counts, e lookup_sha256_mem_check_input_addr_in_range_counts, e lookup_sha256_mem_check_output_addr_in_range_counts, e lookup_to_radix_mem_check_dst_addr_in_range_counts, e lookup_to_radix_mem_check_radix_lt_2_counts, e lookup_to_radix_mem_check_radix_gt_256_counts, e lookup_to_radix_mem_input_output_to_radix_counts, e lookup_poseidon2_hash_poseidon2_perm_counts, e lookup_address_derivation_salted_initialization_hash_poseidon2_0_counts, e lookup_address_derivation_salted_initialization_hash_poseidon2_1_counts, e lookup_address_derivation_partial_address_poseidon2_counts, e lookup_address_derivation_public_keys_hash_poseidon2_0_counts, e lookup_address_derivation_public_keys_hash_poseidon2_1_counts, e lookup_address_derivation_public_keys_hash_poseidon2_2_counts, e lookup_address_derivation_public_keys_hash_poseidon2_3_counts, e lookup_address_derivation_public_keys_hash_poseidon2_4_counts, e lookup_address_derivation_preaddress_poseidon2_counts, e lookup_address_derivation_preaddress_scalar_mul_counts, e lookup_address_derivation_address_ecadd_counts, e lookup_bc_decomposition_bytes_are_bytes_counts, e lookup_bc_hashing_poseidon2_hash_counts, e lookup_merkle_check_merkle_poseidon2_read_counts, e lookup_merkle_check_merkle_poseidon2_write_counts, e lookup_indexed_tree_check_silo_poseidon2_counts, e lookup_indexed_tree_check_low_leaf_value_validation_counts, e lookup_indexed_tree_check_low_leaf_next_value_validation_counts, e lookup_indexed_tree_check_low_leaf_poseidon2_counts, e lookup_indexed_tree_check_updated_low_leaf_poseidon2_counts, e lookup_indexed_tree_check_low_leaf_merkle_check_counts, e 
lookup_indexed_tree_check_new_leaf_poseidon2_counts, e lookup_indexed_tree_check_new_leaf_merkle_check_counts, e lookup_indexed_tree_check_write_value_to_public_inputs_counts, e lookup_public_data_squash_leaf_slot_increase_ff_gt_counts, e lookup_public_data_squash_clk_diff_range_lo_counts, e lookup_public_data_squash_clk_diff_range_hi_counts, e lookup_public_data_check_clk_diff_range_lo_counts, e lookup_public_data_check_clk_diff_range_hi_counts, e lookup_public_data_check_silo_poseidon2_counts, e lookup_public_data_check_low_leaf_slot_validation_counts, e lookup_public_data_check_low_leaf_next_slot_validation_counts, e lookup_public_data_check_low_leaf_poseidon2_0_counts, e lookup_public_data_check_low_leaf_poseidon2_1_counts, e lookup_public_data_check_updated_low_leaf_poseidon2_0_counts, e lookup_public_data_check_updated_low_leaf_poseidon2_1_counts, e lookup_public_data_check_low_leaf_merkle_check_counts, e lookup_public_data_check_new_leaf_poseidon2_0_counts, e lookup_public_data_check_new_leaf_poseidon2_1_counts, e lookup_public_data_check_new_leaf_merkle_check_counts, e lookup_public_data_check_write_public_data_to_public_inputs_counts, e lookup_public_data_check_write_writes_length_to_public_inputs_counts, e lookup_update_check_timestamp_from_public_inputs_counts, e lookup_update_check_delayed_public_mutable_slot_poseidon2_counts, e lookup_update_check_update_hash_public_data_read_counts, e lookup_update_check_update_hash_poseidon2_counts, e lookup_update_check_update_hi_metadata_range_counts, e lookup_update_check_update_lo_metadata_range_counts, e lookup_update_check_timestamp_is_lt_timestamp_of_change_counts, e lookup_contract_instance_retrieval_check_protocol_address_range_counts, e lookup_contract_instance_retrieval_read_derived_address_from_public_inputs_counts, e lookup_contract_instance_retrieval_deployment_nullifier_read_counts, e lookup_contract_instance_retrieval_address_derivation_counts, e lookup_contract_instance_retrieval_update_check_counts, 
e lookup_class_id_derivation_class_id_poseidon2_0_counts, e lookup_class_id_derivation_class_id_poseidon2_1_counts, e lookup_bc_retrieval_contract_instance_retrieval_counts, e lookup_bc_retrieval_class_id_derivation_counts, e lookup_bc_retrieval_is_new_class_check_counts, e lookup_bc_retrieval_retrieved_bytecodes_insertion_counts, e lookup_instr_fetching_pc_abs_diff_positive_counts, e lookup_instr_fetching_instr_abs_diff_positive_counts, e lookup_instr_fetching_tag_value_validation_counts, e lookup_instr_fetching_bytecode_size_from_bc_dec_counts, e lookup_instr_fetching_bytes_from_bc_dec_counts, e lookup_instr_fetching_wire_instruction_info_counts, e lookup_emit_public_log_check_memory_out_of_bounds_counts, e lookup_emit_public_log_check_log_fields_count_counts, e lookup_emit_public_log_write_data_to_public_inputs_counts, e lookup_get_contract_instance_precomputed_info_counts, e lookup_get_contract_instance_contract_instance_retrieval_counts, e lookup_l1_to_l2_message_tree_check_merkle_check_counts, e lookup_internal_call_unwind_call_stack_counts, e lookup_context_ctx_stack_rollback_counts, e lookup_context_ctx_stack_return_counts, e lookup_addressing_relative_overflow_result_0_counts, e lookup_addressing_relative_overflow_result_1_counts, e lookup_addressing_relative_overflow_result_2_counts, e lookup_addressing_relative_overflow_result_3_counts, e lookup_addressing_relative_overflow_result_4_counts, e lookup_addressing_relative_overflow_result_5_counts, e lookup_addressing_relative_overflow_result_6_counts, e lookup_gas_addressing_gas_read_counts, e lookup_gas_is_out_of_gas_l2_counts, e lookup_gas_is_out_of_gas_da_counts, e lookup_note_hash_tree_check_silo_poseidon2_counts, e lookup_note_hash_tree_check_read_first_nullifier_counts, e lookup_note_hash_tree_check_nonce_computation_poseidon2_counts, e lookup_note_hash_tree_check_unique_note_hash_poseidon2_counts, e lookup_note_hash_tree_check_merkle_check_counts, e 
lookup_note_hash_tree_check_write_note_hash_to_public_inputs_counts, e lookup_emit_notehash_notehash_tree_write_counts, e lookup_emit_nullifier_write_nullifier_counts, e lookup_external_call_is_l2_gas_left_gt_allocated_counts, e lookup_external_call_is_da_gas_left_gt_allocated_counts, e lookup_get_env_var_precomputed_info_counts, e lookup_get_env_var_read_from_public_inputs_col0_counts, e lookup_get_env_var_read_from_public_inputs_col1_counts, e lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_counts, e lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_counts, e lookup_notehash_exists_note_hash_leaf_index_in_range_counts, e lookup_notehash_exists_note_hash_read_counts, e lookup_nullifier_exists_nullifier_exists_check_counts, e lookup_send_l2_to_l1_msg_recipient_check_counts, e lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_counts, e lookup_sload_storage_read_counts, e lookup_sstore_record_written_storage_slot_counts, e lookup_execution_bytecode_retrieval_result_counts, e lookup_execution_instruction_fetching_result_counts, e lookup_execution_instruction_fetching_body_counts, e lookup_execution_exec_spec_read_counts, e lookup_execution_dyn_l2_factor_bitwise_counts, e lookup_execution_check_radix_gt_256_counts, e lookup_execution_get_p_limbs_counts, e lookup_execution_get_max_limbs_counts, e lookup_execution_check_written_storage_slot_counts, e lookup_execution_dispatch_to_alu_counts, e lookup_execution_dispatch_to_bitwise_counts, e lookup_execution_dispatch_to_cast_counts, e lookup_execution_dispatch_to_set_counts, e lookup_calldata_hashing_get_calldata_field_0_counts, e lookup_calldata_hashing_get_calldata_field_1_counts, e lookup_calldata_hashing_get_calldata_field_2_counts, e lookup_calldata_hashing_poseidon2_hash_counts, e lookup_tx_context_public_inputs_note_hash_tree_counts, e lookup_tx_context_public_inputs_nullifier_tree_counts, e lookup_tx_context_public_inputs_public_data_tree_counts, e lookup_tx_context_public_inputs_l1_l2_tree_counts, e 
lookup_tx_context_public_inputs_gas_used_counts, e lookup_tx_context_public_inputs_read_gas_limit_counts, e lookup_tx_context_public_inputs_read_reverted_counts, e lookup_tx_context_restore_state_on_revert_counts, e lookup_tx_context_public_inputs_write_note_hash_count_counts, e lookup_tx_context_public_inputs_write_nullifier_count_counts, e lookup_tx_context_public_inputs_write_l2_to_l1_message_count_counts, e lookup_tx_context_public_inputs_write_public_log_count_counts, e lookup_tx_read_phase_spec_counts, e lookup_tx_read_phase_length_counts, e lookup_tx_read_public_call_request_phase_counts, e lookup_tx_read_tree_insert_value_counts, e lookup_tx_note_hash_append_counts, e lookup_tx_nullifier_append_counts, e lookup_tx_read_l2_l1_msg_counts, e lookup_tx_write_l2_l1_msg_counts, e lookup_tx_read_effective_fee_public_inputs_counts, e lookup_tx_read_fee_payer_public_inputs_counts, e lookup_tx_balance_slot_poseidon2_counts, e lookup_tx_balance_read_counts, e lookup_tx_balance_validation_counts, e lookup_tx_write_fee_public_inputs_counts, e bc_decomposition_bytes, e bc_decomposition_bytes_pc_plus_1, e bc_decomposition_bytes_pc_plus_10, e bc_decomposition_bytes_pc_plus_11, e bc_decomposition_bytes_pc_plus_12, e bc_decomposition_bytes_pc_plus_13, e bc_decomposition_bytes_pc_plus_14, e bc_decomposition_bytes_pc_plus_15, e bc_decomposition_bytes_pc_plus_16, e bc_decomposition_bytes_pc_plus_17, e bc_decomposition_bytes_pc_plus_18, e bc_decomposition_bytes_pc_plus_19, e bc_decomposition_bytes_pc_plus_2, e bc_decomposition_bytes_pc_plus_20, e bc_decomposition_bytes_pc_plus_21, e bc_decomposition_bytes_pc_plus_22, e bc_decomposition_bytes_pc_plus_23, e bc_decomposition_bytes_pc_plus_24, e bc_decomposition_bytes_pc_plus_25, e bc_decomposition_bytes_pc_plus_26, e bc_decomposition_bytes_pc_plus_27, e bc_decomposition_bytes_pc_plus_28, e bc_decomposition_bytes_pc_plus_29, e bc_decomposition_bytes_pc_plus_3, e bc_decomposition_bytes_pc_plus_30, e bc_decomposition_bytes_pc_plus_31, 
e bc_decomposition_bytes_pc_plus_32, e bc_decomposition_bytes_pc_plus_33, e bc_decomposition_bytes_pc_plus_34, e bc_decomposition_bytes_pc_plus_35, e bc_decomposition_bytes_pc_plus_4, e bc_decomposition_bytes_pc_plus_5, e bc_decomposition_bytes_pc_plus_6, e bc_decomposition_bytes_pc_plus_7, e bc_decomposition_bytes_pc_plus_8, e bc_decomposition_bytes_pc_plus_9, e bc_decomposition_bytes_remaining, e bc_decomposition_id, e bc_decomposition_next_packed_pc, e bc_decomposition_pc, e bc_decomposition_sel, e bc_decomposition_sel_windows_gt_remaining, e bc_decomposition_start, e bc_hashing_bytecode_id, e bc_hashing_padding, e bc_hashing_pc_index_1, e bc_hashing_rounds_rem, e bc_hashing_sel, e bc_hashing_sel_not_start, e bc_hashing_start, e bitwise_acc_ia, e bitwise_acc_ib, e bitwise_acc_ic, e bitwise_ctr, e bitwise_op_id, e bitwise_sel, e bitwise_start, e calldata_context_id, e calldata_hashing_calldata_size, e calldata_hashing_context_id, e calldata_hashing_index_0_, e calldata_hashing_output_hash, e calldata_hashing_rounds_rem, e calldata_hashing_sel, e calldata_hashing_start, e calldata_index, e calldata_sel, e data_copy_clk, e data_copy_copy_size, e data_copy_dst_addr, e data_copy_dst_context_id, e data_copy_padding, e data_copy_read_addr, e data_copy_reads_left, e data_copy_sel, e data_copy_sel_cd_copy, e data_copy_src_context_id, e data_copy_start, e emit_public_log_contract_address, e emit_public_log_correct_tag, e emit_public_log_error_out_of_bounds, e emit_public_log_error_tag_mismatch, e emit_public_log_execution_clk, e emit_public_log_is_write_contract_address, e emit_public_log_is_write_memory_value, e emit_public_log_log_address, e emit_public_log_public_inputs_index, e emit_public_log_remaining_rows, e emit_public_log_seen_wrong_tag, e emit_public_log_sel, e emit_public_log_sel_write_to_public_inputs, e emit_public_log_space_id, e emit_public_log_start, e execution_bytecode_id, e execution_clk, e execution_context_id, e execution_contract_address, e 
execution_da_gas_limit, e execution_discard, e execution_dying_context_id, e execution_enqueued_call_start, e execution_internal_call_id, e execution_internal_call_return_id, e execution_is_static, e execution_l1_l2_tree_root, e execution_l2_gas_limit, e execution_last_child_id, e execution_last_child_returndata_addr, e execution_last_child_returndata_size, e execution_last_child_success, e execution_msg_sender, e execution_next_context_id, e execution_next_internal_call_id, e execution_parent_calldata_addr, e execution_parent_calldata_size, e execution_parent_da_gas_limit, e execution_parent_da_gas_used, e execution_parent_id, e execution_parent_l2_gas_limit, e execution_parent_l2_gas_used, e execution_pc, e execution_prev_da_gas_used, e execution_prev_l2_gas_used, e execution_prev_note_hash_tree_root, e execution_prev_note_hash_tree_size, e execution_prev_nullifier_tree_root, e execution_prev_nullifier_tree_size, e execution_prev_num_l2_to_l1_messages, e execution_prev_num_note_hashes_emitted, e execution_prev_num_nullifiers_emitted, e execution_prev_num_public_log_fields, e execution_prev_public_data_tree_root, e execution_prev_public_data_tree_size, e execution_prev_retrieved_bytecodes_tree_root, e execution_prev_retrieved_bytecodes_tree_size, e execution_prev_written_public_data_slots_tree_root, e execution_prev_written_public_data_slots_tree_size, e execution_sel, e execution_sel_first_row_in_context, e execution_transaction_fee, e ff_gt_a_hi, e ff_gt_a_lo, e ff_gt_b_hi, e ff_gt_b_lo, e ff_gt_cmp_rng_ctr, e ff_gt_p_sub_a_hi, e ff_gt_p_sub_a_lo, e ff_gt_p_sub_b_hi, e ff_gt_p_sub_b_lo, e ff_gt_sel, e ff_gt_sel_dec, e ff_gt_sel_gt, e keccak_memory_addr, e keccak_memory_clk, e keccak_memory_ctr, e keccak_memory_rw, e keccak_memory_sel, e keccak_memory_space_id, e keccak_memory_start_read, e keccak_memory_start_write, e keccak_memory_tag_error, e keccak_memory_val_0_, e keccak_memory_val_10_, e keccak_memory_val_11_, e keccak_memory_val_12_, e 
keccak_memory_val_13_, e keccak_memory_val_14_, e keccak_memory_val_15_, e keccak_memory_val_16_, e keccak_memory_val_17_, e keccak_memory_val_18_, e keccak_memory_val_19_, e keccak_memory_val_1_, e keccak_memory_val_20_, e keccak_memory_val_21_, e keccak_memory_val_22_, e keccak_memory_val_23_, e keccak_memory_val_2_, e keccak_memory_val_3_, e keccak_memory_val_4_, e keccak_memory_val_5_, e keccak_memory_val_6_, e keccak_memory_val_7_, e keccak_memory_val_8_, e keccak_memory_val_9_, e keccakf1600_clk, e keccakf1600_dst_addr, e keccakf1600_round, e keccakf1600_sel, e keccakf1600_sel_no_error, e keccakf1600_space_id, e keccakf1600_start, e keccakf1600_state_in_00, e keccakf1600_state_in_01, e keccakf1600_state_in_02, e keccakf1600_state_in_03, e keccakf1600_state_in_04, e keccakf1600_state_in_10, e keccakf1600_state_in_11, e keccakf1600_state_in_12, e keccakf1600_state_in_13, e keccakf1600_state_in_14, e keccakf1600_state_in_20, e keccakf1600_state_in_21, e keccakf1600_state_in_22, e keccakf1600_state_in_23, e keccakf1600_state_in_24, e keccakf1600_state_in_30, e keccakf1600_state_in_31, e keccakf1600_state_in_32, e keccakf1600_state_in_33, e keccakf1600_state_in_34, e keccakf1600_state_in_40, e keccakf1600_state_in_41, e keccakf1600_state_in_42, e keccakf1600_state_in_43, e keccakf1600_state_in_44, e memory_address, e memory_clk, e memory_rw, e memory_sel, e memory_space_id, e memory_tag, e memory_value, e merkle_check_index, e merkle_check_merkle_hash_separator, e merkle_check_path_len, e merkle_check_read_node, e merkle_check_read_root, e merkle_check_sel, e merkle_check_start, e merkle_check_write, e merkle_check_write_node, e merkle_check_write_root, e poseidon2_hash_a_0, e poseidon2_hash_a_1, e poseidon2_hash_a_2, e poseidon2_hash_a_3, e poseidon2_hash_input_0, e poseidon2_hash_input_1, e poseidon2_hash_input_2, e poseidon2_hash_num_perm_rounds_rem, e poseidon2_hash_output, e poseidon2_hash_sel, e poseidon2_hash_start, e public_data_check_clk, e 
public_data_check_sel, e public_data_check_write_idx, e public_data_squash_clk, e public_data_squash_final_value, e public_data_squash_leaf_slot, e public_data_squash_sel, e public_data_squash_write_to_public_inputs, e scalar_mul_bit_idx, e scalar_mul_point_inf, e scalar_mul_point_x, e scalar_mul_point_y, e scalar_mul_res_inf, e scalar_mul_res_x, e scalar_mul_res_y, e scalar_mul_scalar, e scalar_mul_sel, e scalar_mul_start, e scalar_mul_temp_inf, e scalar_mul_temp_x, e scalar_mul_temp_y, e sha256_a, e sha256_b, e sha256_c, e sha256_d, e sha256_e, e sha256_execution_clk, e sha256_f, e sha256_g, e sha256_h, e sha256_helper_w0, e sha256_helper_w1, e sha256_helper_w10, e sha256_helper_w11, e sha256_helper_w12, e sha256_helper_w13, e sha256_helper_w14, e sha256_helper_w15, e sha256_helper_w2, e sha256_helper_w3, e sha256_helper_w4, e sha256_helper_w5, e sha256_helper_w6, e sha256_helper_w7, e sha256_helper_w8, e sha256_helper_w9, e sha256_init_a, e sha256_init_b, e sha256_init_c, e sha256_init_d, e sha256_init_e, e sha256_init_f, e sha256_init_g, e sha256_init_h, e sha256_input_addr, e sha256_input_rounds_rem, e sha256_output_addr, e sha256_rounds_remaining, e sha256_sel, e sha256_sel_invalid_input_tag_err, e sha256_space_id, e sha256_start, e to_radix_acc, e to_radix_acc_under_p, e to_radix_limb, e to_radix_limb_eq_p, e to_radix_limb_index, e to_radix_limb_lt_p, e to_radix_mem_dst_addr, e to_radix_mem_execution_clk, e to_radix_mem_is_output_bits, e to_radix_mem_num_limbs, e to_radix_mem_radix, e to_radix_mem_sel, e to_radix_mem_sel_should_decompose, e to_radix_mem_sel_should_write_mem, e to_radix_mem_space_id, e to_radix_mem_start, e to_radix_mem_value_to_decompose, e to_radix_not_padding_limb, e to_radix_power, e to_radix_radix, e to_radix_safe_limbs, e to_radix_sel, e to_radix_start, e to_radix_value, e tx_da_gas_limit, e tx_discard, e tx_fee, e tx_is_revertible, e tx_is_teardown, e tx_l1_l2_tree_root, e tx_l1_l2_tree_size, e tx_l2_gas_limit, e tx_next_context_id, e 
tx_phase_value, e tx_prev_da_gas_used, e tx_prev_l2_gas_used, e tx_prev_note_hash_tree_root, e tx_prev_note_hash_tree_size, e tx_prev_nullifier_tree_root, e tx_prev_nullifier_tree_size, e tx_prev_num_l2_to_l1_messages, e tx_prev_num_note_hashes_emitted, e tx_prev_num_nullifiers_emitted, e tx_prev_num_public_log_fields, e tx_prev_public_data_tree_root, e tx_prev_public_data_tree_size, e tx_prev_retrieved_bytecodes_tree_root, e tx_prev_retrieved_bytecodes_tree_size, e tx_prev_written_public_data_slots_tree_root, e tx_prev_written_public_data_slots_tree_size, e tx_read_pi_offset, e tx_remaining_phase_counter, e tx_reverted, e tx_sel, e tx_start_phase, e tx_start_tx, e tx_tx_reverted #define AVM2_DERIVED_WITNESS_ENTITIES_E(e) e perm_data_copy_mem_write_inv, e perm_data_copy_mem_read_inv, e perm_ecc_mem_write_mem_0_inv, e perm_ecc_mem_write_mem_1_inv, e perm_ecc_mem_write_mem_2_inv, e perm_keccak_memory_slice_to_mem_inv, e perm_keccakf1600_read_to_slice_inv, e perm_keccakf1600_write_to_slice_inv, e perm_poseidon2_mem_pos_read_mem_0_inv, e perm_poseidon2_mem_pos_read_mem_1_inv, e perm_poseidon2_mem_pos_read_mem_2_inv, e perm_poseidon2_mem_pos_read_mem_3_inv, e perm_poseidon2_mem_pos_write_mem_0_inv, e perm_poseidon2_mem_pos_write_mem_1_inv, e perm_poseidon2_mem_pos_write_mem_2_inv, e perm_poseidon2_mem_pos_write_mem_3_inv, e perm_sha256_mem_mem_op_0_inv, e perm_sha256_mem_mem_op_1_inv, e perm_sha256_mem_mem_op_2_inv, e perm_sha256_mem_mem_op_3_inv, e perm_sha256_mem_mem_op_4_inv, e perm_sha256_mem_mem_op_5_inv, e perm_sha256_mem_mem_op_6_inv, e perm_sha256_mem_mem_op_7_inv, e perm_sha256_mem_mem_input_read_inv, e perm_to_radix_mem_write_mem_inv, e perm_bc_hashing_bytecode_length_bytes_inv, e perm_bc_hashing_get_packed_field_0_inv, e perm_bc_hashing_get_packed_field_1_inv, e perm_bc_hashing_get_packed_field_2_inv, e perm_public_data_check_squashing_inv, e perm_emit_public_log_read_mem_inv, e perm_get_contract_instance_mem_write_contract_instance_exists_inv, e 
perm_get_contract_instance_mem_write_contract_instance_member_inv, e perm_internal_call_push_call_stack_inv, e perm_context_ctx_stack_call_inv, e perm_addressing_base_address_from_memory_inv, e perm_addressing_indirect_from_memory_0_inv, e perm_addressing_indirect_from_memory_1_inv, e perm_addressing_indirect_from_memory_2_inv, e perm_addressing_indirect_from_memory_3_inv, e perm_addressing_indirect_from_memory_4_inv, e perm_addressing_indirect_from_memory_5_inv, e perm_addressing_indirect_from_memory_6_inv, e perm_registers_mem_op_0_inv, e perm_registers_mem_op_1_inv, e perm_registers_mem_op_2_inv, e perm_registers_mem_op_3_inv, e perm_registers_mem_op_4_inv, e perm_registers_mem_op_5_inv, e perm_sstore_storage_write_inv, e perm_execution_dispatch_to_cd_copy_inv, e perm_execution_dispatch_to_rd_copy_inv, e perm_execution_dispatch_to_get_contract_instance_inv, e perm_execution_dispatch_to_emit_public_log_inv, e perm_execution_dispatch_to_poseidon2_perm_inv, e perm_execution_dispatch_to_sha256_compression_inv, e perm_execution_dispatch_to_keccakf1600_inv, e perm_execution_dispatch_to_ecc_add_inv, e perm_execution_dispatch_to_to_radix_inv, e perm_calldata_hashing_check_final_size_inv, e perm_tx_read_calldata_hash_inv, e perm_tx_dispatch_exec_start_inv, e perm_tx_dispatch_exec_end_inv, e perm_tx_balance_update_inv, e lookup_range_check_dyn_rng_chk_pow_2_inv, e lookup_range_check_dyn_diff_is_u16_inv, e lookup_range_check_r0_is_u16_inv, e lookup_range_check_r1_is_u16_inv, e lookup_range_check_r2_is_u16_inv, e lookup_range_check_r3_is_u16_inv, e lookup_range_check_r4_is_u16_inv, e lookup_range_check_r5_is_u16_inv, e lookup_range_check_r6_is_u16_inv, e lookup_range_check_r7_is_u16_inv, e lookup_ff_gt_a_lo_range_inv, e lookup_ff_gt_a_hi_range_inv, e lookup_gt_gt_range_inv, e lookup_alu_tag_max_bits_value_inv, e lookup_alu_range_check_decomposition_a_lo_inv, e lookup_alu_range_check_decomposition_a_hi_inv, e lookup_alu_range_check_decomposition_b_lo_inv, e 
lookup_alu_range_check_decomposition_b_hi_inv, e lookup_alu_range_check_mul_c_hi_inv, e lookup_alu_range_check_div_remainder_inv, e lookup_alu_ff_gt_inv, e lookup_alu_int_gt_inv, e lookup_alu_shifts_two_pow_inv, e lookup_alu_large_trunc_canonical_dec_inv, e lookup_alu_range_check_trunc_mid_inv, e lookup_bitwise_integral_tag_length_inv, e lookup_bitwise_byte_operations_inv, e lookup_memory_range_check_limb_0_inv, e lookup_memory_range_check_limb_1_inv, e lookup_memory_range_check_limb_2_inv, e lookup_memory_tag_max_bits_inv, e lookup_memory_range_check_write_tagged_value_inv, e lookup_data_copy_offset_plus_size_is_gt_data_size_inv, e lookup_data_copy_check_src_addr_in_range_inv, e lookup_data_copy_check_dst_addr_in_range_inv, e lookup_data_copy_sel_has_reads_inv, e lookup_data_copy_col_read_inv, e lookup_ecc_mem_check_dst_addr_in_range_inv, e lookup_ecc_mem_input_output_ecc_add_inv, e lookup_keccakf1600_theta_xor_01_inv, e lookup_keccakf1600_theta_xor_02_inv, e lookup_keccakf1600_theta_xor_03_inv, e lookup_keccakf1600_theta_xor_row_0_inv, e lookup_keccakf1600_theta_xor_11_inv, e lookup_keccakf1600_theta_xor_12_inv, e lookup_keccakf1600_theta_xor_13_inv, e lookup_keccakf1600_theta_xor_row_1_inv, e lookup_keccakf1600_theta_xor_21_inv, e lookup_keccakf1600_theta_xor_22_inv, e lookup_keccakf1600_theta_xor_23_inv, e lookup_keccakf1600_theta_xor_row_2_inv, e lookup_keccakf1600_theta_xor_31_inv, e lookup_keccakf1600_theta_xor_32_inv, e lookup_keccakf1600_theta_xor_33_inv, e lookup_keccakf1600_theta_xor_row_3_inv, e lookup_keccakf1600_theta_xor_41_inv, e lookup_keccakf1600_theta_xor_42_inv, e lookup_keccakf1600_theta_xor_43_inv, e lookup_keccakf1600_theta_xor_row_4_inv, e lookup_keccakf1600_theta_combined_xor_0_inv, e lookup_keccakf1600_theta_combined_xor_1_inv, e lookup_keccakf1600_theta_combined_xor_2_inv, e lookup_keccakf1600_theta_combined_xor_3_inv, e lookup_keccakf1600_theta_combined_xor_4_inv, e lookup_keccakf1600_state_theta_00_inv, e 
lookup_keccakf1600_state_theta_01_inv, e lookup_keccakf1600_state_theta_02_inv, e lookup_keccakf1600_state_theta_03_inv, e lookup_keccakf1600_state_theta_04_inv, e lookup_keccakf1600_state_theta_10_inv, e lookup_keccakf1600_state_theta_11_inv, e lookup_keccakf1600_state_theta_12_inv, e lookup_keccakf1600_state_theta_13_inv, e lookup_keccakf1600_state_theta_14_inv, e lookup_keccakf1600_state_theta_20_inv, e lookup_keccakf1600_state_theta_21_inv, e lookup_keccakf1600_state_theta_22_inv, e lookup_keccakf1600_state_theta_23_inv, e lookup_keccakf1600_state_theta_24_inv, e lookup_keccakf1600_state_theta_30_inv, e lookup_keccakf1600_state_theta_31_inv, e lookup_keccakf1600_state_theta_32_inv, e lookup_keccakf1600_state_theta_33_inv, e lookup_keccakf1600_state_theta_34_inv, e lookup_keccakf1600_state_theta_40_inv, e lookup_keccakf1600_state_theta_41_inv, e lookup_keccakf1600_state_theta_42_inv, e lookup_keccakf1600_state_theta_43_inv, e lookup_keccakf1600_state_theta_44_inv, e lookup_keccakf1600_theta_limb_02_range_inv, e lookup_keccakf1600_theta_limb_04_range_inv, e lookup_keccakf1600_theta_limb_10_range_inv, e lookup_keccakf1600_theta_limb_12_range_inv, e lookup_keccakf1600_theta_limb_14_range_inv, e lookup_keccakf1600_theta_limb_21_range_inv, e lookup_keccakf1600_theta_limb_23_range_inv, e lookup_keccakf1600_theta_limb_30_range_inv, e lookup_keccakf1600_theta_limb_32_range_inv, e lookup_keccakf1600_theta_limb_33_range_inv, e lookup_keccakf1600_theta_limb_40_range_inv, e lookup_keccakf1600_theta_limb_41_range_inv, e lookup_keccakf1600_theta_limb_43_range_inv, e lookup_keccakf1600_theta_limb_44_range_inv, e lookup_keccakf1600_theta_limb_01_range_inv, e lookup_keccakf1600_theta_limb_03_range_inv, e lookup_keccakf1600_theta_limb_11_range_inv, e lookup_keccakf1600_theta_limb_13_range_inv, e lookup_keccakf1600_theta_limb_20_range_inv, e lookup_keccakf1600_theta_limb_22_range_inv, e lookup_keccakf1600_theta_limb_24_range_inv, e lookup_keccakf1600_theta_limb_31_range_inv, e 
lookup_keccakf1600_theta_limb_34_range_inv, e lookup_keccakf1600_theta_limb_42_range_inv, e lookup_keccakf1600_state_pi_and_00_inv, e lookup_keccakf1600_state_pi_and_01_inv, e lookup_keccakf1600_state_pi_and_02_inv, e lookup_keccakf1600_state_pi_and_03_inv, e lookup_keccakf1600_state_pi_and_04_inv, e lookup_keccakf1600_state_pi_and_10_inv, e lookup_keccakf1600_state_pi_and_11_inv, e lookup_keccakf1600_state_pi_and_12_inv, e lookup_keccakf1600_state_pi_and_13_inv, e lookup_keccakf1600_state_pi_and_14_inv, e lookup_keccakf1600_state_pi_and_20_inv, e lookup_keccakf1600_state_pi_and_21_inv, e lookup_keccakf1600_state_pi_and_22_inv, e lookup_keccakf1600_state_pi_and_23_inv, e lookup_keccakf1600_state_pi_and_24_inv, e lookup_keccakf1600_state_pi_and_30_inv, e lookup_keccakf1600_state_pi_and_31_inv, e lookup_keccakf1600_state_pi_and_32_inv, e lookup_keccakf1600_state_pi_and_33_inv, e lookup_keccakf1600_state_pi_and_34_inv, e lookup_keccakf1600_state_pi_and_40_inv, e lookup_keccakf1600_state_pi_and_41_inv, e lookup_keccakf1600_state_pi_and_42_inv, e lookup_keccakf1600_state_pi_and_43_inv, e lookup_keccakf1600_state_pi_and_44_inv, e lookup_keccakf1600_state_chi_00_inv, e lookup_keccakf1600_state_chi_01_inv, e lookup_keccakf1600_state_chi_02_inv, e lookup_keccakf1600_state_chi_03_inv, e lookup_keccakf1600_state_chi_04_inv, e lookup_keccakf1600_state_chi_10_inv, e lookup_keccakf1600_state_chi_11_inv, e lookup_keccakf1600_state_chi_12_inv, e lookup_keccakf1600_state_chi_13_inv, e lookup_keccakf1600_state_chi_14_inv, e lookup_keccakf1600_state_chi_20_inv, e lookup_keccakf1600_state_chi_21_inv, e lookup_keccakf1600_state_chi_22_inv, e lookup_keccakf1600_state_chi_23_inv, e lookup_keccakf1600_state_chi_24_inv, e lookup_keccakf1600_state_chi_30_inv, e lookup_keccakf1600_state_chi_31_inv, e lookup_keccakf1600_state_chi_32_inv, e lookup_keccakf1600_state_chi_33_inv, e lookup_keccakf1600_state_chi_34_inv, e lookup_keccakf1600_state_chi_40_inv, e lookup_keccakf1600_state_chi_41_inv, e 
lookup_keccakf1600_state_chi_42_inv, e lookup_keccakf1600_state_chi_43_inv, e lookup_keccakf1600_state_chi_44_inv, e lookup_keccakf1600_round_cst_inv, e lookup_keccakf1600_state_iota_00_inv, e lookup_keccakf1600_src_out_of_range_toggle_inv, e lookup_keccakf1600_dst_out_of_range_toggle_inv, e lookup_poseidon2_mem_check_src_addr_in_range_inv, e lookup_poseidon2_mem_check_dst_addr_in_range_inv, e lookup_poseidon2_mem_input_output_poseidon2_perm_inv, e lookup_to_radix_limb_range_inv, e lookup_to_radix_limb_less_than_radix_range_inv, e lookup_to_radix_fetch_safe_limbs_inv, e lookup_to_radix_fetch_p_limb_inv, e lookup_to_radix_limb_p_diff_range_inv, e lookup_scalar_mul_to_radix_inv, e lookup_scalar_mul_double_inv, e lookup_scalar_mul_add_inv, e lookup_sha256_range_comp_w_lhs_inv, e lookup_sha256_range_comp_w_rhs_inv, e lookup_sha256_range_rhs_w_7_inv, e lookup_sha256_range_rhs_w_18_inv, e lookup_sha256_range_rhs_w_3_inv, e lookup_sha256_w_s_0_xor_0_inv, e lookup_sha256_w_s_0_xor_1_inv, e lookup_sha256_range_rhs_w_17_inv, e lookup_sha256_range_rhs_w_19_inv, e lookup_sha256_range_rhs_w_10_inv, e lookup_sha256_w_s_1_xor_0_inv, e lookup_sha256_w_s_1_xor_1_inv, e lookup_sha256_range_rhs_e_6_inv, e lookup_sha256_range_rhs_e_11_inv, e lookup_sha256_range_rhs_e_25_inv, e lookup_sha256_s_1_xor_0_inv, e lookup_sha256_s_1_xor_1_inv, e lookup_sha256_ch_and_0_inv, e lookup_sha256_ch_and_1_inv, e lookup_sha256_ch_xor_inv, e lookup_sha256_round_constant_inv, e lookup_sha256_range_rhs_a_2_inv, e lookup_sha256_range_rhs_a_13_inv, e lookup_sha256_range_rhs_a_22_inv, e lookup_sha256_s_0_xor_0_inv, e lookup_sha256_s_0_xor_1_inv, e lookup_sha256_maj_and_0_inv, e lookup_sha256_maj_and_1_inv, e lookup_sha256_maj_and_2_inv, e lookup_sha256_maj_xor_0_inv, e lookup_sha256_maj_xor_1_inv, e lookup_sha256_range_comp_next_a_lhs_inv, e lookup_sha256_range_comp_next_a_rhs_inv, e lookup_sha256_range_comp_next_e_lhs_inv, e lookup_sha256_range_comp_next_e_rhs_inv, e lookup_sha256_range_comp_a_rhs_inv, e 
lookup_sha256_range_comp_b_rhs_inv, e lookup_sha256_range_comp_c_rhs_inv, e lookup_sha256_range_comp_d_rhs_inv, e lookup_sha256_range_comp_e_rhs_inv, e lookup_sha256_range_comp_f_rhs_inv, e lookup_sha256_range_comp_g_rhs_inv, e lookup_sha256_range_comp_h_rhs_inv, e lookup_sha256_mem_check_state_addr_in_range_inv, e lookup_sha256_mem_check_input_addr_in_range_inv, e lookup_sha256_mem_check_output_addr_in_range_inv, e lookup_to_radix_mem_check_dst_addr_in_range_inv, e lookup_to_radix_mem_check_radix_lt_2_inv, e lookup_to_radix_mem_check_radix_gt_256_inv, e lookup_to_radix_mem_input_output_to_radix_inv, e lookup_poseidon2_hash_poseidon2_perm_inv, e lookup_address_derivation_salted_initialization_hash_poseidon2_0_inv, e lookup_address_derivation_salted_initialization_hash_poseidon2_1_inv, e lookup_address_derivation_partial_address_poseidon2_inv, e lookup_address_derivation_public_keys_hash_poseidon2_0_inv, e lookup_address_derivation_public_keys_hash_poseidon2_1_inv, e lookup_address_derivation_public_keys_hash_poseidon2_2_inv, e lookup_address_derivation_public_keys_hash_poseidon2_3_inv, e lookup_address_derivation_public_keys_hash_poseidon2_4_inv, e lookup_address_derivation_preaddress_poseidon2_inv, e lookup_address_derivation_preaddress_scalar_mul_inv, e lookup_address_derivation_address_ecadd_inv, e lookup_bc_decomposition_bytes_are_bytes_inv, e lookup_bc_hashing_poseidon2_hash_inv, e lookup_merkle_check_merkle_poseidon2_read_inv, e lookup_merkle_check_merkle_poseidon2_write_inv, e lookup_indexed_tree_check_silo_poseidon2_inv, e lookup_indexed_tree_check_low_leaf_value_validation_inv, e lookup_indexed_tree_check_low_leaf_next_value_validation_inv, e lookup_indexed_tree_check_low_leaf_poseidon2_inv, e lookup_indexed_tree_check_updated_low_leaf_poseidon2_inv, e lookup_indexed_tree_check_low_leaf_merkle_check_inv, e lookup_indexed_tree_check_new_leaf_poseidon2_inv, e lookup_indexed_tree_check_new_leaf_merkle_check_inv, e 
lookup_indexed_tree_check_write_value_to_public_inputs_inv, e lookup_public_data_squash_leaf_slot_increase_ff_gt_inv, e lookup_public_data_squash_clk_diff_range_lo_inv, e lookup_public_data_squash_clk_diff_range_hi_inv, e lookup_public_data_check_clk_diff_range_lo_inv, e lookup_public_data_check_clk_diff_range_hi_inv, e lookup_public_data_check_silo_poseidon2_inv, e lookup_public_data_check_low_leaf_slot_validation_inv, e lookup_public_data_check_low_leaf_next_slot_validation_inv, e lookup_public_data_check_low_leaf_poseidon2_0_inv, e lookup_public_data_check_low_leaf_poseidon2_1_inv, e lookup_public_data_check_updated_low_leaf_poseidon2_0_inv, e lookup_public_data_check_updated_low_leaf_poseidon2_1_inv, e lookup_public_data_check_low_leaf_merkle_check_inv, e lookup_public_data_check_new_leaf_poseidon2_0_inv, e lookup_public_data_check_new_leaf_poseidon2_1_inv, e lookup_public_data_check_new_leaf_merkle_check_inv, e lookup_public_data_check_write_public_data_to_public_inputs_inv, e lookup_public_data_check_write_writes_length_to_public_inputs_inv, e lookup_update_check_timestamp_from_public_inputs_inv, e lookup_update_check_delayed_public_mutable_slot_poseidon2_inv, e lookup_update_check_update_hash_public_data_read_inv, e lookup_update_check_update_hash_poseidon2_inv, e lookup_update_check_update_hi_metadata_range_inv, e lookup_update_check_update_lo_metadata_range_inv, e lookup_update_check_timestamp_is_lt_timestamp_of_change_inv, e lookup_contract_instance_retrieval_check_protocol_address_range_inv, e lookup_contract_instance_retrieval_read_derived_address_from_public_inputs_inv, e lookup_contract_instance_retrieval_deployment_nullifier_read_inv, e lookup_contract_instance_retrieval_address_derivation_inv, e lookup_contract_instance_retrieval_update_check_inv, e lookup_class_id_derivation_class_id_poseidon2_0_inv, e lookup_class_id_derivation_class_id_poseidon2_1_inv, e lookup_bc_retrieval_contract_instance_retrieval_inv, e 
lookup_bc_retrieval_class_id_derivation_inv, e lookup_bc_retrieval_is_new_class_check_inv, e lookup_bc_retrieval_retrieved_bytecodes_insertion_inv, e lookup_instr_fetching_pc_abs_diff_positive_inv, e lookup_instr_fetching_instr_abs_diff_positive_inv, e lookup_instr_fetching_tag_value_validation_inv, e lookup_instr_fetching_bytecode_size_from_bc_dec_inv, e lookup_instr_fetching_bytes_from_bc_dec_inv, e lookup_instr_fetching_wire_instruction_info_inv, e lookup_emit_public_log_check_memory_out_of_bounds_inv, e lookup_emit_public_log_check_log_fields_count_inv, e lookup_emit_public_log_write_data_to_public_inputs_inv, e lookup_get_contract_instance_precomputed_info_inv, e lookup_get_contract_instance_contract_instance_retrieval_inv, e lookup_l1_to_l2_message_tree_check_merkle_check_inv, e lookup_internal_call_unwind_call_stack_inv, e lookup_context_ctx_stack_rollback_inv, e lookup_context_ctx_stack_return_inv, e lookup_addressing_relative_overflow_result_0_inv, e lookup_addressing_relative_overflow_result_1_inv, e lookup_addressing_relative_overflow_result_2_inv, e lookup_addressing_relative_overflow_result_3_inv, e lookup_addressing_relative_overflow_result_4_inv, e lookup_addressing_relative_overflow_result_5_inv, e lookup_addressing_relative_overflow_result_6_inv, e lookup_gas_addressing_gas_read_inv, e lookup_gas_is_out_of_gas_l2_inv, e lookup_gas_is_out_of_gas_da_inv, e lookup_note_hash_tree_check_silo_poseidon2_inv, e lookup_note_hash_tree_check_read_first_nullifier_inv, e lookup_note_hash_tree_check_nonce_computation_poseidon2_inv, e lookup_note_hash_tree_check_unique_note_hash_poseidon2_inv, e lookup_note_hash_tree_check_merkle_check_inv, e lookup_note_hash_tree_check_write_note_hash_to_public_inputs_inv, e lookup_emit_notehash_notehash_tree_write_inv, e lookup_emit_nullifier_write_nullifier_inv, e lookup_external_call_is_l2_gas_left_gt_allocated_inv, e lookup_external_call_is_da_gas_left_gt_allocated_inv, e lookup_get_env_var_precomputed_info_inv, e 
lookup_get_env_var_read_from_public_inputs_col0_inv, e lookup_get_env_var_read_from_public_inputs_col1_inv, e lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_inv, e lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_inv, e lookup_notehash_exists_note_hash_leaf_index_in_range_inv, e lookup_notehash_exists_note_hash_read_inv, e lookup_nullifier_exists_nullifier_exists_check_inv, e lookup_send_l2_to_l1_msg_recipient_check_inv, e lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_inv, e lookup_sload_storage_read_inv, e lookup_sstore_record_written_storage_slot_inv, e lookup_execution_bytecode_retrieval_result_inv, e lookup_execution_instruction_fetching_result_inv, e lookup_execution_instruction_fetching_body_inv, e lookup_execution_exec_spec_read_inv, e lookup_execution_dyn_l2_factor_bitwise_inv, e lookup_execution_check_radix_gt_256_inv, e lookup_execution_get_p_limbs_inv, e lookup_execution_get_max_limbs_inv, e lookup_execution_check_written_storage_slot_inv, e lookup_execution_dispatch_to_alu_inv, e lookup_execution_dispatch_to_bitwise_inv, e lookup_execution_dispatch_to_cast_inv, e lookup_execution_dispatch_to_set_inv, e lookup_calldata_hashing_get_calldata_field_0_inv, e lookup_calldata_hashing_get_calldata_field_1_inv, e lookup_calldata_hashing_get_calldata_field_2_inv, e lookup_calldata_hashing_poseidon2_hash_inv, e lookup_tx_context_public_inputs_note_hash_tree_inv, e lookup_tx_context_public_inputs_nullifier_tree_inv, e lookup_tx_context_public_inputs_public_data_tree_inv, e lookup_tx_context_public_inputs_l1_l2_tree_inv, e lookup_tx_context_public_inputs_gas_used_inv, e lookup_tx_context_public_inputs_read_gas_limit_inv, e lookup_tx_context_public_inputs_read_reverted_inv, e lookup_tx_context_restore_state_on_revert_inv, e lookup_tx_context_public_inputs_write_note_hash_count_inv, e lookup_tx_context_public_inputs_write_nullifier_count_inv, e lookup_tx_context_public_inputs_write_l2_to_l1_message_count_inv, e 
lookup_tx_context_public_inputs_write_public_log_count_inv, e lookup_tx_read_phase_spec_inv, e lookup_tx_read_phase_length_inv, e lookup_tx_read_public_call_request_phase_inv, e lookup_tx_read_tree_insert_value_inv, e lookup_tx_note_hash_append_inv, e lookup_tx_nullifier_append_inv, e lookup_tx_read_l2_l1_msg_inv, e lookup_tx_write_l2_l1_msg_inv, e lookup_tx_read_effective_fee_public_inputs_inv, e lookup_tx_read_fee_payer_public_inputs_inv, e lookup_tx_balance_slot_poseidon2_inv, e lookup_tx_balance_read_inv, e lookup_tx_balance_validation_inv, e lookup_tx_write_fee_public_inputs_inv #define AVM2_SHIFTED_ENTITIES_E(e) e bc_decomposition_bytes_shift, e bc_decomposition_bytes_pc_plus_1_shift, e bc_decomposition_bytes_pc_plus_10_shift, e bc_decomposition_bytes_pc_plus_11_shift, e bc_decomposition_bytes_pc_plus_12_shift, e bc_decomposition_bytes_pc_plus_13_shift, e bc_decomposition_bytes_pc_plus_14_shift, e bc_decomposition_bytes_pc_plus_15_shift, e bc_decomposition_bytes_pc_plus_16_shift, e bc_decomposition_bytes_pc_plus_17_shift, e bc_decomposition_bytes_pc_plus_18_shift, e bc_decomposition_bytes_pc_plus_19_shift, e bc_decomposition_bytes_pc_plus_2_shift, e bc_decomposition_bytes_pc_plus_20_shift, e bc_decomposition_bytes_pc_plus_21_shift, e bc_decomposition_bytes_pc_plus_22_shift, e bc_decomposition_bytes_pc_plus_23_shift, e bc_decomposition_bytes_pc_plus_24_shift, e bc_decomposition_bytes_pc_plus_25_shift, e bc_decomposition_bytes_pc_plus_26_shift, e bc_decomposition_bytes_pc_plus_27_shift, e bc_decomposition_bytes_pc_plus_28_shift, e bc_decomposition_bytes_pc_plus_29_shift, e bc_decomposition_bytes_pc_plus_3_shift, e bc_decomposition_bytes_pc_plus_30_shift, e bc_decomposition_bytes_pc_plus_31_shift, e bc_decomposition_bytes_pc_plus_32_shift, e bc_decomposition_bytes_pc_plus_33_shift, e bc_decomposition_bytes_pc_plus_34_shift, e bc_decomposition_bytes_pc_plus_35_shift, e bc_decomposition_bytes_pc_plus_4_shift, e bc_decomposition_bytes_pc_plus_5_shift, e 
bc_decomposition_bytes_pc_plus_6_shift, e bc_decomposition_bytes_pc_plus_7_shift, e bc_decomposition_bytes_pc_plus_8_shift, e bc_decomposition_bytes_pc_plus_9_shift, e bc_decomposition_bytes_remaining_shift, e bc_decomposition_id_shift, e bc_decomposition_next_packed_pc_shift, e bc_decomposition_pc_shift, e bc_decomposition_sel_shift, e bc_decomposition_sel_windows_gt_remaining_shift, e bc_decomposition_start_shift, e bc_hashing_bytecode_id_shift, e bc_hashing_padding_shift, e bc_hashing_pc_index_1_shift, e bc_hashing_rounds_rem_shift, e bc_hashing_sel_shift, e bc_hashing_sel_not_start_shift, e bc_hashing_start_shift, e bitwise_acc_ia_shift, e bitwise_acc_ib_shift, e bitwise_acc_ic_shift, e bitwise_ctr_shift, e bitwise_op_id_shift, e bitwise_sel_shift, e bitwise_start_shift, e calldata_context_id_shift, e calldata_hashing_calldata_size_shift, e calldata_hashing_context_id_shift, e calldata_hashing_index_0__shift, e calldata_hashing_output_hash_shift, e calldata_hashing_rounds_rem_shift, e calldata_hashing_sel_shift, e calldata_hashing_start_shift, e calldata_index_shift, e calldata_sel_shift, e data_copy_clk_shift, e data_copy_copy_size_shift, e data_copy_dst_addr_shift, e data_copy_dst_context_id_shift, e data_copy_padding_shift, e data_copy_read_addr_shift, e data_copy_reads_left_shift, e data_copy_sel_shift, e data_copy_sel_cd_copy_shift, e data_copy_src_context_id_shift, e data_copy_start_shift, e emit_public_log_contract_address_shift, e emit_public_log_correct_tag_shift, e emit_public_log_error_out_of_bounds_shift, e emit_public_log_error_tag_mismatch_shift, e emit_public_log_execution_clk_shift, e emit_public_log_is_write_contract_address_shift, e emit_public_log_is_write_memory_value_shift, e emit_public_log_log_address_shift, e emit_public_log_public_inputs_index_shift, e emit_public_log_remaining_rows_shift, e emit_public_log_seen_wrong_tag_shift, e emit_public_log_sel_shift, e emit_public_log_sel_write_to_public_inputs_shift, e 
emit_public_log_space_id_shift, e emit_public_log_start_shift, e execution_bytecode_id_shift, e execution_clk_shift, e execution_context_id_shift, e execution_contract_address_shift, e execution_da_gas_limit_shift, e execution_discard_shift, e execution_dying_context_id_shift, e execution_enqueued_call_start_shift, e execution_internal_call_id_shift, e execution_internal_call_return_id_shift, e execution_is_static_shift, e execution_l1_l2_tree_root_shift, e execution_l2_gas_limit_shift, e execution_last_child_id_shift, e execution_last_child_returndata_addr_shift, e execution_last_child_returndata_size_shift, e execution_last_child_success_shift, e execution_msg_sender_shift, e execution_next_context_id_shift, e execution_next_internal_call_id_shift, e execution_parent_calldata_addr_shift, e execution_parent_calldata_size_shift, e execution_parent_da_gas_limit_shift, e execution_parent_da_gas_used_shift, e execution_parent_id_shift, e execution_parent_l2_gas_limit_shift, e execution_parent_l2_gas_used_shift, e execution_pc_shift, e execution_prev_da_gas_used_shift, e execution_prev_l2_gas_used_shift, e execution_prev_note_hash_tree_root_shift, e execution_prev_note_hash_tree_size_shift, e execution_prev_nullifier_tree_root_shift, e execution_prev_nullifier_tree_size_shift, e execution_prev_num_l2_to_l1_messages_shift, e execution_prev_num_note_hashes_emitted_shift, e execution_prev_num_nullifiers_emitted_shift, e execution_prev_num_public_log_fields_shift, e execution_prev_public_data_tree_root_shift, e execution_prev_public_data_tree_size_shift, e execution_prev_retrieved_bytecodes_tree_root_shift, e execution_prev_retrieved_bytecodes_tree_size_shift, e execution_prev_written_public_data_slots_tree_root_shift, e execution_prev_written_public_data_slots_tree_size_shift, e execution_sel_shift, e execution_sel_first_row_in_context_shift, e execution_transaction_fee_shift, e ff_gt_a_hi_shift, e ff_gt_a_lo_shift, e ff_gt_b_hi_shift, e ff_gt_b_lo_shift, e 
ff_gt_cmp_rng_ctr_shift, e ff_gt_p_sub_a_hi_shift, e ff_gt_p_sub_a_lo_shift, e ff_gt_p_sub_b_hi_shift, e ff_gt_p_sub_b_lo_shift, e ff_gt_sel_shift, e ff_gt_sel_dec_shift, e ff_gt_sel_gt_shift, e keccak_memory_addr_shift, e keccak_memory_clk_shift, e keccak_memory_ctr_shift, e keccak_memory_rw_shift, e keccak_memory_sel_shift, e keccak_memory_space_id_shift, e keccak_memory_start_read_shift, e keccak_memory_start_write_shift, e keccak_memory_tag_error_shift, e keccak_memory_val_0__shift, e keccak_memory_val_10__shift, e keccak_memory_val_11__shift, e keccak_memory_val_12__shift, e keccak_memory_val_13__shift, e keccak_memory_val_14__shift, e keccak_memory_val_15__shift, e keccak_memory_val_16__shift, e keccak_memory_val_17__shift, e keccak_memory_val_18__shift, e keccak_memory_val_19__shift, e keccak_memory_val_1__shift, e keccak_memory_val_20__shift, e keccak_memory_val_21__shift, e keccak_memory_val_22__shift, e keccak_memory_val_23__shift, e keccak_memory_val_2__shift, e keccak_memory_val_3__shift, e keccak_memory_val_4__shift, e keccak_memory_val_5__shift, e keccak_memory_val_6__shift, e keccak_memory_val_7__shift, e keccak_memory_val_8__shift, e keccak_memory_val_9__shift, e keccakf1600_clk_shift, e keccakf1600_dst_addr_shift, e keccakf1600_round_shift, e keccakf1600_sel_shift, e keccakf1600_sel_no_error_shift, e keccakf1600_space_id_shift, e keccakf1600_start_shift, e keccakf1600_state_in_00_shift, e keccakf1600_state_in_01_shift, e keccakf1600_state_in_02_shift, e keccakf1600_state_in_03_shift, e keccakf1600_state_in_04_shift, e keccakf1600_state_in_10_shift, e keccakf1600_state_in_11_shift, e keccakf1600_state_in_12_shift, e keccakf1600_state_in_13_shift, e keccakf1600_state_in_14_shift, e keccakf1600_state_in_20_shift, e keccakf1600_state_in_21_shift, e keccakf1600_state_in_22_shift, e keccakf1600_state_in_23_shift, e keccakf1600_state_in_24_shift, e keccakf1600_state_in_30_shift, e keccakf1600_state_in_31_shift, e keccakf1600_state_in_32_shift, e 
keccakf1600_state_in_33_shift, e keccakf1600_state_in_34_shift, e keccakf1600_state_in_40_shift, e keccakf1600_state_in_41_shift, e keccakf1600_state_in_42_shift, e keccakf1600_state_in_43_shift, e keccakf1600_state_in_44_shift, e memory_address_shift, e memory_clk_shift, e memory_rw_shift, e memory_sel_shift, e memory_space_id_shift, e memory_tag_shift, e memory_value_shift, e merkle_check_index_shift, e merkle_check_merkle_hash_separator_shift, e merkle_check_path_len_shift, e merkle_check_read_node_shift, e merkle_check_read_root_shift, e merkle_check_sel_shift, e merkle_check_start_shift, e merkle_check_write_shift, e merkle_check_write_node_shift, e merkle_check_write_root_shift, e poseidon2_hash_a_0_shift, e poseidon2_hash_a_1_shift, e poseidon2_hash_a_2_shift, e poseidon2_hash_a_3_shift, e poseidon2_hash_input_0_shift, e poseidon2_hash_input_1_shift, e poseidon2_hash_input_2_shift, e poseidon2_hash_num_perm_rounds_rem_shift, e poseidon2_hash_output_shift, e poseidon2_hash_sel_shift, e poseidon2_hash_start_shift, e public_data_check_clk_shift, e public_data_check_sel_shift, e public_data_check_write_idx_shift, e public_data_squash_clk_shift, e public_data_squash_final_value_shift, e public_data_squash_leaf_slot_shift, e public_data_squash_sel_shift, e public_data_squash_write_to_public_inputs_shift, e scalar_mul_bit_idx_shift, e scalar_mul_point_inf_shift, e scalar_mul_point_x_shift, e scalar_mul_point_y_shift, e scalar_mul_res_inf_shift, e scalar_mul_res_x_shift, e scalar_mul_res_y_shift, e scalar_mul_scalar_shift, e scalar_mul_sel_shift, e scalar_mul_start_shift, e scalar_mul_temp_inf_shift, e scalar_mul_temp_x_shift, e scalar_mul_temp_y_shift, e sha256_a_shift, e sha256_b_shift, e sha256_c_shift, e sha256_d_shift, e sha256_e_shift, e sha256_execution_clk_shift, e sha256_f_shift, e sha256_g_shift, e sha256_h_shift, e sha256_helper_w0_shift, e sha256_helper_w1_shift, e sha256_helper_w10_shift, e sha256_helper_w11_shift, e sha256_helper_w12_shift, e 
sha256_helper_w13_shift, e sha256_helper_w14_shift, e sha256_helper_w15_shift, e sha256_helper_w2_shift, e sha256_helper_w3_shift, e sha256_helper_w4_shift, e sha256_helper_w5_shift, e sha256_helper_w6_shift, e sha256_helper_w7_shift, e sha256_helper_w8_shift, e sha256_helper_w9_shift, e sha256_init_a_shift, e sha256_init_b_shift, e sha256_init_c_shift, e sha256_init_d_shift, e sha256_init_e_shift, e sha256_init_f_shift, e sha256_init_g_shift, e sha256_init_h_shift, e sha256_input_addr_shift, e sha256_input_rounds_rem_shift, e sha256_output_addr_shift, e sha256_rounds_remaining_shift, e sha256_sel_shift, e sha256_sel_invalid_input_tag_err_shift, e sha256_space_id_shift, e sha256_start_shift, e to_radix_acc_shift, e to_radix_acc_under_p_shift, e to_radix_limb_shift, e to_radix_limb_eq_p_shift, e to_radix_limb_index_shift, e to_radix_limb_lt_p_shift, e to_radix_mem_dst_addr_shift, e to_radix_mem_execution_clk_shift, e to_radix_mem_is_output_bits_shift, e to_radix_mem_num_limbs_shift, e to_radix_mem_radix_shift, e to_radix_mem_sel_shift, e to_radix_mem_sel_should_decompose_shift, e to_radix_mem_sel_should_write_mem_shift, e to_radix_mem_space_id_shift, e to_radix_mem_start_shift, e to_radix_mem_value_to_decompose_shift, e to_radix_not_padding_limb_shift, e to_radix_power_shift, e to_radix_radix_shift, e to_radix_safe_limbs_shift, e to_radix_sel_shift, e to_radix_start_shift, e to_radix_value_shift, e tx_da_gas_limit_shift, e tx_discard_shift, e tx_fee_shift, e tx_is_revertible_shift, e tx_is_teardown_shift, e tx_l1_l2_tree_root_shift, e tx_l1_l2_tree_size_shift, e tx_l2_gas_limit_shift, e tx_next_context_id_shift, e tx_phase_value_shift, e tx_prev_da_gas_used_shift, e tx_prev_l2_gas_used_shift, e tx_prev_note_hash_tree_root_shift, e tx_prev_note_hash_tree_size_shift, e tx_prev_nullifier_tree_root_shift, e tx_prev_nullifier_tree_size_shift, e tx_prev_num_l2_to_l1_messages_shift, e tx_prev_num_note_hashes_emitted_shift, e tx_prev_num_nullifiers_emitted_shift, e 
tx_prev_num_public_log_fields_shift, e tx_prev_public_data_tree_root_shift, e tx_prev_public_data_tree_size_shift, e tx_prev_retrieved_bytecodes_tree_root_shift, e tx_prev_retrieved_bytecodes_tree_size_shift, e tx_prev_written_public_data_slots_tree_root_shift, e tx_prev_written_public_data_slots_tree_size_shift, e tx_read_pi_offset_shift, e tx_remaining_phase_counter_shift, e tx_reverted_shift, e tx_sel_shift, e tx_start_phase_shift, e tx_start_tx_shift, e tx_tx_reverted_shift #define AVM2_TO_BE_SHIFTED_E(e) e bc_decomposition_bytes, e bc_decomposition_bytes_pc_plus_1, e bc_decomposition_bytes_pc_plus_10, e bc_decomposition_bytes_pc_plus_11, e bc_decomposition_bytes_pc_plus_12, e bc_decomposition_bytes_pc_plus_13, e bc_decomposition_bytes_pc_plus_14, e bc_decomposition_bytes_pc_plus_15, e bc_decomposition_bytes_pc_plus_16, e bc_decomposition_bytes_pc_plus_17, e bc_decomposition_bytes_pc_plus_18, e bc_decomposition_bytes_pc_plus_19, e bc_decomposition_bytes_pc_plus_2, e bc_decomposition_bytes_pc_plus_20, e bc_decomposition_bytes_pc_plus_21, e bc_decomposition_bytes_pc_plus_22, e bc_decomposition_bytes_pc_plus_23, e bc_decomposition_bytes_pc_plus_24, e bc_decomposition_bytes_pc_plus_25, e bc_decomposition_bytes_pc_plus_26, e bc_decomposition_bytes_pc_plus_27, e bc_decomposition_bytes_pc_plus_28, e bc_decomposition_bytes_pc_plus_29, e bc_decomposition_bytes_pc_plus_3, e bc_decomposition_bytes_pc_plus_30, e bc_decomposition_bytes_pc_plus_31, e bc_decomposition_bytes_pc_plus_32, e bc_decomposition_bytes_pc_plus_33, e bc_decomposition_bytes_pc_plus_34, e bc_decomposition_bytes_pc_plus_35, e bc_decomposition_bytes_pc_plus_4, e bc_decomposition_bytes_pc_plus_5, e bc_decomposition_bytes_pc_plus_6, e bc_decomposition_bytes_pc_plus_7, e bc_decomposition_bytes_pc_plus_8, e bc_decomposition_bytes_pc_plus_9, e bc_decomposition_bytes_remaining, e bc_decomposition_id, e bc_decomposition_next_packed_pc, e bc_decomposition_pc, e bc_decomposition_sel, e 
bc_decomposition_sel_windows_gt_remaining, e bc_decomposition_start, e bc_hashing_bytecode_id, e bc_hashing_padding, e bc_hashing_pc_index_1, e bc_hashing_rounds_rem, e bc_hashing_sel, e bc_hashing_sel_not_start, e bc_hashing_start, e bitwise_acc_ia, e bitwise_acc_ib, e bitwise_acc_ic, e bitwise_ctr, e bitwise_op_id, e bitwise_sel, e bitwise_start, e calldata_context_id, e calldata_hashing_calldata_size, e calldata_hashing_context_id, e calldata_hashing_index_0_, e calldata_hashing_output_hash, e calldata_hashing_rounds_rem, e calldata_hashing_sel, e calldata_hashing_start, e calldata_index, e calldata_sel, e data_copy_clk, e data_copy_copy_size, e data_copy_dst_addr, e data_copy_dst_context_id, e data_copy_padding, e data_copy_read_addr, e data_copy_reads_left, e data_copy_sel, e data_copy_sel_cd_copy, e data_copy_src_context_id, e data_copy_start, e emit_public_log_contract_address, e emit_public_log_correct_tag, e emit_public_log_error_out_of_bounds, e emit_public_log_error_tag_mismatch, e emit_public_log_execution_clk, e emit_public_log_is_write_contract_address, e emit_public_log_is_write_memory_value, e emit_public_log_log_address, e emit_public_log_public_inputs_index, e emit_public_log_remaining_rows, e emit_public_log_seen_wrong_tag, e emit_public_log_sel, e emit_public_log_sel_write_to_public_inputs, e emit_public_log_space_id, e emit_public_log_start, e execution_bytecode_id, e execution_clk, e execution_context_id, e execution_contract_address, e execution_da_gas_limit, e execution_discard, e execution_dying_context_id, e execution_enqueued_call_start, e execution_internal_call_id, e execution_internal_call_return_id, e execution_is_static, e execution_l1_l2_tree_root, e execution_l2_gas_limit, e execution_last_child_id, e execution_last_child_returndata_addr, e execution_last_child_returndata_size, e execution_last_child_success, e execution_msg_sender, e execution_next_context_id, e execution_next_internal_call_id, e execution_parent_calldata_addr, e 
execution_parent_calldata_size, e execution_parent_da_gas_limit, e execution_parent_da_gas_used, e execution_parent_id, e execution_parent_l2_gas_limit, e execution_parent_l2_gas_used, e execution_pc, e execution_prev_da_gas_used, e execution_prev_l2_gas_used, e execution_prev_note_hash_tree_root, e execution_prev_note_hash_tree_size, e execution_prev_nullifier_tree_root, e execution_prev_nullifier_tree_size, e execution_prev_num_l2_to_l1_messages, e execution_prev_num_note_hashes_emitted, e execution_prev_num_nullifiers_emitted, e execution_prev_num_public_log_fields, e execution_prev_public_data_tree_root, e execution_prev_public_data_tree_size, e execution_prev_retrieved_bytecodes_tree_root, e execution_prev_retrieved_bytecodes_tree_size, e execution_prev_written_public_data_slots_tree_root, e execution_prev_written_public_data_slots_tree_size, e execution_sel, e execution_sel_first_row_in_context, e execution_transaction_fee, e ff_gt_a_hi, e ff_gt_a_lo, e ff_gt_b_hi, e ff_gt_b_lo, e ff_gt_cmp_rng_ctr, e ff_gt_p_sub_a_hi, e ff_gt_p_sub_a_lo, e ff_gt_p_sub_b_hi, e ff_gt_p_sub_b_lo, e ff_gt_sel, e ff_gt_sel_dec, e ff_gt_sel_gt, e keccak_memory_addr, e keccak_memory_clk, e keccak_memory_ctr, e keccak_memory_rw, e keccak_memory_sel, e keccak_memory_space_id, e keccak_memory_start_read, e keccak_memory_start_write, e keccak_memory_tag_error, e keccak_memory_val_0_, e keccak_memory_val_10_, e keccak_memory_val_11_, e keccak_memory_val_12_, e keccak_memory_val_13_, e keccak_memory_val_14_, e keccak_memory_val_15_, e keccak_memory_val_16_, e keccak_memory_val_17_, e keccak_memory_val_18_, e keccak_memory_val_19_, e keccak_memory_val_1_, e keccak_memory_val_20_, e keccak_memory_val_21_, e keccak_memory_val_22_, e keccak_memory_val_23_, e keccak_memory_val_2_, e keccak_memory_val_3_, e keccak_memory_val_4_, e keccak_memory_val_5_, e keccak_memory_val_6_, e keccak_memory_val_7_, e keccak_memory_val_8_, e keccak_memory_val_9_, e keccakf1600_clk, e keccakf1600_dst_addr, e 
keccakf1600_round, e keccakf1600_sel, e keccakf1600_sel_no_error, e keccakf1600_space_id, e keccakf1600_start, e keccakf1600_state_in_00, e keccakf1600_state_in_01, e keccakf1600_state_in_02, e keccakf1600_state_in_03, e keccakf1600_state_in_04, e keccakf1600_state_in_10, e keccakf1600_state_in_11, e keccakf1600_state_in_12, e keccakf1600_state_in_13, e keccakf1600_state_in_14, e keccakf1600_state_in_20, e keccakf1600_state_in_21, e keccakf1600_state_in_22, e keccakf1600_state_in_23, e keccakf1600_state_in_24, e keccakf1600_state_in_30, e keccakf1600_state_in_31, e keccakf1600_state_in_32, e keccakf1600_state_in_33, e keccakf1600_state_in_34, e keccakf1600_state_in_40, e keccakf1600_state_in_41, e keccakf1600_state_in_42, e keccakf1600_state_in_43, e keccakf1600_state_in_44, e memory_address, e memory_clk, e memory_rw, e memory_sel, e memory_space_id, e memory_tag, e memory_value, e merkle_check_index, e merkle_check_merkle_hash_separator, e merkle_check_path_len, e merkle_check_read_node, e merkle_check_read_root, e merkle_check_sel, e merkle_check_start, e merkle_check_write, e merkle_check_write_node, e merkle_check_write_root, e poseidon2_hash_a_0, e poseidon2_hash_a_1, e poseidon2_hash_a_2, e poseidon2_hash_a_3, e poseidon2_hash_input_0, e poseidon2_hash_input_1, e poseidon2_hash_input_2, e poseidon2_hash_num_perm_rounds_rem, e poseidon2_hash_output, e poseidon2_hash_sel, e poseidon2_hash_start, e public_data_check_clk, e public_data_check_sel, e public_data_check_write_idx, e public_data_squash_clk, e public_data_squash_final_value, e public_data_squash_leaf_slot, e public_data_squash_sel, e public_data_squash_write_to_public_inputs, e scalar_mul_bit_idx, e scalar_mul_point_inf, e scalar_mul_point_x, e scalar_mul_point_y, e scalar_mul_res_inf, e scalar_mul_res_x, e scalar_mul_res_y, e scalar_mul_scalar, e scalar_mul_sel, e scalar_mul_start, e scalar_mul_temp_inf, e scalar_mul_temp_x, e scalar_mul_temp_y, e sha256_a, e sha256_b, e sha256_c, e sha256_d, e 
sha256_e, e sha256_execution_clk, e sha256_f, e sha256_g, e sha256_h, e sha256_helper_w0, e sha256_helper_w1, e sha256_helper_w10, e sha256_helper_w11, e sha256_helper_w12, e sha256_helper_w13, e sha256_helper_w14, e sha256_helper_w15, e sha256_helper_w2, e sha256_helper_w3, e sha256_helper_w4, e sha256_helper_w5, e sha256_helper_w6, e sha256_helper_w7, e sha256_helper_w8, e sha256_helper_w9, e sha256_init_a, e sha256_init_b, e sha256_init_c, e sha256_init_d, e sha256_init_e, e sha256_init_f, e sha256_init_g, e sha256_init_h, e sha256_input_addr, e sha256_input_rounds_rem, e sha256_output_addr, e sha256_rounds_remaining, e sha256_sel, e sha256_sel_invalid_input_tag_err, e sha256_space_id, e sha256_start, e to_radix_acc, e to_radix_acc_under_p, e to_radix_limb, e to_radix_limb_eq_p, e to_radix_limb_index, e to_radix_limb_lt_p, e to_radix_mem_dst_addr, e to_radix_mem_execution_clk, e to_radix_mem_is_output_bits, e to_radix_mem_num_limbs, e to_radix_mem_radix, e to_radix_mem_sel, e to_radix_mem_sel_should_decompose, e to_radix_mem_sel_should_write_mem, e to_radix_mem_space_id, e to_radix_mem_start, e to_radix_mem_value_to_decompose, e to_radix_not_padding_limb, e to_radix_power, e to_radix_radix, e to_radix_safe_limbs, e to_radix_sel, e to_radix_start, e to_radix_value, e tx_da_gas_limit, e tx_discard, e tx_fee, e tx_is_revertible, e tx_is_teardown, e tx_l1_l2_tree_root, e tx_l1_l2_tree_size, e tx_l2_gas_limit, e tx_next_context_id, e tx_phase_value, e tx_prev_da_gas_used, e tx_prev_l2_gas_used, e tx_prev_note_hash_tree_root, e tx_prev_note_hash_tree_size, e tx_prev_nullifier_tree_root, e tx_prev_nullifier_tree_size, e tx_prev_num_l2_to_l1_messages, e tx_prev_num_note_hashes_emitted, e tx_prev_num_nullifiers_emitted, e tx_prev_num_public_log_fields, e tx_prev_public_data_tree_root, e tx_prev_public_data_tree_size, e tx_prev_retrieved_bytecodes_tree_root, e tx_prev_retrieved_bytecodes_tree_size, e tx_prev_written_public_data_slots_tree_root, e 
tx_prev_written_public_data_slots_tree_size, e tx_read_pi_offset, e tx_remaining_phase_counter, e tx_reverted, e tx_sel, e tx_start_phase, e tx_start_tx, e tx_tx_reverted @@ -36,16 +36,16 @@ enum class ColumnAndShifts { SENTINEL_DO_NOT_USE, }; -constexpr auto NUM_COLUMNS_WITH_SHIFTS = 3444; -constexpr auto NUM_COLUMNS_WITHOUT_SHIFTS = 3080; -constexpr auto NUM_PRECOMPUTED_ENTITIES = 122; -constexpr auto NUM_WIRE_ENTITIES = 2514; +constexpr auto NUM_COLUMNS_WITH_SHIFTS = 3438; +constexpr auto NUM_COLUMNS_WITHOUT_SHIFTS = 3074; +constexpr auto NUM_PRECOMPUTED_ENTITIES = 119; +constexpr auto NUM_WIRE_ENTITIES = 2511; constexpr auto NUM_DERIVED_ENTITIES = 444; constexpr auto NUM_WITNESS_ENTITIES = NUM_WIRE_ENTITIES + NUM_DERIVED_ENTITIES; constexpr auto NUM_WIRES_TO_BE_SHIFTED = 364; constexpr auto NUM_SHIFTED_ENTITIES = 364; constexpr auto NUM_UNSHIFTED_ENTITIES = NUM_COLUMNS_WITHOUT_SHIFTS; -constexpr auto NUM_ALL_ENTITIES = 3444; +constexpr auto NUM_ALL_ENTITIES = 3438; /* * Layout for all entities is: diff --git a/barretenberg/cpp/src/barretenberg/vm2/generated/flavor_variables.hpp b/barretenberg/cpp/src/barretenberg/vm2/generated/flavor_variables.hpp index 6f013f072dab..22c2e6bc3473 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/generated/flavor_variables.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/generated/flavor_variables.hpp @@ -140,11 +140,11 @@ namespace bb::avm2 { struct AvmFlavorVariables { - static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 122; - static constexpr size_t NUM_WITNESS_ENTITIES = 2958; + static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 119; + static constexpr size_t NUM_WITNESS_ENTITIES = 2955; static constexpr size_t NUM_SHIFTED_ENTITIES = 364; - static constexpr size_t NUM_WIRES = 2514; - static constexpr size_t NUM_ALL_ENTITIES = 3444; + static constexpr size_t NUM_WIRES = 2511; + static constexpr size_t NUM_ALL_ENTITIES = 3438; // Need to be templated for recursive verifier template diff --git 
a/barretenberg/cpp/src/barretenberg/vm2/generated/relations/lookups_tx.hpp b/barretenberg/cpp/src/barretenberg/vm2/generated/relations/lookups_tx.hpp index 816e1ed908f6..8e91d55c8b49 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/generated/relations/lookups_tx.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/generated/relations/lookups_tx.hpp @@ -16,27 +16,18 @@ namespace bb::avm2 { struct lookup_tx_read_phase_spec_settings_ { static constexpr std::string_view NAME = "LOOKUP_TX_READ_PHASE_SPEC"; static constexpr std::string_view RELATION_NAME = "tx"; - static constexpr size_t LOOKUP_TUPLE_SIZE = 16; + static constexpr size_t LOOKUP_TUPLE_SIZE = 13; static constexpr Column SRC_SELECTOR = Column::tx_sel; static constexpr Column DST_SELECTOR = Column::precomputed_sel_phase; static constexpr Column COUNTS = Column::lookup_tx_read_phase_spec_counts; static constexpr Column INVERSES = Column::lookup_tx_read_phase_spec_inv; static constexpr std::array SRC_COLUMNS = { - ColumnAndShifts::tx_phase_value, - ColumnAndShifts::tx_is_public_call_request, - ColumnAndShifts::tx_is_teardown, - ColumnAndShifts::tx_is_collect_fee, - ColumnAndShifts::tx_is_tree_padding, - ColumnAndShifts::tx_is_cleanup, - ColumnAndShifts::tx_is_revertible, - ColumnAndShifts::tx_read_pi_start_offset, - ColumnAndShifts::tx_read_pi_length_offset, - ColumnAndShifts::tx_sel_non_revertible_append_note_hash, - ColumnAndShifts::tx_sel_non_revertible_append_nullifier, - ColumnAndShifts::tx_sel_non_revertible_append_l2_l1_msg, - ColumnAndShifts::tx_sel_revertible_append_note_hash, - ColumnAndShifts::tx_sel_revertible_append_nullifier, - ColumnAndShifts::tx_sel_revertible_append_l2_l1_msg, + ColumnAndShifts::tx_phase_value, ColumnAndShifts::tx_is_public_call_request, + ColumnAndShifts::tx_is_teardown, ColumnAndShifts::tx_is_collect_fee, + ColumnAndShifts::tx_is_tree_padding, ColumnAndShifts::tx_is_cleanup, + ColumnAndShifts::tx_is_revertible, ColumnAndShifts::tx_read_pi_start_offset, + 
ColumnAndShifts::tx_read_pi_length_offset, ColumnAndShifts::tx_sel_append_note_hash, + ColumnAndShifts::tx_sel_append_nullifier, ColumnAndShifts::tx_sel_append_l2_l1_msg, ColumnAndShifts::tx_next_phase_on_revert }; static constexpr std::array DST_COLUMNS = { @@ -49,12 +40,9 @@ struct lookup_tx_read_phase_spec_settings_ { ColumnAndShifts::precomputed_is_revertible, ColumnAndShifts::precomputed_read_pi_start_offset, ColumnAndShifts::precomputed_read_pi_length_offset, - ColumnAndShifts::precomputed_sel_non_revertible_append_note_hash, - ColumnAndShifts::precomputed_sel_non_revertible_append_nullifier, - ColumnAndShifts::precomputed_sel_non_revertible_append_l2_l1_msg, - ColumnAndShifts::precomputed_sel_revertible_append_note_hash, - ColumnAndShifts::precomputed_sel_revertible_append_nullifier, - ColumnAndShifts::precomputed_sel_revertible_append_l2_l1_msg, + ColumnAndShifts::precomputed_sel_append_note_hash, + ColumnAndShifts::precomputed_sel_append_nullifier, + ColumnAndShifts::precomputed_sel_append_l2_l1_msg, ColumnAndShifts::precomputed_next_phase_on_revert }; }; @@ -151,7 +139,7 @@ struct lookup_tx_note_hash_append_settings_ { ColumnAndShifts::tx_prev_note_hash_tree_size, ColumnAndShifts::tx_prev_note_hash_tree_root, ColumnAndShifts::precomputed_zero, - ColumnAndShifts::tx_sel_revertible_append_note_hash, + ColumnAndShifts::tx_is_revertible, ColumnAndShifts::tx_prev_num_note_hashes_emitted, ColumnAndShifts::tx_discard, ColumnAndShifts::tx_next_note_hash_tree_root diff --git a/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_context_impl.hpp b/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_context_impl.hpp index e4163fab6215..528590e6c894 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_context_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_context_impl.hpp @@ -40,18 +40,12 @@ void tx_contextImpl::accumulate(ContainerOverSubrelations& evals, FF(uint256_t{ 1521641569468562450UL, 
665739211013355724UL, 15332520522532078145UL, 1150206617693738821UL }); const auto constants_AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE = FF(1); const auto tx_NOT_LAST_ROW = in.get(C::tx_sel) * in.get(C::tx_sel_shift); - const auto tx_SEL_CAN_EMIT_NOTE_HASH = in.get(C::tx_is_public_call_request) + - in.get(C::tx_sel_non_revertible_append_note_hash) + - in.get(C::tx_sel_revertible_append_note_hash); - const auto tx_SEL_CAN_EMIT_NULLIFIER = in.get(C::tx_is_public_call_request) + - in.get(C::tx_sel_non_revertible_append_nullifier) + - in.get(C::tx_sel_revertible_append_nullifier); + const auto tx_SEL_CAN_EMIT_NOTE_HASH = in.get(C::tx_is_public_call_request) + in.get(C::tx_sel_append_note_hash); + const auto tx_SEL_CAN_EMIT_NULLIFIER = in.get(C::tx_is_public_call_request) + in.get(C::tx_sel_append_nullifier); const auto tx_SEL_CAN_WRITE_PUBLIC_DATA = in.get(C::tx_is_public_call_request) + in.get(C::tx_is_collect_fee); const auto tx_SEL_CAN_WRITE_WRITTEN_PUBLIC_DATA_SLOTS = in.get(C::tx_is_public_call_request); const auto tx_SEL_CAN_EMIT_PUBLIC_LOG = in.get(C::tx_is_public_call_request); - const auto tx_SEL_CAN_EMIT_L2_L1_MSG = in.get(C::tx_is_public_call_request) + - in.get(C::tx_sel_non_revertible_append_l2_l1_msg) + - in.get(C::tx_sel_revertible_append_l2_l1_msg); + const auto tx_SEL_CAN_EMIT_L2_L1_MSG = in.get(C::tx_is_public_call_request) + in.get(C::tx_sel_append_l2_l1_msg); { using View = typename std::tuple_element_t<0, ContainerOverSubrelations>::View; diff --git a/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_impl.hpp b/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_impl.hpp index f85ba389f749..5a6610f87be7 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/generated/relations/tx_impl.hpp @@ -241,18 +241,15 @@ void txImpl::accumulate(ContainerOverSubrelations& evals, { using View = typename std::tuple_element_t<29, ContainerOverSubrelations>::View; auto tmp 
= (static_cast(in.get(C::tx_is_tree_insert_phase)) - - (static_cast(in.get(C::tx_sel_revertible_append_note_hash)) + - static_cast(in.get(C::tx_sel_non_revertible_append_note_hash)) + - static_cast(in.get(C::tx_sel_revertible_append_nullifier)) + - static_cast(in.get(C::tx_sel_non_revertible_append_nullifier)))); + (static_cast(in.get(C::tx_sel_append_note_hash)) + + static_cast(in.get(C::tx_sel_append_nullifier)))); std::get<29>(evals) += (tmp * scaling_factor); } { using View = typename std::tuple_element_t<30, ContainerOverSubrelations>::View; auto tmp = (static_cast(in.get(C::tx_sel_try_note_hash_append)) - (static_cast(in.get(C::tx_sel)) - static_cast(in.get(C::tx_is_padded))) * - (static_cast(in.get(C::tx_sel_revertible_append_note_hash)) + - static_cast(in.get(C::tx_sel_non_revertible_append_note_hash)))); + static_cast(in.get(C::tx_sel_append_note_hash))); std::get<30>(evals) += (tmp * scaling_factor); } { // MAX_NOTE_HASH_WRITES_REACHED @@ -291,8 +288,7 @@ void txImpl::accumulate(ContainerOverSubrelations& evals, using View = typename std::tuple_element_t<35, ContainerOverSubrelations>::View; auto tmp = (static_cast(in.get(C::tx_sel_try_nullifier_append)) - (static_cast(in.get(C::tx_sel)) - static_cast(in.get(C::tx_is_padded))) * - (static_cast(in.get(C::tx_sel_revertible_append_nullifier)) + - static_cast(in.get(C::tx_sel_non_revertible_append_nullifier)))); + static_cast(in.get(C::tx_sel_append_nullifier))); std::get<35>(evals) += (tmp * scaling_factor); } { @@ -365,8 +361,7 @@ void txImpl::accumulate(ContainerOverSubrelations& evals, using View = typename std::tuple_element_t<45, ContainerOverSubrelations>::View; auto tmp = (static_cast(in.get(C::tx_sel_try_l2_l1_msg_append)) - (static_cast(in.get(C::tx_sel)) - static_cast(in.get(C::tx_is_padded))) * - (static_cast(in.get(C::tx_sel_revertible_append_l2_l1_msg)) + - static_cast(in.get(C::tx_sel_non_revertible_append_l2_l1_msg)))); + static_cast(in.get(C::tx_sel_append_l2_l1_msg))); std::get<45>(evals) += 
(tmp * scaling_factor); } { // MAX_L2_L1_MSG_WRITES_REACHED diff --git a/barretenberg/cpp/src/barretenberg/vm2/simulation/events/bytecode_events.hpp b/barretenberg/cpp/src/barretenberg/vm2/simulation/events/bytecode_events.hpp index c66ad74bf9fd..14e940e371cf 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/simulation/events/bytecode_events.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/simulation/events/bytecode_events.hpp @@ -35,7 +35,7 @@ struct BytecodeRetrievalEvent { AztecAddress address = 0; ContractClassId current_class_id = 0; ContractClass contract_class{}; - FF nullifier_root = 0; + FF nullifier_tree_root = 0; FF public_data_tree_root = 0; AppendOnlyTreeSnapshot retrieved_bytecodes_snapshot_before; AppendOnlyTreeSnapshot retrieved_bytecodes_snapshot_after; diff --git a/barretenberg/cpp/src/barretenberg/vm2/simulation/gadgets/bytecode_manager.cpp b/barretenberg/cpp/src/barretenberg/vm2/simulation/gadgets/bytecode_manager.cpp index 49ac03f86e85..dc2ba0f4ea73 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/simulation/gadgets/bytecode_manager.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/simulation/gadgets/bytecode_manager.cpp @@ -55,7 +55,7 @@ BytecodeId TxBytecodeManager::get_bytecode(const AztecAddress& address) .bytecode_id = FF(0), // Use default ID for error cases .address = address, .current_class_id = FF(0), // Use default ID for error cases - .nullifier_root = tree_states.nullifier_tree.tree.root, + .nullifier_tree_root = tree_states.nullifier_tree.tree.root, .public_data_tree_root = tree_states.public_data_tree.tree.root, .retrieved_bytecodes_snapshot_before = before_snapshot, .retrieved_bytecodes_snapshot_after = before_snapshot, @@ -79,7 +79,7 @@ BytecodeId TxBytecodeManager::get_bytecode(const AztecAddress& address) .bytecode_id = FF(0), // Use default ID for error cases .address = address, .current_class_id = current_class_id, - .nullifier_root = tree_states.nullifier_tree.tree.root, + .nullifier_tree_root = 
tree_states.nullifier_tree.tree.root, .public_data_tree_root = tree_states.public_data_tree.tree.root, .retrieved_bytecodes_snapshot_before = before_snapshot, .retrieved_bytecodes_snapshot_after = before_snapshot, @@ -119,7 +119,7 @@ BytecodeId TxBytecodeManager::get_bytecode(const AztecAddress& address) .address = address, .current_class_id = current_class_id, .contract_class = klass, - .nullifier_root = tree_states.nullifier_tree.tree.root, + .nullifier_tree_root = tree_states.nullifier_tree.tree.root, .public_data_tree_root = tree_states.public_data_tree.tree.root, .retrieved_bytecodes_snapshot_before = before_snapshot, .retrieved_bytecodes_snapshot_after = snapshot_after, diff --git a/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.cpp b/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.cpp index f0de3fc57af9..7f4113e93c77 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.cpp @@ -52,4 +52,10 @@ std::string Stats::to_string(int depth) const return joined; } +std::vector> Stats::snapshot() const +{ + std::lock_guard lock(stats_mutex); + return { stats.begin(), stats.end() }; +} + } // namespace bb::avm2 \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.hpp b/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.hpp index 1ea55eec0397..5f183814a3f9 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tooling/stats.hpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include // To enable stats tracking, compile in RelWithAssert mode. // cmake --preset $PRESET -DCMAKE_BUILD_TYPE=RelWithAssert @@ -46,6 +48,9 @@ class Stats { // prove/logderiv/relation_ms will not be shown. std::string to_string(int depth = 2) const; + // Returns a copy of the stats as (key, value_ms) pairs. Keys retain the "_ms" suffix added by time(). 
+ std::vector> snapshot() const; + private: Stats() = default; diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.cpp index 3d556334c677..b97411c7f7a8 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.cpp @@ -282,7 +282,7 @@ void BytecodeTraceBuilder::process_retrieval( // Tree context (for lookup into contract_instance_retrieval) { C::bc_retrieval_public_data_tree_root, event.public_data_tree_root }, - { C::bc_retrieval_nullifier_tree_root, event.nullifier_root }, + { C::bc_retrieval_nullifier_tree_root, event.nullifier_tree_root }, // Retrieved bytecodes tree context (for lookup into indexed_tree_check) { C::bc_retrieval_retrieved_bytecodes_tree_height, AVM_RETRIEVED_BYTECODES_TREE_HEIGHT }, diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.test.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.test.cpp index 39b6e19a128d..18a39f98773b 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.test.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/bytecode_trace.test.cpp @@ -42,7 +42,7 @@ TEST(BytecodeTraceGenTest, BasicRetrieval) .address = 0xc0ffee, .current_class_id = 34, .contract_class = { .artifact_hash = 100, .private_functions_root = 200 }, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot_before, .retrieved_bytecodes_snapshot_after = snapshot_after, @@ -89,7 +89,7 @@ TEST(BytecodeTraceGenTest, RetrievalExistingClass) .address = 0xc0ffee, .current_class_id = 34, .contract_class = { .artifact_hash = 100, .private_functions_root = 200 }, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot, .retrieved_bytecodes_snapshot_after = snapshot, @@ -141,7 
+141,7 @@ TEST(BytecodeTraceGenTest, MultipleRetrievalEvents) .address = 0xc0ffee, .current_class_id = 34, .contract_class = { .artifact_hash = 100, .private_functions_root = 200 }, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot_before, .retrieved_bytecodes_snapshot_after = snapshot_after_0, @@ -152,7 +152,7 @@ TEST(BytecodeTraceGenTest, MultipleRetrievalEvents) .address = 0xdeadbeef, .current_class_id = 56, .contract_class = { .artifact_hash = 100, .private_functions_root = 200 }, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot_after_0, .retrieved_bytecodes_snapshot_after = snapshot_after_1, @@ -163,7 +163,7 @@ TEST(BytecodeTraceGenTest, MultipleRetrievalEvents) .address = 0xdeadb33f, .current_class_id = 56, .contract_class = { .artifact_hash = 100, .private_functions_root = 200 }, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot_after_1, .retrieved_bytecodes_snapshot_after = snapshot_after_1, @@ -256,7 +256,7 @@ TEST(BytecodeTraceGenTest, RetrievalInstanceNotFoundError) .address = 0xc0ffee, .current_class_id = 0, .contract_class = {}, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot, .retrieved_bytecodes_snapshot_after = snapshot, @@ -306,7 +306,7 @@ TEST(BytecodeTraceGenTest, RetrievalLimitError) .address = 0xc0ffee, .current_class_id = 34, .contract_class = {}, - .nullifier_root = 300, + .nullifier_tree_root = 300, .public_data_tree_root = 400, .retrieved_bytecodes_snapshot_before = snapshot, .retrieved_bytecodes_snapshot_after = snapshot, diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.cpp index 637423b8db69..8a804ed18b89 100644 --- 
a/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.cpp @@ -21,21 +21,21 @@ const std::unordered_map& get_tx_phase_spec_map() .read_pi_start_offset = AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX, .read_pi_length_offset = AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_NULLIFIERS_ROW_IDX, - .non_revertible_append_nullifier = true, + .append_nullifier = true, } }, { TransactionPhase::NR_NOTE_INSERTION, { .read_pi_start_offset = AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX, .read_pi_length_offset = AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_NOTE_HASHES_ROW_IDX, - .non_revertible_append_note_hash = true, + .append_note_hash = true, } }, { TransactionPhase::NR_L2_TO_L1_MESSAGE, { .read_pi_start_offset = AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX, .read_pi_length_offset = AVM_PUBLIC_INPUTS_PREVIOUS_NON_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_L2_TO_L1_MSGS_ROW_IDX, - .non_revertible_append_l2_l1_msg = true, + .append_l2_l1_msg = true, } }, { TransactionPhase::SETUP, { @@ -49,7 +49,7 @@ const std::unordered_map& get_tx_phase_spec_map() .read_pi_start_offset = AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX, .read_pi_length_offset = AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_NULLIFIERS_ROW_IDX, - .revertible_append_nullifier = true, + .append_nullifier = true, .next_phase_on_revert = static_cast(TransactionPhase::TEARDOWN), } }, { TransactionPhase::R_NOTE_INSERTION, @@ -58,7 +58,7 @@ const std::unordered_map& get_tx_phase_spec_map() .read_pi_start_offset = AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_NOTE_HASHES_ROW_IDX, .read_pi_length_offset = AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_NOTE_HASHES_ROW_IDX, - .revertible_append_note_hash = true, 
+ .append_note_hash = true, .next_phase_on_revert = static_cast(TransactionPhase::TEARDOWN), } }, { TransactionPhase::R_L2_TO_L1_MESSAGE, @@ -67,7 +67,7 @@ const std::unordered_map& get_tx_phase_spec_map() .read_pi_start_offset = AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX, .read_pi_length_offset = AVM_PUBLIC_INPUTS_PREVIOUS_REVERTIBLE_ACCUMULATED_DATA_ARRAY_LENGTHS_L2_TO_L1_MSGS_ROW_IDX, - .revertible_append_l2_l1_msg = true, + .append_l2_l1_msg = true, .next_phase_on_revert = static_cast(TransactionPhase::TEARDOWN), } }, { TransactionPhase::APP_LOGIC, diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.hpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.hpp index bfb373e26f33..8f7649f5b270 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/lib/phase_spec.hpp @@ -17,12 +17,9 @@ struct TxPhaseSpec { bool is_revertible = false; uint32_t read_pi_start_offset = 0; uint32_t read_pi_length_offset = 0; - bool non_revertible_append_note_hash = false; - bool non_revertible_append_nullifier = false; - bool non_revertible_append_l2_l1_msg = false; - bool revertible_append_note_hash = false; - bool revertible_append_nullifier = false; - bool revertible_append_l2_l1_msg = false; + bool append_note_hash = false; + bool append_nullifier = false; + bool append_l2_l1_msg = false; uint8_t next_phase_on_revert = 0; }; diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/precomputed_trace.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/precomputed_trace.cpp index 2c0ab208227a..f39415ef39df 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/precomputed_trace.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/precomputed_trace.cpp @@ -400,12 +400,9 @@ void PrecomputedTraceBuilder::process_phase_table(TraceContainer& trace) { C::precomputed_is_revertible, spec.is_revertible ? 
1 : 0 }, { C::precomputed_read_pi_start_offset, spec.read_pi_start_offset }, { C::precomputed_read_pi_length_offset, spec.read_pi_length_offset }, - { C::precomputed_sel_non_revertible_append_note_hash, spec.non_revertible_append_note_hash ? 1 : 0 }, - { C::precomputed_sel_non_revertible_append_nullifier, spec.non_revertible_append_nullifier ? 1 : 0 }, - { C::precomputed_sel_non_revertible_append_l2_l1_msg, spec.non_revertible_append_l2_l1_msg ? 1 : 0 }, - { C::precomputed_sel_revertible_append_note_hash, spec.revertible_append_note_hash ? 1 : 0 }, - { C::precomputed_sel_revertible_append_nullifier, spec.revertible_append_nullifier ? 1 : 0 }, - { C::precomputed_sel_revertible_append_l2_l1_msg, spec.revertible_append_l2_l1_msg ? 1 : 0 }, + { C::precomputed_sel_append_note_hash, spec.append_note_hash ? 1 : 0 }, + { C::precomputed_sel_append_nullifier, spec.append_nullifier ? 1 : 0 }, + { C::precomputed_sel_append_l2_l1_msg, spec.append_l2_l1_msg ? 1 : 0 }, { C::precomputed_next_phase_on_revert, spec.next_phase_on_revert }, }; diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/trace_container.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/trace_container.cpp index 6726b1b68714..f8fd90b31979 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/trace_container.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/trace_container.cpp @@ -132,7 +132,7 @@ void TraceContainer::clear_column(Column col) auto& column_data = (*trace)[static_cast(col)]; std::unique_lock lock(column_data.mutex); column_data.rows.clear(); - column_data.max_row_number = 0; + column_data.max_row_number = -1; column_data.row_number_dirty = false; } diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.cpp index c765ab5df280..c041cc37b6fd 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.cpp @@ -79,30 +79,6 @@ bool 
is_revertible(TransactionPhase phase) return get_tx_phase_spec_map().at(phase).is_revertible; } -/** - * @brief Returns true if the given phase is a note hash insertion phase. - * - * @param phase - * @return true if the given phase is a note hash insert phase, false otherwise. - */ -bool is_note_hash_insert_phase(TransactionPhase phase) -{ - return get_tx_phase_spec_map().at(phase).non_revertible_append_note_hash || - get_tx_phase_spec_map().at(phase).revertible_append_note_hash; -} - -/** - * @brief Returns true if the given phase is a nullifier insertion phase. - * - * @param phase - * @return true if the given phase is a nullifier insertion phase, false otherwise. - */ -bool is_nullifier_insert_phase(TransactionPhase phase) -{ - return get_tx_phase_spec_map().at(phase).non_revertible_append_nullifier || - get_tx_phase_spec_map().at(phase).revertible_append_nullifier; -} - /** * @brief Returns true if the given phase is a one-shot phase, i.e., a phase with exactly one phase event/row. * One-shot phases are COLLECT_GAS_FEES, TREE_PADDING and CLEANUP. @@ -249,16 +225,13 @@ std::vector> handle_phase_spec(TransactionPhase phase) { C::tx_is_revertible, phase_spec.is_revertible ? 1 : 0 }, { C::tx_read_pi_start_offset, phase_spec.read_pi_start_offset }, { C::tx_read_pi_length_offset, phase_spec.read_pi_length_offset }, - { C::tx_sel_non_revertible_append_note_hash, phase_spec.non_revertible_append_note_hash ? 1 : 0 }, - { C::tx_sel_non_revertible_append_nullifier, phase_spec.non_revertible_append_nullifier ? 1 : 0 }, - { C::tx_sel_non_revertible_append_l2_l1_msg, phase_spec.non_revertible_append_l2_l1_msg ? 1 : 0 }, - { C::tx_sel_revertible_append_note_hash, phase_spec.revertible_append_note_hash ? 1 : 0 }, - { C::tx_sel_revertible_append_nullifier, phase_spec.revertible_append_nullifier ? 1 : 0 }, - { C::tx_sel_revertible_append_l2_l1_msg, phase_spec.revertible_append_l2_l1_msg ? 1 : 0 }, + { C::tx_sel_append_note_hash, phase_spec.append_note_hash ? 
1 : 0 }, + { C::tx_sel_append_nullifier, phase_spec.append_nullifier ? 1 : 0 }, + { C::tx_sel_append_l2_l1_msg, phase_spec.append_l2_l1_msg ? 1 : 0 }, { C::tx_next_phase_on_revert, phase_spec.next_phase_on_revert }, // Directly derived from the phase spec but not part of the phase spec struct. - { C::tx_is_tree_insert_phase, (is_note_hash_insert_phase(phase) || is_nullifier_insert_phase(phase)) ? 1 : 0 }, + { C::tx_is_tree_insert_phase, (phase_spec.append_note_hash || phase_spec.append_nullifier) ? 1 : 0 }, }; } @@ -395,10 +368,12 @@ std::vector> handle_append_tree_event(const PrivateAppendTreeEv TransactionPhase phase, const TxContextEvent& state_before) { - if (is_note_hash_insert_phase(phase)) { + const auto& phase_spec = get_tx_phase_spec_map().at(phase); + + if (phase_spec.append_note_hash) { return handle_note_hash_append(event, state_before); } - if (is_nullifier_insert_phase(phase)) { + if (phase_spec.append_nullifier) { return handle_nullifier_append(event, state_before); } diff --git a/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.test.cpp b/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.test.cpp index 84bbc00260ca..4218a14892f9 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.test.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2/tracegen/tx_trace.test.cpp @@ -169,7 +169,7 @@ TEST(TxTraceGenTest, BasicFirstPaddedRow) ROW_FIELD_EQ(tx_phase_value, static_cast(TransactionPhase::NR_NULLIFIER_INSERTION)), ROW_FIELD_EQ(tx_is_padded, 1), ROW_FIELD_EQ(tx_is_tree_insert_phase, 1), - ROW_FIELD_EQ(tx_sel_non_revertible_append_nullifier, 1), + ROW_FIELD_EQ(tx_sel_append_nullifier, 1), ROW_FIELD_EQ(tx_start_tx, 1), ROW_FIELD_EQ(tx_should_read_gas_limit, 1), ROW_FIELD_EQ(tx_gas_limit_pi_offset, AVM_PUBLIC_INPUTS_GAS_SETTINGS_GAS_LIMITS_ROW_IDX), diff --git a/barretenberg/cpp/src/barretenberg/vm2_stub/api_avm.cpp b/barretenberg/cpp/src/barretenberg/vm2_stub/api_avm.cpp index 5a3469d4ab5e..c73fc614b5eb 100644 --- 
a/barretenberg/cpp/src/barretenberg/vm2_stub/api_avm.cpp +++ b/barretenberg/cpp/src/barretenberg/vm2_stub/api_avm.cpp @@ -1,4 +1,5 @@ #include "api_avm.hpp" +#include "barretenberg/api/api_avm.hpp" #include "barretenberg/common/throw_or_abort.hpp" #include @@ -34,4 +35,20 @@ void avm_write_verification_key([[maybe_unused]] const std::filesystem::path& ou throw_or_abort("AVM is not supported in this build. Use the 'bb-avm' binary with full AVM support."); } +AvmProveResult avm_prove_from_bytes([[maybe_unused]] std::vector inputs) +{ + throw_or_abort("AVM is not supported in this build. Use the 'bb-avm' binary with full AVM support."); +} + +bool avm_verify_from_bytes([[maybe_unused]] std::vector proof, + [[maybe_unused]] std::vector public_inputs) +{ + throw_or_abort("AVM is not supported in this build. Use the 'bb-avm' binary with full AVM support."); +} + +bool avm_check_circuit_from_bytes([[maybe_unused]] std::vector inputs) +{ + throw_or_abort("AVM is not supported in this build. Use the 'bb-avm' binary with full AVM support."); +} + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/vm2_stub/stats.cpp b/barretenberg/cpp/src/barretenberg/vm2_stub/stats.cpp new file mode 100644 index 000000000000..bc3004edcc4b --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/vm2_stub/stats.cpp @@ -0,0 +1,34 @@ +// Stub implementations of bb::avm2::Stats. Linked into binaries that don't pull in vm2_sim +// (e.g. lightweight `bb` and WASM builds), so callers like bbapi_avm.cpp resolve symbols in +// every build mode. The stub no-ops because AVM proving cannot run on these targets anyway. 
+ +#include "barretenberg/vm2/tooling/stats.hpp" + +namespace bb::avm2 { + +Stats& Stats::get() +{ + static Stats stats; + return stats; +} + +void Stats::reset() {} + +void Stats::increment(const std::string& /*key*/, uint64_t /*value*/) {} + +void Stats::time(const std::string& /*key*/, const std::function& f) +{ + f(); +} + +std::string Stats::to_string(int /*depth*/) const +{ + return {}; +} + +std::vector> Stats::snapshot() const +{ + return {}; +} + +} // namespace bb::avm2 diff --git a/barretenberg/sol/AGENTS.md b/barretenberg/sol/AGENTS.md new file mode 120000 index 000000000000..681311eb9cf4 --- /dev/null +++ b/barretenberg/sol/AGENTS.md @@ -0,0 +1 @@ +CLAUDE.md \ No newline at end of file diff --git a/barretenberg/ts/src/index.ts b/barretenberg/ts/src/index.ts index 8b1b40242fcf..b6705ff52134 100644 --- a/barretenberg/ts/src/index.ts +++ b/barretenberg/ts/src/index.ts @@ -21,6 +21,7 @@ export { BBApiException } from './bbapi_exception.js'; // Export Point types for use in foundation and other packages export type { + AvmStat, Bn254G1Point, Bn254G2Point, ChonkProof, diff --git a/bootstrap.sh b/bootstrap.sh index b9bc7bacff0e..45d42f2ebdd8 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -785,7 +785,7 @@ case "$cmd" in ;; "ci-network-bench-10tps") # Args: [docker_image] - # Deploys bench-10tps and runs the 38-min sustained 10 TPS benchmark. + # Deploys bench-10tps and runs the 10-min sustained 10 TPS benchmark. # Cleanup is done separately via ci-network-teardown. export CI=1 env_file="${1:?env_file is required}" @@ -823,6 +823,17 @@ case "$cmd" in # RELEASES # ############ "ci-release") + # Verification build for a release tag. Does NOT publish — publishing happens in + # ci-release-publish, gated on ci-compat-e2e so a compat regression blocks the release. + export CI=1 + export USE_TEST_CACHE=1 + if ! semver check $REF_NAME; then + exit 1 + fi + build + ;; + "ci-release-publish") + # Actual publish step. 
`build` cache-hits against ci-release's build of the same commit. export CI=1 export USE_TEST_CACHE=1 if ! semver check $REF_NAME; then exit 1 fi build ;; @@ -902,6 +913,82 @@ case "$cmd" in build yarn-project/end-to-end/bootstrap.sh avm_check_circuit ;; + ############################################# + # BACKWARDS COMPATIBILITY E2E TESTS # + ############################################# + "ci-compat-e2e") + # Runs e2e tests with contract artifacts from every prior stable release since 4.2.0 (version where we committed to + # backwards compatibility). This validates that old contract artifacts work on the current release. + export CI=1 + export USE_TEST_CACHE=0 + export CI_FULL=0 + export NO_FAIL_FAST=1 + + build + + # TODO: bump when v5 commits to backwards-compatible contract artifacts. + # compat_major: major version that has compat guarantees today. + # compat_min_version: earliest stable tag of that major to test against + # (artifacts before this are incompatible due to oracle interface changes). + compat_major="4" + compat_min_version="4.2.0" + + # Get current major version. + current_version=$(jq -r '"."' .release-please-manifest.json) + major=$(semver major "$current_version") + if [ "$major" != "$compat_major" ]; then + echo "Compat e2e tests only apply to v${compat_major}. Current major: v${major}. Skipping." + exit 0 + fi + min_version="$compat_min_version" + + # Fetch tags (EC2 clone may not have them). Fail loud: a silent fetch failure plus an empty + # tag list would publish a real release with zero compat coverage. + if ! git fetch origin 'refs/tags/v*:refs/tags/v*'; then + echo "ERROR: failed to fetch release tags." >&2 + exit 1 + fi + + # Discover stable tags for this major version (no prerelease suffixes). + versions=() + while IFS= read -r tag; do + ver=${tag#v} + # Include only versions >= min_version (sort -V puts smaller first).
+ if [ "$(printf '%s\n%s' "$min_version" "$ver" | sort -V | head -1)" = "$min_version" ]; then + versions+=("$ver") + fi + done < <(git tag -l "v${major}.*" | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" | sort -V) + + # Exclude the current tag when running on a release tag push. + if [[ "${REF_NAME:-}" =~ ^v([0-9]+\.[0-9]+\.[0-9]+)$ ]]; then + current_tag="${BASH_REMATCH[1]}" + filtered=() + for v in "${versions[@]}"; do + [ "$v" != "$current_tag" ] && filtered+=("$v") + done + versions=("${filtered[@]}") + fi + + if [ ${#versions[@]} -eq 0 ]; then + echo "No prior stable versions found for v${major}.x (>= $min_version). Skipping compat tests." + exit 0 + fi + + echo_header "Backwards compatibility e2e tests" + echo "Testing against ${#versions[@]} prior stable version(s): ${versions[*]}" + + # Pre-populate the legacy contract cache on the host. Test containers run with --net=none, so the + # jest resolver's on-demand npm install would fail with EAI_AGAIN. Install here where we have network. + for ver in "${versions[@]}"; do + node yarn-project/end-to-end/src/install_legacy_contracts.cjs "$ver" + done + + # Generate compat test commands for all versions and run them in parallel. 
+ for ver in "${versions[@]}"; do + yarn-project/end-to-end/bootstrap.sh compat_test_cmds "$ver" + done | filter_test_cmds | parallelize + ;; + ########################################## # ROLLUP UPGRADE DEPLOYMENT # ########################################## diff --git a/boxes/boxes/react/src/hooks/useContract.tsx b/boxes/boxes/react/src/hooks/useContract.tsx index ff180dfa5b59..d74eb9ed9458 100644 --- a/boxes/boxes/react/src/hooks/useContract.tsx +++ b/boxes/boxes/react/src/hooks/useContract.tsx @@ -15,13 +15,11 @@ export function useContract() { setWait(true); const wallet = await deployerEnv.getWallet(); const defaultAccountAddress = deployerEnv.getDefaultAccountAddress(); - const salt = Fr.random(); const { BoxReactContract } = await import('../../artifacts/BoxReact'); const deploymentPromise = BoxReactContract.deploy(wallet, Fr.random(), defaultAccountAddress).send({ from: defaultAccountAddress, - contractAddressSalt: salt, }); const { contract } = await toast.promise(deploymentPromise, { diff --git a/boxes/boxes/vanilla/scripts/deploy.ts b/boxes/boxes/vanilla/scripts/deploy.ts index 83a24cc3f913..9699dd35bebf 100644 --- a/boxes/boxes/vanilla/scripts/deploy.ts +++ b/boxes/boxes/vanilla/scripts/deploy.ts @@ -72,12 +72,10 @@ async function deployContract(wallet: Wallet, deployer: AztecAddress) { const sponsoredPFCContract = await getSponsoredPFCContract(); - const { contract } = await PrivateVotingContract.deploy( - wallet, - deployer - ).send({ + const { contract } = await PrivateVotingContract.deploy(wallet, deployer, { + salt, + }).send({ from: deployer, - contractAddressSalt: salt, fee: { paymentMethod: new SponsoredFeePaymentMethod( sponsoredPFCContract.address diff --git a/boxes/boxes/vite/src/hooks/useContract.tsx b/boxes/boxes/vite/src/hooks/useContract.tsx index ff180dfa5b59..d74eb9ed9458 100644 --- a/boxes/boxes/vite/src/hooks/useContract.tsx +++ b/boxes/boxes/vite/src/hooks/useContract.tsx @@ -15,13 +15,11 @@ export function useContract() { 
setWait(true); const wallet = await deployerEnv.getWallet(); const defaultAccountAddress = deployerEnv.getDefaultAccountAddress(); - const salt = Fr.random(); const { BoxReactContract } = await import('../../artifacts/BoxReact'); const deploymentPromise = BoxReactContract.deploy(wallet, Fr.random(), defaultAccountAddress).send({ from: defaultAccountAddress, - contractAddressSalt: salt, }); const { contract } = await toast.promise(deploymentPromise, { diff --git a/ci.sh b/ci.sh index ee31f6309d4a..68675e915327 100755 --- a/ci.sh +++ b/ci.sh @@ -35,6 +35,7 @@ function print_usage { echo_cmd "network-teardown" "Spin up an EC2 instance to teardown a network deployment." echo_cmd "network-tests-kind" "Spin up an EC2 instance to run a KIND-based spartan test." echo_cmd "deploy-rollup-upgrade" "Spin up an EC2 instance to deploy a rollup upgrade." + echo_cmd "compat-e2e" "Spin up an EC2 instance and run backwards compat e2e tests." echo_cmd "release" "Spin up an EC2 instance and run bootstrap release." echo_cmd "shell-new" "Spin up an EC2 instance, clone the repo, and drop into a shell." echo_cmd "shell" "Drop into a shell in the current running build instance container." @@ -268,9 +269,10 @@ case "$cmd" in ;; network-bench-10tps) # Args: [docker_image] - # Deploys the bench-10tps network and runs the 38-min 10 TPS benchmark. + # Deploys the bench-10tps network and runs the 10-min 10 TPS benchmark. 
export CI_DASHBOARD="network" export JOB_ID="x-${2:?namespace is required}-network-bench-10tps" CPUS=16 + export AWS_SHUTDOWN_TIME=${AWS_SHUTDOWN_TIME:-180} export INSTANCE_POSTFIX="n-bench-10tps" bootstrap_ec2 "./bootstrap.sh ci-network-bench-10tps $*" ;; @@ -301,16 +303,50 @@ case "$cmd" in bootstrap_ec2 "./bootstrap.sh ci-deploy-rollup-upgrade $*" ;; + ############################## + # BACKWARDS COMPATIBILITY # + ############################## + compat-e2e) + # Spin up an EC2 instance and run backwards compatibility e2e tests + # against contract artifacts from prior stable releases. + export CI_DASHBOARD="releases" + export JOB_ID="x-compat-e2e" + export AWS_SHUTDOWN_TIME=300 + rc=0 + bootstrap_ec2 "./bootstrap.sh ci-compat-e2e" || rc=$? + # On nightly tags compat-e2e is non-blocking (continue-on-error in ci3.yml), so + # failures otherwise go unnoticed. Notify #team-fairies so they get picked up. + if [ "$rc" -ne 0 ] && [[ "${REF_NAME:-}" == *-nightly.* ]]; then + run_url="https://github.com/${GITHUB_REPOSITORY:-AztecProtocol/aztec-packages}/actions/runs/${GITHUB_RUN_ID:-unknown}" + "$ci3/slack_notify" "Backwards compatibility e2e tests FAILED on nightly tag <${run_url}|${REF_NAME}>" "#team-fairies" + fi + exit "$rc" + ;; + ############ # RELEASES # ############ release) - # Spin up ec2 instance and run the release flow. + # Spin up ec2 instance and run the release-tag verification build (no publish). export CI_DASHBOARD="releases" multi_job_run \ 'x-release amd64 ci-release' \ 'a-release arm64 ci-release' ;; + release-publish) + # Spin up ec2 instance and run the actual publish flow. Gated in ci3.yml on ci + ci-compat-e2e. 
+ export CI_DASHBOARD="releases" + export DENOISE=1 + export DENOISE_WIDTH=32 + run() { + PARENT_LOG_ID=$RUN_ID JOB_ID=$1 INSTANCE_POSTFIX=$1 ARCH=$2 exec denoise "bootstrap_ec2 './bootstrap.sh ci-release-publish'" + } + export -f run + + parallel --termseq 'TERM,10000' --tagstring '{= $_=~s/run (\w+).*/$1/; =}' --line-buffered --halt now,fail=1 ::: \ + 'run x-release-publish amd64' \ + 'run a-release-publish arm64' | DUP=1 cache_log "Release Publish CI run" $RUN_ID + ;; ################## # SHELL SESSIONS # diff --git a/ci3/cache_content_hash b/ci3/cache_content_hash index 4190f1704142..89e49e0f60c1 100755 --- a/ci3/cache_content_hash +++ b/ci3/cache_content_hash @@ -53,9 +53,11 @@ diff="$({ # Check for uncommitted files/changes. # Only use this check if AZTEC_CACHE_COMMIT is the default, otherwise the user is being explicit about using git history. if [ -n "$diff" ] && [ "$AZTEC_CACHE_COMMIT" == "HEAD" ]; then - # Fail. We shouldn't be changing files during the CI run. + # In CI we shouldn't be changing files during the run, so a 'disabled-cache' content hash here + # means something is wrong — fail loudly instead of silently disabling the cache. if [ "$CI" -eq 1 ]; then - echo_stderr "WARNING: Noticed changes to rebuild patterns: $diff" + echo_stderr "ERROR: Noticed changes to rebuild patterns during CI run: $diff" + exit 1 fi # Signal to cache_upload and cache_download to not touch this file. echo "disabled-cache" diff --git a/ci3/source_cache b/ci3/source_cache index af14c3d4f6c9..f9d88c651601 100644 --- a/ci3/source_cache +++ b/ci3/source_cache @@ -14,7 +14,7 @@ function cache_s3_transfer { local bucket="${no_scheme%%/*}" local base_prefix="${no_scheme#*/}" - aws s3 ${S3_BUILD_CACHE_AWS_PARAMS} cp - "s3://${bucket}/${base_prefix}/${prefix}/${key}.log.gz" &>/dev/null + aws s3 ${S3_BUILD_CACHE_AWS_PARAMS:-} cp - "s3://${bucket}/${base_prefix}/${prefix}/${key}.log.gz" &>/dev/null } # Transfer to S3 with explicit subfolder (accepts gzipped data on stdin). 
@@ -27,7 +27,7 @@ function cache_s3_transfer_to { local bucket="${no_scheme%%/*}" local base_prefix="${no_scheme#*/}" - aws s3 ${S3_BUILD_CACHE_AWS_PARAMS} cp - "s3://${bucket}/${base_prefix}/${subfolder}/${key}.log.gz" &>/dev/null + aws s3 ${S3_BUILD_CACHE_AWS_PARAMS:-} cp - "s3://${bucket}/${base_prefix}/${subfolder}/${key}.log.gz" &>/dev/null } # Transfer log to bastion via SSH (accepts gzipped data on stdin) diff --git a/docs/AGENTS.md b/docs/AGENTS.md new file mode 120000 index 000000000000..681311eb9cf4 --- /dev/null +++ b/docs/AGENTS.md @@ -0,0 +1 @@ +CLAUDE.md \ No newline at end of file diff --git a/docs/CLAUDE.md b/docs/CLAUDE.md index 3f2d4498d367..14675526d0d2 100644 --- a/docs/CLAUDE.md +++ b/docs/CLAUDE.md @@ -29,6 +29,8 @@ This project uses Yarn 4.13.0 as specified in the `packageManager` field of pack - `yarn generate:typescript-api` - Generate TypeScript API docs (requires yarn-project to be built) - `yarn generate:typescript-api v3.0.0-devnet.6` - Generate for a specific version - `RELEASE_TYPE=mainnet yarn generate:typescript-api v4.2.0` - Generate with explicit release type +- `yarn generate:node-api-reference` - Generate Node JSON-RPC API reference (requires yarn-project source files and yarn-project/node_modules; no build needed) +- `yarn generate:node-api-reference --target-dir ` - Generate into a specific versioned docs directory The `RELEASE_TYPE` env var overrides version string pattern matching for output folder selection. This is useful when the version string doesn't self-identify its release type. 
diff --git a/docs/README.md b/docs/README.md index 289b73a69000..158de3509dee 100644 --- a/docs/README.md +++ b/docs/README.md @@ -280,7 +280,7 @@ The Node JSON-RPC API reference is auto-generated from the TypeScript interface - `yarn-project/stdlib/src/interfaces/aztec-node-admin.ts` — `AztecNodeAdmin` interface (`nodeAdmin_` methods) - `yarn-project/stdlib/src/block/l2_block_source.ts` — `L2BlockSource` interface (JSDoc for inherited methods) -**Prerequisites:** Only `typescript` is needed (no yarn-project build required — the generator parses source files, not compiled output). +**Prerequisites:** `yarn-project` must have `node_modules/` installed so `npx tsx` can resolve `typescript`. Run `yarn install` from `yarn-project` if needed. No build is required — the generator parses source `.ts` files via the TypeScript Compiler API, not compiled output. **Generate the reference doc:** diff --git a/docs/docs-developers/ai_tooling.md b/docs/docs-developers/ai_tooling.md index 85c82d65033a..15521e3eec61 100644 --- a/docs/docs-developers/ai_tooling.md +++ b/docs/docs-developers/ai_tooling.md @@ -28,7 +28,7 @@ This is an Aztec smart contract project. Always use the `aztec` CLI wrapper inst - **Compile**: `aztec compile` (NOT `nargo compile`). Using `nargo compile` alone produces incomplete artifacts. - **Test**: `aztec test` (NOT `nargo test`). -- **Other nargo commands** like `nargo fmt` and `nargo doc` are fine to use directly. +- **Other nargo commands** like `aztec-nargo fmt` and `aztec-nargo doc` are fine to use directly. The Aztec installer exposes the bundled `nargo` as `aztec-nargo`; bare `nargo` resolves to your own install (if any), not the bundled one. 
## Error Handling diff --git a/docs/docs-developers/docs/aztec-js/how_to_test.md b/docs/docs-developers/docs/aztec-js/how_to_test.md index d22443b724e2..1ae388802997 100644 --- a/docs/docs-developers/docs/aztec-js/how_to_test.md +++ b/docs/docs-developers/docs/aztec-js/how_to_test.md @@ -67,6 +67,22 @@ Test that invalid operations revert as expected: Use `.simulate()` to test reverts without spending gas. The simulation will throw if the transaction would fail onchain. +## Simulating with overrides + +`.simulate()` accepts an `overrides` option that injects values into the simulator's (ephemeral) world-state fork and contract DB before the call runs. The override is scoped to that single simulation and thrown away afterwards. + +Override a public-storage slot: + +```typescript +const result = await contract.methods.read_balance(account).simulate({ + overrides: { + publicStorage: [{ contract: contract.address, slot: BALANCE_SLOT, value: new Fr(1_000_000n) }], + }, +}); +``` + +Use this to set up state preconditions, reproduce production bugs against pinned storage, or exercise rare value branches without orchestrating the contract calls that produce them. + ## Further reading - [How to read contract data](./how_to_read_data.md) diff --git a/docs/docs-developers/docs/aztec-nr/debugging.md b/docs/docs-developers/docs/aztec-nr/debugging.md index a3cd449bb554..720b2b6da3e3 100644 --- a/docs/docs-developers/docs/aztec-nr/debugging.md +++ b/docs/docs-developers/docs/aztec-nr/debugging.md @@ -35,6 +35,33 @@ LOG_LEVEL=verbose aztec start --local-network | `No public key registered for address` | Call `wallet.registerSender(...)` | | `Direct invocation of ... 
functions is not supported` | Use `self.call()`, `self.view()`, or `self.enqueue()` to [call contract functions](framework-description/calling_contracts.md) | | `Failed to solve brillig function` | Check function parameters and note validity | +| `Cross-contract utility call denied` | Configure an `authorizeUtilityCall` [execution hook](#cross-contract-utility-call-denied) on your PXE | + +#### Cross-contract utility call denied + +When a contract executes a utility function that calls into a different contract, PXE asks an **execution hook** whether the call should be allowed. If no hook is configured, or the hook denies the request, you will see: + +``` +Cross-contract utility call denied: . attempted to call : (). +``` + +To fix this, pass an `authorizeUtilityCall` hook when creating your PXE: + +```typescript +import { PXE } from "@aztec/pxe/server"; + +const pxe = await PXE.create({ + // ...other options + hooks: { + authorizeUtilityCall: async (request) => { + // Inspect request.caller, request.target, request.functionSelector, etc. + return { authorized: true }; + }, + }, +}); +``` + +The hook receives a `UtilityCallAuthorizationRequest` with the caller address, target address, function selector, function name, arguments, and caller context (`'private'` or `'utility'`). Return `{ authorized: true }` to allow or `{ authorized: false, reason: '...' }` to deny with a message. 
### Circuit Errors diff --git a/docs/docs-developers/docs/aztec-nr/framework-description/advanced/how_to_profile_transactions.md b/docs/docs-developers/docs/aztec-nr/framework-description/advanced/how_to_profile_transactions.md index 8fa480a2bf70..1374771ef16d 100644 --- a/docs/docs-developers/docs/aztec-nr/framework-description/advanced/how_to_profile_transactions.md +++ b/docs/docs-developers/docs/aztec-nr/framework-description/advanced/how_to_profile_transactions.md @@ -63,6 +63,14 @@ BB=/path/to/bb aztec profile gates ./target ``` ::: +:::tip Machine-readable output +For build automation, use `--json` to emit gate counts as a JSON array. Each entry has `name`, `type` (`contract-function` or `program`), and `gates`: + +```bash +aztec profile gates --json ./target +``` +::: + ### Flamegraphs To generate an interactive flamegraph SVG for a specific function: diff --git a/docs/docs-developers/docs/aztec-nr/framework-description/functions/attributes.md b/docs/docs-developers/docs/aztec-nr/framework-description/functions/attributes.md index 8e96cbc6f9e3..15581e997f04 100644 --- a/docs/docs-developers/docs/aztec-nr/framework-description/functions/attributes.md +++ b/docs/docs-developers/docs/aztec-nr/framework-description/functions/attributes.md @@ -42,10 +42,10 @@ A private function operates on private information, and is executed by the user `#[external("private")]` is just syntactic sugar. At compile time, the Aztec.nr framework inserts code that allows the function to interact with the [kernel](../../../foundational-topics/advanced/circuits/private_kernel.md). -If you are interested in what exactly the macros are doing we encourage you to run `nargo expand` on your contract. +If you are interested in what exactly the macros are doing we encourage you to run `aztec-nargo expand` on your contract. This will display your contract's code after the transformations are performed. 
-(If you are using VSCode you can display the expanded code by pressing `CMD + Shift + P` and typing `nargo expand` and selecting `Noir: nargo expand on current package`.) +(If you are using VSCode you can display the expanded code by pressing `CMD + Shift + P` and typing `nargo expand` and selecting `Noir: nargo expand on current package`. Make sure the Noir extension's `Nargo Path` is set to `aztec-nargo` — see the [Noir VSCode extension guide](../../installation.md) for setup.) Under the hood, the macro: @@ -85,7 +85,7 @@ Under the hood, the macro: - Wraps the function body in a scope that handles context setup and return values - Marks the function as `pub` and `unconstrained`, meaning it doesn't generate proofs and is executed directly by the sequencer -To see the exact generated code, run `nargo expand` on your contract. +To see the exact generated code, run `aztec-nargo expand` on your contract. ## Constrained `view` Functions #[view] @@ -213,7 +213,7 @@ struct CustomNote { The `owner` is passed as a runtime parameter to the `compute_note_hash` and `compute_nullifier` functions, not stored as a field on the note. -To see the exact generated code, run `nargo expand` on your contract. +To see the exact generated code, run `aztec-nargo expand` on your contract. Key things to keep in mind: @@ -246,7 +246,7 @@ struct Storage { } ``` -To see the exact generated code, run `nargo expand` on your contract. Alternatively, use `#[storage_no_init]` if you need manual control over storage slot allocation. +To see the exact generated code, run `aztec-nargo expand` on your contract. Alternatively, use `#[storage_no_init]` if you need manual control over storage slot allocation. 
Key things to keep in mind: diff --git a/docs/docs-developers/docs/aztec-nr/installation.md b/docs/docs-developers/docs/aztec-nr/installation.md index 7aa7a90b5a02..5eb244c3c715 100644 --- a/docs/docs-developers/docs/aztec-nr/installation.md +++ b/docs/docs-developers/docs/aztec-nr/installation.md @@ -7,22 +7,24 @@ description: Learn how to install and configure the Noir Language Server for a b Install the [Noir Language Support extension](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir) to get syntax highlighting, syntax error detection, and go-to definitions for your Aztec contracts. -The extension drives its language server with `nargo`. The Aztec installer installs its own version of `nargo` and adds that directory to your `PATH`, so in most cases you do not need to configure anything else. Verify the binary is on your `PATH`: +The extension drives its language server with `nargo`. The Aztec installer ships a bundled `nargo` and exposes it as the `aztec-nargo` symlink on your `PATH`. Bare `nargo` is intentionally not provided so it does not shadow your own install (if any). Verify the symlink is on your `PATH`: ```bash -which nargo -# expected: $HOME/.aztec/ +which aztec-nargo +# expected: $HOME/.aztec/current/bin/aztec-nargo ``` If you have not installed the Aztec toolchain yet, follow [Getting Started on Local Network](../../getting_started_on_local_network.md) first. ## Configure the extension -Leave the extension's `Noir: Nargo Path` setting empty so it auto-discovers `nargo` from your `PATH`. To confirm, hover over **Nargo** in the VSCode status bar in the bottom right corner — it should show the path under the result from `which nargo`. +Set the extension's `Noir: Nargo Path` setting to the absolute path printed by `which aztec-nargo` (for example `$HOME/.aztec/current/bin/aztec-nargo`), then reload the window. `aztec-nargo` is a symlink to the bundled `nargo`, so any tool that invokes it speaks plain `nargo` (LSP included). 
-If auto-discovery fails, set `Noir: Nargo Path` to the absolute path printed by `which nargo`, then reload the window.
+To confirm the extension is using the bundled toolchain, hover over **Nargo** in the VSCode status bar in the bottom right corner: it should show the path you set.
+
+If you have your own `nargo` install and want the extension to use that instead, leave `Noir: Nargo Path` empty so the extension auto-discovers `nargo` from your `PATH`.
 
 ## Troubleshooting
 
-- **LSP reports `startFailed` after setting a custom path**: clear `Noir: Nargo Path`, reload the window, and let auto-discovery take over.
-- **`which nargo` points outside `$HOME/.aztec/current/bin`**: another `nargo` earlier on your `PATH` is shadowing the Aztec-provided one. Either remove it or set `Noir: Nargo Path` explicitly to the Aztec-provided `nargo`.
+- **LSP reports `startFailed` after setting a custom path**: confirm `aztec-nargo` is executable and that the path is correct, reload the window, and check the **Output** panel for the language server log.
+- **Extension picks up the wrong `nargo`**: the Aztec installer no longer puts bare `nargo` on `PATH`. Set `Noir: Nargo Path` explicitly to the absolute path of `aztec-nargo` (for the bundled version) or to your own install (for any other version).
diff --git a/docs/docs-developers/docs/cli/aztec_cli_reference.md b/docs/docs-developers/docs/cli/aztec_cli_reference.md index 47b1da49b3e3..7da0e201c0e5 100644 --- a/docs/docs-developers/docs/cli/aztec_cli_reference.md +++ b/docs/docs-developers/docs/cli/aztec_cli_reference.md @@ -791,6 +791,7 @@ aztec profile gates [options] [target-dir] **Options:** +- `--json` - output gate counts as JSON instead of a table (default: false) - `-h --help` - display help for command ### aztec propose-with-lock diff --git a/docs/docs-developers/docs/resources/glossary.md b/docs/docs-developers/docs/resources/glossary.md index eb3bae3f06d8..69c58b110e41 100644 --- a/docs/docs-developers/docs/resources/glossary.md +++ b/docs/docs-developers/docs/resources/glossary.md @@ -68,6 +68,8 @@ Merkle trees in Aztec are used to store cryptographic commitments. They are used With `nargo`, you can start new projects, compile, execute, and test your Noir programs. +The Aztec installer ships its own pinned `nargo` and exposes it as the `aztec-nargo` wrapper on `PATH` (bare `nargo` is intentionally not provided so it does not shadow your own install). For Aztec contract work, prefer `aztec compile` and `aztec test`; for plain Noir commands, use `aztec-nargo` (or your own `nargo` install). + You can find more information in the nargo installation docs [here](https://noir-lang.org/docs/getting_started/quick_start#installation) and the nargo command reference [here](https://noir-lang.org/docs/reference/nargo_commands). ### Noir diff --git a/docs/docs-developers/docs/resources/migration_notes.md b/docs/docs-developers/docs/resources/migration_notes.md index 21177ac51407..f1333f13d158 100644 --- a/docs/docs-developers/docs/resources/migration_notes.md +++ b/docs/docs-developers/docs/resources/migration_notes.md @@ -9,6 +9,182 @@ Aztec is in active development. 
Each version may introduce breaking changes that ## TBD +### [Aztec.nr] TXE `call_public_incognito` no longer takes a `from` parameter + +`TestEnvironment::call_public_incognito` previously accepted a `from` address that was silently ignored (the function always uses a null `msg_sender`). The `from` parameter has been removed. + +```diff +- env.call_public_incognito(sender, SampleContract::at(addr).some_function()); ++ env.call_public_incognito(SampleContract::at(addr).some_function()); +``` + +If you need to call a public function *with* a sender, use `call_public` instead. + +### [Aztec.nr] TXE `view_public_incognito` is deprecated + +`TestEnvironment::view_public_incognito` is now deprecated in favor of `view_public`, which has the same behavior (null `msg_sender`, static call). + +```diff +- env.view_public_incognito(SampleContract::at(addr).some_view()); ++ env.view_public(SampleContract::at(addr).some_view()); +``` + +### [Aztec.js] `DeployMethod` address-affecting parameters move to construction time + +Salt, deployer, and public keys are now passed when the `DeployMethod` is constructed, not on every call to `send` / `simulate` / `request` / `getInstance`. This locks the contract address once it is determined and prevents the silent salt-cache poisoning bug where the address could change between calls. + +`contractAddressSalt`, `deployer`, and `universalDeploy` have been removed from `DeployOptions`, `RequestDeployOptions`, and `SimulateDeployOptions`. They now live on a new `DeployInstantiationOptions` argument passed at construction. `deployer` and `universalDeploy` are mutually exclusive; passing both throws. `Contract.deployWithPublicKeys` and the generated `MyContract.deployWithPublicKeys(...)` factories have been removed; pass `publicKeys` via the `instantiation` argument of `deploy(...)` instead. The buggy synchronous `address` and `partialAddress` getters have been removed and replaced with `getAddress()` and `getPartialAddress()` (both `async`). 
+ +The compact form keeps working: `MyContract.deploy(wallet, ...args).send({ from: alice })` deploys with `deployer = alice` and `salt = random()`, exactly as before. The deployer is locked the first time `send` / `simulate` / `profile` is called (from `options.from`, with `NO_FROM` or undefined → universal) and cannot change after that: + +- Subsequent `send` / `simulate` / `profile` calls with a `from` that would imply a different deployer throw, instead of silently producing a different address. +- A lock to universal (`AztecAddress.ZERO`) is the only one compatible with any sender, since the universal address does not depend on `from`. +- A lock to a concrete address only accepts that exact `from` on subsequent calls. + +**Migration:** + +Universal deployment with a fixed salt: + +```diff +- const deploy = MyContract.deploy(wallet, ...args); +- await deploy.send({ +- from: alice, +- contractAddressSalt: salt, +- universalDeploy: true, +- }); ++ const deploy = MyContract.deploy(wallet, ...args, { salt, universalDeploy: true }); ++ await deploy.send({ from: alice }); +``` + +Non-universal deploy where `from` doubles as the deployer: + +```diff +- const deploy = MyContract.deploy(wallet, ...args); +- await deploy.send({ from: alice, contractAddressSalt: salt }); ++ const deploy = MyContract.deploy(wallet, ...args, { salt }); ++ await deploy.send({ from: alice }); +``` + +If you need to read the address before sending, lock the deployer at construction: + +```typescript +const deploy = MyContract.deploy(wallet, ...args, { salt, deployer: alice }); +const address = await deploy.getAddress(); // resolves; deployer was locked at construction +await deploy.send({ from: alice }); // deploys at the address `getAddress` returned +``` + +Universal deploys can be sent by any account, since the universal address does not depend on `from`: + +```typescript +const deploy = MyContract.deploy(wallet, ...args, { universalDeploy: true }); +await deploy.send({ from: bob }); // OK, 
universal accepts any sender
+```
+
+A lock to a concrete deployer rejects sending from a different account, instead of silently deploying at a different address:
+
+```typescript
+const deploy = MyContract.deploy(wallet, ...args, { deployer: alice });
+await deploy.send({ from: bob }); // throws: deployer is locked to alice
+```
+
+`deployWithPublicKeys` is gone; pass `publicKeys` in the instantiation options instead:
+
+```diff
+- const deploy = MyContract.deployWithPublicKeys(publicKeys, wallet, ...args);
++ const deploy = MyContract.deploy(wallet, ...args, { publicKeys });
+```
+
+`ContractDeployer.deploy(...)` now takes the constructor arguments as an array, with the instantiation options as its second parameter (pass `{}` to use defaults and rely on lazy locking from `from`):
+
+```diff
+- const cd = new ContractDeployer(artifact, wallet);
+- await cd.deploy(...ctorArgs).send({ from: alice, contractAddressSalt: salt });
++ const cd = new ContractDeployer(artifact, wallet);
++ await cd.deploy(ctorArgs, { salt }).send({ from: alice });
+```
+
+The synchronous `address` / `partialAddress` getters are gone:
+
+```diff
+- const address = deploy.address; // sync, possibly undefined
+- const partial = await deploy.partialAddress; // sync getter wrapping async value
++ const address = await deploy.getAddress(); // requires the deployer to be locked
++ const partial = await deploy.getPartialAddress(); // requires the deployer to be locked
+```
+
+`getInstance()` no longer takes options; use the construction-time instantiation instead:
+
+```diff
+- const instance = await deploy.getInstance({ contractAddressSalt: salt });
++ const deploy = MyContract.deploy(wallet, ...args, { salt, deployer: alice });
++ const instance = await deploy.getInstance();
+```
+
+### [aztec-up] Bundled binaries are no longer exposed under bare names on `PATH`
+
+The Aztec installer previously placed bundled binaries directly into `$HOME/.aztec/current/bin` under bare names (`forge`, `nargo`, `bb`, `pxe`, ...). 
Anything with the same name in your own `PATH` was silently shadowed in unrelated projects. + +Every bundled binary is now exposed only under an `aztec-` prefixed name in `$HOME/.aztec/current/bin`. Bare names are not on `PATH` at all and resolve to your own install (if any). + +| Was on `PATH` | Now | +| ------------------ | ------------------------ | +| `forge` | `aztec-forge` | +| `cast` | `aztec-cast` | +| `anvil` | `aztec-anvil` | +| `chisel` | `aztec-chisel` | +| `nargo` | `aztec-nargo` | +| `noir-profiler` | `aztec-noir-profiler` | +| `bb` | `aztec-bb` | +| `bb-cli` | `aztec-bb-cli` | +| `pxe` | `aztec-pxe` | +| `txe` | `aztec-txe` | +| `validator-client` | `aztec-validator-client` | +| `blob-client` | `aztec-blob-client` | + +`aztec`, `aztec-wallet`, and `aztec-up` keep their existing names. + +If you relied on a bundled bare-name binary for general use: + +- For Aztec contract work, prefer `aztec compile` and `aztec test`. +- For other Noir / Foundry commands, invoke the `aztec-*` symlink directly (e.g. `aztec-nargo fmt`, `aztec-forge build`). +- Or install Foundry / nargo separately via `foundryup` / `noirup`. + +If you set `Noir: Nargo Path` in the VS Code Noir extension to `$HOME/.aztec/current/bin/nargo`, change it to `$HOME/.aztec/current/bin/aztec-nargo` (the symlink is a drop-in for `nargo`). See the [Noir VSCode Extension guide](../aztec-nr/installation.md) for details. + +### [Stdlib] `SimulationOverrides.contracts` entries no longer carry an artifact + +`ContractOverrides` entries are now `{ instance }` only. 
To override a contract's artifact, pre-register the target class via `pxe.registerContractClass(artifact)` and set the override instance's `currentContractClassId` to that class id: + +```diff +- const instance = await getContractInstanceFromInstantiationParams(stubArtifact, { salt: Fr.random() }); ++ const instance = await pxe.getContractInstance(addr); ++ await pxe.registerContractClass(stubArtifact); ++ const stubClassId = (await getContractClassFromArtifact(stubArtifact)).id; +- overrides = { contracts: { [addr.toString()]: { instance, artifact: stubArtifact } } }; ++ overrides = { contracts: { [addr.toString()]: { instance: { ...instance, currentContractClassId: stubClassId } } } }; +``` + +### [Aztec.js] `simulate` accepts `overrides` for testing "what if storage value was X?" + +`Contract.methods.foo(...).simulate(...)` now accepts an `overrides` option that injects values into the simulator's (ephemeral) world-state fork and contract DB before the call runs. The supported field is `publicStorage`, which writes a `(contract, slot, value)` into the public-data tree as if a previous tx had set it. Overrides are thrown away after simulation completes. + +```typescript +const result = await contract.methods.read_balance(account).simulate({ + overrides: { + publicStorage: [{ contract: contract.address, slot: BALANCE_SLOT, value: new Fr(1_000_000n) }], + }, +}); +``` + +The same option flows through `wallet.simulateTx` and eventually to `simulatePublicCalls` RPC on `AztecNode`. + +Direct callers of the `SimulationOverrides` constructor must switch from a positional `contracts` argument to an options bag: + +```diff +- new SimulationOverrides(contracts); ++ new SimulationOverrides({ contracts }); +``` + ### [PXE] `proveTx` takes an options bag `PXE.proveTx` used to accept `scopes` as a positional argument; it now takes an options bag consistent with `simulateTx` and `profileTx`, and adds an optional `senderForTags` field. 
Update direct callers: @@ -36,6 +212,58 @@ The wallet SDK now supplies the default sender-for-tags from the transaction's ` The save/restore idiom previously used in account-contract constructors (`get` → `set(self.address)` → work → `set(prev)`) is also no longer needed and has been removed: the override never leaks out of the constructor, so there is nothing to restore. +### [Aztec Node] Unified `getBlock` / `getCheckpoint` RPC API + +The Aztec Node JSON-RPC surface for fetching blocks and checkpoints has been consolidated. The unified `getBlock` and `getCheckpoint` methods return uniform `BlockResponse` / `CheckpointResponse` shapes. The extra fields a caller cares about (tx bodies, L1 publish info, committee attestations, nested blocks) are now controlled by an `options` argument rather than by picking the right method. `getBlocks` and `getCheckpoints` retain their names but now return the new response shapes. + +**Removed methods:** + +| Removed | Replacement | +| ---------------------------------- | -------------------------------------------- | +| `getBlockByHash(hash)` | `getBlock(hash)` or `getBlock({ hash })` | +| `getBlockByArchive(archive)` | `getBlock({ archive })` | +| `getBlockHeaderByArchive(archive)` | `getBlock({ archive }).then(r => r?.header)` | +| `getProvenBlockNumber()` | `getBlockNumber('proven')` | +| `getCheckpointedBlockNumber()` | `getBlockNumber('checkpointed')` | + +**Deprecated but still present** (scheduled for removal once internal consumers of the archiver shape are rewired): `getL2Tips` (use `getChainTips`), `getBlockHeader` (use `getBlock(param).then(r => r?.header)`), `getCheckpointedBlocks` (use `getBlocks(from, limit, { includeL1PublishInfo: true, includeAttestations: true })`), `getCheckpointsDataForEpoch` (use `getCheckpoints(from, limit)` over the epoch's checkpoint range). Do not adopt these in new code. 
+ +**New response shapes:** `BlockResponse` always carries `header`, `archive`, `hash`, `number`, `checkpointNumber`, and `indexWithinCheckpoint`. `body`, `l1` (an `L1PublishInfo` discriminated union), and `attestations` are present only when the matching include option is set. `CheckpointResponse` mirrors this for checkpoints, with `blocks` gated on `includeBlocks`, and always carries `feeAssetPriceModifier` as a base field. The response types are generic over the options object, so passing a literal `{ includeTransactions: true }` narrows the return type and `response.body` becomes non-optional. + +**Nested blocks on `getCheckpoint`:** only `includeTransactions` is forwarded to the blocks embedded by `includeBlocks: true`. `includeL1PublishInfo` and `includeAttestations` on a checkpoint request attach L1 / attestation data to the checkpoint itself, not to its nested blocks. + +**Return type changes for `getBlocks` / `getCheckpoints`:** the return type is now `BlockResponse[]` / `CheckpointResponse[]` instead of `L2Block[]` / `PublishedCheckpoint[]`. Callers that previously consumed fields of `L2Block` (e.g. `.body`) must now opt in via `{ includeTransactions: true }`; callers that consumed `PublishedCheckpoint.checkpoint.blocks` must opt in via `{ includeBlocks: true }`. 
+ +**Migration for wallet/SDK consumers (`@aztec/aztec.js`, `@aztec/wallet-sdk`):** + +```diff +- const block = await node.getBlockByHash(hash); ++ const block = await node.getBlock(hash, { includeTransactions: true }); + +- const archiveBlock = await node.getBlockByArchive(archive); ++ const archiveBlock = await node.getBlock({ archive }, { includeTransactions: true }); + +- const provenNumber = await node.getProvenBlockNumber(); ++ const provenNumber = await node.getBlockNumber('proven'); + +- const checkpointedNumber = await node.getCheckpointedBlockNumber(); ++ const checkpointedNumber = await node.getBlockNumber('checkpointed'); + +- const tips = await node.getL2Tips(); ++ const tips = await node.getChainTips(); +``` + +`getBlockHeader`, `getCheckpointedBlocks`, `getCheckpointsDataForEpoch`, and `getL2Tips` continue to work in this release but are deprecated; migrate to the replacements above. + +**Chain-tip selectors:** `getBlockNumber` and `getCheckpointNumber` now accept an optional `ChainTip` argument (`'proposed' | 'checkpointed' | 'proven' | 'finalized'`). Note the semantic difference: on the block side `'proposed'` means the latest proposed block (chain head), whereas on the checkpoint side `'proposed'` resolves to the latest L1-confirmed checkpoint. Pre-L1-confirmation checkpoints are not exposed over RPC. + +**Block parameter variants:** `BlockParameter` now also accepts a block hash, an archive root, and chain-tip names. The existing `number | 'latest'` forms continue to work — `'latest'` is an alias for `'proposed'`. + +**Impact**: Source changes are required anywhere the removed methods are called. Type changes are required anywhere `L2Block` / `BlockHeader` / `CheckpointedL2Block` were consumed from the RPC — those call sites now receive `BlockResponse` / `CheckpointResponse` and must request the fields they need via `options`. Production nodes will reject JSON-RPC calls to the removed method names. 
+ +### [Aztec Node] `feeAssetPriceModifier` now correctly populated on confirmed checkpoints + +Confirmed checkpoints previously reported `feeAssetPriceModifier = 0n` regardless of the value observed on L1, because the archiver dropped the field on checkpoint confirmation. The field is now persisted and returned correctly on `CheckpointResponse`. Any wallet or indexer logic that special-cased `0n` as a sentinel for "no modifier" will need to be updated; it is now a valid value in its own right. ### [CLI] `aztec-up` no longer exposes transitive npm bins on PATH @@ -92,6 +320,7 @@ Regenerate these values from a fresh build of this release — do not copy them `poseidon2HashWithSeparator` is exported from `@aztec/foundation/crypto/poseidon`; the `DomainSeparator` enum and the matching `DOM_SEP__*` constants are defined in `@aztec/constants`. The new entries listed above are additions — existing separator names are unchanged. For TypeScript consumers, `@aztec/stdlib/hash` exports ready-made helpers that wrap the right separator: `computeMerkleHash` (append-only), `computeNullifierMerkleHash`, and `computePublicDataMerkleHash`. Prefer these over calling `poseidon2HashWithSeparator` directly so the separator choice stays colocated with the tree. + ### [Aztec.nr] `emit_private_log_unsafe` / `emit_raw_note_log_unsafe` are deprecated `emit_private_log_unsafe` and `emit_raw_note_log_unsafe` are deprecated and will be removed in a future release. Migrate to the new `emit_private_log_vec_unsafe` / `emit_raw_note_log_vec_unsafe` functions, which take a `BoundedVec` instead of the `(log: [Field; PRIVATE_LOG_CIPHERTEXT_LEN], length: u32)` pair. @@ -145,6 +374,7 @@ This has been done because this is the format expected by the functionality in p The empire slashing model has been removed. Only the tally-based slashing model remains, and it has been renamed from `TallySlashingProposer` to `SlashingProposer`. 
**L1 contract changes:** + - `SlasherFlavor` enum removed from `ISlasher.sol` - `RollupConfigInput.slasherFlavor` (enum) replaced with `slasherEnabled` (bool) - `TallySlashingProposer` contract renamed to `SlashingProposer` @@ -154,6 +384,7 @@ The empire slashing model has been removed. Only the tally-based slashing model - All `TallySlashingProposer__` error prefixes renamed to `SlashingProposer__` **Environment variable changes:** + ```diff - AZTEC_SLASHER_FLAVOR=tally # was: "tally" | "empire" | "none" + AZTEC_SLASHER_ENABLED=true # now a boolean @@ -166,6 +397,7 @@ The empire slashing model has been removed. Only the tally-based slashing model **Node admin API:** `getSlashPayloads()` method removed. **TypeScript config changes:** + ```diff - slasherFlavor: 'tally' | 'none' + slasherEnabled: boolean diff --git a/docs/docs-developers/docs/tutorials/contract_tutorials/recursive_verification.md b/docs/docs-developers/docs/tutorials/contract_tutorials/recursive_verification.md index acc4aec289f2..dd2e4562854b 100644 --- a/docs/docs-developers/docs/tutorials/contract_tutorials/recursive_verification.md +++ b/docs/docs-developers/docs/tutorials/contract_tutorials/recursive_verification.md @@ -51,7 +51,7 @@ This pattern transforms arbitrarily large computations into fixed-size proof ver The recursive verification pattern follows this data flow: 1. **Circuit Definition**: Write a Noir circuit that defines the computation you want to prove -2. **Compilation**: Compile the circuit with `nargo compile` to produce bytecode +2. **Compilation**: Compile the circuit with `aztec-nargo compile` (or your own `nargo compile` install) to produce bytecode 3. **Proof Generation**: Execute the circuit offchain and generate an UltraHonk proof using [Barretenberg](https://github.com/AztecProtocol/barretenberg) 4. 
**Onchain Verification**: Submit the proof to an Aztec contract that verifies it using the stored [verification key](../../resources/glossary.md#verification-key) hash @@ -106,10 +106,10 @@ Start by writing a simple circuit that proves two field values are not equal. Th ### Create the Circuit Project -Use `nargo new` to generate the project structure: +Use `aztec-nargo new` to generate the project structure (the Aztec installer ships `nargo` as `aztec-nargo`; substitute your own `nargo` if its version matches `aztec-nargo --version`): ```bash -nargo new circuit +aztec-nargo new circuit ``` This creates the following structure: @@ -163,7 +163,7 @@ Update `circuit/Nargo.toml` (see [Noir crates and packages](https://noir-lang.or ```bash cd circuit -nargo compile +aztec-nargo compile ``` This generates `target/hello_circuit.json` containing: @@ -176,7 +176,7 @@ The TypeScript code uses the ABI to correctly format inputs during witness gener ### Test the Circuit ```bash -nargo test +aztec-nargo test ``` Expected output: @@ -669,7 +669,7 @@ If you want to run all commands at once, or if you're starting fresh, here's the yarn install # Compile the Noir circuit -cd circuit && nargo compile && cd .. +cd circuit && aztec-nargo compile && cd .. # Compile the Aztec contract and generate TypeScript bindings yarn ccc diff --git a/docs/docs-developers/docs/tutorials/js_tutorials/aave_bridge.md b/docs/docs-developers/docs/tutorials/js_tutorials/aave_bridge.md index 7ae089ed6e70..e4b4166768eb 100644 --- a/docs/docs-developers/docs/tutorials/js_tutorials/aave_bridge.md +++ b/docs/docs-developers/docs/tutorials/js_tutorials/aave_bridge.md @@ -270,7 +270,7 @@ For local testing, you'll use simplified mocks of Aave's lending pool. The mock :::tip Mock vs Real Aave -In production, replace `MockAavePool` with Aave V3's `IPool` interface at `0x87870Bca3F3fD6335C3F4ce8392D69350B4fA4E2` (Ethereum mainnet). 
The portal contract's `IAavePool` interface already matches Aave V3's function signatures. For realistic testing, fork mainnet with `anvil --fork-url `. +In production, replace `MockAavePool` with Aave V3's `IPool` interface at `0x87870Bca3F3fD6335C3F4ce8392D69350B4fA4E2` (Ethereum mainnet). The portal contract's `IAavePool` interface already matches Aave V3's function signatures. For realistic testing, fork mainnet with `aztec-anvil --fork-url ` (the Aztec installer ships Foundry's `anvil` as `aztec-anvil`; substitute your own `anvil` if its version matches `aztec-anvil --version`). ::: @@ -504,7 +504,7 @@ L1-to-L2 messages need 2 L2 blocks after the L1 transaction before they become c ## Next Steps -- **Test with a mainnet fork**: Use `anvil --fork-url` to test against real Aave +- **Test with a mainnet fork**: Use `aztec-anvil --fork-url` (or your own `anvil` install) to test against real Aave - **Add private deposits**: Use the `claim_private` and `exit_to_l1_private` functions for privacy-preserving DeFi - **Build a frontend**: Add a web UI for easy depositing and claiming - **Compose with other protocols**: The same pattern works for Uniswap, Compound, or any L1 DeFi protocol diff --git a/docs/docs-developers/docs/tutorials/testing_governance_rollup_upgrade.md b/docs/docs-developers/docs/tutorials/testing_governance_rollup_upgrade.md index 9794f7af1f4c..d57fa538d4d6 100644 --- a/docs/docs-developers/docs/tutorials/testing_governance_rollup_upgrade.md +++ b/docs/docs-developers/docs/tutorials/testing_governance_rollup_upgrade.md @@ -70,8 +70,10 @@ git clone --depth 1 https://github.com/foundry-rs/forge-std forge-std git clone --depth 1 https://github.com/OpenZeppelin/openzeppelin-contracts openzeppelin-contracts cd .. -# Install solc (uses forge's built-in svm) -forge build --use 0.8.30 src/core/libraries/ConstantsGen.sol +# Install solc (uses forge's built-in svm). 
The Aztec installer ships +# Foundry as `aztec-forge`/`aztec-cast`/`aztec-anvil` -- substitute your +# own `forge` install if you have one. +aztec-forge build --use 0.8.30 src/core/libraries/ConstantsGen.sol cp ~/.svm/0.8.30/solc-0.8.30 ./solc-0.8.30 # Copy the HonkVerifier to the generated directory (required for build) @@ -130,7 +132,7 @@ export AZTEC_INITIAL_ETH_PER_FEE_ASSET=10000000 ## Step 4: Deploy New Rollup ```bash -forge script script/deploy/DeployRollupForUpgrade.s.sol:DeployRollupForUpgrade \ +aztec-forge script script/deploy/DeployRollupForUpgrade.s.sol:DeployRollupForUpgrade \ --rpc-url $L1_RPC_URL \ --broadcast \ --private-key $PRIVATE_KEY @@ -151,7 +153,7 @@ export NEW_ROLLUP_ADDRESS=0x... ```bash cd l1-contracts -forge create \ +aztec-forge create \ --rpc-url $L1_RPC_URL \ --private-key $PRIVATE_KEY \ --broadcast \ @@ -376,7 +378,7 @@ If you just want to test the governance flow without deploying a real rollup: cd l1-contracts # Deploy empty payload (no constructor args needed) -forge create \ +aztec-forge create \ --rpc-url $L1_RPC_URL \ --private-key $PRIVATE_KEY \ --broadcast \ diff --git a/docs/docs-developers/getting_started_on_local_network.md b/docs/docs-developers/getting_started_on_local_network.md index b390b3f39838..3a4dbbdb0c55 100644 --- a/docs/docs-developers/getting_started_on_local_network.md +++ b/docs/docs-developers/getting_started_on_local_network.md @@ -39,17 +39,19 @@ import { General, Fees } from '@site/src/components/Snippets/general_snippets'; Run: ```bash -VERSION=#include_version_without_prefix bash -i <(curl -sL https://install.aztec.network/#include_version_without_prefix) +VERSION=#include_version_without_prefix bash -i <(curl -sL https://install.aztec.network) ``` This will install the following tools and add them to your `PATH`: -- **nargo** - the Noir programming language compiler and simulator -- **noir-profiler** - a profiler for analyzing and visualizing Noir programs -- **bb** - the Barretenberg proving backend 
- **aztec** - compiles and tests Aztec contracts and launches various infrastructure subsystems (full local network, sequencer, prover, PXE, etc.) and provides utility commands to interact with the network - **aztec-up** - a version manager for the Aztec toolchain. Use `aztec-up install ` to install a new version, `aztec-up use ` to switch between installed versions, or `aztec-up list` to see installed versions. - **aztec-wallet** - a tool for interacting with the Aztec network +- **aztec-bb** - the Barretenberg proving backend +- **aztec-nargo** - the Noir compiler and simulator +- **aztec-forge**, **aztec-cast**, **aztec-anvil**, **aztec-chisel** - the bundled Foundry tools + +Foundry, Noir, and Barretenberg are bundled at the versions `aztec` needs. Your own `forge` / `nargo` / `bb` installs still work under their bare names. For syntax highlighting and LSP support while editing contracts, see the [Noir VSCode Extension guide](./docs/aztec-nr/installation.md). diff --git a/docs/docs-operate/operators/reference/node-api-reference.md b/docs/docs-operate/operators/reference/node-api-reference.md index d68617cf4c4e..225b6321a493 100644 --- a/docs/docs-operate/operators/reference/node-api-reference.md +++ b/docs/docs-operate/operators/reference/node-api-reference.md @@ -109,9 +109,9 @@ Get a block specified by its block number or 'latest'. **Parameters**: -1. `blockParameter` - `number | "latest"` - The block parameter (block number, block hash, or 'latest'). +1. `blockParameter` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest'). -**Returns**: `L2Block` - The requested block. +**Returns**: `L2Block | undefined` - The requested block. **Example**: @@ -129,7 +129,7 @@ Get a block specified by its hash. 1. `blockHash` - `BlockHash` - The block hash being requested. -**Returns**: `L2Block` - The requested block. +**Returns**: `L2Block | undefined` - The requested block. 
**Example**: @@ -147,7 +147,7 @@ Get a block specified by its archive root. 1. `archive` - `Fr` - The archive root being requested. -**Returns**: `L2Block` - The requested block. +**Returns**: `L2Block | undefined` - The requested block. **Example**: @@ -173,7 +173,7 @@ Method to request blocks. Will attempt to return all requested blocks but will r ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getBlocks","params":[12345,12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getBlocks","params":[1,100],"id":1}' ``` ### node_getBlockHeader @@ -182,9 +182,9 @@ Returns the block header for a given block number, block hash, or 'latest'. **Parameters**: -1. `block` - `number | "latest" | undefined` - The block parameter (block number, block hash, or 'latest'). Defaults to 'latest'. +1. `block` - `BlockHash | number | "latest" | undefined` - The block parameter (block number, block hash, or 'latest'). Defaults to 'latest'. -**Returns**: `BlockHeader` - The requested block header. +**Returns**: `BlockHeader | undefined` - The requested block header. **Example**: @@ -202,7 +202,7 @@ Get a block header specified by its archive root. 1. `archive` - `Fr` - The archive root being requested. -**Returns**: `BlockHeader` - The requested block header. +**Returns**: `BlockHeader | undefined` - The requested block header. **Example**: @@ -228,7 +228,7 @@ Retrieves a collection of checkpoints. 
```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getCheckpoints","params":[12345,12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getCheckpoints","params":[1,100],"id":1}' ``` ### node_getCheckpointedBlocks @@ -245,7 +245,7 @@ curl -X POST http://localhost:8080 \ ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getCheckpointedBlocks","params":[12345,12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getCheckpointedBlocks","params":[1,100],"id":1}' ``` ### node_getCheckpointsDataForEpoch @@ -332,7 +332,7 @@ Method to retrieve a single pending tx. 1. `txHash` - `TxHash` - The transaction hash to return. -**Returns**: `Tx` - The pending tx if it exists. +**Returns**: `Tx | undefined` - The pending tx if it exists. **Example**: @@ -376,7 +376,7 @@ Method to retrieve pending txs. ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getPendingTxs","params":[12345,"0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getPendingTxs","params":[100,"0x1234..."],"id":1}' ``` ### node_getPendingTxCount @@ -442,9 +442,12 @@ curl -X POST http://localhost:8080 \ Gets the storage value at the given contract storage slot. +**Remarks**: The storage slot here refers to the slot as it is defined in Noir not the index in the merkle tree. +Aztec's version of `eth_getStorageAt`. + **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `contract` - `AztecAddress` - Address of the contract to query. 3. `slot` - `Fr` - Slot to query. @@ -483,11 +486,11 @@ the leaves were inserted. **Parameters**: -1. 
`referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `treeId` - `MerkleTreeId` - The tree to search in. 3. `leafValues` - `Fr[]` - The values to search for. -**Returns**: `DataInBlock | undefined[]` - The indices of leaves and the block metadata of a block in which the leaves were inserted. +**Returns**: `(DataInBlock | undefined)[]` - The indices of leaves and the block metadata of a block in which the leaves were inserted. **Example**: @@ -503,10 +506,10 @@ Returns a nullifier membership witness for a given nullifier at a given block. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `nullifier` - `Fr` - Nullifier we try to find witness for. -**Returns**: `NullifierMembershipWitness` - The nullifier membership witness (if found). +**Returns**: `NullifierMembershipWitness | undefined` - The nullifier membership witness (if found). **Example**: @@ -520,12 +523,16 @@ curl -X POST http://localhost:8080 \ Returns a low nullifier membership witness for a given nullifier at a given block. +**Remarks**: Low nullifier witness can be used to perform a nullifier non-inclusion proof by leveraging the "linked +list structure" of leaves and proving that a lower nullifier is pointing to a bigger next value than the nullifier +we are trying to prove non-inclusion for. + **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. 
`referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `nullifier` - `Fr` - Nullifier we try to find the low nullifier witness for. -**Returns**: `NullifierMembershipWitness` - The low nullifier membership witness (if found). +**Returns**: `NullifierMembershipWitness | undefined` - The low nullifier membership witness (if found). **Example**: @@ -539,12 +546,16 @@ curl -X POST http://localhost:8080 \ Returns a public data tree witness for a given leaf slot at a given block. +**Remarks**: The witness can be used to compute the current value of the public data tree leaf. If the low leaf preimage corresponds to an +"in range" slot, means that the slot doesn't exist and the value is 0. If the low leaf preimage corresponds to the exact slot, the current value +is contained in the leaf preimage. + **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `leafSlot` - `Fr` - The leaf slot we try to find the witness for. -**Returns**: `PublicDataWitness` - The public data witness (if found). +**Returns**: `PublicDataWitness | undefined` - The public data witness (if found). **Example**: @@ -565,7 +576,7 @@ a specific block exists in the chain's history. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data (which contains the root of the archive tree in which we are searching for the block hash). 2. `blockHash` - `BlockHash` - The block hash to find in the archive tree. 
@@ -585,7 +596,7 @@ Returns a membership witness for a given note hash at a given block. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `noteHash` - `Fr` - The note hash we try to find the witness for. **Returns**: `MembershipWitness | undefined` @@ -606,7 +617,7 @@ Returns the index and a sibling path for a leaf in the committed l1 to l2 data t **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `l1ToL2Message` - `Fr` - The l1ToL2Message to get the index / sibling path for. **Returns**: `[bigint, SiblingPath] | undefined` - A tuple of the index and the sibling path of the L1ToL2Message (undefined if not found). @@ -736,7 +747,7 @@ for a tag, the caller should fetch the next page to check for more logs. ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getPrivateLogsByTags","params":[["0x1234..."],12345,"0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getPrivateLogsByTags","params":[["0x1234..."],0,"0x1234..."],"id":1}' ``` ### node_getPublicLogsByTagsFromContract @@ -762,7 +773,7 @@ for a tag, the caller should fetch the next page to check for more logs. 
```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getPublicLogsByTagsFromContract","params":["0x1234...",["0x1234..."],12345,"0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getPublicLogsByTagsFromContract","params":["0x1234...",["0x1234..."],0,"0x1234..."],"id":1}' ``` ## Contract queries @@ -821,6 +832,26 @@ curl -X POST http://localhost:8080 \ -d '{"jsonrpc":"2.0","method":"node_getCurrentMinFees","params":[],"id":1}' ``` +### node_getPredictedMinFees + +Returns predicted min fees for the current slot and next N slots. +Each entry accounts for the L1 gas oracle transition and congestion growth based on the +given mana usage estimate. Defaults to target usage (steady state). + +**Parameters**: + +1. `manaUsage` - `ManaUsageEstimate | undefined` - Expected mana usage per checkpoint (none, target, or limit). + +**Returns**: `GasFees[]` - An array of GasFees, one per slot in the prediction window. + +**Example**: + +```bash +curl -X POST http://localhost:8080 \ + -H 'Content-Type: application/json' \ + -d '{"jsonrpc":"2.0","method":"node_getPredictedMinFees","params":["target"],"id":1}' +``` + ### node_getMaxPriorityFees Method to fetch the current max priority fee of txs in the mempool. @@ -1169,6 +1200,8 @@ Pauses syncing and rolls back the database to the target L2 block number. **Parameters**: 1. `targetBlockNumber` - `number` - The block number to roll back to. +2. `force` - `boolean | undefined` - If true, clears the world state db and p2p dbs if rolling back to behind the finalized block. +3. `resumeSync` - `boolean | undefined` - If true (default), resumes archiver and world state sync after rollback. **Returns**: `void` @@ -1177,7 +1210,7 @@ Pauses syncing and rolls back the database to the target L2 block number. 
```bash curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345,true,true],"id":1}' ``` **Example (Docker)**: @@ -1185,7 +1218,7 @@ curl -X POST http://localhost:8880 \ ```bash docker exec -it aztec-node curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345,true,true],"id":1}' ``` ### nodeAdmin_startSnapshotUpload @@ -1214,30 +1247,6 @@ docker exec -it aztec-node curl -X POST http://localhost:8880 \ -d '{"jsonrpc":"2.0","method":"nodeAdmin_startSnapshotUpload","params":["0x1234..."],"id":1}' ``` -### nodeAdmin_getSlashPayloads - -Returns all monitored payloads by the slasher for the current round. - -**Parameters**: None - -**Returns**: `SlashPayloadRound[]` - -**Example (CLI)**: - -```bash -curl -X POST http://localhost:8880 \ - -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashPayloads","params":[],"id":1}' -``` - -**Example (Docker)**: - -```bash -docker exec -it aztec-node curl -X POST http://localhost:8880 \ - -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashPayloads","params":[],"id":1}' -``` - ### nodeAdmin_getSlashOffenses Returns all offenses applicable for the given round. @@ -1253,7 +1262,7 @@ Returns all offenses applicable for the given round. 
```bash curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["current"],"id":1}' ``` **Example (Docker)**: @@ -1261,7 +1270,7 @@ curl -X POST http://localhost:8880 \ ```bash docker exec -it aztec-node curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["current"],"id":1}' ``` ### nodeAdmin_reloadKeystore diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 987e164bdcd9..4f30ccddd447 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -264,12 +264,16 @@ const config = { { generateLLMsTxt: true, generateLLMsFullTxt: true, - docsDir: `developer_versioned_docs/version-${mainnetDeveloperVersion || developerTestnetVersion}/`, + docsDir: `developer_versioned_docs/version-${mainnetDeveloperVersion || developerTestnetVersion}`, title: "Aztec Protocol Documentation", excludeImports: true, version: mainnetDeveloperVersion || developerTestnetVersion, + addMdExtension: false, pathTransformation: { - ignorePaths: ["docs"], + ignorePaths: [ + `developer_versioned_docs/version-${mainnetDeveloperVersion || developerTestnetVersion}`, + ], + addPaths: ["developers"], }, }, ], @@ -285,6 +289,9 @@ const config = { ], // ["./src/plugins/plugin-embed-code", {}], ], + clientModules: [ + './src/clientModules/docsgpt.js', + ], customFields: {}, themeConfig: /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ diff --git a/docs/examples/ts/aave_bridge/index.ts b/docs/examples/ts/aave_bridge/index.ts index 760d77cf58dc..01f618b05d67 100644 --- a/docs/examples/ts/aave_bridge/index.ts +++ b/docs/examples/ts/aave_bridge/index.ts @@ -24,11 +24,16 @@ import { AaveBridgeContract } 
from "./artifacts/AaveBridge.js"; // docs:start:setup // Setup L1 client using anvil's default mnemonic const MNEMONIC = "test test test test test test test test test test test junk"; -const l1Client = createExtendedL1Client([process.env.ETHEREUM_HOST ?? "http://localhost:8545"], MNEMONIC); +const l1Client = createExtendedL1Client( + [process.env.ETHEREUM_HOST ?? "http://localhost:8545"], + MNEMONIC, +); // Setup L2 using Aztec's local network console.log("Setting up L2...\n"); -const node = createAztecNodeClient(process.env.AZTEC_NODE_URL ?? "http://localhost:8080"); +const node = createAztecNodeClient( + process.env.AZTEC_NODE_URL ?? "http://localhost:8080", +); await waitForNode(node); const aztecWallet = await EmbeddedWallet.create(node, { ephemeral: true }); const [accData] = await getInitialTestAccountsData(); @@ -174,7 +179,6 @@ async function mine2Blocks( EthAddress.ZERO, ).send({ from: accountAddress, - contractAddressSalt: Fr.random(), }); await AaveBridgeContract.deploy( aztecWallet, @@ -182,7 +186,6 @@ async function mine2Blocks( EthAddress.ZERO, ).send({ from: accountAddress, - contractAddressSalt: Fr.random(), }); } // docs:end:mine_blocks @@ -204,7 +207,11 @@ const burnAuthwit = await SetPublicAuthwitContractInteraction.create( account.address, { caller: l2Bridge.address, - action: l2Token.methods.burn_public(account.address, amountToDeposit, burnNonce), + action: l2Token.methods.burn_public( + account.address, + amountToDeposit, + burnNonce, + ), }, true, ); @@ -231,7 +238,10 @@ console.log(`Exit sent (block: ${exitReceipt.blockNumber})`); // toFunctionSelector computes keccak256 of the signature and takes the first 4 bytes. 
const portalEthAddress = EthAddress.fromString(portalAddress.toString()); const withdrawContent = sha256ToField([ - Buffer.from(toFunctionSelector("withdraw(address,uint256,address)").substring(2), "hex"), + Buffer.from( + toFunctionSelector("withdraw(address,uint256,address)").substring(2), + "hex", + ), portalEthAddress.toBuffer32(), new Fr(amountToDeposit).toBuffer(), EthAddress.ZERO.toBuffer32(), @@ -258,18 +268,22 @@ if (!exitReceipt.blockNumber) { } const exitBlockNumber = exitReceipt.blockNumber; console.log("Waiting for block to be proven..."); -let provenBlockNumber = await node.getProvenBlockNumber(); +let provenBlockNumber = await node.getBlockNumber("proven"); while (provenBlockNumber < exitBlockNumber) { console.log( ` Waiting... (proven: ${provenBlockNumber}, needed: ${exitBlockNumber})`, ); await new Promise((resolve) => setTimeout(resolve, 10000)); - provenBlockNumber = await node.getProvenBlockNumber(); + provenBlockNumber = await node.getBlockNumber("proven"); } console.log("Block proven!\n"); // Compute the membership witness using the message hash and the L2 tx hash -const witness = await computeL2ToL1MembershipWitness(node, msgLeaf, exitReceipt.txHash); +const witness = await computeL2ToL1MembershipWitness( + node, + msgLeaf, + exitReceipt.txHash, +); const epoch = witness!.epochNumber; const siblingPathHex = witness!.siblingPath @@ -369,7 +383,9 @@ await mine2Blocks(aztecWallet, account.address); // The mock Aave pool returns 10% yield, so 500 DAI becomes 550 DAI const expectedWithYield = amountToDeposit + (amountToDeposit * 1000n) / 10000n; -console.log(`Expected amount with yield: ${expectedWithYield / 10n ** 18n} tokens`); +console.log( + `Expected amount with yield: ${expectedWithYield / 10n ** 18n} tokens`, +); // On L2: consume the L1->L2 message and mint tokens (with yield) console.log("Claiming tokens on L2..."); @@ -392,7 +408,9 @@ const expectedFinal = initialRemaining + expectedWithYield; // 500 + 550 = 1050 console.log(`Initial 
deposit: ${depositAmount / 10n ** 18n} tokens`); console.log(`Deposited to Aave: ${amountToDeposit / 10n ** 18n} tokens`); -console.log(`Yield earned (10%): ${(expectedWithYield - amountToDeposit) / 10n ** 18n} tokens`); +console.log( + `Yield earned (10%): ${(expectedWithYield - amountToDeposit) / 10n ** 18n} tokens`, +); console.log(`Expected balance: ${expectedFinal / 10n ** 18n} tokens`); console.log(`Actual balance: ${finalBalance / 10n ** 18n} tokens`); console.log( diff --git a/docs/examples/ts/aztecjs_advanced/index.ts b/docs/examples/ts/aztecjs_advanced/index.ts index 7d22adf120c4..90f60cc5c675 100644 --- a/docs/examples/ts/aztecjs_advanced/index.ts +++ b/docs/examples/ts/aztecjs_advanced/index.ts @@ -16,14 +16,22 @@ import { getPublicEvents } from "@aztec/aztec.js/events"; import { GasSettings } from "@aztec/stdlib/gas"; // Setup: connect to network -const node = createAztecNodeClient(process.env.AZTEC_NODE_URL ?? "http://localhost:8080"); +const node = createAztecNodeClient( + process.env.AZTEC_NODE_URL ?? 
"http://localhost:8080", +); await waitForNode(node); const wallet = await EmbeddedWallet.create(node, { ephemeral: true }); const testAccounts = await getInitialTestAccountsData(); const [aliceAddress, bobAddress] = await Promise.all( testAccounts.slice(0, 2).map(async (account) => { - return (await wallet.createSchnorrAccount(account.secret, account.salt, account.signingKey)).address; + return ( + await wallet.createSchnorrAccount( + account.secret, + account.salt, + account.signingKey, + ) + ).address; }), ); @@ -45,8 +53,13 @@ const sponsoredFPCInstance = await getContractInstanceFromInstantiationParams( SponsoredFPCContract.artifact, { salt: new Fr(0) }, ); -await wallet.registerContract(sponsoredFPCInstance, SponsoredFPCContract.artifact); -const sponsoredPaymentMethod = new SponsoredFeePaymentMethod(sponsoredFPCInstance.address); +await wallet.registerContract( + sponsoredFPCInstance, + SponsoredFPCContract.artifact, +); +const sponsoredPaymentMethod = new SponsoredFeePaymentMethod( + sponsoredFPCInstance.address, +); // wallet is from the connection guide; sponsoredPaymentMethod is from the fees guide const { contract: sponsoredContract } = await TokenContract.deploy( @@ -68,9 +81,9 @@ const { contract: saltedContract } = await TokenContract.deploy( "SaltedToken", "SALT", 18, + { salt: customSalt }, ).send({ from: aliceAddress, - contractAddressSalt: customSalt, }); // docs:end:deploy_custom_salt @@ -84,8 +97,9 @@ const deployMethod = TokenContract.deploy( "PredictedToken", "PRED", 18, + { salt: deploymentSalt, deployer: aliceAddress }, ); -const instance = await deployMethod.getInstance({ contractAddressSalt: deploymentSalt }); +const instance = await deployMethod.getInstance(); const predictedAddress = instance.address; console.log(`Contract will deploy at: ${predictedAddress}`); @@ -235,15 +249,21 @@ console.log(`Derived token at: ${derivedToken.address.toString()}`); // docs:start:parallel_deploy // Deploy contracts in parallel using Promise.all const 
contracts = await Promise.all([ - TokenContract.deploy(wallet, aliceAddress, "Token1", "T1", 18).send({ - from: aliceAddress, - }).then(({ contract }) => contract), - TokenContract.deploy(wallet, aliceAddress, "Token2", "T2", 18).send({ - from: aliceAddress, - }).then(({ contract }) => contract), - TokenContract.deploy(wallet, aliceAddress, "Token3", "T3", 18).send({ - from: aliceAddress, - }).then(({ contract }) => contract), + TokenContract.deploy(wallet, aliceAddress, "Token1", "T1", 18) + .send({ + from: aliceAddress, + }) + .then(({ contract }) => contract), + TokenContract.deploy(wallet, aliceAddress, "Token2", "T2", 18) + .send({ + from: aliceAddress, + }) + .then(({ contract }) => contract), + TokenContract.deploy(wallet, aliceAddress, "Token3", "T3", 18) + .send({ + from: aliceAddress, + }) + .then(({ contract }) => contract), ]); console.log(`Contract 1 at: ${contracts[0].address}`); @@ -293,8 +313,12 @@ async function pollForTransferEvents() { for (const { event, metadata } of events) { // Process each transfer event - console.log(`Transfer: ${event.amount} from ${event.from} to ${event.to}`); - console.log(` in block ${metadata.l2BlockNumber}, tx ${metadata.txHash}`); + console.log( + `Transfer: ${event.amount} from ${event.from} to ${event.to}`, + ); + console.log( + ` in block ${metadata.l2BlockNumber}, tx ${metadata.txHash}`, + ); } lastProcessedBlock = currentBlock; @@ -307,7 +331,11 @@ await pollForTransferEvents(); // docs:start:connect_to_contract // wallet is from the connection guide; token is the contract deployed in the deploy guide -const contract = await Contract.at(token.address, TokenContract.artifact, wallet); +const contract = await Contract.at( + token.address, + TokenContract.artifact, + wallet, +); // docs:end:connect_to_contract // docs:start:basic_send_transaction @@ -333,7 +361,10 @@ console.log("DA gas limit:", metaResult.estimatedGas.gasLimits.daGas); // docs:end:simulate_with_metadata // docs:start:read_public_logs -const 
publicLogs = await node.getPublicLogs({ fromBlock: 1, toBlock: await node.getBlockNumber() + 1 }); +const publicLogs = await node.getPublicLogs({ + fromBlock: 1, + toBlock: (await node.getBlockNumber()) + 1, +}); if (publicLogs.logs.length > 0) { const rawFields = publicLogs.logs[0].log.getEmittedFields(); // Fr[] console.log("Raw log fields:", rawFields.length); @@ -385,16 +416,21 @@ const networkFees = await node.getCurrentMinFees(); const gasSettings = GasSettings.from({ gasLimits: { daGas: 100_000, l2Gas: 2_000_000 }, teardownGasLimits: { daGas: 100_000, l2Gas: 2_000_000 }, - maxFeesPerGas: { feePerDaGas: networkFees.feePerDaGas * 2n, feePerL2Gas: networkFees.feePerL2Gas * 2n }, + maxFeesPerGas: { + feePerDaGas: networkFees.feePerDaGas * 2n, + feePerL2Gas: networkFees.feePerL2Gas * 2n, + }, maxPriorityFeesPerGas: { feePerDaGas: 0n, feePerL2Gas: 0n }, }); // docs:end:custom_gas_settings // docs:start:send_with_gas_settings -const { receipt: gsReceipt } = await token.methods.mint_to_public(aliceAddress, 1n).send({ - from: aliceAddress, - fee: { gasSettings }, -}); +const { receipt: gsReceipt } = await token.methods + .mint_to_public(aliceAddress, 1n) + .send({ + from: aliceAddress, + fee: { gasSettings }, + }); // docs:end:send_with_gas_settings // docs:start:read_logs_by_filter @@ -402,7 +438,10 @@ const { receipt: gsReceipt } = await token.methods.mint_to_public(aliceAddress, const txLogs = await node.getPublicLogs({ txHash: gsReceipt.txHash }); // Get logs for a block range -const rangeLogs = await node.getPublicLogs({ fromBlock: 1, toBlock: await node.getBlockNumber() + 1 }); +const rangeLogs = await node.getPublicLogs({ + fromBlock: 1, + toBlock: (await node.getBlockNumber()) + 1, +}); // docs:end:read_logs_by_filter // docs:start:auto_gas_estimation @@ -435,5 +474,4 @@ const { result: privateBalance } = await token.methods .simulate({ from: aliceAddress }); // docs:end:simulate_private_access - console.log("All advanced examples completed successfully"); 
diff --git a/docs/examples/ts/example_swap/index.ts b/docs/examples/ts/example_swap/index.ts index f8e30c7c9a1b..883c120c5bad 100644 --- a/docs/examples/ts/example_swap/index.ts +++ b/docs/examples/ts/example_swap/index.ts @@ -43,8 +43,7 @@ const account = await wallet.createSchnorrAccount( console.log(`Account: ${account.address.toString()}\n`); const nodeInfo = await node.getNodeInfo(); -const registryAddress = - nodeInfo.l1ContractAddresses.registryAddress.toString(); +const registryAddress = nodeInfo.l1ContractAddresses.registryAddress.toString(); const inboxAddress = nodeInfo.l1ContractAddresses.inboxAddress.toString(); // docs:end:setup @@ -302,11 +301,9 @@ async function mine2Blocks( ) { await TokenContract.deploy(wallet, accountAddress, "T", "T", 18).send({ from: accountAddress, - contractAddressSalt: Fr.random(), }); await TokenContract.deploy(wallet, accountAddress, "T", "T", 18).send({ from: accountAddress, - contractAddressSalt: Fr.random(), }); } // docs:end:mine_blocks @@ -325,7 +322,9 @@ const { result: wethBalanceBefore } = await l2Weth.methods .simulate({ from: account.address }); console.log(`L2 WETH balance after claim: ${wethBalanceBefore}\n`); if (wethBalanceBefore !== SWAP_AMOUNT) { - throw new Error(`Expected WETH balance ${SWAP_AMOUNT}, got ${wethBalanceBefore}`); + throw new Error( + `Expected WETH balance ${SWAP_AMOUNT}, got ${wethBalanceBefore}`, + ); } console.log("✓ WETH claimed successfully on L2\n"); // docs:end:claim_on_l2 @@ -384,13 +383,13 @@ console.log("✓ WETH transferred to bridge for swap\n"); // docs:start:wait_for_proof console.log("Waiting for block to be proven...\n"); -let provenBlockNumber = await node.getProvenBlockNumber(); +let provenBlockNumber = await node.getBlockNumber("proven"); while (provenBlockNumber < swapReceipt.blockNumber!) { console.log( ` Waiting... 
(proven: ${provenBlockNumber}, needed: ${swapReceipt.blockNumber})`, ); await new Promise((resolve) => setTimeout(resolve, 10000)); - provenBlockNumber = await node.getProvenBlockNumber(); + provenBlockNumber = await node.getBlockNumber("proven"); } console.log("Block proven!\n"); @@ -488,7 +487,9 @@ const swapContentEncoded = encodeFunctionData({ ], }); -const swapContentHash = sha256ToField([Buffer.from(swapContentEncoded.slice(2), "hex")]); +const swapContentHash = sha256ToField([ + Buffer.from(swapContentEncoded.slice(2), "hex"), +]); const swapMsgLeaf = computeL2ToL1MessageHash({ l2Sender: l2Uniswap.address, diff --git a/docs/examples/ts/token_bridge/index.ts b/docs/examples/ts/token_bridge/index.ts index 728f285164bc..ac0a1d5b6a29 100644 --- a/docs/examples/ts/token_bridge/index.ts +++ b/docs/examples/ts/token_bridge/index.ts @@ -63,7 +63,10 @@ console.log(`NFTPortal: ${portalAddress}\n`); // docs:start:deploy_l2_contracts console.log("Deploying L2 contracts...\n"); -const { contract: l2Nft } = await NFTPunkContract.deploy(aztecWallet, account.address).send({ +const { contract: l2Nft } = await NFTPunkContract.deploy( + aztecWallet, + account.address, +).send({ from: account.address, }); @@ -201,11 +204,9 @@ async function mine2Blocks( ) { await NFTPunkContract.deploy(aztecWallet, accountAddress).send({ from: accountAddress, - contractAddressSalt: Fr.random(), }); await NFTPunkContract.deploy(aztecWallet, accountAddress).send({ from: accountAddress, - contractAddressSalt: Fr.random(), }); } // docs:end:mine_blocks @@ -289,7 +290,7 @@ const msgLeaf = computeL2ToL1MessageHash({ console.log("Waiting for block to be proven..."); console.log(` Exit block number: ${exitReceipt.blockNumber}`); -let provenBlockNumber = await node.getProvenBlockNumber(); +let provenBlockNumber = await node.getBlockNumber("proven"); console.log(` Current proven block: ${provenBlockNumber}`); while (provenBlockNumber < exitReceipt.blockNumber!) 
{ @@ -297,13 +298,17 @@ while (provenBlockNumber < exitReceipt.blockNumber!) { ` Waiting... (proven: ${provenBlockNumber}, needed: ${exitReceipt.blockNumber})`, ); await new Promise((resolve) => setTimeout(resolve, 10000)); // Wait 10 seconds - provenBlockNumber = await node.getProvenBlockNumber(); + provenBlockNumber = await node.getBlockNumber("proven"); } console.log("Block proven!\n"); // Compute the membership witness using the message hash and the L2 tx hash -const witness = await computeL2ToL1MembershipWitness(node, msgLeaf, exitReceipt.txHash); +const witness = await computeL2ToL1MembershipWitness( + node, + msgLeaf, + exitReceipt.txHash, +); const epoch = witness!.epochNumber; console.log(` Epoch for block ${exitReceipt.blockNumber}: ${epoch}`); diff --git a/docs/examples/webapp-tutorial/test-extension/src/background.ts b/docs/examples/webapp-tutorial/test-extension/src/background.ts index e3c1a3342aed..a0e66e61b939 100644 --- a/docs/examples/webapp-tutorial/test-extension/src/background.ts +++ b/docs/examples/webapp-tutorial/test-extension/src/background.ts @@ -15,12 +15,23 @@ import { type BackgroundTransport, type BackgroundConnectionCallbacks, type ActiveSession, -} from '@aztec/wallet-sdk/extension/handlers'; +} from "@aztec/wallet-sdk/extension/handlers"; -import { WALLET_CONFIG, MessageTarget, MessageTypes, AUTO_LOCK_MINUTES, log } from './config'; -import { getErrorMessage } from './utils'; -import { STORAGE_KEYS } from './wallet/storage'; -import type { PendingTransaction, PendingSessionVerification, PendingCapabilities, BackgroundTask } from './shared-types'; +import { + WALLET_CONFIG, + MessageTarget, + MessageTypes, + AUTO_LOCK_MINUTES, + log, +} from "./config"; +import { getErrorMessage } from "./utils"; +import { STORAGE_KEYS } from "./wallet/storage"; +import type { + PendingTransaction, + PendingSessionVerification, + PendingCapabilities, + BackgroundTask, +} from "./shared-types"; // docs:start:offscreen-management let offscreenCreating: 
Promise | null = null; @@ -43,18 +54,18 @@ async function ensureOffscreenDocument(): Promise { return; } - const offscreenUrl = chrome.runtime.getURL('dist/offscreen.html'); - log.debug('[background] Creating offscreen document:', offscreenUrl); + const offscreenUrl = chrome.runtime.getURL("dist/offscreen.html"); + log.debug("[background] Creating offscreen document:", offscreenUrl); offscreenCreating = chrome.offscreen.createDocument({ url: offscreenUrl, reasons: [chrome.offscreen.Reason.WORKERS], - justification: 'Aztec PXE requires long-running WASM operations', + justification: "Aztec PXE requires long-running WASM operations", }); await offscreenCreating; offscreenCreating = null; - log.debug('[background] Offscreen document created'); + log.debug("[background] Offscreen document created"); } // docs:end:offscreen-management @@ -67,26 +78,29 @@ async function ensureOffscreenDocument(): Promise { * - No `return true`/`false` landmine for async responses */ let offscreenPort: chrome.runtime.Port | null = null; -const pendingOffscreenCalls = new Map void; - reject: (error: Error) => void; - timer: ReturnType; -}>(); +const pendingOffscreenCalls = new Map< + string, + { + resolve: (value: any) => void; + reject: (error: Error) => void; + timer: ReturnType; + } +>(); let offscreenMessageId = 0; const OFFSCREEN_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes function connectOffscreenPort() { - const port = chrome.runtime.connect({ name: 'offscreen' }); + const port = chrome.runtime.connect({ name: "offscreen" }); offscreenPort = port; port.onMessage.addListener((message: any) => { // Progress updates — relay to popup - if (message.type === 'task-progress') { - const runningTask = backgroundTasks.find((t) => t.status === 'running'); + if (message.type === "task-progress") { + const runningTask = backgroundTasks.find((t) => t.status === "running"); if (runningTask) { runningTask.progress = message.stage; - notifyPopup({ type: 'task-update', task: { ...runningTask } }); + 
notifyPopup({ type: "task-update", task: { ...runningTask } }); } return; } @@ -100,17 +114,17 @@ function connectOffscreenPort() { if (message.success) { pending.resolve(message.result); } else { - pending.reject(new Error(message.error || 'Unknown error')); + pending.reject(new Error(message.error || "Unknown error")); } }); port.onDisconnect.addListener(() => { - log.debug('[background] Offscreen port disconnected'); + log.debug("[background] Offscreen port disconnected"); offscreenPort = null; // Reject all pending calls — sendToOffscreen will retry for (const [id, pending] of pendingOffscreenCalls) { clearTimeout(pending.timer); - pending.reject(new Error('Offscreen port disconnected')); + pending.reject(new Error("Offscreen port disconnected")); pendingOffscreenCalls.delete(id); } }); @@ -140,7 +154,7 @@ async function sendToOffscreen(message: any, _retried = false): Promise { try { if (!offscreenPort) { - throw new Error('Offscreen port not connected'); + throw new Error("Offscreen port not connected"); } offscreenPort.postMessage({ ...message, messageId }); } catch (err: unknown) { @@ -149,7 +163,7 @@ async function sendToOffscreen(message: any, _retried = false): Promise { // Port may have disconnected — retry once if (!_retried) { - log.warn('[background] Offscreen port send failed, retrying...'); + log.warn("[background] Offscreen port send failed, retrying..."); offscreenPort = null; offscreenCreating = null; sendToOffscreen(message, true).then(resolve, reject); @@ -176,7 +190,8 @@ const capabilitiesApprovedSessions = new Set(); * The extension must confirm the session before any wallet method calls are processed. * This prevents the dApp from bypassing verification by sending messages immediately. */ -const queuedMessages: Map = new Map(); +const queuedMessages: Map = + new Map(); /** * Trusted origins persistence for auto-reconnect. (#30) @@ -187,7 +202,7 @@ const queuedMessages: Map = * exchange still happens every time for security). 
Disconnecting a site * removes it from the trusted list. */ -const TRUSTED_ORIGINS_KEY = 'aztec_trusted_origins'; +const TRUSTED_ORIGINS_KEY = "aztec_trusted_origins"; interface TrustedOrigin { origin: string; @@ -203,26 +218,37 @@ async function getTrustedOrigins(): Promise { async function addTrustedOrigin(origin: string, appId: string): Promise { const trusted = await getTrustedOrigins(); - if (!trusted.some(t => t.origin === origin && t.appId === appId)) { + if (!trusted.some((t) => t.origin === origin && t.appId === appId)) { trusted.push({ origin, appId, trustedAt: Date.now() }); await chrome.storage.local.set({ [TRUSTED_ORIGINS_KEY]: trusted }); } } -async function removeTrustedOrigin(origin: string, appId: string): Promise { +async function removeTrustedOrigin( + origin: string, + appId: string, +): Promise { const trusted = await getTrustedOrigins(); - const filtered = trusted.filter(t => !(t.origin === origin && t.appId === appId)); + const filtered = trusted.filter( + (t) => !(t.origin === origin && t.appId === appId), + ); await chrome.storage.local.set({ [TRUSTED_ORIGINS_KEY]: filtered }); } -async function isTrustedOrigin(origin: string, appId: string): Promise { +async function isTrustedOrigin( + origin: string, + appId: string, +): Promise { const trusted = await getTrustedOrigins(); - return trusted.some(t => t.origin === origin && t.appId === appId); + return trusted.some((t) => t.origin === origin && t.appId === appId); } -async function getStoredCapabilities(origin: string, appId: string): Promise | null> { +async function getStoredCapabilities( + origin: string, + appId: string, +): Promise | null> { const trusted = await getTrustedOrigins(); - const entry = trusted.find(t => t.origin === origin && t.appId === appId); + const entry = trusted.find((t) => t.origin === origin && t.appId === appId); return entry?.grantedCapabilities ?? 
null; } @@ -245,19 +271,24 @@ async function persistState(): Promise { sw_pendingTransactions: pendingTransactions, }); } catch (err) { - log.warn('[background] Failed to persist state:', err); + log.warn("[background] Failed to persist state:", err); } } async function restoreState(): Promise { try { const data = await chrome.storage.session.get([ - 'sw_walletUnlocked', - 'sw_pendingTransactions', + "sw_walletUnlocked", + "sw_pendingTransactions", ]); walletUnlocked = data.sw_walletUnlocked ?? false; pendingTransactions = data.sw_pendingTransactions ?? []; - log.debug('[background] Restored state: unlocked =', walletUnlocked, ', pendingTx =', pendingTransactions.length); + log.debug( + "[background] Restored state: unlocked =", + walletUnlocked, + ", pendingTx =", + pendingTransactions.length, + ); // Validate: if the offscreen document was torn down, the cached master key is gone. // Probe offscreen to confirm — if it fails, the wallet needs re-unlock. @@ -265,13 +296,15 @@ async function restoreState(): Promise { try { await sendToOffscreen({ type: MessageTypes.GET_ACCOUNTS }); } catch { - log.warn('[background] Offscreen unreachable after restore — marking wallet as locked'); + log.warn( + "[background] Offscreen unreachable after restore — marking wallet as locked", + ); walletUnlocked = false; await persistState(); } } } catch (err) { - log.warn('[background] Failed to restore state:', err); + log.warn("[background] Failed to restore state:", err); } } @@ -284,22 +317,27 @@ let backgroundTasks: BackgroundTask[] = []; function startBackgroundTask(type: string, promise: Promise): string { const id = `${type}-${Date.now()}`; - const task: BackgroundTask = { id, type, status: 'running', startedAt: Date.now() }; + const task: BackgroundTask = { + id, + type, + status: "running", + startedAt: Date.now(), + }; backgroundTasks.push(task); promise .then((result) => { - task.status = 'success'; + task.status = "success"; task.result = result; - notifyPopup({ type: 
'task-update', task: { ...task } }); + notifyPopup({ type: "task-update", task: { ...task } }); }) .catch((error) => { - task.status = 'error'; + task.status = "error"; task.error = getErrorMessage(error); - notifyPopup({ type: 'task-update', task: { ...task } }); + notifyPopup({ type: "task-update", task: { ...task } }); }); - notifyPopup({ type: 'task-update', task: { ...task } }); + notifyPopup({ type: "task-update", task: { ...task } }); return id; } @@ -311,7 +349,7 @@ function cleanupTasks() { const FIVE_MINUTES = 5 * 60 * 1000; const cutoff = Date.now() - FIVE_MINUTES; backgroundTasks = backgroundTasks.filter( - (t) => t.status === 'running' || t.startedAt > cutoff + (t) => t.status === "running" || t.startedAt > cutoff, ); } @@ -323,13 +361,13 @@ function cleanupTasks() { let popupPort: chrome.runtime.Port | null = null; chrome.runtime.onConnect.addListener((port) => { - if (port.name !== 'popup') return; + if (port.name !== "popup") return; - log.debug('[background] Popup connected'); + log.debug("[background] Popup connected"); popupPort = port; port.onDisconnect.addListener(() => { - log.debug('[background] Popup disconnected'); + log.debug("[background] Popup disconnected"); popupPort = null; }); @@ -348,7 +386,7 @@ function notifyPopup(message: any) { } function pushStateToPopup() { - notifyPopup({ type: 'state', data: getFullState() }); + notifyPopup({ type: "state", data: getFullState() }); } /** @@ -361,15 +399,17 @@ function openPopupWindow() { pushStateToPopup(); return; } - chrome.windows.create({ - url: chrome.runtime.getURL('popup/popup.html'), - type: 'popup', - width: 400, - height: 600, - focused: true, - }).catch((err) => { - log.error('[background] Failed to create popup window:', err); - }); + chrome.windows + .create({ + url: chrome.runtime.getURL("popup/popup.html"), + type: "popup", + width: 400, + height: 600, + focused: true, + }) + .catch((err) => { + log.error("[background] Failed to create popup window:", err); + }); } /** @@ 
-384,8 +424,8 @@ function openPopupWithFallback() { } chrome.action.openPopup().catch(() => { chrome.windows.create({ - url: chrome.runtime.getURL('popup/popup.html'), - type: 'popup', + url: chrome.runtime.getURL("popup/popup.html"), + type: "popup", width: 400, height: 600, focused: true, @@ -416,17 +456,19 @@ function getFullState() { * Auto-lock via chrome.alarms. (#28) * Resets the timer on every popup interaction. */ -const AUTO_LOCK_ALARM = 'aztec-auto-lock'; +const AUTO_LOCK_ALARM = "aztec-auto-lock"; function resetAutoLockTimer() { if (walletUnlocked) { - chrome.alarms.create(AUTO_LOCK_ALARM, { delayInMinutes: AUTO_LOCK_MINUTES }); + chrome.alarms.create(AUTO_LOCK_ALARM, { + delayInMinutes: AUTO_LOCK_MINUTES, + }); } } chrome.alarms.onAlarm.addListener(async (alarm) => { if (alarm.name === AUTO_LOCK_ALARM) { - log.debug('[background] Auto-lock triggered'); + log.debug("[background] Auto-lock triggered"); walletUnlocked = false; await persistState(); // Tell offscreen to clear the cached CryptoKey @@ -443,9 +485,13 @@ chrome.alarms.onAlarm.addListener(async (alarm) => { * Updates the extension badge to show pending items count. */ function updateBadge() { - const count = pendingTransactions.length + handler.getPendingDiscoveryCount() + pendingSessionVerifications.length + pendingCapabilities.length; - chrome.action.setBadgeText({ text: count > 0 ? count.toString() : '' }); - chrome.action.setBadgeBackgroundColor({ color: '#FF6B00' }); + const count = + pendingTransactions.length + + handler.getPendingDiscoveryCount() + + pendingSessionVerifications.length + + pendingCapabilities.length; + chrome.action.setBadgeText({ text: count > 0 ? 
count.toString() : "" }); + chrome.action.setBadgeBackgroundColor({ color: "#FF6B00" }); pushStateToPopup(); persistState(); } @@ -453,18 +499,32 @@ function updateBadge() { // docs:start:transport const transport: BackgroundTransport = { sendToTab: (tabId, message) => { - log.debug('[background] sendToTab:', tabId, message.type, message.sessionId); + log.debug( + "[background] sendToTab:", + tabId, + message.type, + message.sessionId, + ); chrome.tabs.sendMessage(tabId, message); }, addContentListener: (handler) => { chrome.runtime.onMessage.addListener((message, sender) => { // Skip targeted messages (popup, offscreen), storage proxy, and progress updates if (message.target) return; - if (message.type === 'storage-get' || message.type === 'storage-set') return; - - log.debug('[background] Content message received:', message.origin, message.type, 'from tab:', sender.tab?.id); + if (message.type === "storage-get" || message.type === "storage-set") + return; + + log.debug( + "[background] Content message received:", + message.origin, + message.type, + "from tab:", + sender.tab?.id, + ); handler(message, { - tab: sender.tab ? { id: sender.tab.id, url: sender.tab.url } : undefined, + tab: sender.tab + ? { id: sender.tab.id, url: sender.tab.url } + : undefined, }); }); }, @@ -476,18 +536,24 @@ const transport: BackgroundTransport = { * or the first account as fallback. Used by both processWalletMessage (for * getAccounts/getRegisteredAccounts filtering) and APPROVE_CAPABILITIES. 
*/ -async function getGrantableAccounts(): Promise> { +async function getGrantableAccounts(): Promise< + Array<{ alias: string; item: string }> +> { const [accountsData, activeData] = await Promise.all([ chrome.storage.local.get(STORAGE_KEYS.ACCOUNTS), chrome.storage.local.get(STORAGE_KEYS.ACTIVE_ACCOUNT), ]); const allAccounts = accountsData[STORAGE_KEYS.ACCOUNTS] || []; const activeAddress = activeData[STORAGE_KEYS.ACTIVE_ACCOUNT]; - const activeAccount = allAccounts.find((a: any) => a.address === activeAddress); + const activeAccount = allAccounts.find( + (a: any) => a.address === activeAddress, + ); if (activeAccount) { return [{ alias: activeAccount.alias, item: activeAccount.address }]; } - return allAccounts.slice(0, 1).map((a: any) => ({ alias: a.alias, item: a.address })); + return allAccounts + .slice(0, 1) + .map((a: any) => ({ alias: a.alias, item: a.address })); } /** @@ -497,11 +563,17 @@ async function getGrantableAccounts(): Promise m.name === 'sendTx')); + message.args[0].some((m: any) => m.name === "sendTx")); if (needsApproval) { // Extract `from` address from the method args: // - sendTx args: [executionPayload, sendOptions] → from is in sendOptions // - batch args: [methodsArray] → find the sendTx entry and get from from its opts - let from = ''; - if (message.type === 'sendTx') { - from = message.args?.[1]?.from?.toString?.() || ''; - } else if (message.type === 'batch') { - const sendTxMethod = message.args[0].find((m: any) => m.name === 'sendTx'); - from = sendTxMethod?.args?.[1]?.from?.toString?.() || ''; + let from = ""; + if (message.type === "sendTx") { + from = message.args?.[1]?.from?.toString?.() || ""; + } else if (message.type === "batch") { + const sendTxMethod = message.args[0].find( + (m: any) => m.name === "sendTx", + ); + from = sendTxMethod?.args?.[1]?.from?.toString?.() || ""; } const pending: PendingTransaction = { @@ -541,7 +615,7 @@ async function processWalletMessage(session: ActiveSession, message: any) { 
pendingTransactions.push(pending); updateBadge(); - log.debug('[background] Transaction pending approval:', pending.method); + log.debug("[background] Transaction pending approval:", pending.method); openPopupWithFallback(); return; @@ -549,23 +623,27 @@ async function processWalletMessage(session: ActiveSession, message: any) { // docs:end:approval-check // Capability requests require user approval — push to pending state. - if (message.type === 'requestCapabilities') { + if (message.type === "requestCapabilities") { // Auto-approve for trusted origins if the requested capabilities match what was previously granted const requestedManifest = message.args?.[0]; const requestedCaps: any[] = requestedManifest?.capabilities || []; if (await isTrustedOrigin(session.origin, session.appId)) { - const savedCaps = await getStoredCapabilities(session.origin, session.appId); + const savedCaps = await getStoredCapabilities( + session.origin, + session.appId, + ); if (savedCaps) { // Verify the requested capability types match the previously approved set const requestedTypes = requestedCaps.map((c: any) => c.type).sort(); const savedTypes = savedCaps.map((c: any) => c.type).sort(); - const capsMatch = requestedTypes.length === savedTypes.length && + const capsMatch = + requestedTypes.length === savedTypes.length && requestedTypes.every((t: string, i: number) => t === savedTypes[i]); if (capsMatch) { const grantedAccounts = await getGrantableAccounts(); const granted = savedCaps.map((cap: any) => { - if (cap.type === 'accounts') { + if (cap.type === "accounts") { return { ...cap, accounts: grantedAccounts }; } return { ...cap }; @@ -574,17 +652,25 @@ async function processWalletMessage(session: ActiveSession, message: any) { await handler.sendResponse(session.sessionId, { messageId: message.messageId, result: { - version: '1.0', + version: "1.0", granted, - wallet: { name: WALLET_CONFIG.walletName, version: WALLET_CONFIG.walletVersion }, + wallet: { + name: 
WALLET_CONFIG.walletName, + version: WALLET_CONFIG.walletVersion, + }, }, walletId: WALLET_CONFIG.walletId, }); capabilitiesApprovedSessions.add(session.sessionId); - log.debug('[background] Auto-approved capabilities for trusted origin:', session.origin); + log.debug( + "[background] Auto-approved capabilities for trusted origin:", + session.origin, + ); return; } - log.debug('[background] Requested capabilities differ from saved — requiring approval'); + log.debug( + "[background] Requested capabilities differ from saved — requiring approval", + ); } } @@ -594,7 +680,10 @@ async function processWalletMessage(session: ActiveSession, message: any) { sessionId: session.sessionId, messageId: message.messageId, origin: session.origin, - appMetadata: manifest?.metadata || { name: 'Unknown App', version: '0.0.0' }, + appMetadata: manifest?.metadata || { + name: "Unknown App", + version: "0.0.0", + }, capabilities: requestedCaps, timestamp: Date.now(), }; @@ -615,12 +704,19 @@ async function processWalletMessage(session: ActiveSession, message: any) { }); // MetaMask-like behavior: return only the active account for getAccounts - if (message.type === 'getAccounts' || message.type === 'getRegisteredAccounts') { - const activeData = await chrome.storage.local.get(STORAGE_KEYS.ACTIVE_ACCOUNT); + if ( + message.type === "getAccounts" || + message.type === "getRegisteredAccounts" + ) { + const activeData = await chrome.storage.local.get( + STORAGE_KEYS.ACTIVE_ACCOUNT, + ); const activeAddress = activeData[STORAGE_KEYS.ACTIVE_ACCOUNT]; if (activeAddress && Array.isArray(result)) { - const activeAccount = result.find((acc: any) => - acc.item === activeAddress || acc.item?.toString() === activeAddress + const activeAccount = result.find( + (acc: any) => + acc.item === activeAddress || + acc.item?.toString() === activeAddress, ); result = activeAccount ? 
[activeAccount] : result; } @@ -632,7 +728,11 @@ async function processWalletMessage(session: ActiveSession, message: any) { walletId: WALLET_CONFIG.walletId, }); })().catch(async (error: any) => { - log.error('[background] Error handling wallet message:', message.type, error); + log.error( + "[background] Error handling wallet message:", + message.type, + error, + ); await handler.sendResponse(session.sessionId, { messageId: message.messageId, error: error.message, @@ -644,13 +744,22 @@ async function processWalletMessage(session: ActiveSession, message: any) { // docs:start:callbacks const callbacks: BackgroundConnectionCallbacks = { onPendingDiscovery: async (discovery) => { - log.debug('[background] Pending discovery:', discovery.requestId, 'from', discovery.origin); + log.debug( + "[background] Pending discovery:", + discovery.requestId, + "from", + discovery.origin, + ); // Clean up stale sessions from this tab (e.g. page refresh creates a new // discovery while the old session is still in activeSessions). 
for (const session of handler.getActiveSessions()) { if (session.tabId === discovery.tabId) { - log.debug('[background] Terminating stale session for tab:', discovery.tabId, session.sessionId); + log.debug( + "[background] Terminating stale session for tab:", + discovery.tabId, + session.sessionId, + ); capabilitiesApprovedSessions.delete(session.sessionId); queuedMessages.delete(session.sessionId); handler.terminateSession(session.sessionId); @@ -658,16 +767,22 @@ const callbacks: BackgroundConnectionCallbacks = { } // Deduplicate: reject any existing discovery from the same tab - const existing = handler.getPendingDiscoveries().find( - (d) => d.tabId === discovery.tabId && d.requestId !== discovery.requestId - ); + const existing = handler + .getPendingDiscoveries() + .find( + (d) => + d.tabId === discovery.tabId && d.requestId !== discovery.requestId, + ); if (existing) { handler.rejectDiscovery(existing.requestId); } // Auto-approve if origin is already trusted (reconnection after page refresh) if (await isTrustedOrigin(discovery.origin, discovery.appId)) { - log.debug('[background] Auto-approving trusted origin:', discovery.origin); + log.debug( + "[background] Auto-approving trusted origin:", + discovery.origin, + ); handler.approveDiscovery(discovery.requestId); return; } @@ -678,14 +793,20 @@ const callbacks: BackgroundConnectionCallbacks = { }, onSessionEstablished: async (session: ActiveSession) => { - log.debug('[background] Session established:', session.sessionId); + log.debug("[background] Session established:", session.sessionId); // Auto-confirm if origin is already trusted (skip emoji verification) if (await isTrustedOrigin(session.origin, session.appId)) { - log.debug('[background] Auto-confirming trusted session:', session.sessionId); + log.debug( + "[background] Auto-confirming trusted session:", + session.sessionId, + ); // Pre-approve capabilities if previously granted (enables seamless reconnect) - const savedCaps = await 
getStoredCapabilities(session.origin, session.appId); + const savedCaps = await getStoredCapabilities( + session.origin, + session.appId, + ); if (savedCaps) { capabilitiesApprovedSessions.add(session.sessionId); } @@ -701,7 +822,10 @@ const callbacks: BackgroundConnectionCallbacks = { } // New origin — require emoji verification - log.debug('[background] Awaiting emoji verification for:', session.sessionId); + log.debug( + "[background] Awaiting emoji verification for:", + session.sessionId, + ); // SDK automatically removes the discovery when key exchange completes. // Show emojis in approvals so user can compare with the webapp pendingSessionVerifications.push({ @@ -730,15 +854,23 @@ const callbacks: BackgroundConnectionCallbacks = { * user must confirm before any dApp calls are processed. */ onWalletMessage: async (session: ActiveSession, message: any) => { - log.debug('[background] Wallet message:', message.type, 'from session:', session.sessionId); + log.debug( + "[background] Wallet message:", + message.type, + "from session:", + session.sessionId, + ); // Block wallet messages until the user confirms emoji verification in the extension. // The dApp's calls (e.g. getAccounts) will wait until the extension user approves. const awaitingVerification = pendingSessionVerifications.some( - (v) => v.sessionId === session.sessionId + (v) => v.sessionId === session.sessionId, ); if (awaitingVerification) { - log.debug('[background] Session awaiting verification, queuing message:', message.type); + log.debug( + "[background] Session awaiting verification, queuing message:", + message.type, + ); const queue = queuedMessages.get(session.sessionId) ?? 
[]; queue.push({ session, message }); queuedMessages.set(session.sessionId, queue); @@ -751,7 +883,11 @@ const callbacks: BackgroundConnectionCallbacks = { }; // docs:end:callbacks -const handler = new BackgroundConnectionHandler(WALLET_CONFIG, transport, callbacks); +const handler = new BackgroundConnectionHandler( + { ...WALLET_CONFIG, logger: log }, + transport, + callbacks, +); handler.initialize(); // Clean up sessions when a tab is closed @@ -774,27 +910,39 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { * Storage proxy for the offscreen document. (#7) * Validates that the request comes from the extension itself (not content scripts or external). */ - if (message.type === 'storage-get' || message.type === 'storage-set') { + if (message.type === "storage-get" || message.type === "storage-set") { // Security: only allow storage proxy from extension pages (offscreen, popup) (#7) // Content scripts have sender.tab set; extension pages (offscreen, popup) do not. 
if (sender.tab) { - log.warn('[background] Rejected storage proxy from content script, tab:', sender.tab.id); - sendResponse({ success: false, error: 'Storage proxy not allowed from content scripts' }); + log.warn( + "[background] Rejected storage proxy from content script, tab:", + sender.tab.id, + ); + sendResponse({ + success: false, + error: "Storage proxy not allowed from content scripts", + }); return false; } - if (message.type === 'storage-get') { - chrome.storage.local.get(message.key).then((result) => { - sendResponse({ success: true, result: result[message.key] }); - }).catch((err) => { - sendResponse({ success: false, error: err.message }); - }); + if (message.type === "storage-get") { + chrome.storage.local + .get(message.key) + .then((result) => { + sendResponse({ success: true, result: result[message.key] }); + }) + .catch((err) => { + sendResponse({ success: false, error: err.message }); + }); } else { - chrome.storage.local.set(message.data).then(() => { - sendResponse({ success: true }); - }).catch((err) => { - sendResponse({ success: false, error: err.message }); - }); + chrome.storage.local + .set(message.data) + .then(() => { + sendResponse({ success: true }); + }) + .catch((err) => { + sendResponse({ success: false, error: err.message }); + }); } return true; // async response (#23) } @@ -803,7 +951,7 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { return false; } - log.debug('[background] Popup message:', message.type); + log.debug("[background] Popup message:", message.type); // Reset auto-lock on any popup interaction (#28) resetAutoLockTimer(); @@ -825,37 +973,38 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { case MessageTypes.APPROVE_TRANSACTION: { const pending = pendingTransactions.find( - (t) => t.messageId === message.messageId + (t) => t.messageId === message.messageId, ); if (pending) { pendingTransactions = pendingTransactions.filter( - (t) => t.messageId !== 
message.messageId + (t) => t.messageId !== message.messageId, ); updateBadge(); - const taskId = startBackgroundTask(`tx:${pending.method}`, - handleTransactionApproval(pending) + const taskId = startBackgroundTask( + `tx:${pending.method}`, + handleTransactionApproval(pending), ); sendResponse({ success: true, result: { taskId } }); } else { - sendResponse({ success: false, error: 'Transaction not found' }); + sendResponse({ success: false, error: "Transaction not found" }); } return false; } case MessageTypes.REJECT_TRANSACTION: { const pending = pendingTransactions.find( - (t) => t.messageId === message.messageId + (t) => t.messageId === message.messageId, ); if (pending) { handler.sendResponse(pending.sessionId, { messageId: pending.messageId, - error: 'Transaction rejected by user', + error: "Transaction rejected by user", walletId: WALLET_CONFIG.walletId, }); pendingTransactions = pendingTransactions.filter( - (t) => t.messageId !== message.messageId + (t) => t.messageId !== message.messageId, ); updateBadge(); } @@ -865,56 +1014,67 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { case MessageTypes.APPROVE_CAPABILITIES: { const pending = pendingCapabilities.find( - (c) => c.messageId === message.messageId + (c) => c.messageId === message.messageId, ); if (pending) { pendingCapabilities = pendingCapabilities.filter( - (c) => c.messageId !== message.messageId + (c) => c.messageId !== message.messageId, ); // Build granted capabilities using the shared active-account helper - getGrantableAccounts().then((grantedAccounts) => { - const granted = pending.capabilities.map((cap: any) => { - if (cap.type === 'accounts') { - return { ...cap, accounts: grantedAccounts }; - } - return { ...cap }; - }); - - return handler.sendResponse(pending.sessionId, { - messageId: pending.messageId, - result: { - version: '1.0', - granted, - wallet: { - name: WALLET_CONFIG.walletName, - version: WALLET_CONFIG.walletVersion, + getGrantableAccounts() + 
.then((grantedAccounts) => { + const granted = pending.capabilities.map((cap: any) => { + if (cap.type === "accounts") { + return { ...cap, accounts: grantedAccounts }; + } + return { ...cap }; + }); + + return handler.sendResponse(pending.sessionId, { + messageId: pending.messageId, + result: { + version: "1.0", + granted, + wallet: { + name: WALLET_CONFIG.walletName, + version: WALLET_CONFIG.walletVersion, + }, }, - }, - walletId: WALLET_CONFIG.walletId, - }); - }).then(async () => { - capabilitiesApprovedSessions.add(pending.sessionId); - - // Persist granted capabilities for auto-reconnect - const approvedSession = handler.getSession(pending.sessionId); - if (approvedSession) { - const trusted = await getTrustedOrigins(); - const entry = trusted.find(t => t.origin === approvedSession.origin && t.appId === approvedSession.appId); - if (entry) { - entry.grantedCapabilities = pending.capabilities.map((cap: any) => ({ ...cap })); - await chrome.storage.local.set({ [TRUSTED_ORIGINS_KEY]: trusted }); + walletId: WALLET_CONFIG.walletId, + }); + }) + .then(async () => { + capabilitiesApprovedSessions.add(pending.sessionId); + + // Persist granted capabilities for auto-reconnect + const approvedSession = handler.getSession(pending.sessionId); + if (approvedSession) { + const trusted = await getTrustedOrigins(); + const entry = trusted.find( + (t) => + t.origin === approvedSession.origin && + t.appId === approvedSession.appId, + ); + if (entry) { + entry.grantedCapabilities = pending.capabilities.map( + (cap: any) => ({ ...cap }), + ); + await chrome.storage.local.set({ + [TRUSTED_ORIGINS_KEY]: trusted, + }); + } } - } - updateBadge(); - sendResponse({ success: true }); - }).catch((err) => { - log.error('[background] Failed to approve capabilities:', err); - sendResponse({ success: false, error: getErrorMessage(err) }); - }); + updateBadge(); + sendResponse({ success: true }); + }) + .catch((err) => { + log.error("[background] Failed to approve capabilities:", err); + 
sendResponse({ success: false, error: getErrorMessage(err) }); + }); } else { - sendResponse({ success: false, error: 'Capability request not found' }); + sendResponse({ success: false, error: "Capability request not found" }); return false; } return true; @@ -922,17 +1082,17 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { case MessageTypes.REJECT_CAPABILITIES: { const pending = pendingCapabilities.find( - (c) => c.messageId === message.messageId + (c) => c.messageId === message.messageId, ); if (pending) { pendingCapabilities = pendingCapabilities.filter( - (c) => c.messageId !== message.messageId + (c) => c.messageId !== message.messageId, ); handler.sendResponse(pending.sessionId, { messageId: pending.messageId, result: { - version: '1.0', + version: "1.0", granted: [], wallet: { name: WALLET_CONFIG.walletName, @@ -952,12 +1112,12 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { // User confirmed emojis match — session is now fully active. // Flush any wallet messages that were queued while awaiting verification. pendingSessionVerifications = pendingSessionVerifications.filter( - (v) => v.sessionId !== message.sessionId + (v) => v.sessionId !== message.sessionId, ); const queued = queuedMessages.get(message.sessionId) ?? []; queuedMessages.delete(message.sessionId); for (const { session, message: msg } of queued) { - log.debug('[background] Flushing queued message:', msg.type); + log.debug("[background] Flushing queued message:", msg.type); processWalletMessage(session, msg); } @@ -975,14 +1135,14 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { case MessageTypes.REJECT_SESSION: { // User rejected emoji verification — reject queued messages and terminate the session. pendingSessionVerifications = pendingSessionVerifications.filter( - (v) => v.sessionId !== message.sessionId + (v) => v.sessionId !== message.sessionId, ); const rejected = queuedMessages.get(message.sessionId) ?? 
[]; queuedMessages.delete(message.sessionId); for (const { session, message: msg } of rejected) { handler.sendResponse(session.sessionId, { messageId: msg.messageId, - error: 'Session verification rejected by user', + error: "Session verification rejected by user", walletId: WALLET_CONFIG.walletId, }); } @@ -997,7 +1157,10 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { // Remove from trusted origins so next connection requires full approval (#30) const disconnectedSession = handler.getSession(message.sessionId); if (disconnectedSession) { - removeTrustedOrigin(disconnectedSession.origin, disconnectedSession.appId); + removeTrustedOrigin( + disconnectedSession.origin, + disconnectedSession.appId, + ); } capabilitiesApprovedSessions.delete(message.sessionId); handler.terminateSession(message.sessionId); @@ -1006,7 +1169,7 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { return false; } - case 'getPendingItems': { + case "getPendingItems": { sendResponse({ success: true, result: getFullState(), @@ -1015,37 +1178,52 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { } case MessageTypes.GET_ACCOUNTS: { - chrome.storage.local.get(STORAGE_KEYS.ACCOUNTS) + chrome.storage.local + .get(STORAGE_KEYS.ACCOUNTS) .then((data) => { - const accounts = (data[STORAGE_KEYS.ACCOUNTS] || []).map((acc: any) => ({ - address: acc.address, - alias: acc.alias, - isDeployed: acc.isDeployed, - })); + const accounts = (data[STORAGE_KEYS.ACCOUNTS] || []).map( + (acc: any) => ({ + address: acc.address, + alias: acc.alias, + isDeployed: acc.isDeployed, + }), + ); sendResponse({ success: true, result: accounts }); }) - .catch((error) => sendResponse({ success: false, error: error.message })); + .catch((error) => + sendResponse({ success: false, error: error.message }), + ); return true; // async (#23) } case MessageTypes.GET_ACTIVE_ACCOUNT: { - chrome.storage.local.get(STORAGE_KEYS.ACTIVE_ACCOUNT) + 
chrome.storage.local + .get(STORAGE_KEYS.ACTIVE_ACCOUNT) .then((data) => { - sendResponse({ success: true, result: data[STORAGE_KEYS.ACTIVE_ACCOUNT] || null }); + sendResponse({ + success: true, + result: data[STORAGE_KEYS.ACTIVE_ACCOUNT] || null, + }); }) - .catch((error) => sendResponse({ success: false, error: error.message })); + .catch((error) => + sendResponse({ success: false, error: error.message }), + ); return true; } case MessageTypes.SET_ACTIVE_ACCOUNT: { - chrome.storage.local.set({ [STORAGE_KEYS.ACTIVE_ACCOUNT]: message.address }) + chrome.storage.local + .set({ [STORAGE_KEYS.ACTIVE_ACCOUNT]: message.address }) .then(() => sendResponse({ success: true })) - .catch((error) => sendResponse({ success: false, error: error.message })); + .catch((error) => + sendResponse({ success: false, error: error.message }), + ); return true; } case MessageTypes.UNLOCK_WALLET: { - const taskId = startBackgroundTask('unlock', + const taskId = startBackgroundTask( + "unlock", sendToOffscreen({ type: MessageTypes.UNLOCK_WALLET, password: message.password, @@ -1054,14 +1232,15 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { persistState(); resetAutoLockTimer(); return result; - }) + }), ); sendResponse({ success: true, result: { taskId } }); return false; } case MessageTypes.GET_WALLET_STATUS: { - chrome.storage.local.get(STORAGE_KEYS.PASSWORD_DATA) + chrome.storage.local + .get(STORAGE_KEYS.PASSWORD_DATA) .then((data) => { sendResponse({ success: true, @@ -1071,12 +1250,15 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { }, }); }) - .catch((error) => sendResponse({ success: false, error: error.message })); + .catch((error) => + sendResponse({ success: false, error: error.message }), + ); return true; } case MessageTypes.SETUP_PASSWORD: { - const taskId = startBackgroundTask('setup-password', + const taskId = startBackgroundTask( + "setup-password", sendToOffscreen({ type: MessageTypes.SETUP_PASSWORD, password: 
message.password, @@ -1085,52 +1267,62 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { persistState(); resetAutoLockTimer(); return result; - }) + }), ); sendResponse({ success: true, result: { taskId } }); return false; } case MessageTypes.MARK_DEPLOYED: { - chrome.storage.local.get(STORAGE_KEYS.ACCOUNTS) + chrome.storage.local + .get(STORAGE_KEYS.ACCOUNTS) .then((data) => { const accounts = data[STORAGE_KEYS.ACCOUNTS] || []; - const account = accounts.find((a: any) => a.address === message.address); + const account = accounts.find( + (a: any) => a.address === message.address, + ); if (account) { account.isDeployed = true; - return chrome.storage.local.set({ [STORAGE_KEYS.ACCOUNTS]: accounts }); + return chrome.storage.local.set({ + [STORAGE_KEYS.ACCOUNTS]: accounts, + }); } }) .then(() => sendResponse({ success: true, result: { success: true } })) - .catch((error) => sendResponse({ success: false, error: error.message })); + .catch((error) => + sendResponse({ success: false, error: error.message }), + ); return true; } case MessageTypes.CREATE_ACCOUNT: { - const taskId = startBackgroundTask('create-account', + const taskId = startBackgroundTask( + "create-account", sendToOffscreen({ type: MessageTypes.CREATE_ACCOUNT, alias: message.alias, - }) + }), ); sendResponse({ success: true, result: { taskId } }); return false; } case MessageTypes.DEPLOY_ACCOUNT: { - const taskId = startBackgroundTask('deploy-account', + const taskId = startBackgroundTask( + "deploy-account", sendToOffscreen({ type: MessageTypes.DEPLOY_ACCOUNT, address: message.address, - }) + }), ); sendResponse({ success: true, result: { taskId } }); return false; } case MessageTypes.EXPORT_WALLET: { - const taskId = startBackgroundTask('export-wallet', - sendToOffscreen({ type: MessageTypes.EXPORT_WALLET }) + const taskId = startBackgroundTask( + "export-wallet", + sendToOffscreen({ type: MessageTypes.EXPORT_WALLET }), ); sendResponse({ success: true, result: { taskId } }); 
return false; @@ -1138,7 +1330,11 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { case MessageTypes.IMPORT_WALLET: { // Wipe wallet data from chrome.storage.local and lock the wallet - chrome.storage.local.remove([STORAGE_KEYS.ACCOUNTS, STORAGE_KEYS.PASSWORD_DATA, STORAGE_KEYS.ACTIVE_ACCOUNT]); + chrome.storage.local.remove([ + STORAGE_KEYS.ACCOUNTS, + STORAGE_KEYS.PASSWORD_DATA, + STORAGE_KEYS.ACTIVE_ACCOUNT, + ]); walletUnlocked = false; persistState(); // Tell offscreen to clear cached key @@ -1148,7 +1344,8 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { } case MessageTypes.IMPORT_WALLET_ACCOUNTS: { - const taskId = startBackgroundTask('import-wallet-accounts', + const taskId = startBackgroundTask( + "import-wallet-accounts", sendToOffscreen({ type: MessageTypes.IMPORT_WALLET_ACCOUNTS, accounts: message.accounts, @@ -1158,21 +1355,21 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => { persistState(); resetAutoLockTimer(); return result; - }) + }), ); sendResponse({ success: true, result: { taskId } }); return false; } default: { - log.warn('[background] Unknown message type:', message.type); + log.warn("[background] Unknown message type:", message.type); return false; } } }); async function handleTransactionApproval( - pending: PendingTransaction + pending: PendingTransaction, ): Promise { try { const result = await sendToOffscreen({ @@ -1189,7 +1386,11 @@ async function handleTransactionApproval( return result; } catch (error: any) { - log.error('[background] Transaction approval failed:', pending.method, error); + log.error( + "[background] Transaction approval failed:", + pending.method, + error, + ); await handler.sendResponse(pending.sessionId, { messageId: pending.messageId, error: error.message, @@ -1204,7 +1405,7 @@ async function handleTransactionApproval( * Extension lifecycle handlers. 
(#17) */ chrome.runtime.onInstalled.addListener((details) => { - log.debug('[background] Extension installed/updated:', details.reason); + log.debug("[background] Extension installed/updated:", details.reason); // Clear stale pending state — sessions don't survive extension reload pendingTransactions = []; @@ -1214,23 +1415,28 @@ chrome.runtime.onInstalled.addListener((details) => { capabilitiesApprovedSessions.clear(); persistState(); - if (details.reason === 'install') { + if (details.reason === "install") { // First install — nothing to migrate - log.debug('[background] First install, no migration needed'); - } else if (details.reason === 'update') { + log.debug("[background] First install, no migration needed"); + } else if (details.reason === "update") { // Version update — could add migration logic here - log.debug('[background] Updated from', details.previousVersion); + log.debug("[background] Updated from", details.previousVersion); } }); // Restore state on service worker startup (#8) restoreState().then(() => { - log.debug('[background] Service worker initialized'); + log.debug("[background] Service worker initialized"); }); // Eagerly preload the offscreen document so WASM and PXE deps are warm -ensureOffscreenDocument().then(() => { - log.debug('[background] Offscreen document preloaded'); -}).catch((err) => { - log.error('[background] Offscreen preload failed (will retry on demand):', err); -}); +ensureOffscreenDocument() + .then(() => { + log.debug("[background] Offscreen document preloaded"); + }) + .catch((err) => { + log.error( + "[background] Offscreen preload failed (will retry on demand):", + err, + ); + }); diff --git a/docs/netlify.toml b/docs/netlify.toml index 995dc8eaf80a..a80bea176f71 100644 --- a/docs/netlify.toml +++ b/docs/netlify.toml @@ -813,3 +813,8 @@ # PXE: capsule operation attempted with a scope not in the allowed scopes list from = "/errors/10" to = "/developers/docs/aztec-nr/framework-description/advanced/how_to_use_capsules" + 
+[[redirects]] + # PXE: cross-contract utility call denied by execution hook + from = "/errors/11" + to = "/developers/docs/aztec-nr/debugging#cross-contract-utility-call-denied" diff --git a/docs/network_versioned_docs/version-v4.2.0/operators/reference/node-api-reference.md b/docs/network_versioned_docs/version-v4.2.0/operators/reference/node-api-reference.md index d68617cf4c4e..225b6321a493 100644 --- a/docs/network_versioned_docs/version-v4.2.0/operators/reference/node-api-reference.md +++ b/docs/network_versioned_docs/version-v4.2.0/operators/reference/node-api-reference.md @@ -109,9 +109,9 @@ Get a block specified by its block number or 'latest'. **Parameters**: -1. `blockParameter` - `number | "latest"` - The block parameter (block number, block hash, or 'latest'). +1. `blockParameter` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest'). -**Returns**: `L2Block` - The requested block. +**Returns**: `L2Block | undefined` - The requested block. **Example**: @@ -129,7 +129,7 @@ Get a block specified by its hash. 1. `blockHash` - `BlockHash` - The block hash being requested. -**Returns**: `L2Block` - The requested block. +**Returns**: `L2Block | undefined` - The requested block. **Example**: @@ -147,7 +147,7 @@ Get a block specified by its archive root. 1. `archive` - `Fr` - The archive root being requested. -**Returns**: `L2Block` - The requested block. +**Returns**: `L2Block | undefined` - The requested block. **Example**: @@ -173,7 +173,7 @@ Method to request blocks. Will attempt to return all requested blocks but will r ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getBlocks","params":[12345,12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getBlocks","params":[1,100],"id":1}' ``` ### node_getBlockHeader @@ -182,9 +182,9 @@ Returns the block header for a given block number, block hash, or 'latest'. **Parameters**: -1. 
`block` - `number | "latest" | undefined` - The block parameter (block number, block hash, or 'latest'). Defaults to 'latest'. +1. `block` - `BlockHash | number | "latest" | undefined` - The block parameter (block number, block hash, or 'latest'). Defaults to 'latest'. -**Returns**: `BlockHeader` - The requested block header. +**Returns**: `BlockHeader | undefined` - The requested block header. **Example**: @@ -202,7 +202,7 @@ Get a block header specified by its archive root. 1. `archive` - `Fr` - The archive root being requested. -**Returns**: `BlockHeader` - The requested block header. +**Returns**: `BlockHeader | undefined` - The requested block header. **Example**: @@ -228,7 +228,7 @@ Retrieves a collection of checkpoints. ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getCheckpoints","params":[12345,12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getCheckpoints","params":[1,100],"id":1}' ``` ### node_getCheckpointedBlocks @@ -245,7 +245,7 @@ curl -X POST http://localhost:8080 \ ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getCheckpointedBlocks","params":[12345,12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getCheckpointedBlocks","params":[1,100],"id":1}' ``` ### node_getCheckpointsDataForEpoch @@ -332,7 +332,7 @@ Method to retrieve a single pending tx. 1. `txHash` - `TxHash` - The transaction hash to return. -**Returns**: `Tx` - The pending tx if it exists. +**Returns**: `Tx | undefined` - The pending tx if it exists. **Example**: @@ -376,7 +376,7 @@ Method to retrieve pending txs. 
```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getPendingTxs","params":[12345,"0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getPendingTxs","params":[100,"0x1234..."],"id":1}' ``` ### node_getPendingTxCount @@ -442,9 +442,12 @@ curl -X POST http://localhost:8080 \ Gets the storage value at the given contract storage slot. +**Remarks**: The storage slot here refers to the slot as it is defined in Noir not the index in the merkle tree. +Aztec's version of `eth_getStorageAt`. + **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `contract` - `AztecAddress` - Address of the contract to query. 3. `slot` - `Fr` - Slot to query. @@ -483,11 +486,11 @@ the leaves were inserted. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `treeId` - `MerkleTreeId` - The tree to search in. 3. `leafValues` - `Fr[]` - The values to search for. -**Returns**: `DataInBlock | undefined[]` - The indices of leaves and the block metadata of a block in which the leaves were inserted. +**Returns**: `(DataInBlock | undefined)[]` - The indices of leaves and the block metadata of a block in which the leaves were inserted. **Example**: @@ -503,10 +506,10 @@ Returns a nullifier membership witness for a given nullifier at a given block. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. 
`referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `nullifier` - `Fr` - Nullifier we try to find witness for. -**Returns**: `NullifierMembershipWitness` - The nullifier membership witness (if found). +**Returns**: `NullifierMembershipWitness | undefined` - The nullifier membership witness (if found). **Example**: @@ -520,12 +523,16 @@ curl -X POST http://localhost:8080 \ Returns a low nullifier membership witness for a given nullifier at a given block. +**Remarks**: Low nullifier witness can be used to perform a nullifier non-inclusion proof by leveraging the "linked +list structure" of leaves and proving that a lower nullifier is pointing to a bigger next value than the nullifier +we are trying to prove non-inclusion for. + **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `nullifier` - `Fr` - Nullifier we try to find the low nullifier witness for. -**Returns**: `NullifierMembershipWitness` - The low nullifier membership witness (if found). +**Returns**: `NullifierMembershipWitness | undefined` - The low nullifier membership witness (if found). **Example**: @@ -539,12 +546,16 @@ curl -X POST http://localhost:8080 \ Returns a public data tree witness for a given leaf slot at a given block. +**Remarks**: The witness can be used to compute the current value of the public data tree leaf. If the low leaf preimage corresponds to an +"in range" slot, means that the slot doesn't exist and the value is 0. If the low leaf preimage corresponds to the exact slot, the current value +is contained in the leaf preimage. + **Parameters**: -1. 
`referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `leafSlot` - `Fr` - The leaf slot we try to find the witness for. -**Returns**: `PublicDataWitness` - The public data witness (if found). +**Returns**: `PublicDataWitness | undefined` - The public data witness (if found). **Example**: @@ -565,7 +576,7 @@ a specific block exists in the chain's history. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data (which contains the root of the archive tree in which we are searching for the block hash). 2. `blockHash` - `BlockHash` - The block hash to find in the archive tree. @@ -585,7 +596,7 @@ Returns a membership witness for a given note hash at a given block. **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `noteHash` - `Fr` - The note hash we try to find the witness for. **Returns**: `MembershipWitness | undefined` @@ -606,7 +617,7 @@ Returns the index and a sibling path for a leaf in the committed l1 to l2 data t **Parameters**: -1. `referenceBlock` - `number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. +1. `referenceBlock` - `BlockHash | number | "latest"` - The block parameter (block number, block hash, or 'latest') at which to get the data. 2. `l1ToL2Message` - `Fr` - The l1ToL2Message to get the index / sibling path for. 
**Returns**: `[bigint, SiblingPath] | undefined` - A tuple of the index and the sibling path of the L1ToL2Message (undefined if not found). @@ -736,7 +747,7 @@ for a tag, the caller should fetch the next page to check for more logs. ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getPrivateLogsByTags","params":[["0x1234..."],12345,"0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getPrivateLogsByTags","params":[["0x1234..."],0,"0x1234..."],"id":1}' ``` ### node_getPublicLogsByTagsFromContract @@ -762,7 +773,7 @@ for a tag, the caller should fetch the next page to check for more logs. ```bash curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"node_getPublicLogsByTagsFromContract","params":["0x1234...",["0x1234..."],12345,"0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"node_getPublicLogsByTagsFromContract","params":["0x1234...",["0x1234..."],0,"0x1234..."],"id":1}' ``` ## Contract queries @@ -821,6 +832,26 @@ curl -X POST http://localhost:8080 \ -d '{"jsonrpc":"2.0","method":"node_getCurrentMinFees","params":[],"id":1}' ``` +### node_getPredictedMinFees + +Returns predicted min fees for the current slot and next N slots. +Each entry accounts for the L1 gas oracle transition and congestion growth based on the +given mana usage estimate. Defaults to target usage (steady state). + +**Parameters**: + +1. `manaUsage` - `ManaUsageEstimate | undefined` - Expected mana usage per checkpoint (none, target, or limit). + +**Returns**: `GasFees[]` - An array of GasFees, one per slot in the prediction window. + +**Example**: + +```bash +curl -X POST http://localhost:8080 \ + -H 'Content-Type: application/json' \ + -d '{"jsonrpc":"2.0","method":"node_getPredictedMinFees","params":["target"],"id":1}' +``` + ### node_getMaxPriorityFees Method to fetch the current max priority fee of txs in the mempool. 
@@ -1169,6 +1200,8 @@ Pauses syncing and rolls back the database to the target L2 block number. **Parameters**: 1. `targetBlockNumber` - `number` - The block number to roll back to. +2. `force` - `boolean | undefined` - If true, clears the world state db and p2p dbs if rolling back to behind the finalized block. +3. `resumeSync` - `boolean | undefined` - If true (default), resumes archiver and world state sync after rollback. **Returns**: `void` @@ -1177,7 +1210,7 @@ Pauses syncing and rolls back the database to the target L2 block number. ```bash curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345,true,true],"id":1}' ``` **Example (Docker)**: @@ -1185,7 +1218,7 @@ curl -X POST http://localhost:8880 \ ```bash docker exec -it aztec-node curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_rollbackTo","params":[12345,true,true],"id":1}' ``` ### nodeAdmin_startSnapshotUpload @@ -1214,30 +1247,6 @@ docker exec -it aztec-node curl -X POST http://localhost:8880 \ -d '{"jsonrpc":"2.0","method":"nodeAdmin_startSnapshotUpload","params":["0x1234..."],"id":1}' ``` -### nodeAdmin_getSlashPayloads - -Returns all monitored payloads by the slasher for the current round. 
- -**Parameters**: None - -**Returns**: `SlashPayloadRound[]` - -**Example (CLI)**: - -```bash -curl -X POST http://localhost:8880 \ - -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashPayloads","params":[],"id":1}' -``` - -**Example (Docker)**: - -```bash -docker exec -it aztec-node curl -X POST http://localhost:8880 \ - -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashPayloads","params":[],"id":1}' -``` - ### nodeAdmin_getSlashOffenses Returns all offenses applicable for the given round. @@ -1253,7 +1262,7 @@ Returns all offenses applicable for the given round. ```bash curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["current"],"id":1}' ``` **Example (Docker)**: @@ -1261,7 +1270,7 @@ curl -X POST http://localhost:8880 \ ```bash docker exec -it aztec-node curl -X POST http://localhost:8880 \ -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["0x1234..."],"id":1}' + -d '{"jsonrpc":"2.0","method":"nodeAdmin_getSlashOffenses","params":["current"],"id":1}' ``` ### nodeAdmin_reloadKeystore diff --git a/docs/package.json b/docs/package.json index 5ce7a7181a8b..a6af133f4eae 100644 --- a/docs/package.json +++ b/docs/package.json @@ -3,7 +3,7 @@ "version": "0.0.0", "private": true, "scripts": { - "build": "yarn clean && yarn preprocess && yarn spellcheck && yarn preprocess:move && yarn validate:redirects && yarn validate:api-ref-links && docusaurus build && node scripts/append_api_docs_to_llms.js", + "build": "yarn clean && yarn preprocess && yarn spellcheck && yarn preprocess:move && yarn validate:redirects && yarn validate:api-ref-links && docusaurus build && node scripts/augment_sitemap.js && node scripts/append_api_docs_to_llms.js", 
"validate:redirects": "./scripts/validate_redirect_targets.sh", "validate:api-ref-links": "./scripts/validate_api_ref_links.sh", "clean": "./scripts/clean.sh", @@ -21,7 +21,6 @@ "test:preprocess": "node --test src/preprocess/__tests__/*.test.js" }, "dependencies": { - "@cookbookdev/docsbot": "^4.25.15", "@docusaurus/core": "3.9.1", "@docusaurus/plugin-content-docs": "3.9.1", "@docusaurus/plugin-ideal-image": "3.9.1", @@ -36,13 +35,14 @@ "react-markdown": "6.0.3", "react-player": "^2.16.1", "rehype-katex": "7", + "remark-gfm": "^1.0.0", "remark-math": "6" }, "devDependencies": { "@docusaurus/tsconfig": "3.9.1", "@tsconfig/docusaurus": "^1.0.7", "cspell": "^8.19.4", - "docusaurus-plugin-llms": "^0.2.0", + "docusaurus-plugin-llms": "^0.4.0", "dotenv": "^16.6.1", "netlify-cli": "^24.0.1" }, diff --git a/docs/scripts/append_api_docs_to_llms.js b/docs/scripts/append_api_docs_to_llms.js index 79f4c40e8426..32aa9ce7b45e 100644 --- a/docs/scripts/append_api_docs_to_llms.js +++ b/docs/scripts/append_api_docs_to_llms.js @@ -38,10 +38,19 @@ if (defaultType && fs.existsSync(path.join(STATIC_DIR, `aztec-nr-api/${defaultTy name: "Aztec.nr API Reference", dir: `aztec-nr-api/${defaultType}`, description: `Auto-generated API documentation for Aztec.nr (${defaultVersion})`, + format: "html", }); } else if (!defaultType) { console.warn("Warning: No default version found for API docs"); } +if (defaultType && fs.existsSync(path.join(STATIC_DIR, `typescript-api/${defaultType}`))) { + API_DIRS.push({ + name: "TypeScript API Reference", + dir: `typescript-api/${defaultType}`, + description: `Auto-generated TypeScript API documentation for Aztec packages (${defaultVersion})`, + format: "markdown", + }); +} /** * Extract text content from HTML, stripping tags and normalizing whitespace. @@ -94,9 +103,9 @@ function htmlToText(html) { } /** - * Recursively find all HTML files in a directory. + * Recursively find all files with a given extension in a directory. 
*/ -function findHtmlFiles(dir, files = []) { +function findFiles(dir, ext, files = []) { if (!fs.existsSync(dir)) { return files; } @@ -106,8 +115,8 @@ function findHtmlFiles(dir, files = []) { for (const entry of entries) { const fullPath = path.join(dir, entry.name); if (entry.isDirectory()) { - findHtmlFiles(fullPath, files); - } else if (entry.name.endsWith(".html")) { + findFiles(fullPath, ext, files); + } else if (entry.name.endsWith(ext)) { files.push(fullPath); } } @@ -115,6 +124,21 @@ function findHtmlFiles(dir, files = []) { return files; } +/** + * Recursively find all HTML files in a directory. + */ +function findHtmlFiles(dir) { + return findFiles(dir, ".html"); +} + +/** + * Recursively find all markdown files in a directory. + * Note: `llm-summary.txt` is naturally excluded since it does not end in `.md`. + */ +function findMarkdownFiles(dir) { + return findFiles(dir, ".md"); +} + /** * Get the relative URL path for a file. */ @@ -171,6 +195,7 @@ function main() { : ""; let totalFiles = 0; + let sectionsAdded = 0; let linksSection = "\n\n# API Reference Documentation\n\n"; let fullContentSection = "\n\n---\n\n# API Reference Documentation\n\n"; @@ -182,43 +207,66 @@ function main() { continue; } - const htmlFiles = sortByImportance(findHtmlFiles(dirPath)); - console.log(`Found ${htmlFiles.length} HTML files in ${apiDir.dir}`); + const isMarkdown = apiDir.format === "markdown"; + const files = isMarkdown + ? findMarkdownFiles(dirPath).sort() + : sortByImportance(findHtmlFiles(dirPath)); + const ext = isMarkdown ? ".md" : ".html"; + console.log(`Found ${files.length} ${isMarkdown ? 
"markdown" : "HTML"} files in ${apiDir.dir}`); - if (htmlFiles.length === 0) { + if (files.length === 0) { continue; } + sectionsAdded++; + // Add section header linksSection += `## ${apiDir.name}\n\n`; linksSection += `${apiDir.description}\n\n`; fullContentSection += `## ${apiDir.name}\n\n`; fullContentSection += `${apiDir.description}\n\n`; - // Process only index files for links to avoid overwhelming the llms.txt - const indexFiles = htmlFiles.filter( - (f) => f.endsWith("index.html") || f.includes("/fn.") || f.includes("/struct.") || f.includes("/trait.") - ); - - // Add links for key files - for (const file of indexFiles.slice(0, 100)) { - // Limit to 100 links per section - const urlPath = getUrlPath(file, STATIC_DIR); - const fileName = path.basename(file, ".html"); - linksSection += `- [${fileName}](${urlPath})\n`; - } + if (isMarkdown) { + // For markdown API docs, add a link per file and include llm-summary.txt if present + const summaryPath = path.join(dirPath, "llm-summary.txt"); + if (fs.existsSync(summaryPath)) { + linksSection += fs.readFileSync(summaryPath, "utf-8") + "\n\n"; + } + // Cap link list at 100 entries to bound llms.txt size as the API surface grows. + for (const file of files.slice(0, 100)) { + const urlPath = getUrlPath(file, STATIC_DIR); + const fileName = path.basename(file, ext); + linksSection += `- [${fileName}](${urlPath})\n`; + } + if (files.length > 100) { + linksSection += `- ... 
and ${files.length - 100} more files\n`; + } + } else { + // For HTML API docs, process only index files for links + const indexFiles = files.filter( + (f) => f.endsWith("index.html") || f.includes("/fn.") || f.includes("/struct.") || f.includes("/trait.") + ); + + // Add links for key files + for (const file of indexFiles.slice(0, 100)) { + // Limit to 100 links per section + const urlPath = getUrlPath(file, STATIC_DIR); + const fileName = path.basename(file, ext); + linksSection += `- [${fileName}](${urlPath})\n`; + } - if (indexFiles.length > 100) { - linksSection += `- ... and ${indexFiles.length - 100} more files\n`; + if (indexFiles.length > 100) { + linksSection += `- ... and ${indexFiles.length - 100} more files\n`; + } } linksSection += "\n"; // Add full content for all files - for (const file of htmlFiles) { + for (const file of files) { try { - const html = fs.readFileSync(file, "utf-8"); - const text = htmlToText(html); + const raw = fs.readFileSync(file, "utf-8"); + const text = isMarkdown ? raw.trim() : htmlToText(raw); if (text.length > 100) { // Only include if there's meaningful content @@ -233,6 +281,11 @@ function main() { } } + if (sectionsAdded === 0) { + console.log("No API docs found on disk — leaving llms.txt and llms-full.txt unchanged"); + return; + } + // Append to llms.txt fs.writeFileSync(llmsTxtPath, llmsTxtContent + linksSection); console.log(`Updated llms.txt with API reference links`); diff --git a/docs/scripts/augment_sitemap.js b/docs/scripts/augment_sitemap.js new file mode 100644 index 000000000000..2f6998b16fb3 --- /dev/null +++ b/docs/scripts/augment_sitemap.js @@ -0,0 +1,103 @@ +#!/usr/bin/env node +/** + * Post-build script to add static API documentation URLs to the sitemap. + * + * Docusaurus only includes its managed routes in sitemap.xml. This script + * appends entries for the auto-generated API docs in static/ that are + * copied to build/ but not indexed by the sitemap plugin. 
+ */ + +const fs = require("fs"); +const path = require("path"); + +const BUILD_DIR = path.join(__dirname, "..", "build"); +// Override with SITE_URL env var if the canonical URL ever changes. +// Kept in sync with `url` in docusaurus.config.js. +// Trailing slash is stripped because entries below prepend "/". +const SITE_URL = (process.env.SITE_URL || "https://docs.aztec.network").replace(/\/+$/, ""); + +// Load version config to determine which version subdirectory to index. +let developerVersionConfig; +try { + developerVersionConfig = require("../developer_version_config.json"); +} catch { + developerVersionConfig = null; +} + +const defaultType = developerVersionConfig?.mainnet + ? "mainnet" + : developerVersionConfig?.testnet + ? "testnet" + : null; + +if (!defaultType) { + console.warn("Warning: No default version found — skipping sitemap augmentation"); + process.exit(0); +} + +/** + * Recursively find all files with a given extension. + */ +function findFiles(dir, ext) { + const results = []; + if (!fs.existsSync(dir)) return results; + + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + results.push(...findFiles(fullPath, ext)); + } else if (entry.name.endsWith(ext)) { + results.push(fullPath); + } + } + return results; +} + +function main() { + const sitemapPath = path.join(BUILD_DIR, "sitemap.xml"); + + if (!fs.existsSync(sitemapPath)) { + console.error("Error: build/sitemap.xml not found. Run the build first."); + process.exit(1); + } + + let sitemap = fs.readFileSync(sitemapPath, "utf-8"); + + // Aztec.nr API HTML files (skip raw markdown — those aren't browsable pages) + const nrApiDir = path.join(BUILD_DIR, `aztec-nr-api/${defaultType}`); + // Exclude Noir stdlib (duplicated at noir-lang.org), the all.html mega-index, + // and per-constant global.*.html pages. 
Keeps the sitemap aligned with the + // Typesense stop_urls list so both discovery paths surface the same content. + const EXCLUDE_RE = new RegExp( + `aztec-nr-api/${defaultType}/(std/|all\\.html$|.*/global\\.[^/]+\\.html$)` + ); + const htmlFiles = findFiles(nrApiDir, ".html").filter( + (f) => !EXCLUDE_RE.test(f.replace(/\\/g, "/")) + ); + + if (htmlFiles.length === 0) { + console.log("No static API docs found to add to sitemap"); + return; + } + + // Build XML entries + const entries = htmlFiles + .map((file) => { + const relativePath = path.relative(BUILD_DIR, file).replace(/\\/g, "/"); + return `${SITE_URL}/${relativePath}monthly0.3`; + }) + .join(""); + + // Insert before closing + if (!sitemap.includes("")) { + console.error("Error: build/sitemap.xml missing closing tag — aborting."); + process.exit(1); + } + sitemap = sitemap.replace("", entries + ""); + + fs.writeFileSync(sitemapPath, sitemap); + console.log(`Added ${htmlFiles.length} Aztec.nr API doc URLs to sitemap.xml`); +} + +main(); diff --git a/docs/scripts/node_api_reference_generation/generate_node_api_reference.sh b/docs/scripts/node_api_reference_generation/generate_node_api_reference.sh index 4a8810b18435..c9af35f766f8 100755 --- a/docs/scripts/node_api_reference_generation/generate_node_api_reference.sh +++ b/docs/scripts/node_api_reference_generation/generate_node_api_reference.sh @@ -48,9 +48,23 @@ done OUTPUT_PATH="$OUTPUT_DIR/$OUTPUT_FILE" -# Verify yarn-project is built -if [[ ! -d "$YARN_PROJECT_DIR/stdlib/dest" ]]; then - echo_error "yarn-project/stdlib/dest/ not found. Run 'yarn build' from yarn-project first." +# Verify the source files the generator parses are present. +# The generator reads .ts source via the TS Compiler API, so no build is required — +# but yarn-project must have node_modules installed so that `npx tsx` can resolve typescript. 
+REQUIRED_SOURCES=( + "$YARN_PROJECT_DIR/stdlib/src/interfaces/aztec-node.ts" + "$YARN_PROJECT_DIR/stdlib/src/interfaces/aztec-node-admin.ts" + "$YARN_PROJECT_DIR/stdlib/src/block/l2_block_source.ts" +) +for src in "${REQUIRED_SOURCES[@]}"; do + if [[ ! -f "$src" ]]; then + echo_error "Required source file not found: $src" + exit 1 + fi +done + +if [[ ! -d "$YARN_PROJECT_DIR/node_modules" ]]; then + echo_error "yarn-project/node_modules/ not found. Run 'yarn install' from yarn-project first." exit 1 fi diff --git a/docs/scripts/node_api_reference_generation/generate_node_api_reference.ts b/docs/scripts/node_api_reference_generation/generate_node_api_reference.ts index 2138d6aebd18..df491aa20277 100644 --- a/docs/scripts/node_api_reference_generation/generate_node_api_reference.ts +++ b/docs/scripts/node_api_reference_generation/generate_node_api_reference.ts @@ -69,7 +69,7 @@ function extractJSDocFromNode(node: ts.Node): MethodJSDoc | undefined { const jsDocs = (node as any).jsDoc as ts.JSDoc[] | undefined; if (!jsDocs || jsDocs.length === 0) return undefined; - const jsDoc = jsDocs[jsDocs.length - 1]; + const jsDoc = jsDocs[0]; const description = extractJSDocComment(jsDoc); const params: { name: string; description: string }[] = []; @@ -275,6 +275,15 @@ function splitTopLevelArgs(text: string): string[] { function simplifyZodType(expr: string): string { const e = expr.trim(); + // Optional handling must run first so `.optional()` suffixes aren't swallowed by + // broader patterns below (e.g. `^(\w+)\.schema\b` would otherwise strip the optional + // from expressions like `L2Block.schema.optional()`). 
+ if (e.endsWith('.optional()')) { + return simplifyZodType(e.replace(/\.optional\(\)$/, '')) + ' | undefined'; + } + const optionalWrapperMatch = e.match(/^optional\(([\s\S]+)\)$/); + if (optionalWrapperMatch) return simplifyZodType(optionalWrapperMatch[1]) + ' | undefined'; + // Simple types if (e === 'z.string()') return 'string'; if (e === 'z.number()') return 'number'; @@ -295,7 +304,7 @@ function simplifyZodType(expr: string): string { if (e === 'CheckpointNumberSchema') return 'number'; if (e === 'CheckpointNumberPositiveSchema') return 'number'; if (e === 'EpochNumberSchema') return 'number'; - if (e === 'BlockParameterSchema') return 'number | "latest"'; + if (e === 'BlockParameterSchema') return 'BlockHash | number | "latest"'; // Known schema objects if (e === 'L2TipsSchema') return 'L2Tips'; @@ -322,21 +331,15 @@ function simplifyZodType(expr: string): string { const classSchemaMatch2 = e.match(/^(\w+)\.schema\b/); if (classSchemaMatch2) return classSchemaMatch2[1]; - // z.string().optional() / z.number().optional() - if (e.endsWith('.optional()')) { - return simplifyZodType(e.replace(/\.optional\(\)$/, '')) + ' | undefined'; - } - - // optional(X) - const optionalMatch = e.match(/^optional\(([\s\S]+)\)$/); - if (optionalMatch) return simplifyZodType(optionalMatch[1]) + ' | undefined'; - // z.array(...) — use balanced paren matching to extract inner type if (e.startsWith('z.array(')) { const innerEnd = findMatchingParen(e, 8 - 1); // 8 = 'z.array('.length, -1 to point at '(' if (innerEnd !== -1) { - const inner = e.substring(8, innerEnd); - return simplifyZodType(inner) + '[]'; + const inner = simplifyZodType(e.substring(8, innerEnd)); + // Parenthesize unions so `optional(Foo)` inside an array renders as `(Foo | undefined)[]`, + // not the (incorrectly parsed) `Foo | undefined[]`. + const wrapped = inner.includes('|') ? 
`(${inner})` : inner; + return wrapped + '[]'; } } @@ -397,16 +400,11 @@ function simplifyZodType(expr: string): string { return schemaRefMatch[1]; } - // Chained expressions with .optional() - if (e.includes('.optional()')) { - const base = e.replace(/\.optional\(\)\s*$/, ''); - return simplifyZodType(base) + ' | undefined'; - } - // Admin config schemas if (e.includes('ConfigSchema')) return 'object'; - // Fallback + // Fallback — surface unrecognized expressions so they can be added to the mapping. + console.warn(`[simplifyZodType] unrecognized expression, falling back to 'object': ${e}`); return 'object'; } @@ -532,27 +530,39 @@ const METHOD_GROUPS: { heading: string; namespace: string; methods: string[] }[] 'resumeSync', 'rollbackTo', 'startSnapshotUpload', - 'getSlashPayloads', 'getSlashOffenses', 'reloadKeystore', ], }, ]; -function generateExampleParam(paramType: string): string { +function generateExampleParam(paramType: string, paramName?: string): string { const t = paramType.replace(/\s*\|\s*undefined$/, '').trim(); + + // Name-aware overrides for common parameter names so paired numeric args like + // (from, limit) and (page) render as more illustrative examples than `12345, 12345`. + // `from` defaults to 1 because some schemas (BlockNumberPositiveSchema) require >= 1 + // and 1 is also valid for the relaxed BlockNumberSchema. + if (paramName && (t === 'number' || t === 'bigint')) { + const stringify = (n: number) => (t === 'bigint' ? 
`"${n}"` : String(n)); + if (paramName === 'from' || paramName === 'fromBlock') return stringify(1); + if (paramName === 'limit') return stringify(100); + if (paramName === 'page') return stringify(0); + if (paramName === 'checkpointNumber') return stringify(1); + } + if (t === 'number') return '12345'; if (t === 'bigint') return '"100"'; if (t === 'string') return '"0x1234..."'; if (t === 'boolean') return 'true'; - if (t === 'number | "latest"') return '"latest"'; - if (t.includes('"all"') || t.includes('"current"')) return '"current"'; + if (t === 'number | "latest"' || t === 'BlockHash | number | "latest"') return '"latest"'; + if (/['"]all['"]|['"]current['"]/.test(t)) return '"current"'; if (t === 'Fr' || t === 'AztecAddress' || t === 'BlockHash') return '"0x1234..."'; if (t === 'EthAddress') return '"0x1234..."'; if (t === 'SlotNumber') return '"100"'; if (t === 'MerkleTreeId') return '1'; - if (t === 'ManaUsageEstimate') return '1'; - if (t.endsWith('[]')) return `[${generateExampleParam(t.slice(0, -2))}]`; + if (t === 'ManaUsageEstimate') return '"target"'; + if (t.endsWith('[]')) return `[${generateExampleParam(t.slice(0, -2), paramName)}]`; if (t === 'Tx') return '{"data":"0x..."}'; if (t === 'TxHash') return '"0x1234..."'; if (t === 'SiloedTag') return '"0x1234..."'; @@ -580,6 +590,11 @@ function generateMethodMarkdown(method: MethodInfo, isAdmin: boolean): string { lines.push(''); } + if (method.jsdoc.remarks) { + lines.push(`**Remarks**: ${method.jsdoc.remarks}`); + lines.push(''); + } + // Parameters if (method.paramTypes.length === 0) { lines.push('**Parameters**: None'); @@ -602,7 +617,9 @@ function generateMethodMarkdown(method: MethodInfo, isAdmin: boolean): string { lines.push(''); // Example - const exampleParams = method.paramTypes.map(t => generateExampleParam(t)).join(','); + const exampleParams = method.paramTypes + .map((t, i) => generateExampleParam(t, method.paramNames[i])) + .join(','); if (isAdmin) { lines.push('**Example (CLI)**:'); 
@@ -790,6 +807,9 @@ function main() { for (const name of nodeMethodNames) { const schema = nodeSchemaInfo.get(name)!; const jsdoc = mergedJSDoc.get(name) || { description: '', params: [], returns: '' }; + if (!mergedJSDoc.has(name)) { + console.warn(`WARNING: node_${name} is missing JSDoc — rendered without description`); + } const paramNames = mergedParamNames.get(name) || []; allMethods.set(`node:${name}`, { @@ -805,6 +825,9 @@ function main() { for (const name of adminMethodNames) { const schema = adminSchemaInfo.get(name)!; const jsdoc = adminInterface.jsdoc.get(name) || { description: '', params: [], returns: '' }; + if (!adminInterface.jsdoc.has(name)) { + console.warn(`WARNING: nodeAdmin_${name} is missing JSDoc — rendered without description`); + } const paramNames = adminInterface.paramNames.get(name) || []; allMethods.set(`nodeAdmin:${name}`, { diff --git a/docs/src/clientModules/docsgpt.js b/docs/src/clientModules/docsgpt.js new file mode 100644 index 000000000000..edfd106d1282 --- /dev/null +++ b/docs/src/clientModules/docsgpt.js @@ -0,0 +1,29 @@ +import ExecutionEnvironment from '@docusaurus/ExecutionEnvironment'; + +if (ExecutionEnvironment.canUseDOM) { + const React = require('react'); + const ReactDOM = require('react-dom/client'); + const AztecDocsWidget = require('@site/src/components/AztecDocsWidget').default; + + const container = document.createElement('div'); + container.id = 'docsgpt-widget'; + document.body.appendChild(container); + + const root = ReactDOM.createRoot(container); + root.render( + React.createElement(AztecDocsWidget, { + apiHost: 'https://aztec.adjacentpossible.dev', + apiKey: '44420ab5-6be3-4b30-af35-559c38bfce6d', + title: 'Ask about Aztec', + heroTitle: 'Aztec Docs Assistant', + heroDescription: + 'Searches Aztec v4.2.0 developer docs, Aztec.nr, aztec.js SDK, protocol circuits, and more.', + theme: 'ink', + accent: 'chartreuse', + buttonStyle: 'symbol', + size: 'roomy', + position: 'br', + motif: true, + }), + ); +} diff 
--git a/docs/src/components/AztecDocsWidget/Hero.jsx b/docs/src/components/AztecDocsWidget/Hero.jsx new file mode 100644 index 000000000000..13b09fda70d4 --- /dev/null +++ b/docs/src/components/AztecDocsWidget/Hero.jsx @@ -0,0 +1,147 @@ +import React from "react"; +import { AztecMark, Icons } from "./Icons"; + +export default function Hero({ + heroTitle, + heroDescription, + suggestedPrompts, + motif, + tokens, + onSuggest, +}) { + const { isInk, accentColor, panelFg, panelFg2, panelSurface, panelSurface2 } = + tokens; + return ( +
+ {motif && ( +
+ +
+ )} +
+
+ + Aztec Docs · AI +
+

+ {heroTitle} +

+

+ {heroDescription} +

+ + {suggestedPrompts?.length > 0 && ( + <> +
+ Try asking — +
+
+ {suggestedPrompts.map((p) => ( + + ))} +
+ + )} +
+
+ ); +} diff --git a/docs/src/components/AztecDocsWidget/Icons.jsx b/docs/src/components/AztecDocsWidget/Icons.jsx new file mode 100644 index 000000000000..7bea8dbba69b --- /dev/null +++ b/docs/src/components/AztecDocsWidget/Icons.jsx @@ -0,0 +1,157 @@ +import React from "react"; + +export function AztecMark({ size = 24, color = "currentColor", stroke = 0 }) { + const c = size / 2; + const rings = [0.96, 0.7, 0.44, 0.2]; + return ( + + ); +} + +export const Icons = { + close: (p) => ( + + + + + ), + send: (p) => ( + + + + + ), + chat: (p) => ( + + + + ), + refresh: (p) => ( + + + + + + + ), + arrowUpRight: (p) => ( + + + + + ), + doc: (p) => ( + + + + + ), + expand: (p) => ( + + + + + + + ), + collapse: (p) => ( + + + + + + + ), +}; diff --git a/docs/src/components/AztecDocsWidget/LauncherButton.jsx b/docs/src/components/AztecDocsWidget/LauncherButton.jsx new file mode 100644 index 000000000000..d8bd4ca47248 --- /dev/null +++ b/docs/src/components/AztecDocsWidget/LauncherButton.jsx @@ -0,0 +1,163 @@ +import React from "react"; +import { Icons } from "./Icons"; + +export default function LauncherButton({ buttonStyle, position, onOpen }) { + const side = position === "br" ? 
{ right: 32 } : { left: 32 }; + const base = { + position: "fixed", + ...side, + bottom: 32, + cursor: "pointer", + border: "none", + zIndex: 2147483000, + transition: + "transform 200ms var(--azw-ease), box-shadow 200ms var(--azw-ease)", + }; + + if (buttonStyle === "label") { + return ( + + ); + } + + if (buttonStyle === "chat") { + return ( + + ); + } + + return ( + + ); +} diff --git a/docs/src/components/AztecDocsWidget/Message.jsx b/docs/src/components/AztecDocsWidget/Message.jsx new file mode 100644 index 000000000000..8dc1de8750ea --- /dev/null +++ b/docs/src/components/AztecDocsWidget/Message.jsx @@ -0,0 +1,159 @@ +import React from "react"; +import ReactMarkdown from "react-markdown"; +import { AztecMark, Icons } from "./Icons"; +import { REMARK_PLUGINS, repairInlineTables } from "./markdown"; + +export function UserBubble({ text, tokens }) { + const { accentColor } = tokens; + return ( +
+
+ {text} +
+
+ ); +} + +export function AssistantBody({ + text, + sources, + thinking, + tokens, + mdComponents, +}) { + const { isInk, accentColor, panelFg, panelFg2 } = tokens; + return ( +
+
+ +
+
+
+ {thinking ? ( + <> + Thinking + + · + · + · + + + ) : ( + "Aztec Assistant" + )} +
+
+ {text ? ( + + {repairInlineTables(text)} + + ) : null} +
+ {sources?.length > 0 && ( +
+
+ Sources +
+
+ {sources.map((s, k) => { + const href = s.source || s.url || s.link || "#"; + const label = + s.title || s.filename || s.source || s.url || "Source"; + return ( + + + {label} + + ); + })} +
+
+ )} +
+
+ ); +} diff --git a/docs/src/components/AztecDocsWidget/Panel.jsx b/docs/src/components/AztecDocsWidget/Panel.jsx new file mode 100644 index 000000000000..2f1264efe7a4 --- /dev/null +++ b/docs/src/components/AztecDocsWidget/Panel.jsx @@ -0,0 +1,346 @@ +import React from "react"; +import { AztecMark, Icons } from "./Icons"; +import Hero from "./Hero"; +import { UserBubble, AssistantBody } from "./Message"; + +export default function Panel({ + title, + heroTitle, + heroDescription, + suggestedPrompts, + motif, + position, + size, + tokens, + mdComponents, + messages, + streaming, + streamText, + streamSources, + input, + onInputChange, + onSend, + onSuggest, + onReset, + onClose, + expanded, + onToggleExpanded, + scrollRef, +}) { + const { + isInk, + accentColor, + panelBg, + panelFg, + panelFg2, + panelBorder, + inputBg, + softBorder, + } = tokens; + + const side = position === "br" ? { right: 32 } : { left: 32 }; + const panelWidth = size === "compact" ? 380 : 420; + const panelHeight = size === "compact" ? 540 : 620; + const showHero = messages.length === 0 && !streaming; + + return ( +
+ {/* Header */} +
+
+
+ +
+
+
+ {title} +
+
+ + Online +
+
+
+
+ {messages.length > 0 && ( + + )} + + +
+
+ + {/* Body */} +
+
+ {showHero && ( + + )} + {messages.length > 0 && ( +
+ {messages.map((m, i) => { + const isLast = i === messages.length - 1; + const text = isLast && streaming ? streamText : m.response; + const sources = isLast && streaming ? streamSources : m.sources; + const isStreamingLast = isLast && streaming; + return ( + + + {(text || isStreamingLast) && ( + + )} + + ); + })} +
+ )} +
+
+ + {/* Composer */} +
+
+
{ + e.preventDefault(); + onSend(); + }} + style={{ + display: "flex", + alignItems: "stretch", + gap: 0, + background: inputBg, + border: `1px solid ${isInk ? "rgba(242,238,225,0.25)" : "var(--azw-ink-tint-1)"}`, + }} + > + onInputChange(e.target.value)} + placeholder="Ask about Aztec —" + disabled={streaming} + style={{ + flex: 1, + padding: "11px 12px", + background: "transparent", + border: "none", + outline: "none", + color: panelFg, + fontFamily: "var(--azw-font-sans)", + fontSize: 13.5, + letterSpacing: "-0.01em", + }} + /> + +
+
+ + AI-generated. Informational only. Not investment, tax, or legal + advice. + + Privacy + + {" · "} + + Terms + + +
+
+
+
+ ); +} diff --git a/docs/src/components/AztecDocsWidget/index.jsx b/docs/src/components/AztecDocsWidget/index.jsx new file mode 100644 index 000000000000..0ab41396af12 --- /dev/null +++ b/docs/src/components/AztecDocsWidget/index.jsx @@ -0,0 +1,155 @@ +import React, { useEffect, useRef, useState } from "react"; +import "./styles.css"; +import { DEFAULT_SUGGESTED, getTheme } from "./theme"; +import { makeMarkdownComponents } from "./markdown"; +import { streamAnswer } from "./streamAnswer"; +import LauncherButton from "./LauncherButton"; +import Panel from "./Panel"; + +export default function AztecDocsWidget({ + apiHost, + apiKey, + title = "Ask about Aztec", + heroTitle = "Aztec Docs Assistant", + heroDescription = "Ask me anything about building on the privacy network — Noir, rollups, nullifiers, testnet setup.", + suggestedPrompts = DEFAULT_SUGGESTED, + theme = "ink", + accent = "chartreuse", + buttonStyle = "symbol", + size = "roomy", + position = "br", + motif = true, +}) { + const [open, setOpen] = useState(false); + const [expanded, setExpanded] = useState(false); + const [input, setInput] = useState(""); + const [messages, setMessages] = useState([]); + const [streaming, setStreaming] = useState(false); + const [streamText, setStreamText] = useState(""); + const [streamSources, setStreamSources] = useState([]); + const [conversationId, setConversationId] = useState(null); + const scrollRef = useRef(null); + const abortRef = useRef(null); + + const tokens = React.useMemo(() => getTheme(theme, accent), [theme, accent]); + const mdComponents = React.useMemo( + () => makeMarkdownComponents(tokens.isInk, tokens.accentColor), + [tokens.isInk, tokens.accentColor], + ); + + // Only auto-scroll to the bottom when a new message is appended (user + // sends a question). Don't follow streaming tokens — the user should + // be free to scroll away while the response is generating. 
+ useEffect(() => { + if (scrollRef.current) + scrollRef.current.scrollTop = scrollRef.current.scrollHeight; + }, [messages.length]); + + useEffect(() => () => abortRef.current?.abort(), []); + + async function handleSend(text) { + const question = (text ?? input).trim(); + if (!question || streaming) return; + setInput(""); + const nextHistory = messages.map((m) => ({ + prompt: m.prompt, + response: m.response, + })); + const nextMessages = [ + ...messages, + { prompt: question, response: "", sources: [] }, + ]; + setMessages(nextMessages); + setStreaming(true); + setStreamText(""); + setStreamSources([]); + + abortRef.current?.abort(); + const controller = new AbortController(); + abortRef.current = controller; + + let acc = ""; + let sources = []; + try { + await streamAnswer({ + apiHost, + apiKey, + question, + history: nextHistory, + conversationId, + signal: controller.signal, + onToken: (chunk) => { + acc += chunk; + setStreamText(acc); + }, + onSource: (src) => { + sources = sources.concat(src); + setStreamSources(sources); + }, + onConversationId: (id) => setConversationId(id), + onDone: () => {}, + }); + } catch (err) { + if (err.name !== "AbortError") { + acc = + acc || "Something went wrong fetching an answer. Please try again."; + } + } + + setMessages((prev) => { + const copy = [...prev]; + copy[copy.length - 1] = { prompt: question, response: acc, sources }; + return copy; + }); + setStreaming(false); + setStreamText(""); + setStreamSources([]); + } + + function handleReset() { + abortRef.current?.abort(); + setMessages([]); + setStreaming(false); + setStreamText(""); + setStreamSources([]); + setConversationId(null); + } + + return ( +
+ {!open && ( + setOpen(true)} + /> + )} + {open && ( + setOpen(false)} + expanded={expanded} + onToggleExpanded={() => setExpanded((v) => !v)} + scrollRef={scrollRef} + /> + )} +
+ ); +} diff --git a/docs/src/components/AztecDocsWidget/markdown.jsx b/docs/src/components/AztecDocsWidget/markdown.jsx new file mode 100644 index 000000000000..bf10efaa125d --- /dev/null +++ b/docs/src/components/AztecDocsWidget/markdown.jsx @@ -0,0 +1,226 @@ +import React from "react"; +import remarkGfm from "remark-gfm"; +import { Highlight, themes as prismThemes } from "prism-react-renderer"; + +export const REMARK_PLUGINS = [remarkGfm]; + +const LANGUAGE_ALIASES = { + noir: "rust", + nr: "rust", +}; + +function CodeBlock({ code, language, isInk, codeBorder }) { + const theme = isInk ? prismThemes.vsDark : prismThemes.github; + const resolved = LANGUAGE_ALIASES[language] || language || "text"; + return ( + + {({ className, style, tokens, getLineProps, getTokenProps }) => ( +
+          {tokens.map((line, i) => {
+            const { key: _lk, ...lineProps } = getLineProps({ line });
+            return (
+              
+ {line.map((token, j) => { + const { key: _tk, ...tokenProps } = getTokenProps({ token }); + return ; + })} +
+ ); + })} +
+ )} +
+ ); +} + +// DocsGPT sometimes returns GFM tables as a single line with no +// newlines between rows. Restore the structural newlines so remark-gfm +// can parse the table. +export function repairInlineTables(md) { + if (!md || !md.includes("|")) return md; + let out = md; + // Put the separator row (|---|---|---|) on its own line. + out = out.replace(/ +(\|(?:\s*:?-+:?\s*\|)+) +/g, "\n$1\n"); + // Split " | |" boundary between rows whose first cell is empty. + out = out.replace(/ \| \|/g, " |\n|"); + return out; +} + +export function makeMarkdownComponents(isInk, accentColor) { + const codeBg = isInk + ? "rgba(212,255,40,0.12)" + : "var(--azw-chartreuse-tint-2)"; + const codeFg = isInk ? "var(--azw-chartreuse)" : "var(--azw-ink)"; + const codeBorder = isInk ? "rgba(212,255,40,0.25)" : "var(--azw-ink-tint-1)"; + const linkColor = isInk ? accentColor : "var(--azw-ink)"; + const dividerColor = isInk + ? "rgba(242,238,225,0.15)" + : "var(--azw-ink-tint-1)"; + const quoteColor = isInk + ? "var(--azw-ink-tint-1)" + : "var(--azw-parchment-shade-1)"; + + const inlineCode = { + fontFamily: "var(--azw-font-mono)", + fontSize: 12, + background: codeBg, + color: codeFg, + padding: "1px 5px", + border: `1px solid ${codeBorder}`, + }; + + return { + p: ({ node, ...props }) => ( +

+ ), + a: ({ node, ...props }) => ( + + ), + ul: ({ node, ordered, ...props }) => ( +