+import type { AztecAddress } from '@aztec/aztec.js/addresses';
+import { NO_WAIT } from '@aztec/aztec.js/contracts';
+import { Fr } from '@aztec/aztec.js/fields';
 import type { ChainMonitorEventMap } from '@aztec/ethereum/test';
 import { CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types';
+import { timesAsync } from '@aztec/foundation/collection';
 import { AbortError } from '@aztec/foundation/error';
 import { sleep } from '@aztec/foundation/sleep';
 import { executeTimeout } from '@aztec/foundation/timer';
+import type { TestContract } from '@aztec/noir-test-contracts.js/Test';
 import { SequencerState } from '@aztec/sequencer-client';
 import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers';

 import { jest } from '@jest/globals';

+import { proveInteraction } from '../test-wallet/utils.js';
 import { EpochsTestContext } from './epochs_test.js';

 jest.setTimeout(1000 * 60 * 10);
@@ -16,28 +22,86 @@ jest.setTimeout(1000 * 60 * 10);
 // all L1 blocks of the previous slot. This happens when an L1 slot is missed (no block produced).
 // The fix relies on getSyncedL2SlotNumber using the latest synced checkpoint slot as a signal,
 // bypassing the stale L1 timestamp when L1 blocks are missing.
-// Regression test for https://github.com/AztecProtocol/aztec-packages/issues/14766
+// Regression test for https://github.com/AztecProtocol/aztec-packages/issues/14766.
+//
+//     ├──────── L2 slot N ────────┤├─────── L2 slot N+1 ───────┤├── L2 slot N+2 ──┤
+//     │                           ││                           ││
+// L1: │ mining → CP_N pub → FREEZE│├══════ paused L1 ══════════┤│RESUME → mining
+//     │             ▲         ▲   ││                           ▲│
+//     │         (1) checkpoint│   ││                       (4)  │
+//     │         in first half │   ││                   eth.mine()
+//     │         of slot N     │   ││
+//     │                     (2) eth.setIntervalMining(0)
+//
+// Cycle@wallClock=N (target=N+1):
+//   checkSync(slot=N) ─→ PROPOSER_CHECK(slot=N) ─→ INITIALIZING_CHECKPOINT(target=N+1)
+//   ─→ ... ─→ PUBLISHING_CHECKPOINT(target=N+1) ✗ blocked on L1 pause until RESUME
+//
+// Cycle@wallClock=N+1 (target=N+2) ← THE BUG-FIX CYCLE:
+//   checkSync(slot=N+1) — requires syncedSlot ≥ N
+//     ✗ without fix: slotFromL1Sync stuck at N-1
+//       (L1 frozen mid-slot N) → STUCK FOREVER
+//     ✓ with fix: slotFromCheckpoint = N (CP_N is on L1)
+//       → checkSync passes
+//   ─→ PROPOSER_CHECK(slot=N+1) ← TEST WAITS
+//   ─→ canProposeAt rollup check ✗ blocks further progress until parent CP_N+1 is on L1
+//      (pipelining override needs hasProposedCheckpoint, which is sourced from L1 and
+//      is false while CP_N+1's tx sits in mempool during the pause).
+//
+// Test signal: state-changed with newState=PROPOSER_CHECK && slot=N+1 (wall-clock).
+// - PROPOSER_CHECK is reached only after `checkSync` returns syncedTo (line ~290 of
+//   sequencer.ts), so observing it for wall-clock slot N+1 directly proves the bug fix:
+//   without the fix, checkSync would block on slot N+1 forever during the L1 pause.
+// - Slot N+1 (wall-clock) is unique to this cycle: the prior cycle ran at wall-clock N.
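+//
+// For orientation only, a hedged sketch (NOT the actual implementation) of the synced-slot
+// computation the fix is assumed to perform, combining the two signals named above; the helper
+// names below are hypothetical:
+//
+//   const slotFromL1Sync = slotAtTimestamp(latestSyncedL1Block.timestamp); // stuck at N-1 while L1 is frozen
+//   const slotFromCheckpoint = latestSyncedCheckpoint?.l2SlotNumber;       // N once CP_N is synced
+//   const syncedSlot = slotFromCheckpoint === undefined ? slotFromL1Sync : max(slotFromL1Sync, slotFromCheckpoint);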
 describe('e2e_epochs/epochs_missed_l1_slot', () => {
   let test: EpochsTestContext;
+  let contract: TestContract;
+  let from: AztecAddress;

   // Use enough L1 slots per L2 slot to have room for pausing mining mid-slot.
   // With 6 L1 slots per L2 slot (L1=8s, L2=48s), we have plenty of time to
   // publish a checkpoint and pause mining without accidentally skipping a slot.
   const L1_SLOTS_PER_L2_SLOT = 6;

+  // Block duration tuned to reliably produce 2+ blocks per checkpoint under pipelining:
+  //   timeAvailableForBlocks = aztecSlotDuration - checkpointInitializationTime - timeReservedAtEnd
+  //                          = 48 - 1 - (1 + 4 + 8) = 34s, which fits ~4 blocks of 8s each.
+  const BLOCK_DURATION_MS = 8_000;
+
+  // Pre-prove this many txs at the start so blocks have content during the test.
+  const TX_COUNT = 12;
+
   beforeEach(async () => {
-    // Note: pipelining is NOT enabled for this test because it deliberately pauses L1 mining
-    // to simulate missed L1 slots, which conflicts with pipelining's assumption that previous
-    // checkpoints land on L1 promptly.
     test = await EpochsTestContext.setup({
       numberOfAccounts: 0,
+      // The 8s blockDurationMs leaves a per-block DA gas budget too small to fit an account
+      // deploy, so use the hardcoded-account fast-path (funded via genesis) even though we
+      // keep the initial sequencer running for the test.
+      useHardcodedAccount: true,
       minTxsPerBlock: 0,
+      maxTxsPerBlock: 1,
+      blockDurationMs: BLOCK_DURATION_MS,
       aztecSlotDurationInL1Slots: L1_SLOTS_PER_L2_SLOT,
       startProverNode: false,
       aztecProofSubmissionEpochs: 1024,
       disableAnvilTestWatcher: true,
       enforceTimeTable: true,
+      enableProposerPipelining: true,
+      inboxLag: 2,
+      // Required for the proposer's own broadcasts to route through the local
+      // proposal handler (the dummy p2p service drops them). Without this, the
+      // archiver's #proposedCheckpoints map stays empty and the pipelining
+      // override path is never taken.
+      mockGossipSubNetwork: true,
+      // With L1=12s on CI, aztecSlotDuration=72s and blockDurationMs=8000ms gives only ~1/9 of
+      // slot mana per block — too small for emit_nullifier's daGas (~196k) under the default
+      // 1.2 allocation. Bump it so the pre-proved txs actually land and step 6's
+      // assertMultipleBlocksPerSlot has data to verify against.
+      perBlockAllocationMultiplier: 8,
     });
+
+    from = test.context.accounts[0];
+    contract = await test.registerTestContract(test.context.wallet);
   });

   afterEach(async () => {
@@ -51,30 +115,44 @@ describe('e2e_epochs/epochs_missed_l1_slot', () => {
     const L1_BLOCK_TIME = test.L1_BLOCK_TIME_IN_S;
     const L2_SLOT_DURATION = test.L2_SLOT_DURATION_IN_S;

-    // Step 1: Wait for a checkpoint that's published NOT in the last L1 slot of its L2 slot.
-    // We need the checkpoint to land early enough that when we pause mining, the archiver's
-    // L1 timestamp is still in the middle of the slot (not at the end).
-    logger.info('Waiting for a checkpoint published early in its L2 slot...');
+    // Pre-prove a batch of txs and send them so blocks have content while building checkpoints.
+    // Done before waiting for the early checkpoint so that multiple blocks per slot is exercised
+    // by the time we pause.
+    logger.info(`Pre-proving ${TX_COUNT} transactions`);
+    const txs = await timesAsync(TX_COUNT, i =>
+      proveInteraction(context.wallet, contract.methods.emit_nullifier(new Fr(i + 1)), { from }),
+    );
+    const txHashes = await Promise.all(txs.map(tx => tx.send({ wait: NO_WAIT })));
+    logger.info(`Sent ${txHashes.length} transactions`);
+
+    // Step 1: Wait for a checkpoint published in the first half of its L2 slot.
+    // We need CP_N's L1 timestamp to be solidly mid-slot so that slotFromL1Sync (computed from
+    // the *next* L1 block's slot) is still N-1 when we pause. If CP_N landed too late in the
+    // slot (e.g. in the last L1 slot of L2 slot N), slotFromL1Sync would already be N and the
+    // bug would not be exercised.
+    logger.info('Waiting for a checkpoint published in the first half of its L2 slot...');
     const checkpointEvent = await executeTimeout(
       signal =>
         new Promise<ChainMonitorEventMap['checkpoint'][0]>((res, rej) => {
           const handleCheckpoint = (...[ev]: ChainMonitorEventMap['checkpoint']) => {
-            // Skip the initial checkpoint (genesis state).
+            // Skip the genesis checkpoint.
             if (ev.checkpointNumber === 0) {
               return;
             }
             const slotStart = getTimestampForSlot(ev.l2SlotNumber, constants);
-            const lastL1SlotStart = slotStart + BigInt(L2_SLOT_DURATION - L1_BLOCK_TIME);
-            if (ev.timestamp < lastL1SlotStart) {
+            // Half-slot cutoff keeps slotFromL1Sync at N-1 with comfortable margin: at the cutoff
+            // the next L1 block lands at slotStart + L2_SLOT_DURATION/2 + L1_BLOCK_TIME, which is
+            // still well within slot N (since L1_BLOCK_TIME < L2_SLOT_DURATION/2).
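+            // Concretely, with the local settings above (L1_BLOCK_TIME=8s, L2_SLOT_DURATION=48s):
+            // cutoff = slotStart + 24s, so the next L1 block arrives by slotStart + 32s,
+            // comfortably before slot N ends at slotStart + 48s.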
+            const cutoff = slotStart + BigInt(Math.floor(L2_SLOT_DURATION / 2));
+            if (ev.timestamp < cutoff) {
               logger.info(
                 `Checkpoint ${ev.checkpointNumber} in slot ${ev.l2SlotNumber} at L1 timestamp ${ev.timestamp}`,
-                { slotStart, lastL1SlotStart },
+                { slotStart, cutoff },
               );
               res(ev);
               monitor.off('checkpoint', handleCheckpoint);
             } else {
               logger.info(
-                `Skipping checkpoint ${ev.checkpointNumber}: published at ${ev.timestamp} (last L1 slot starts at ${lastL1SlotStart})`,
+                `Skipping checkpoint ${ev.checkpointNumber}: published at ${ev.timestamp} (cutoff ${cutoff})`,
               );
             }
           };
@@ -84,20 +162,20 @@ describe('e2e_epochs/epochs_missed_l1_slot', () => {
           };
           monitor.on('checkpoint', handleCheckpoint);
         }),
-      60_000,
+      120_000,
       'Wait for early checkpoint',
     );

     const checkpointSlotNumber = checkpointEvent.l2SlotNumber;
     const nextSlotNumber = SlotNumber(checkpointSlotNumber + 1);
-    const nextSlotTimestamp = Number(getTimestampForSlot(nextSlotNumber, constants));
+    const lastL1SlotStart =
+      getTimestampForSlot(checkpointSlotNumber, constants) + BigInt(L2_SLOT_DURATION - L1_BLOCK_TIME);
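+    // With L1=8s and L2=48s locally this is slotStart(N) + 40s, i.e. the start of the last L1
+    // slot within L2 slot N; the sanity check after the pause asserts the frozen L1 timestamp
+    // stayed below it.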

     logger.info(`Using checkpoint ${checkpointEvent.checkpointNumber} in L2 slot ${checkpointSlotNumber}`, {
       nextSlotNumber,
-      nextSlotTimestamp,
     });

-    // Step 2: Wait briefly for the sequencer to finish its current work cycle, then pause mining.
+    // Step 2: Brief pause so the sequencer settles, then freeze L1 mining.
     await sleep(1500);

     logger.info('Pausing L1 block production (simulating missed L1 slots)...');
@@ -107,19 +185,29 @@ describe('e2e_epochs/epochs_missed_l1_slot', () => {
     const frozenL1Timestamp = await eth.lastBlockTimestamp();
     logger.info(`L1 mining paused at L1 timestamp ${frozenL1Timestamp}`);

-    // Step 3: Wait until the sequencer reaches PUBLISHING_CHECKPOINT during the mining pause.
-    // With the fix: the sequencer sees the checkpoint for slot N, so getSyncedL2SlotNumber
-    // returns N, checkSync passes for slot N+1, and it advances all the way to publishing.
-    // Without the fix: getSyncedL2SlotNumber is stuck at N-1, checkSync fails, sequencer
-    // stays in IDLE/SYNCHRONIZING and never reaches PUBLISHING_CHECKPOINT.
+    // Sanity: the frozen L1 timestamp must be before the last L1 slot of L2 slot N. Otherwise
+    // slotFromL1Sync already advanced to N and the regression isn't being exercised.
+    expect(BigInt(frozenL1Timestamp)).toBeLessThan(lastL1SlotStart);
+
+    // Step 3: During the pause, wait for the sequencer cycle running at wall-clock = N+1
+    // to pass `checkSync(slot=N+1)`. We wait for `state-changed` with
+    // `newState=PROPOSER_CHECK && slot=N+1`: PROPOSER_CHECK is set right after `checkSync`
+    // returns a non-undefined sync result (sequencer.ts line ~290/330), so observing it for
+    // wall-clock slot N+1 directly proves the regression is fixed. We do NOT wait for any
+    // later state because the canProposeAt rollup-contract check fails while CP_N+1's L1 tx
+    // sits in mempool during the pause (pipelining's override depends on
+    // `hasProposedCheckpoint`, which is sourced from L1 and is false in this window).
     const sequencer = context.sequencer!.getSequencer();

-    logger.info('Waiting for sequencer to reach PUBLISHING_CHECKPOINT during mining pause...');
+    logger.info(
+      `Waiting for sequencer to reach PROPOSER_CHECK for wall-clock slot ${nextSlotNumber} during mining pause...`,
+    );
     await executeTimeout(
       signal =>
         new Promise<void>((res, rej) => {
-          const stateListener = ({ newState }: { newState: SequencerState }) => {
-            if (newState === SequencerState.PUBLISHING_CHECKPOINT) {
+          const stateListener = (args: { newState: SequencerState; slot?: SlotNumber }) => {
+            if (args.newState === SequencerState.PROPOSER_CHECK && args.slot === nextSlotNumber) {
               sequencer.off('state-changed', stateListener);
               res();
             }
@@ -130,26 +218,31 @@ describe('e2e_epochs/epochs_missed_l1_slot', () => {
           };
           sequencer.on('state-changed', stateListener);
         }),
-      L2_SLOT_DURATION * 2 * 1000,
-      'Wait for sequencer to reach PUBLISHING_CHECKPOINT',
+      L2_SLOT_DURATION * 3 * 1000,
+      `Wait for sequencer PROPOSER_CHECK at wall-clock slot ${nextSlotNumber}`,
     );

-    logger.info('Sequencer reached PUBLISHING_CHECKPOINT during mining pause');
+    logger.info(
+      `Sequencer reached PROPOSER_CHECK for wall-clock slot ${nextSlotNumber} during mining pause`,
+    );

-    // Step 4: Resume mining so the pending L1 tx lands and the test can clean up.
+    // Step 4: Resume mining so the pending L1 txs land and the test can clean up.
     logger.info('Resuming L1 block production...');
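+    // Pin the next L1 block's timestamp to the current time from the test's date provider so L1
+    // time catches up with the slots skipped during the pause, then restore interval mining at
+    // its normal cadence.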
     const resumeTimestamp = Math.floor(context.dateProvider.now() / 1000);
     await eth.setNextBlockTimestamp(resumeTimestamp);
     await eth.mine();
     await eth.setIntervalMining(L1_BLOCK_TIME);

-    // Step 5: Wait for the next checkpoint to confirm the block was actually published.
+    // Step 5: Wait for the next checkpoint to confirm block production resumed cleanly.
     const finalCheckpoint = CheckpointNumber(checkpointEvent.checkpointNumber + 1);
     logger.info(`Waiting for checkpoint ${finalCheckpoint}...`);
     await test.waitUntilCheckpointNumber(finalCheckpoint, 60);
     await monitor.run();
     logger.info(`Checkpoint ${finalCheckpoint} published in slot ${monitor.l2SlotNumber}`);

     expect(monitor.checkpointNumber).toBeGreaterThanOrEqual(finalCheckpoint);
+
+    // Step 6: Verify multiple blocks per slot was actually exercised.
+    await test.assertMultipleBlocksPerSlot(2);
   });
 });