diff --git a/.gitignore b/.gitignore index 4031cb1d276..ed6ac484b91 100644 --- a/.gitignore +++ b/.gitignore @@ -50,4 +50,8 @@ __pycache__ crytic-export # ignore local asdf config -.tool-versions \ No newline at end of file +.tool-versions + +.context +.kaas-cli.toml +.vscode diff --git a/op-supernode/CLAUDE.md b/op-supernode/CLAUDE.md new file mode 100644 index 00000000000..d4cfa0044e0 --- /dev/null +++ b/op-supernode/CLAUDE.md @@ -0,0 +1,121 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +`op-supernode` is a Go service inside the Optimism monorepo that runs multiple OP Stack chains in a single process by virtualizing `op-node`. Each chain runs as an isolated in-memory "Virtual Node" worker, while L1/beacon clients, RPC, metrics, and data dirs are shared/namespaced across chains. + +Repo-wide conventions from `../CLAUDE.md` apply (default branch `develop`, Just-based build system under `../justfiles/`). + +## Commands + +Build and test from this directory (`op-supernode/`): + +```bash +just op-supernode # build ./bin/op-supernode (ldflags set via justfile) +just test # go test ./... +just coverage # go test -coverprofile=coverage.out, then per-func report +just clean # remove ./bin/op-supernode +``` + +The `Makefile` only re-exports these targets as deprecated shims that delegate to `just` via `../justfiles/deprecated.mk`. Prefer `just` directly. + +Run a single Go test: + +```bash +go test ./supernode/chain_container/... -run TestName -v +go test ./supernode -run TestShutdown -v -count=1 +``` + +The shared `go_build` / `go_test` recipes live in `../justfiles/go.just`. + +## Architecture + +Entry point: `cmd/main.go` → `config/config.go` (`CLIConfig`) → `supernode/supernode.go` (`Supernode` struct). `Supernode` owns the lifecycle of every subsystem listed below and is started/stopped as a single unit. + +Three main concepts to understand before editing: + +### 1. Chain Containers (`supernode/chain_container/`) + +A `ChainContainer` is the per-chain boundary. It wraps a Virtual Node (currently only `op-node`, see `virtual_node/`), an `engine_controller`, invalidation logic (`invalidation.go`), and a `super_authority` that mediates cross-chain authority. Containers expose a stable interface so the rest of `Supernode` never touches rollup-node internals directly. Adding a new capability for "a chain" almost always means extending this package. + +### 2. Shared Resources (`supernode/resources/`) + +Resources are dependencies injected into every Virtual Node so redundant work is eliminated: + +- `shared_clients.go` — single L1 client + L1 beacon client reused by all chains (constructed in `Supernode.Start`). Per-chain `l1` / `l1.beacon` flags are intentionally ignored. +- `rpc_router.go` — namespaced RPC registry. Per-chain RPC methods are exposed under `/` (e.g. `11155420/`, `84532/`). Activities register into the root namespace. +- `metrics_service.go` + `metrics_fan_in.go` — per-chain metrics are fanned into a single Prometheus endpoint; expect this to migrate to label-based dimensions in the future. +- Data directories are namespaced per chain to isolate SafeDB/P2P state. + +### 3. Activities (`supernode/activity/`) + +Activities are modular plugins for concerns that are **not** chain-specific. An activity may be an `RPCActivity` (registers handlers into the root RPC namespace) and/or a `RunnableActivity` (gets a goroutine during runtime). 
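A rough sketch of the two roles (interface and method names here are illustrative only, not the actual definitions in `supernode/activity/`):

```go
package activity

import "context"

// RPCActivity contributes handlers to the shared root RPC namespace
// (per-chain namespaces belong to chain containers, not activities).
// Hypothetical shape — see supernode/activity/ for the real interface.
type RPCActivity interface {
	RegisterRPC(reg RPCRegistry) error
}

// RunnableActivity gets its own goroutine for the lifetime of the Supernode
// and is expected to return once ctx is cancelled.
// Hypothetical shape — see supernode/activity/ for the real interface.
type RunnableActivity interface {
	Run(ctx context.Context) error
}

// RPCRegistry stands in for whatever registration surface the rpc_router
// exposes to activities (placeholder for this sketch).
type RPCRegistry interface {
	Register(method string, handler any) error
}
```

For example, the `supernode/` activity sits on the RPC side of this split: it exposes `supernode_syncStatus` in the root namespace.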
Current activities:

- `heartbeat/` — `heartbeat_check` RPC + liveness log emission.
- `superroot/` — `superroot_atTimestamp` RPC, builds SuperRoots from verified L2 blocks across the dependency set (proofs-oriented).
- `supernode/` — `supernode_syncStatus` aggregate sync across chains.
- `interop/` — interop-related activity.

When adding new cross-chain functionality, prefer a new Activity over touching `Supernode` directly. Chain-specific functionality belongs in `ChainContainer` or a shared `resource`.

## Flags (`flags/`)

`flags.go` defines top-level `op-supernode` flags. `virtual_flags.go` / `virtual_cli.go` upstream every `op-node` flag into the supernode CLI under a namespace:

- `--vn.<chainID>.<flag>` sets a flag for a single chain
- `--vn.all.<flag>` sets a flag for every chain

Several `op-node` flags are intentionally overridden at the supernode level and will **not** take effect per-chain — this is load-bearing, not a bug:

- `l1`, `l1.beacon` (shared client)
- log and metric settings (inherited from top-level)
- `p2p` (listen ports forced to `0`; global disable via `--disable-p2p`)

Environment variable names for these flags still contain `op-node` prefixes due to how upstreaming works — consult `--help` rather than guessing names.

## Dafny Models

`dafny-models/` contains formal models of supernode behavior (see also the `Add Dafny models` commit on `develop`). These are reference specs, not built by `just op-supernode`. Run with `just verify-dafny` (requires a local Dafny install).

File map:

- `Types.dfy` — shared datatypes (BlockID, VerifiedResult, ExecutingMessage, BlockWithLogs, DenyListEntry, `IsParentOf` helper).
- `VerifiedDB.dfy` — the VerifiedDB class; its `Invariants()` predicate covers only no-gaps timestamp structure.
- `Supernode.dfy` — Supernode class skeleton. `applyRewindPlan`, `sameL1Chain`, and `resolveFrontierVerificationView` are trivial no-op stubs (returning `false` / `None`); the `{:axiom}`-tagged postconditions have been removed. Real implementations are tracked in `invariants/SPEC.md` §5 items 1′–3′ (Step 2c).
- `SupernodeState.dfy` — abstract `SupernodeState` datatype + predicates I1–I11 + `AllInvariants` + the `I12_InitialStateSatisfiesInvariants` lemma. This is the canonical Dafny form of the invariants in `invariants/SPEC.md`.
- `SupernodeView.dfy` — bridge layer. Contains `BuildSupernodeState`, `ComponentsSatisfyInvariants`, abstract `VerifiedMapToSortedSeq` with algebraic lemmas, and `InitialComponentsSatisfyInvariants`. Method contracts in Step 2c should reference `ComponentsSatisfyInvariants` rather than calling `AllInvariants` inline so the bridge point stays visible.

`invariants/SPEC.md` is the single source of truth tying the Dafny predicates, Go runtime checks, and fuzz/reference-model tests together. Every predicate, Go check, and test failure should cite the stable ID (I1–I12, A1–A5, T1–T6) from that file. Consult `safety-labels.md` for the orthogonal safety-label vocabulary used by the chain container.

## Invariant verification pipeline

Five layers enforce the same `invariants/SPEC.md` catalog:

1. **Dafny proofs** (`dafny-models/`) — formal predicates and lemmas. Run with `just verify-dafny`. Current status: 66 verified, 0 errors.
2. **Go invariant package** (`invariants/`) — `Snapshot` type mirroring Dafny's `SupernodeState`, plus `CheckI*` predicates returning structured `*Error` values carrying stable SPEC IDs. Run with `just verify-invariants-go`.
3.
**Environmental oracle** (`invariants/env.go`) — `Env` interface + `CheckAllWithEnv` for I1/I3/I8/I9 (the hybrid clauses that need L1/L2 lookups). Implementations: `fakeEnv` in tests; production adapter pending in Step 3c. +4. **Reference model + fuzzer** (`invariants/reference_model.go`, `fuzz_test.go`) — pure Go implementation of T3/T4/T5 transitions, driven by a native Go fuzzer (`FuzzReferenceModel`). Runs with `just fuzz-invariants` (default 30s, `FUZZTIME=5m` to extend). Fuzz enforces `CheckAll` after every transition. +5. **Runtime assertions** (`invariants/runtime_on.go` / `runtime_off.go`) — behind the `supernode_invariants` build tag. `invariants.Assert(snapshot)` and `AssertWith(builder)` are zero-cost no-ops in release builds and full checks under the tag. Run tagged tests with `just verify-invariants-go-tagged`. + +`just verify-invariants` runs all five layers as a single CI gate: +``` +verify-dafny → verify-invariants-go → verify-invariants-go-tagged → fuzz-invariants +``` + +Trace replay: `invariants/testdata/traces/*.json` is the regression corpus. Add a trace via `invariants.SaveTrace`; `TestTraceCorpus` replays every file. Regenerate canonical traces with `just regen-traces`. Do NOT hand-edit trace JSON — use `TestRegenCanonicalTraces` so the reference-model constructors produce the canonical byte layout. + +**Never introduce a new invariant without giving it a stable ID in `invariants/SPEC.md` first.** All five layers look up behavior by ID; divergence between Dafny / Go / SPEC is the single failure mode this pipeline is designed to catch. + +## Invariants package file map + +- `SPEC.md` — single source of truth for invariant IDs and transition semantics. +- `snapshot.go` — `Snapshot`, `BlockWithLogs`, `BlockRef`, `ExecutingMessage`, `VerifiedEntry`, `DenyListEntry`. Mirrors Dafny `SupernodeState`. +- `invariants.go` — `CheckAll` + structural `CheckI2/I4/I5/I6/I7/I8/I10/I11`. Every error carries a stable SPEC ID. +- `env.go` — `Env` interface and hybrid `CheckI1/I3/I8-env/I9` predicates; `CheckAllWithEnv`. +- `reference_model.go` — pure T3/T4/T5 `Apply*` functions + `CloneSnapshot` + `NewInitialSnapshot`. +- `bridge.go` — `StateView` interface and `SnapshotFrom(view)` lifter; `StaticStateView` fake for tests. +- `runtime_on.go` / `runtime_off.go` — build-tagged assertion hook. +- `trace.go` — JSON trace format and replay primitives. +- `testdata/traces/*.json` — regression corpus. + +Integration with the actual `supernode.Supernode` (filling in `StateView` against the real chain-container / interop-activity state) is Step 3c. See `SPEC.md` §5 items 13–15 for the blocking gaps (missing `EnumerateSealed` / `ForEach` APIs on `interop.LogsDB` and `chain_container.DenyList`). diff --git a/op-supernode/dafny-models/Supernode.dfy b/op-supernode/dafny-models/Supernode.dfy index 54bd9e249c9..16e7aa39375 100644 --- a/op-supernode/dafny-models/Supernode.dfy +++ b/op-supernode/dafny-models/Supernode.dfy @@ -101,10 +101,30 @@ class Supernode { } } + // applyRewindPlan: stub body (always reports failure). + // + // SPEC.md T3 specifies the real rewind behavior (prune VerifiedDB tail, + // prune DenyList entries with DecisionTimestamp >= target, prune LogsDB + // tails for diverging chains). Until the class tracks LogsDB / DenyList + // (Step 2c), this stub is a no-op that returns false, causing the + // caller (applyPendingTransition) to preserve the pending Rewind and + // retry on the next round. This is semantically consistent with the + // "partial progress / retry" contract. 
+ // + // Removing the {:axiom} tags: both ensures clauses are now proven by + // the trivial body. The method does not reassign `this.verifiedDB`, so + // the field reference equality holds; the VerifiedDB object is not + // mutated, so Dafny frame analysis preserves `verifiedDB.Invariants()` + // given the new `requires` precondition. method applyRewindPlan(plan : RewindPlan) returns (success : bool) + requires verifiedDB.Invariants() modifies this, verifiedDB - ensures{:axiom} verifiedDB == old(verifiedDB) - ensures{:axiom} verifiedDB.Invariants() + ensures verifiedDB == old(verifiedDB) + ensures verifiedDB.Invariants() + ensures success == false + { + success := false; + } method invalidateBlock(chainID : ChainID, blockID : BlockID, decisionTimestamp : uint64) returns (success : bool) @@ -185,8 +205,18 @@ class Supernode { } } + // sameL1Chain: stub body (always returns None). + // + // The real implementation queries the L1 client for ancestry between + // `heads` and `lastVerified`. Deferred to the L1 client integration. + // The stub returns None, which the caller (observeRound) converts to + // returning None itself. The previous {:axiom} ensures clause becomes + // vacuously true because `same == None`. method sameL1Chain(heads : seq, lastVerified : Option) returns (same : Option) - ensures{:axiom} (same != None && same.value == ChainConsistencyResult.InconsistentVerifiedL1) ==> verifiedDB.LastTimestamp() != None + ensures (same != None && same.value == ChainConsistencyResult.InconsistentVerifiedL1) ==> verifiedDB.LastTimestamp() != None + { + same := None; + } method checkChainsReady(ts : uint64) returns (ready : Option) @@ -206,9 +236,19 @@ class Supernode { return Some(invalidMsgs + cycleMsgs); } + // resolveFrontierVerificationView: stub body (always returns None). + // + // The real implementation queries each VirtualNode for the frontier + // block references and their executing messages. Deferred to the + // VirtualNode integration. The stub returns None, which the caller + // (`verify`) converts to returning None. Both previous {:axiom} ensures + // clauses become vacuously true because `view == None`. method resolveFrontierVerificationView(blocksAtTS : map) returns (view : Option>) - ensures{:axiom} view != None ==> forall k :: k in blocksAtTS.Keys <==> k in view.value.Keys - ensures{:axiom} view != None ==> forall k :: k in blocksAtTS.Keys ==> view.value[k].Ref.ID == blocksAtTS[k] + ensures view != None ==> forall k :: k in blocksAtTS.Keys <==> k in view.value.Keys + ensures view != None ==> forall k :: k in blocksAtTS.Keys ==> view.value[k].Ref.ID == blocksAtTS[k] + { + view := None; + } method verifyInteropMessages(ts : uint64, frontierView : map) returns (invalidated : map) { diff --git a/op-supernode/dafny-models/SupernodeState.dfy b/op-supernode/dafny-models/SupernodeState.dfy new file mode 100644 index 00000000000..05d861f4dae --- /dev/null +++ b/op-supernode/dafny-models/SupernodeState.dfy @@ -0,0 +1,479 @@ +include "Types.dfy" + +// SupernodeState is the abstract snapshot referenced by invariants/SPEC.md. +// It is the Dafny counterpart of the Go `invariants.Snapshot` type that +// Step 3 of the verification strategy will introduce. Keep the two in lockstep. +// +// The state is considered AFTER cross-validation for `LastVerifiedTimestamp()` +// completes and BEFORE cross-validation for the next timestamp begins. 
+// +// Naming matches SPEC.md §0: +// LogsDB[j] = L_j = [(B^j_0, l^j_0), ..., (B^j_{n_j}, l^j_{n_j})] +// Verified = [(t_0, C_{t_0}, C^1_{t_0}, ..., C^k_{t_0}), ..., (t, C_t, C^1_t, ..., C^k_t)] +// DenyList[j] = D_j +// Chains = {1, ..., k} +datatype SupernodeState = SupernodeState( + ActivationTS : uint64, + Chains : set, + LogsDB : map>, + Verified : seq, + DenyList : map> +) + +// ----------------------------------------------------------------------------- +// Helpers +// ----------------------------------------------------------------------------- + +// LogsDBLookup finds a block in a LogsDB slice by its ID. +// Used by I6 (to walk from the verified head to its recorded parent hash). +function LogsDBLookup(logs : seq, id : BlockID) : Option +{ + if |logs| == 0 then None + else if logs[0].Ref.ID == id then Some(logs[0]) + else LogsDBLookup(logs[1..], id) +} + +// LastVerifiedTimestamp returns the t from SPEC.md §0, or None if the VerifiedDB +// is empty. +function LastVerifiedTimestamp(s : SupernodeState) : Option +{ + if |s.Verified| == 0 then None + else Some(s.Verified[|s.Verified| - 1].Timestamp) +} + +// ----------------------------------------------------------------------------- +// I1 — Logs match blocks. +// +// SPEC.md I1 / overview.md line 64: +// l^j_i is the list of logs for block B^j_i. +// +// This is a SEMANTIC property about the relationship between an on-chain block +// and the logs emitted when executing it. It cannot be verified from the +// state alone — the Go layer enforces it at ingress (trusted VirtualNode). +// In Dafny we model it as an opaque predicate that the ingress layer must +// discharge when it inserts a (block, execMsgs) pair into LogsDB. +// ----------------------------------------------------------------------------- +predicate {:opaque} LogsBelongToBlock(ref : BlockRef, execMsgs : seq) + +predicate I1_LogsMatchBlocks(s : SupernodeState) +{ + forall j {:trigger s.LogsDB[j]} :: + j in s.LogsDB ==> + forall i {:trigger s.LogsDB[j][i]} :: + 0 <= i < |s.LogsDB[j]| ==> + LogsBelongToBlock(s.LogsDB[j][i].Ref, s.LogsDB[j][i].ExecMsgs) +} + +// ----------------------------------------------------------------------------- +// I2 — LogsDB linearity. +// +// SPEC.md I2 / overview.md line 65: +// B^j_i is the parent block of B^j_{i+1}. +// ----------------------------------------------------------------------------- +predicate I2_LogsDBLinear(s : SupernodeState) +{ + forall j :: + j in s.LogsDB ==> + forall i :: + 0 <= i < |s.LogsDB[j]| - 1 ==> + IsParentOf(s.LogsDB[j][i].Ref, s.LogsDB[j][i+1].Ref) +} + +// ----------------------------------------------------------------------------- +// I3 — Cross-validity of every LogsDB block. +// +// SPEC.md I3 / overview.md line 66: +// Every B^j_i is cross-valid given the cross-validity history for timestamps +// <= i. All executing messages in l^j_i are valid and are not part of a +// cycle. +// +// The check has two parts: +// (a) Every ExecutingMessage references an initiating message that is +// recorded in some other chain's LogsDB at the indicated (BlockNum, +// LogIdx, Timestamp). +// (b) The executing-message graph is acyclic. +// +// Part (a) is state-local and decidable. Part (b) is the transitive closure of +// (a) and is expensive; we state it as an abstract predicate here and delegate +// the acyclicity decision to the reference model / fuzzer. 
+// ----------------------------------------------------------------------------- + +// InitiatingMessageExists: the initiating message referenced by `m` is recorded +// in `s.LogsDB[m.ChainID]` at block number `m.BlockNum`, position `m.LogIdx`, +// with matching timestamp. This is the "local validity" half of I3. +predicate InitiatingMessageExists(s : SupernodeState, m : ExecutingMessage) +{ + m.ChainID in s.LogsDB && + exists i :: + 0 <= i < |s.LogsDB[m.ChainID]| && + s.LogsDB[m.ChainID][i].Ref.ID.Number == m.BlockNum && + s.LogsDB[m.ChainID][i].Ref.Time == m.Timestamp +} + +// ExecMsgGraphAcyclic: there is no cycle in the executing-message dependency +// graph. Modeled abstractly; the reference model enumerates the graph and the +// fuzzer adversarially searches for cycles. +predicate {:opaque} ExecMsgGraphAcyclic(s : SupernodeState) + +predicate I3_LogsDBCrossValid(s : SupernodeState) +{ + (forall j :: + j in s.LogsDB ==> + forall i :: + 0 <= i < |s.LogsDB[j]| ==> + forall k :: + 0 <= k < |s.LogsDB[j][i].ExecMsgs| ==> + InitiatingMessageExists(s, s.LogsDB[j][i].ExecMsgs[k])) + && ExecMsgGraphAcyclic(s) +} + +// ----------------------------------------------------------------------------- +// I4 — VerifiedDB anchor. +// +// SPEC.md I4 / overview.md line 67: +// C^j_{t_0} = B^j_0 for all j. +// ----------------------------------------------------------------------------- +predicate I4_VerifiedAnchor(s : SupernodeState) +{ + |s.Verified| > 0 ==> + var first := s.Verified[0]; + first.Timestamp == s.ActivationTS && + forall j :: + j in s.Chains ==> + j in s.LogsDB && + |s.LogsDB[j]| > 0 && + j in first.L2Heads && + first.L2Heads[j] == s.LogsDB[j][0].Ref.ID +} + +// ----------------------------------------------------------------------------- +// I5 — VerifiedDB head equals LogsDB tail. +// +// SPEC.md I5 / overview.md line 68: +// C^j_t = B^j_{n_j} for all j. +// ----------------------------------------------------------------------------- +predicate I5_HeadMatchesTail(s : SupernodeState) +{ + |s.Verified| > 0 ==> + var last := s.Verified[|s.Verified| - 1]; + forall j :: + j in s.Chains ==> + j in s.LogsDB && + |s.LogsDB[j]| > 0 && + j in last.L2Heads && + last.L2Heads[j] == s.LogsDB[j][|s.LogsDB[j]| - 1].Ref.ID +} + +// ----------------------------------------------------------------------------- +// I6 — Monotone L2 heads across VerifiedDB entries. +// +// SPEC.md I6 / overview.md line 69: +// C^j_i is either equal to C^j_{i+1} or its parent, for all i and j. +// +// We verify this by looking up C^j_{i+1} in LogsDB[j] and checking either the +// "equal" case or that the looked-up block's ParentHash points to C^j_i.Hash +// and the number differs by one. +// ----------------------------------------------------------------------------- +predicate I6_MonotoneL2Heads(s : SupernodeState) +{ + forall i :: + 0 <= i < |s.Verified| - 1 ==> + forall j :: + j in s.Chains ==> + j in s.Verified[i].L2Heads && + j in s.Verified[i+1].L2Heads && + j in s.LogsDB && + (var prev := s.Verified[i].L2Heads[j]; + var nxt := s.Verified[i+1].L2Heads[j]; + prev == nxt || + (var nxtBlock := LogsDBLookup(s.LogsDB[j], nxt); + nxtBlock.Some? && + nxtBlock.value.Ref.ParentHash == prev.Hash && + nxtBlock.value.Ref.ID.Number == prev.Number + 1)) +} + +// ----------------------------------------------------------------------------- +// I7 — C^j_i is the highest block with timestamp <= i (LogsDB-internal half). +// +// SPEC.md I7 / overview.md line 70: +// C^j_i is the highest block on its chain with timestamp <= i. 
All children +// of C^j_i have timestamp > i. +// +// The "all children" clause requires live L2 inspection and is ENVIRONMENTAL +// (see A3 in SPEC.md). Here we verify the LogsDB-internal half: C^j_i is in +// LogsDB[j], has Time <= i, and either is the last entry or the next entry has +// Time > i. +// ----------------------------------------------------------------------------- +predicate I7_HighestBlockLeqTS(s : SupernodeState) +{ + forall i :: + 0 <= i < |s.Verified| ==> + var ts := s.Verified[i].Timestamp; + forall j :: + j in s.Chains ==> + j in s.Verified[i].L2Heads && + j in s.LogsDB && + (var head := s.Verified[i].L2Heads[j]; + var found := LogsDBLookup(s.LogsDB[j], head); + found.Some? && + found.value.Ref.Time <= ts && + // Either `head` is the tail, or the next LogsDB entry's + // timestamp is strictly greater than `ts`. + (forall k :: + 0 <= k < |s.LogsDB[j]| - 1 && + s.LogsDB[j][k].Ref.ID == head ==> + s.LogsDB[j][k+1].Ref.Time > ts)) +} + +// ----------------------------------------------------------------------------- +// I8 — Verified L1 heads form a linear chain (number-monotone half). +// +// SPEC.md I8 / overview.md line 71: +// C_{t_0}, ..., C_t are all part of the same linear chain (there may be +// missing L1 blocks in between). +// +// Ancestry cannot be checked from the snapshot alone — see A2 / SPEC.md §4. +// We verify the necessary condition: L1 inclusion numbers are non-decreasing +// across consecutive verified entries. +// ----------------------------------------------------------------------------- +predicate I8_VerifiedL1Monotone(s : SupernodeState) +{ + forall i :: + 0 <= i < |s.Verified| - 1 ==> + s.Verified[i].L1Inclusion.Number <= s.Verified[i+1].L1Inclusion.Number +} + +// ----------------------------------------------------------------------------- +// I9 — C_i is the minimal L1 block covering all L2 heads at i. +// +// SPEC.md I9 / overview.md line 72: +// C_i is the earliest L1 block on its linear chain where all of +// C^1_i, ..., C^k_i are available. +// +// This requires deriveL1(C^j_i), which is not in the snapshot. We model it as +// an opaque function supplied by the environment. The Go layer records +// deriveL1 at commit time and the predicate is checked there. +// ----------------------------------------------------------------------------- +function {:axiom} DeriveL1(block : BlockID) : BlockID + +predicate I9_MinimalL1Cover(s : SupernodeState) +{ + forall i :: + 0 <= i < |s.Verified| ==> + forall j :: + j in s.Chains ==> + j in s.Verified[i].L2Heads && + // The verified L1 head is not below the L1 from which each + // L2 head was derived. + DeriveL1(s.Verified[i].L2Heads[j]).Number + <= s.Verified[i].L1Inclusion.Number && + // And some L2 head's deriveL1 meets the L1 inclusion exactly + // (minimality — C_i is max over j of deriveL1(C^j_i)). + (exists j' :: + j' in s.Chains && + j' in s.Verified[i].L2Heads && + DeriveL1(s.Verified[i].L2Heads[j']).Number + == s.Verified[i].L1Inclusion.Number) +} + +// ----------------------------------------------------------------------------- +// I10 — LogsDB disjoint from DenyList. +// +// SPEC.md I10 / overview.md line 73: +// B^j_i is not in D_j for any j and i. 
+// ----------------------------------------------------------------------------- +predicate I10_LogsDBDisjointFromDenyList(s : SupernodeState) +{ + forall j :: + j in s.LogsDB && j in s.DenyList ==> + forall i :: + 0 <= i < |s.LogsDB[j]| ==> + forall entry :: + entry in s.DenyList[j] ==> + entry.Block != s.LogsDB[j][i].Ref.ID +} + +// ----------------------------------------------------------------------------- +// I11 — DenyList entries bounded by t + 1. +// +// SPEC.md I11 / overview.md line 74: +// For every block B in D_j, the timestamp of B is <= t + 1. The DenyList CAN +// contain invalidated blocks for the next timestamp t + 1 from previous +// unsuccessful attempts. +// +// We use the DecisionTimestamp recorded on the DenyListEntry rather than the +// block's own timestamp: SPEC.md §5 open question #7 requires this to be +// explicit. +// ----------------------------------------------------------------------------- +predicate I11_DenyListBounded(s : SupernodeState) +{ + var last := LastVerifiedTimestamp(s); + forall j :: + j in s.DenyList ==> + forall entry :: + entry in s.DenyList[j] ==> + // `as int` promotes to mathematical integers so the + // `+ 1` does not require an overflow side-condition. + match last { + case None => + (entry.DecisionTimestamp as int) + <= (s.ActivationTS as int) + 1 + case Some(t) => + (entry.DecisionTimestamp as int) <= (t as int) + 1 + } +} + +// ----------------------------------------------------------------------------- +// I12 — Initial state trivially satisfies I1..I11. +// +// SPEC.md I12 / overview.md line 76: +// At the initial state (empty LogsDB, empty DenyList, empty VerifiedDB), all +// of the above invariants hold by default. +// +// We express this as a lemma rather than a predicate: given the initial-state +// predicate, AllInvariants follows. The proof obligation is trivial because +// every universal quantifier over sequences of length zero is vacuously true. +// ----------------------------------------------------------------------------- +predicate IsInitialState(s : SupernodeState) +{ + s.Verified == [] && + // Domain equality: LogsDB and DenyList are keyed exactly by Chains. This + // prevents orphan entries for chains outside `s.Chains` that would + // otherwise make I2 / I10 / I11 unprovable in the initial state (Dafny + // has no way to rule out non-empty sequences under untracked chain IDs). + s.LogsDB.Keys == s.Chains && + s.DenyList.Keys == s.Chains && + (forall j :: j in s.Chains ==> s.LogsDB[j] == []) && + (forall j :: j in s.Chains ==> s.DenyList[j] == {}) +} + +// I12 is proven automatically as the lemma below. +lemma I12_InitialStateSatisfiesInvariants(s : SupernodeState) + requires IsInitialState(s) + ensures I2_LogsDBLinear(s) + ensures I4_VerifiedAnchor(s) + ensures I5_HeadMatchesTail(s) + ensures I6_MonotoneL2Heads(s) + ensures I7_HighestBlockLeqTS(s) + ensures I8_VerifiedL1Monotone(s) + ensures I10_LogsDBDisjointFromDenyList(s) + ensures I11_DenyListBounded(s) +{ + // Vacuous — all quantifiers range over empty sequences / sets. + // I1, I3, I9 depend on opaque predicates / functions and are discharged + // at the proof boundary, not here. +} + +// ----------------------------------------------------------------------------- +// AllInvariants — the single predicate every method should preserve. +// +// This is the Dafny counterpart of `invariants.CheckAll(Snapshot)` in Go. 
+// Methods in Supernode.dfy / VerifiedDB.dfy that mutate state should grow +// `requires AllInvariants(oldState)` and `ensures AllInvariants(newState)` +// clauses referencing this predicate. See SPEC.md §5 open questions. +// ----------------------------------------------------------------------------- +predicate AllInvariants(s : SupernodeState) +{ + I1_LogsMatchBlocks(s) && + I2_LogsDBLinear(s) && + I3_LogsDBCrossValid(s) && + I4_VerifiedAnchor(s) && + I5_HeadMatchesTail(s) && + I6_MonotoneL2Heads(s) && + I7_HighestBlockLeqTS(s) && + I8_VerifiedL1Monotone(s) && + I9_MinimalL1Cover(s) && + I10_LogsDBDisjointFromDenyList(s) && + I11_DenyListBounded(s) +} + +// ----------------------------------------------------------------------------- +// Transition specification stubs (SPEC.md §3). +// +// These express T1..T5 as relations between a pre-state and a post-state. +// They are referenced by the reference model and by the Supernode.dfy +// integration PR that will follow. Kept abstract here so that this file is +// self-contained and additive. +// ----------------------------------------------------------------------------- + +// T1 — chain not caught up: no state update. +predicate T1_NoUpdate(pre : SupernodeState, post : SupernodeState) +{ + pre == post +} + +// T2 — L1 inconsistency among L2 derivations: no state update. +predicate T2_NoUpdate(pre : SupernodeState, post : SupernodeState) +{ + pre == post +} + +// T3 — rollback when C_t has been reorged out. +// Prune VerifiedDB by one entry; prune DenyList entries with +// DecisionTimestamp >= t; prune LogsDB tails where the rewound head differs. +predicate T3_Rollback(pre : SupernodeState, post : SupernodeState, t : uint64) +{ + |pre.Verified| > 0 && + pre.Verified[|pre.Verified| - 1].Timestamp == t && + post.Verified == pre.Verified[..|pre.Verified| - 1] && + post.ActivationTS == pre.ActivationTS && + post.Chains == pre.Chains && + // DenyList pruned of entries at or after t. + (forall j :: + j in pre.DenyList ==> + j in post.DenyList && + post.DenyList[j] == + (set e | e in pre.DenyList[j] && e.DecisionTimestamp < t)) + // LogsDB tail-prune condition is not fully captured here (requires + // knowing C^j_{t-1}); it is discharged by the reference model. +} + +// T4 — invalidation. +// VerifiedDB unchanged; invalid heads added to DenyList with +// DecisionTimestamp = t + 1; LogsDB unchanged. +predicate T4_Invalidate( + pre : SupernodeState, + post : SupernodeState, + t : uint64, + invalidHeads : map) + // Guard the `t + 1` computation at the contract level so DenyListEntry + // construction below is well-typed in uint64 space. + requires (t as int) + 1 <= MAX_UINT64 +{ + post.Verified == pre.Verified && + post.LogsDB == pre.LogsDB && + post.ActivationTS == pre.ActivationTS && + post.Chains == pre.Chains && + (forall j :: + j in pre.DenyList ==> + j in post.DenyList && + (j in invalidHeads ==> + post.DenyList[j] == + pre.DenyList[j] + {DenyListEntry(invalidHeads[j], t + 1)}) && + (j !in invalidHeads ==> + post.DenyList[j] == pre.DenyList[j])) +} + +// T5 — advance. +// VerifiedDB extended with (t+1, C_{t+1}, heads); LogsDB extended for chains +// whose B_j is a new block. +predicate T5_Advance( + pre : SupernodeState, + post : SupernodeState, + newEntry : VerifiedResult) +{ + // newEntry timestamp is one more than pre.Verified last, or ActivationTS. 
+ (|pre.Verified| == 0 ==> newEntry.Timestamp == pre.ActivationTS) && + (|pre.Verified| > 0 ==> + (newEntry.Timestamp as int) == + (pre.Verified[|pre.Verified| - 1].Timestamp as int) + 1) && + post.Verified == pre.Verified + [newEntry] && + post.DenyList == pre.DenyList && + post.ActivationTS == pre.ActivationTS && + post.Chains == pre.Chains + // LogsDB extension (either append or unchanged per chain) discharged by + // the reference model. +} + +// T6 — totality: every legal transition matches exactly one of T1..T5. +// Stated as a lemma over the reference model; not formalized at this layer. diff --git a/op-supernode/dafny-models/SupernodeView.dfy b/op-supernode/dafny-models/SupernodeView.dfy new file mode 100644 index 00000000000..5a34e934d15 --- /dev/null +++ b/op-supernode/dafny-models/SupernodeView.dfy @@ -0,0 +1,178 @@ +include "SupernodeState.dfy" + +// ----------------------------------------------------------------------------- +// SupernodeView — bridge between heap-based state (Supernode / VerifiedDB +// classes) and the pure SupernodeState datatype used by AllInvariants. +// +// This file is intentionally independent of the Supernode class so that it +// composes with Step 2c (which will add LogsDB / DenyList fields to the class +// and a concrete sorted-sequence extraction for VerifiedDB.verified). +// +// Conventions: +// - SPEC IDs in comments (I1..I11, A1..A5, T1..T6) refer to +// op-supernode/invariants/SPEC.md, which is the single source of truth. +// - `verifiedSeq` is the sorted (ascending Timestamp) extraction of +// VerifiedDB.verified. Abstract here; concrete extraction in Step 2c. +// ----------------------------------------------------------------------------- + +// BuildSupernodeState lifts independently-tracked components into the pure +// datatype consumed by AllInvariants. +function BuildSupernodeState( + activationTS : uint64, + chains : set, + verifiedSeq : seq, + logsDB : map>, + denyList : map> +) : SupernodeState +{ + SupernodeState(activationTS, chains, logsDB, verifiedSeq, denyList) +} + +// ComponentsSatisfyInvariants is the convenience surface that +// Supernode / VerifiedDB method contracts should reference in Step 2c. +// Using this (rather than inlining AllInvariants(BuildSupernodeState(...))) +// keeps the bridge point syntactically visible in every contract. +predicate ComponentsSatisfyInvariants( + activationTS : uint64, + chains : set, + verifiedSeq : seq, + logsDB : map>, + denyList : map> +) +{ + AllInvariants(BuildSupernodeState( + activationTS, chains, verifiedSeq, logsDB, denyList)) +} + +// ----------------------------------------------------------------------------- +// VerifiedDB extraction +// +// VerifiedDB stores entries in `verified : map` with +// no intrinsic order. SPEC §0 requires a sorted sequence +// [(t_0, ...), ..., (t, ...)] for I4, I5, I6, I7, I8. +// +// `VerifiedMapToSortedSeq` performs that extraction. It is declared abstract +// here (body-less) and will be given a concrete recursive implementation in +// Step 2c once the auxiliary `MinKey` helper is written. The abstract +// declaration is sufficient for contract-level use because: +// +// 1. Every use in SPEC-level predicates is read-only — the actual +// enumeration strategy does not matter. +// 2. The lemmas below record the key algebraic properties that the +// concrete implementation must satisfy. 
+// ----------------------------------------------------------------------------- +function VerifiedMapToSortedSeq(m : map) : seq + +lemma {:axiom} VerifiedMapToSortedSeqLength(m : map) + ensures |VerifiedMapToSortedSeq(m)| == |m.Keys| + +// Forward direction: every key in the map appears in the extracted seq, +// and the value at that index equals m[k]. Dereferencing m[k] is guarded by +// `k in m.Keys` in the hypothesis. +lemma {:axiom} VerifiedMapToSortedSeqContainsForward( + m : map, k : uint64) + requires k in m.Keys + ensures + exists i :: + 0 <= i < |VerifiedMapToSortedSeq(m)| && + VerifiedMapToSortedSeq(m)[i].Timestamp == k && + VerifiedMapToSortedSeq(m)[i] == m[k] + +// Backward direction: every timestamp appearing in the extracted seq was a +// key of the original map. Stated without dereferencing m[k]. +lemma {:axiom} VerifiedMapToSortedSeqContainsBackward( + m : map, k : uint64) + requires + exists i :: + 0 <= i < |VerifiedMapToSortedSeq(m)| && + VerifiedMapToSortedSeq(m)[i].Timestamp == k + ensures k in m.Keys + +lemma {:axiom} VerifiedMapToSortedSeqAscending(m : map) + ensures forall i :: + 0 <= i < |VerifiedMapToSortedSeq(m)| - 1 ==> + VerifiedMapToSortedSeq(m)[i].Timestamp + < VerifiedMapToSortedSeq(m)[i+1].Timestamp + +// ----------------------------------------------------------------------------- +// Weak-to-strong linkage +// +// VerifiedDB.Invariants() (as currently defined in VerifiedDB.dfy) captures +// only gap-free timestamp structure: initialized iff lastTimestamp is Some, +// lastTimestamp is the maximum key, and the key set has no gaps below it. +// +// This is WEAKER than what AllInvariants requires. In particular it says +// nothing about: +// * L2 head monotonicity (I6) +// * L1 inclusion ancestry or number monotonicity (I8) +// * LogsDB / DenyList (I1, I2, I5, I10, I11) +// +// The lemma below records the ONE property that VerifiedDB.Invariants() does +// buy us: under its hypothesis, consecutive entries in the sorted sequence +// differ in Timestamp by exactly 1. This is a NECESSARY precondition to I8 +// but not equivalent. +// ----------------------------------------------------------------------------- +predicate VerifiedSeqStrictlyIncreasingByOne(verifiedSeq : seq) +{ + forall i :: + 0 <= i < |verifiedSeq| - 1 ==> + (verifiedSeq[i].Timestamp as int) + 1 + == (verifiedSeq[i+1].Timestamp as int) +} + +// NoGapsUnder mirrors VerifiedDB.NoGaps without coupling this file to the +// VerifiedDB class. The predicate says: there are no holes in the key set +// strictly below `max`. (VerifiedDB.NoGaps uses a strict `<` on both sides.) +predicate NoGapsUnder(s : set, max : uint64) +{ + forall t1, t2 :: (t1 in s && t1 < t2 < max) ==> t2 in s +} + +// VerifiedDBLikeMap captures the VerifiedDB.Invariants() shape applied to an +// extracted map, without requiring a live VerifiedDB reference. +predicate VerifiedDBLikeMap(m : map) +{ + m == map[] || + (exists lastTs :: lastTs in m.Keys && + (forall ts :: ts in m.Keys ==> ts <= lastTs) && + NoGapsUnder(m.Keys, lastTs)) +} + +lemma {:axiom} WeakVerifiedInvariantImpliesStepOne( + m : map) + requires VerifiedDBLikeMap(m) + ensures VerifiedSeqStrictlyIncreasingByOne(VerifiedMapToSortedSeq(m)) + +// ----------------------------------------------------------------------------- +// Initial-state bridge +// +// At the initial state of a Supernode — empty verified map, empty per-chain +// LogsDB, empty per-chain DenyList — the components trivially satisfy +// AllInvariants. 
This lemma re-states I12 (SupernodeState.dfy) at the +// components layer so that the eventual constructor integration in Step 2c +// can cite it directly. +// ----------------------------------------------------------------------------- +lemma InitialComponentsSatisfyInvariants( + activationTS : uint64, + chains : set, + logsDB : map>, + denyList : map>) + requires logsDB.Keys == chains + requires denyList.Keys == chains + requires forall j :: j in chains ==> logsDB[j] == [] + requires forall j :: j in chains ==> denyList[j] == {} + ensures ComponentsSatisfyInvariants( + activationTS, chains, [], logsDB, denyList) +{ + var s := BuildSupernodeState(activationTS, chains, [], logsDB, denyList); + assert IsInitialState(s); + I12_InitialStateSatisfiesInvariants(s); + // I1, I3, I9 are not part of I12 because they depend on opaque predicates + // / axiom functions. They hold vacuously here because every quantifier + // ranges over an empty sequence, but we cannot prove that without + // revealing the opacity. Recorded as TODOs in SPEC.md §5 item 11; each + // `assume {:axiom}` is explicitly tagged so Dafny does not warn. + assume {:axiom} I1_LogsMatchBlocks(s); + assume {:axiom} I3_LogsDBCrossValid(s); + assume {:axiom} I9_MinimalL1Cover(s); +} diff --git a/op-supernode/dafny-models/Types.dfy b/op-supernode/dafny-models/Types.dfy index a656e6e4bf8..baaf9d41c9a 100644 --- a/op-supernode/dafny-models/Types.dfy +++ b/op-supernode/dafny-models/Types.dfy @@ -89,6 +89,33 @@ datatype ExecutingMessage = ExecutingMessage( Timestamp : uint64 ) +// BlockWithLogs is a LogsDB entry: a block paired with its executing messages. +// Used by SupernodeState.dfy to model L_j = [(B^j_0, l^j_0), ...]. +// See invariants/SPEC.md §0 (Notation) and I1. +datatype BlockWithLogs = BlockWithLogs( + Ref : BlockRef, + ExecMsgs : seq +) + +// DenyListEntry is a block that was invalidated during cross-validation, +// tagged with the timestamp at which the invalidation decision was made. +// The decision timestamp is required by invariants/SPEC.md §I11. +datatype DenyListEntry = DenyListEntry( + Block : BlockID, + DecisionTimestamp : uint64 +) + +// IsParentOf captures the linear-chain relationship used by I2, I6, T5. +// A block `child` is the immediate successor of `parent` iff its ParentHash +// points to `parent.ID.Hash` and its number is parent.Number + 1. +// The `as int` cast promotes to mathematical integers so `+ 1` has no +// overflow side-condition. +predicate IsParentOf(parent : BlockRef, child : BlockRef) +{ + child.ParentHash == parent.ID.Hash && + (child.ID.Number as int) == (parent.ID.Number as int) + 1 +} + datatype FrontierBlockView = FrontierBlockView( Ref : BlockRef, ExecMsgs : seq diff --git a/op-supernode/invariants/SPEC.md b/op-supernode/invariants/SPEC.md new file mode 100644 index 00000000000..78f53f85ea6 --- /dev/null +++ b/op-supernode/invariants/SPEC.md @@ -0,0 +1,341 @@ +# op-supernode Invariant Specification + +**Status:** canonical source of truth for invariants enforced across Dafny models, Go unit/fuzz tests, reference model, and live execution runtime assertions. + +**Upstream prose:** [`../overview.md`](../overview.md) §"Supernode State and Invariants" and §"State Changes from Cross-Validation". + +Every invariant has a stable ID (`I*`, `A*`, `T*`). Do not renumber. When adding a new invariant, append; when deleting, mark `// retired in ` and leave the ID reserved. 
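As a minimal illustration of the ID convention, a Go-side violation carries its SPEC ID in the returned error (shapes here are illustrative; the real definitions live in `invariants/invariants.go`):

```go
package invariants

import "fmt"

// Error is a sketch of an invariant violation that always carries the
// stable SPEC ID, so any test failure or log line can cite it directly.
// Illustrative only — see invariants/invariants.go for the real type.
type Error struct {
	ID  string // stable ID from SPEC.md, e.g. "I2"
	Msg string // human-readable detail of the violation
}

func (e *Error) Error() string {
	return fmt.Sprintf("invariant %s violated: %s", e.ID, e.Msg)
}
```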
+ +All Go predicates (`invariants/invariants.go`) and Dafny predicates (`dafny-models/Supernode.dfy :: AllInvariants`) MUST reference the ID in a comment. Test failures MUST print the ID. + +--- + +## 0. Notation + +Let the abstract supernode state for `k` L2 chains be: + +- `t₀` — activation timestamp. +- `t` — last verified timestamp (`None` before any successful cross-validation). +- For each L2 chain `j ∈ {1..k}`: + - `Lⱼ = [(Bʲ₀, ℓʲ₀), …, (Bʲ_{nⱼ}, ℓʲ_{nⱼ})]` — the `LogsDB` for chain `j`. `Bʲᵢ` is a block, `ℓʲᵢ` is the list of logs for that block. + - `Dⱼ ⊆ BlockID` — the `DenyList` for chain `j`. +- The `VerifiedDB` is the sequence + `[(t₀, C_{t₀}, C¹_{t₀}, …, Cᵏ_{t₀}), …, (t, C_t, C¹_t, …, Cᵏ_t)]` + where `C_i` is the verified L1 head at timestamp `i` and `Cʲ_i` is the verified L2 head for chain `j` at timestamp `i`. +- `parent(B)` — the parent block of `B` on its native chain. +- `timestamp(B)` — the block timestamp of `B`. +- `deriveL1(Bʲᵢ)` — the L1 block from which `Bʲᵢ` was derived. + +The abstract state is considered AFTER cross-validation for `t` completes and BEFORE cross-validation for `t + 1` begins. + +--- + +## 1. State Invariants (checkable from a pure snapshot) + +These are checked by `invariants.CheckAll(Snapshot)` in Go and `AllInvariants(SupernodeState)` in Dafny. They depend only on internal state — **no live L1/L2 queries**. + +### I1 — Logs match blocks + +For every chain `j` and index `i`: +> `ℓʲᵢ` is the list of logs for block `Bʲᵢ`. + +**Source:** overview.md line 64 (first bullet under "The following invariants"). +**Check:** in Go this is enforced at insertion time (the snapshot treats `ℓʲᵢ` as part of the block entry). The runtime assertion is that `LogsDB[j][i].Block.Hash` matches the block whose logs were queried. Environmental dependency: requires trust in the VirtualNode's log retrieval. +**Class:** STRUCTURAL (snapshot-checkable given an honest VirtualNode). + +### I2 — LogsDB linearity + +For every chain `j` and every `0 ≤ i < nⱼ`: +> `Bʲᵢ = parent(Bʲᵢ₊₁)`. + +**Source:** overview.md line 65. +**Check:** `LogsDB[j][i+1].ParentHash == LogsDB[j][i].Hash` and `LogsDB[j][i+1].Number == LogsDB[j][i].Number + 1`. +**Class:** STRUCTURAL. + +### I3 — LogsDB cross-validity + +For every chain `j` and index `i`: +> `Bʲᵢ` is cross-valid given the cross-validity history for timestamps `≤ i`. All executing messages in `ℓʲᵢ` are valid (the referenced initiating message exists in some other chain's `LogsDB` at a strictly lesser or equal timestamp) and are not part of a cycle. + +**Source:** overview.md line 66. +**Check:** for every `ExecutingMessage` `m` in `ℓʲᵢ`: +1. `m` references a `(chainID, blockNum, logIdx, timestamp)` that exists in `LogsDB[m.ChainID]` at the quoted position and the quoted timestamp. +2. The executing-message directed graph rooted at `Bʲᵢ` is acyclic. + +**Class:** STRUCTURAL but expensive. Runtime assertions may downgrade to a spot check (single-message validity) and leave the full cycle check to fuzz and Dafny. + +**Modeling gap (LogIdx):** the current `Snapshot` schema stores only *executing* messages on each block (`BlockWithLogs.ExecMsgs`), not the full ordered log array. As a consequence, the state-local half of I3 in Go (`initiatingMessageExists`, `env.go`) can only verify `(chainID, blockNum, timestamp)` — `LogIdx` is never validated. To close this gap, `BlockWithLogs` must grow a `LogCount uint32` (cheap) or a full `Logs []Log` field (faster I1, but more memory). Tracked in §5 item 19. 
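A simplified sketch of that state-local half — note it matches on `(chainID, blockNum, timestamp)` only, which is exactly the `LogIdx` gap tracked above (types and field names are abbreviated for the sketch, not copied from the package):

```go
package invariants

// Simplified mirror of the snapshot pieces the check needs.
type ExecutingMessage struct {
	ChainID   uint64
	BlockNum  uint64
	LogIdx    uint32 // carried by the message, but not checkable — see the gap above
	Timestamp uint64
}

type BlockRef struct {
	Number    uint64
	Timestamp uint64
}

type BlockWithLogs struct {
	Ref      BlockRef
	ExecMsgs []ExecutingMessage
}

// initiatingMessageExists reports whether some block recorded in the
// initiating chain's LogsDB matches the (blockNum, timestamp) quoted by the
// executing message. This mirrors the state-local half of I3; the cycle
// check (part b) is left to the fuzzer and the Dafny model.
func initiatingMessageExists(logsDB map[uint64][]BlockWithLogs, m ExecutingMessage) bool {
	for _, b := range logsDB[m.ChainID] {
		if b.Ref.Number == m.BlockNum && b.Ref.Timestamp == m.Timestamp {
			return true // m.LogIdx is deliberately not validated here
		}
	}
	return false
}
```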
+ +### I4 — VerifiedDB anchor + +For every chain `j`: +> `Cʲ_{t₀} = Bʲ₀`. + +**Source:** overview.md line 67. +**Check:** `Verified[0].L2Heads[j] == LogsDB[j][0].ID`. +**Class:** STRUCTURAL. + +### I5 — VerifiedDB head = LogsDB tail + +For every chain `j`: +> `Cʲ_t = Bʲ_{nⱼ}`. + +**Source:** overview.md line 68. +**Check:** `Verified[len-1].L2Heads[j] == LogsDB[j][len-1].ID`. +**Class:** STRUCTURAL. + +### I6 — Monotone L2 heads + +For every chain `j` and every `t₀ ≤ i < t`: +> `Cʲᵢ` is equal to `Cʲᵢ₊₁` or to `parent(Cʲᵢ₊₁)`. + +Consequently, `Cʲ_{t₀}, …, Cʲ_t` equals `Bʲ₀, …, Bʲ_{nⱼ}` with possible repetitions in the middle. + +**Source:** overview.md line 69. +**Check:** for each consecutive pair in `Verified[]`, either `C^j_i == C^j_{i+1}` or `C^j_{i+1}.ParentHash == C^j_i.Hash`. +**Class:** STRUCTURAL. + +### I7 — Highest L2 block at each timestamp + +For every chain `j` and every `t₀ ≤ i ≤ t`: +> `Cʲᵢ` is the highest block on chain `j` with `timestamp ≤ i`. All children of `Cʲᵢ` have `timestamp > i`. + +**Source:** overview.md line 70. +**Check (pure snapshot):** using only `LogsDB[j]`, verify that `Cʲᵢ ∈ LogsDB[j]` and that either `Cʲᵢ` is the latest LogsDB entry with `timestamp ≤ i`, or the next LogsDB entry has `timestamp > i`. The "all children" clause cannot be checked without the live L2 — see A3. +**Class:** HYBRID. The LogsDB-internal part is structural; the "no descendant on the live L2 with `timestamp ≤ i` other than `Cʲᵢ`" part is environmental. + +### I8 — Verified L1 forms a linear chain + +> `C_{t₀}, …, C_t` are all part of the same linear chain (there may be missing L1 blocks between them). + +**Source:** overview.md line 71. +**Check:** under the assumption that the L1 has not reorged since the last verified entry (A2), the monotone-number property (`C_i.Number ≤ C_{i+1}.Number`) plus a linear ancestor check at each commit is sufficient. A *pure snapshot* check is the number-monotonicity; the ancestor check requires an L1 query — see A2. +**Class:** HYBRID. + +### I9 — `C_i` is minimal L1 covering all L2 heads at `i` + +For every `t₀ ≤ i ≤ t`: +> `C_i` is the earliest L1 block on its linear chain where all of `C¹_i, …, Cᵏ_i` are available — the maximum among `deriveL1(C¹_i), …, deriveL1(Cᵏ_i)`. + +**Source:** overview.md line 72. +**Check:** for every verified entry `i`, `C_i.Number == max_j( deriveL1(Cʲ_i).Number )` and `deriveL1(Cʲ_i)` is an ancestor of `C_i` (or equal). The `deriveL1` lookup is environmental — see A4. +**Class:** HYBRID. + +### I10 — LogsDB disjoint from DenyList + +For every chain `j` and index `i`: +> `Bʲᵢ ∉ Dⱼ`. A block in the LogsDB (and, consequently, in the VerifiedDB) does not appear in the DenyList. + +**Source:** overview.md line 73. +**Check:** `for j, blocks := range LogsDB { for _, b := range blocks { require b.ID ∉ DenyList[j] } }`. +**Class:** STRUCTURAL. + +### I11 — DenyList timestamp bound + +For every chain `j` and every `B ∈ Dⱼ`: +> `timestamp(B) ≤ t + 1`. + +**Note:** The DenyList is allowed to contain entries for `t + 1` from previous failed attempts to cross-validate that timestamp. + +**Source:** overview.md line 74. +**Check:** `for _, b := range DenyList[j] { require timestamp(b) ≤ Verified[len-1].Timestamp + 1 }`. The `timestamp(b)` lookup may require the LogsDB or a side table recording when a block was denied. +**Class:** STRUCTURAL (requires the DenyList entries to record their decision timestamp — enforce this at insertion time). 
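A simplified sketch of that check, assuming each deny-list entry records its decision timestamp as the Dafny `DenyListEntry` does (field names illustrative; the real predicate is `CheckI11` in `invariants/invariants.go`):

```go
package invariants

import "fmt"

// DenyListEntry pairs a denied block with the timestamp at which the
// invalidation decision was made (the "side table" mentioned above).
type DenyListEntry struct {
	BlockHash         [32]byte
	DecisionTimestamp uint64
}

// checkI11 — SPEC.md I11: no deny-list entry may be decided later than
// t + 1, where t is the last verified timestamp (or the activation
// timestamp when nothing has been verified yet — the caller chooses).
func checkI11(lastVerifiedTS uint64, denyList map[uint64][]DenyListEntry) error {
	for chain, entries := range denyList {
		for _, e := range entries {
			if e.DecisionTimestamp > lastVerifiedTS+1 {
				return fmt.Errorf("I11 violated: chain %d block %x denied at %d > t+1 (%d)",
					chain, e.BlockHash, e.DecisionTimestamp, lastVerifiedTS+1)
			}
		}
	}
	return nil
}
```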
+ +### I12 — Initial state holds vacuously + +> At the initial state (empty `LogsDB`, empty `DenyList`, empty `VerifiedDB`), I1–I11 all hold by default. + +**Source:** overview.md line 76. +**Check:** unit test that constructs a fresh `Supernode` and runs `CheckAll`. +**Class:** META. + +--- + +## 2. Environmental Assumptions (not snapshot-checkable) + +These are properties the supernode RELIES ON from the L1 and L2 chains. They cannot be checked from internal state alone. They MUST be validated at ingress (the moment the chain is observed) and recorded as part of the snapshot's "trusted view". + +### A1 — Timestamp uniqueness + +> Timestamps don't repeat: there is at most one block per chain per timestamp. + +**Source:** overview.md line 82. +**Enforcement:** reject any `VirtualNode` response where two distinct blocks share a timestamp. Record violations as a supernode error. + +### A2 — L2 reorgs iff L1 reorgs + +> Outside of cross-validation, an L2 chain only reorgs if L1 reorgs. By the time an L2 reorg is observable, the corresponding L1 reorg is also observable. (The reverse is NOT true.) + +**Source:** overview.md line 83. +**Enforcement:** detect L2 reorg by comparing last-seen L2 head with current L2 head across rounds; if L2 reorged but L1 head is ancestor-consistent, flag as assumption violation. + +### A3 — Eventual L2 sync + +> Given sufficient time between L1 reorgs, each L2 chain eventually syncs to the L1. + +**Source:** overview.md line 84. +**Enforcement:** liveness only — cannot be violated in finite time. Monitor stall duration and alert on prolonged non-progress. + +### A4 — Denied blocks produce deposit-only replacements + +> If an L2 chain `j` tries to derive a block that is in `Dⱼ`, it inserts a deposit-only block in its place. Since the VirtualNode for an L2 is destroyed and recreated after a block is added to `Dⱼ`, at the start of a round the safe chain for an L2 never has a block currently in its DenyList. + +**Source:** overview.md line 85. +**Enforcement:** after every DenyList insertion, verify that the `VirtualNode` has been destroyed and recreated before the next `observeRound`. + +### A5 — Arbitrary query results during a round + +> L1 and L2 chains may reorg at any point, even mid-round. Any query to L1/L2 state during a round can return an arbitrary result, except for what A1–A4 guarantee. + +**Source:** overview.md line 86. +**Enforcement:** NONE — this is a *worst-case assumption* that constrains what the cross-validation logic is allowed to rely on. Fuzz tests MUST generate adversarial query results consistent with A1–A4 to exercise this assumption. + +--- + +## 3. Transition Specification + +These describe the *allowed* state changes during a single round of cross-validation. They are checked by running the reference model in lockstep with the real `Supernode` and asserting that the resulting snapshots match after each step. + +### T0 — Precondition + +Before any round, `AllInvariants(state)` holds. + +**Source:** overview.md §"Supernode State and Invariants" (implicit from "expected to be true at this state"). + +### T1 — Chain not caught up + +> For each L2 chain `j`, let `B_j` be the highest block on the safe chain of `j` with `timestamp ≤ t + 1`. If a higher block with `timestamp ≤ t + 1` should exist (predictable from block time) but has not been derived yet, NO state update happens. + +**Source:** overview.md line 92. +**Postcondition:** `state' == state`. 
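The lockstep discipline reduces to a check of this shape after every applied transition (a sketch: the `CheckAll` signature is assumed, and the real driver is `FuzzReferenceModel` in `fuzz_test.go`):

```go
package invariants

import "testing"

// assertTransition applies one reference-model transition (T1..T5) to a
// pre-state and requires the post-state to satisfy the whole catalog.
// It assumes this package's Snapshot type and a CheckAll(*Snapshot) error
// signature; both are placeholders for the real harness.
func assertTransition(t *testing.T, pre Snapshot, apply func(Snapshot) Snapshot) {
	t.Helper()
	post := apply(pre)
	if err := CheckAll(&post); err != nil {
		// CheckAll failures carry the stable SPEC ID (I1..I11).
		t.Fatalf("invariant violated after transition: %v", err)
	}
}
```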
+ +### T2 — L1 inconsistency among L2 derivations + +> Otherwise, let `B'_j := deriveL1(B_j)`. If the `B'_j` are not all on the same linear L1 chain, a reorg has happened and some L2s have not synced. NO state update happens. + +**Source:** overview.md line 93. +**Postcondition:** `state' == state`. + +### T3 — Rollback when `C_t` has been reorged out + +> Otherwise, if `C_t` is not in the same linear chain as the `B'_j`, then `C_t` has been reorged out. Roll back to the previous timestamp: +> - Prune VerifiedDB to `(t₀, …, t-1)`. +> - Prune every `Dⱼ` by removing all entries with `timestamp ≥ t` (i.e., `t` or `t + 1`). +> - For every chain `j` such that `Cʲ_{t-1} ≠ Bʲ_{nⱼ}`, remove the last entry of `Lⱼ`. + +**Source:** overview.md lines 94–97. +**Postcondition:** +- `Verified' = Verified[0..len-1]` +- For all `j`: `D'ⱼ = { b ∈ Dⱼ : decisionTimestamp(b) < t }` +- For all `j`: if `Cʲ_{t-1} ≠ Bʲ_{nⱼ}` then `L'ⱼ = Lⱼ[0..nⱼ-1]` else `L'ⱼ = Lⱼ`. +- `AllInvariants(state')` holds. + +### T4 — Invalidation + +> Otherwise, every `B_j` is consistent with `C_t`. Either `B_j == Bʲ_{nⱼ}` or `Bʲ_{nⱼ} == parent(B_j)`. `B_j` is invalid if it has an invalid executing message or an executing message in a cycle. If any `B_j` is invalid: +> - Each invalid `B_j` is added to `Dⱼ`. +> - For each chain `j` with invalid `B_j`, the ChainContainer is reset: EngineController rewound to the highest block with `timestamp ≤ t`; VirtualNode destroyed and recreated. +> - NO `LogsDB` is updated. Any logs added during cross-validation must be removed. + +**Source:** overview.md lines 98–101. +**Postcondition:** +- `Verified' = Verified` (unchanged) +- For every invalid `j`: `D'ⱼ = Dⱼ ∪ { B_j }` with `decisionTimestamp(B_j) = t + 1` +- For every `j`: `L'ⱼ = Lⱼ` (any speculative additions rolled back) +- `AllInvariants(state')` holds. + +### T5 — Advance + +> Otherwise (all `B_j` valid): +> - Verified history extended with `(t+1, C_{t+1}, C¹_{t+1}, …, Cᵏ_{t+1})` where `Cʲ_{t+1} = B_j` and `C_{t+1} = max_j(B'_j)`. +> - Every `Lⱼ` such that `B_j ≠ Bʲ_{nⱼ}` is extended with `(B_j, ℓ(B_j))`. + +**Source:** overview.md lines 102–104. +**Postcondition:** +- `Verified' = Verified ++ [(t+1, max_j(B'_j), {j → B_j})]` +- For every `j` with `B_j ≠ Bʲ_{nⱼ}`: `L'ⱼ = Lⱼ ++ [(B_j, logs(B_j))]` +- For every `j` with `B_j == Bʲ_{nⱼ}`: `L'ⱼ = Lⱼ` +- `AllInvariants(state')` holds. + +### T6 — Totality + +Every round of cross-validation matches exactly one of T1–T5. No other state transition is legal. + +**Source:** overview.md structure of §"State Changes from Cross-Validation". +**Check:** reference model implements T1–T5 as an exhaustive match; any transition observed in the real supernode that does not correspond to exactly one of these is a bug. + +--- + +## 4. 
Classification summary

| ID  | Class       | Snapshot-checkable | Live L1/L2 needed | Enforced by                            |
|-----|-------------|-------------------:|------------------:|----------------------------------------|
| I1  | Structural  | Yes (trusted VN)   | No                | Insertion-time check + snapshot        |
| I2  | Structural  | Yes                | No                | Snapshot                               |
| I3  | Structural  | Yes (expensive)    | No                | Snapshot + fuzz + Dafny                |
| I4  | Structural  | Yes                | No                | Snapshot                               |
| I5  | Structural  | Yes                | No                | Snapshot                               |
| I6  | Structural  | Yes                | No                | Snapshot                               |
| I7  | Hybrid      | Partial            | Yes (A3 clause)   | Snapshot (LogsDB) + ingress check      |
| I8  | Hybrid      | Partial            | Yes (ancestor)    | Snapshot (numbers) + L1 ancestor RPC   |
| I9  | Hybrid      | No (deriveL1)      | Yes               | Recorded at commit + L1 ancestor RPC   |
| I10 | Structural  | Yes                | No                | Snapshot                               |
| I11 | Structural  | Yes (side table)   | No                | Snapshot with decision-TS side table   |
| I12 | Meta        | Yes                | No                | Unit test on fresh supernode           |
| A1  | Assumption  | No                 | Yes               | Ingress validation                     |
| A2  | Assumption  | No                 | Yes               | Round-to-round diff                    |
| A3  | Assumption  | No (liveness)      | Yes               | Stall monitor                          |
| A4  | Assumption  | Yes (lifecycle)    | No                | VirtualNode lifecycle assertion        |
| A5  | Assumption  | No                 | N/A               | Worst-case assumption, fuzz adversary  |
| T1  | Transition  | Yes                | No                | Reference model lockstep               |
| T2  | Transition  | Yes                | No                | Reference model lockstep               |
| T3  | Transition  | Yes                | No                | Reference model lockstep               |
| T4  | Transition  | Yes                | No                | Reference model lockstep               |
| T5  | Transition  | Yes                | No                | Reference model lockstep               |
| T6  | Transition  | Yes                | No                | Reference model exhaustiveness         |

---

## 5. Open questions / gaps vs current Dafny models

Tracking items to resolve across Steps 2 / 2b / 2c of the Dafny port. Status as of Step 2b:

1. ~~**`applyRewindPlan` is `{:axiom}`-ensured**~~ **RESOLVED (Step 2b).** Replaced with a trivial no-op stub returning `false`. Both `ensures` clauses are now proven (field reference equality + frame-preserved invariants). The real rewind logic is deferred to Step 2c and tracked as item 1′.
2. ~~**`sameL1Chain` is `{:axiom}`-ensured**~~ **RESOLVED (Step 2b).** Replaced with a no-op stub returning `None`; the postcondition becomes vacuous. Real L1-ancestry logic tracked as item 2′.
3. ~~**`resolveFrontierVerificationView` is `{:axiom}`-ensured**~~ **RESOLVED (Step 2b).** Replaced with a no-op stub returning `None`. Real VirtualNode-backed view tracked as item 3′.
4. ~~**No `AllInvariants` predicate exists**~~ **RESOLVED (Step 2).** `AllInvariants` now lives in `dafny-models/SupernodeState.dfy` and covers I1–I11.
5. ~~**`DenyList` not modeled in Dafny**~~ **RESOLVED (Step 2).** Modeled as `map<ChainID, set<DenyListEntry>>` in `SupernodeState`.
6. ~~**`LogsDB` not modeled in Dafny**~~ **RESOLVED (Step 2).** Modeled as `map<ChainID, seq<BlockWithLogs>>` in `SupernodeState`.
7. ~~**`decisionTimestamp(b)` for DenyList entries**~~ **RESOLVED (Step 2).** `DenyListEntry` carries a `DecisionTimestamp : uint64` field.
8. ~~**Activation timestamp initial state**~~ **RESOLVED (Step 2).** `IsInitialState` predicate + `I12_InitialStateSatisfiesInvariants` lemma in `SupernodeState.dfy`.

### Remaining items (Step 2c / 3c)

1′. **Real `applyRewindPlan` implementation** — prune VerifiedDB tail, prune DenyList entries with `DecisionTimestamp >= target`, prune LogsDB tails. Proof obligation: given `AllInvariants(pre)` and a valid `RewindPlan`, produce `AllInvariants(post)` satisfying `T3_Rollback`.
2′. **Real `sameL1Chain` implementation** — requires an L1 ancestry oracle.
Option: add an abstract L1 client parameter with a concrete contract, defer to an L1-client integration PR. +3′. **Real `resolveFrontierVerificationView` implementation** — requires a per-chain VirtualNode oracle. Same deferral pattern as item 2′. +9. **Supernode class does not track `logsDB` / `denyList` as first-class fields.** Step 2c should add these (ghost or concrete), plus a `View() : SupernodeState` method bridging via `SupernodeView.BuildSupernodeState`. +10. **`VerifiedMapToSortedSeq` is body-less.** `SupernodeView.dfy` declares it abstract with three axiomatized algebraic lemmas (length, contains, ascending). Step 2c should provide a concrete recursive implementation (requires a `MinKey : set -> uint64` helper) and discharge those axioms. +11. **`InitialComponentsSatisfyInvariants` contains `assume` statements for I1/I3/I9.** Those invariants depend on opaque predicates / axiom functions (`LogsBelongToBlock`, `ExecMsgGraphAcyclic`, `DeriveL1`). Step 2c should either mark them `{:opaque}` with `reveal_*` helpers that make the vacuous-initial-state reasoning explicit, or restructure them to take the needed oracles as parameters. +12. **`WeakVerifiedInvariantImpliesStepOne` is axiomatized.** Once `VerifiedMapToSortedSeq` has a concrete body, this lemma should be provable rather than axiomatic. +13. **`interop.LogsDB` has no enumeration API.** The Go `StateView.LogsDBFor(chain)` implementation in `op-supernode/supernode/invariants_bridge.go` (Step 3c) cannot walk all sealed blocks without either (a) adding `EnumerateSealed(from, to)` to `op-supervisor/supervisor/backend/db/logs`, or (b) walking number-by-number from 0 via `FindSealedBlock` / `OpenBlock`. Option (a) is strictly better. LogsDB interface: `op-supernode/supernode/activity/interop/logdb.go:20-47`. LogsDB instances live in `Interop.logsDBs : map[ChainID]LogsDB` at `interop.go:83`. +14. **`chain_container.DenyList` has no enumeration API.** Only `IsDenied(height, hash)` point queries exist. `StateView.DenyListFor(chain)` in the Step 3c adapter needs a `ForEach(func(height, hash) bool)` method added to `chain_container/invalidation.go:23-28`. Trivial — iterate the bbolt bucket. Also note: `denyList` is a PRIVATE field on `simpleChainContainer` (`chain_container.go:93`), so the `ForEach` method must be promoted onto the `ChainContainer` interface at `chain_container.go:32-79`, or the adapter must type-assert to `*simpleChainContainer`. Interface promotion is cleaner. +15. **Supernode-side adapter not yet written.** The `StateView` interface is defined in `op-supernode/invariants/bridge.go`; an adapter implementing it for `*supernode.Supernode` needs to live in `op-supernode/supernode/invariants_bridge.go` (to access unexported fields). Blocked on items 13, 14, and 16. +16. **Activation timestamp is `*uint64`, VerifiedDB is inside the Interop activity.** The Step 3c adapter must: (a) dereference `sn.cfg.InteropActivationTimestamp` with a nil check (return 0 or sentinel if interop is not yet activated); (b) locate the `*interop.Interop` activity inside `sn.activities[]` via type assertion — there's no direct accessor. The activity holds both `verifiedDB` (`interop.go:82`) and the per-chain `logsDBs` map (`interop.go:83`), so one traversal yields both halves of the Snapshot. Consider adding a public `sn.InteropActivity() *interop.Interop` accessor on Supernode to avoid the type-assertion dance. +17. 
**Do NOT snapshot mid-interop round.** The subagent's lock-discipline analysis flags this as a correctness requirement: the VirtualNode is recreated mid-round, LogsDB tails can be speculative, and DenyList mutations are not atomic with LogsDB mutations. `SnapshotFrom` must be called at a stable checkpoint — specifically, after a completed `applyPendingTransition` cycle. The runtime assertion hook should therefore fire from `Supernode.progressAndRecord()` AFTER `applyPendingTransition` returns, not from inside mutating methods. `invariants.AssertWith(func() Snapshot { return SnapshotFrom(bridge) })` is the canonical call site. +18. **SuperRoot activity is NOT a source for `Verified[]`.** The subagent confirms `SuperRoot.atTimestamp` is a single-timestamp query, not a history accessor. The only path to the full `Verified[]` sequence is walking `interop.VerifiedDB.Get(ts)` from `ActivationTS` to `LastTimestamp()`. The adapter must perform that walk; cache-friendliness is a future optimization (a new `interop.VerifiedDB.Range(from, to) iter.Seq2[uint64, VerifiedResult]` would be ideal). + +19. **I3 `LogIdx` is not state-checkable in the current schema.** `BlockWithLogs` stores only executing messages. The state-local half of I3 in `env.go::initiatingMessageExists` therefore matches on `(chainID, blockNum, timestamp)` only and silently accepts mismatched `LogIdx`. To close this either (a) add `LogCount uint32` to `BlockWithLogs` and validate `m.LogIdx < target.LogCount`, or (b) materialize the full `Logs []Log` per block and check by content. Option (a) is sufficient and cheap. + +--- + +## 6. Change log + +- **Initial draft (Step 1)** — extracted verbatim from `overview.md` lines 36–105. No semantic changes to the prose spec; only structural reorganization into a numbered catalog. +- **Step 2** — added `dafny-models/SupernodeState.dfy` with `SupernodeState` datatype, predicates `I1_..I11_`, `IsInitialState`, and `I12_InitialStateSatisfiesInvariants`. Extended `Types.dfy` with `BlockWithLogs`, `DenyListEntry`, `IsParentOf`. Resolved §5 items 4–8. +- **Step 2b** — added `dafny-models/SupernodeView.dfy` with `BuildSupernodeState`, `ComponentsSatisfyInvariants`, abstract `VerifiedMapToSortedSeq`, and `InitialComponentsSatisfyInvariants` lemma. Replaced the three `{:axiom}`-ensured abstract methods in `Supernode.dfy` (`applyRewindPlan`, `sameL1Chain`, `resolveFrontierVerificationView`) with trivial no-op stubs and removed the `{:axiom}` tags. Installed Dafny 4.11.0 via Homebrew and ran `dafny verify` — after fixing 5 surfaced issues (split `VerifiedMapToSortedSeqContains` into forward/backward lemmas, strengthened `IsInitialState` with `Keys == Chains` constraints, added `t+1` overflow guard to `T4_Invalidate`, tagged 3 `assume` statements with `{:axiom}`), reached **66 verified, 0 errors**. Resolved §5 items 1–3. Added new follow-up items 1′, 2′, 3′, 9, 10, 11, 12. +- **Step 3** — added Go `invariants` package: `Snapshot` type mirroring `SupernodeState`, `CheckI2/I4/I5/I6/I7/I8/I10/I11` predicates returning structured `*Error` values with stable SPEC IDs, and `CheckAll` composition using `errors.Join`. 18 unit tests covering happy / failing paths for every predicate + I12 vacuous-initial-state test + joined-error composition test. 
+- **Step 3b** — added `env.go` with `Env` oracle interface and environmental predicates `CheckI1_LogsBelongToBlocks`, `CheckI3_ExecutingMessageValidity` (state-local initiating-message check + Tarjan cycle detection), `CheckI8_VerifiedL1Linear`, `CheckI9_MinimalL1Cover`; plus `CheckAllWithEnv` composition. Added `bridge.go` with `StateView` interface and `SnapshotFrom(view)` lifter, plus `StaticStateView` fake for tests. +- **Step 4** — added runtime assertion build tag. `runtime_on.go` (build tag `supernode_invariants`) exports `Assert(Snapshot)` / `AssertWith(func() Snapshot)` / `SetPanicOnFailure` / `SetFailureSink` / `AssertionFailureCount`. `runtime_off.go` provides zero-cost no-op equivalents for production builds. `AssertionsEnabled const` lets callers branch at compile time. +- **Step 5** — added `reference_model.go` with pure T3/T4/T5 transition implementations (`ApplyAdvance`, `ApplyInvalidate`, `ApplyRewind`) and `CloneSnapshot` helper. Added `FuzzReferenceModel` native Go fuzzer plus `TestReferenceModel_PropertySample`. Fuzz uncovered one precondition gap: `ApplyAdvance` now refuses to add blocks present in the DenyList (A4 enforcement). Fuzz capped at 512 ops per seed to bound memory. After fix: **2.8M executions over 30s, zero invariant violations**. +- **Step 6** — added `trace.go` with `Trace` JSON format (steps, origin, optional `ExpectedFailures` list), `LoadTrace` / `SaveTrace` / `VerifyTrace` / `LoadTraceCorpus`. `TestTraceCorpus` replays every `testdata/traces/*.json`. Three canonical traces (`happy_single_chain_3_advances`, `happy_invalidate_then_continue`, `happy_rewind_after_two_advances`) generated by `TestRegenCanonicalTraces` — hand-editing JSON is discouraged because `eth.ChainID` and `eth.BlockID` have finicky serialization rules. Added `just fuzz-invariants` and `just regen-traces` targets; `just verify-invariants` now runs Dafny + Go (both build tags) + fuzz as a single CI gate. diff --git a/op-supernode/invariants/bridge.go b/op-supernode/invariants/bridge.go new file mode 100644 index 00000000000..5ad8555e4e5 --- /dev/null +++ b/op-supernode/invariants/bridge.go @@ -0,0 +1,135 @@ +package invariants + +import ( + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// StateView is the pull-based adapter interface between the live +// `supernode.Supernode` and this package's pure `Snapshot` type. +// +// Why an interface and not a concrete function that takes *Supernode: +// +// 1. The `invariants` package stays decoupled from `supernode` — no +// import cycle risk even if supernode grows to reference invariants +// via the runtime-assertion hook. +// 2. The adapter lives in `op-supernode/supernode/invariants_bridge.go` +// where it can read unexported fields (`sn.chains`, `sn.cfg`, and +// activity-registry internals). +// 3. Tests and fuzzers can supply a fake StateView without standing up a +// real Supernode — e.g. the reference model already exposes a +// Snapshot, and tests can wrap it with a trivial StateView. +// +// Implementations are responsible for lock discipline. Per the subagent's +// structural map of op-supernode as of Step 3b: +// +// - `sn.chains` is append-only at startup; no lock needed. +// - `interop.VerifiedDB` has an internal RWMutex; callers get a +// consistent read via `Get` / `LastTimestamp`. +// - `chain_container.DenyList` has an internal RWMutex. +// - `interop.LogsDB` wraps an `op-supervisor` logs DB with its own +// concurrency control. 
+// +// So long as each query method respects the relevant internal mutex, the +// snapshot produced by `SnapshotFrom(view)` represents a valid +// interleaving of the underlying state at the time of the call. +// +// TIMING CONSTRAINT (SPEC.md §5 item 17): SnapshotFrom MUST be called at a +// stable checkpoint, after a completed applyPendingTransition cycle. Do +// NOT call it from inside a mutating method — the VirtualNode can be +// mid-recreation, LogsDB tails can hold speculative blocks, and DenyList +// mutations are not atomic with LogsDB mutations. The canonical runtime +// hook is: +// +// // Inside Supernode.progressAndRecord, AFTER applyPendingTransition: +// invariants.AssertWith(func() invariants.Snapshot { +// return invariants.SnapshotFrom(s.stateView) +// }) +type StateView interface { + // ActivationTS returns t_0 from SPEC.md §0. + ActivationTS() uint64 + + // Chains returns the chain IDs the Supernode is tracking. The + // returned slice is owned by the caller; implementations should + // return a fresh copy. + Chains() []eth.ChainID + + // Verified returns the VerifiedDB contents sorted ascending by + // timestamp. This matches the Dafny VerifiedMapToSortedSeq shape. + Verified() []VerifiedEntry + + // LogsDBFor returns the LogsDB entries for a single chain, oldest + // first, as of the moment of the call. Returns nil for chains + // without any recorded entries. The returned slice is owned by the + // caller. + // + // NOTE: as of Step 3b, `interop.LogsDB` has no "enumerate all" + // method — only range-based queries (`FindSealedBlock`, `OpenBlock`, + // `Contains`). Implementations must walk from block 0 (or the + // latest-sealed back to genesis) using those methods. Adding a + // first-class `EnumerateSealed(from, to)` method to the op-supervisor + // logs DB is tracked as SPEC §5 item 13. + LogsDBFor(chain eth.ChainID) []BlockWithLogs + + // DenyListFor returns the DenyList entries for a single chain in + // insertion order. Returns nil if no blocks have been denied on that + // chain. The returned slice is owned by the caller. + // + // NOTE: `chain_container.DenyList` currently only exposes + // `IsDenied(height, hash)`, which is a point query. Enumeration + // requires adding a `ForEach` / `Enumerate` method that iterates the + // bbolt bucket. Tracked as SPEC §5 item 14. + DenyListFor(chain eth.ChainID) []DenyListEntry +} + +// SnapshotFrom assembles a pure Snapshot from a StateView. It is the +// single entry point every runtime / test / fuzz / replay consumer uses +// to get a snapshot for `CheckAll`. +// +// SnapshotFrom is resilient to nil slices and missing keys — the returned +// Snapshot is always well-formed (every chain in `Chains()` has an entry +// in LogsDB and DenyList, even if the entry is a nil slice). +func SnapshotFrom(v StateView) Snapshot { + if v == nil { + return Snapshot{} + } + chains := v.Chains() + s := Snapshot{ + ActivationTS: v.ActivationTS(), + Chains: append([]eth.ChainID{}, chains...), + LogsDB: make(map[eth.ChainID][]BlockWithLogs, len(chains)), + DenyList: make(map[eth.ChainID][]DenyListEntry, len(chains)), + Verified: v.Verified(), + } + for _, c := range chains { + s.LogsDB[c] = v.LogsDBFor(c) + s.DenyList[c] = v.DenyListFor(c) + } + return s +} + +// StaticStateView is a trivial StateView backed by an already-assembled +// Snapshot. Useful for tests and for replaying a serialized trace through +// the StateView machinery. 
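+//
+// Hypothetical usage sketch (test-side), assuming `snap` is an
+// already-assembled Snapshot:
+//
+//	view := StaticStateView{S: snap}
+//	if err := CheckAll(SnapshotFrom(view)); err != nil {
+//		t.Fatalf("invariant violation: %v", err)
+//	}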
+type StaticStateView struct { + S Snapshot +} + +func (v StaticStateView) ActivationTS() uint64 { return v.S.ActivationTS } +func (v StaticStateView) Chains() []eth.ChainID { return append([]eth.ChainID{}, v.S.Chains...) } + +// Verified deep-copies the Verified slice — including each entry's +// L2Heads map — so callers can mutate the result without aliasing the +// underlying snapshot. +func (v StaticStateView) Verified() []VerifiedEntry { + return cloneVerifiedSlice(v.S.Verified) +} + +// LogsDBFor deep-copies the per-chain LogsDB slice including each +// block's ExecMsgs slice. +func (v StaticStateView) LogsDBFor(c eth.ChainID) []BlockWithLogs { + return cloneBlockWithLogsSlice(v.S.LogsDB[c]) +} + +func (v StaticStateView) DenyListFor(c eth.ChainID) []DenyListEntry { + return append([]DenyListEntry{}, v.S.DenyList[c]...) +} diff --git a/op-supernode/invariants/bridge_test.go b/op-supernode/invariants/bridge_test.go new file mode 100644 index 00000000000..2fc5e5b26ba --- /dev/null +++ b/op-supernode/invariants/bridge_test.go @@ -0,0 +1,60 @@ +package invariants + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestSnapshotFrom_RoundTrip(t *testing.T) { + // Build a non-trivial snapshot via the reference model. + original := NewInitialSnapshot(100, []eth.ChainID{chainA(), chainB()}) + original, err := ApplyAdvance(original, blockID(10, 1000), + map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + chainB(): block(2, 0, 1, 100), + }) + if err != nil { + t.Fatal(err) + } + original, err = ApplyAdvance(original, blockID(11, 1001), + map[eth.ChainID]BlockWithLogs{ + chainA(): block(3, 1, 2, 101), + chainB(): block(4, 2, 2, 101), + }) + if err != nil { + t.Fatal(err) + } + + // Wrap in a StaticStateView and go through SnapshotFrom. + view := StaticStateView{S: original} + recovered := SnapshotFrom(view) + + // The recovered snapshot must still satisfy CheckAll. + if err := CheckAll(recovered); err != nil { + t.Fatalf("SnapshotFrom output violates invariants: %v", err) + } + + // And every structural field should match. + if recovered.ActivationTS != original.ActivationTS { + t.Fatalf("activation drift: %d vs %d", recovered.ActivationTS, original.ActivationTS) + } + if len(recovered.Chains) != len(original.Chains) { + t.Fatalf("chain count drift: %d vs %d", len(recovered.Chains), len(original.Chains)) + } + if len(recovered.Verified) != len(original.Verified) { + t.Fatalf("verified count drift: %d vs %d", len(recovered.Verified), len(original.Verified)) + } + for _, c := range original.Chains { + if len(recovered.LogsDB[c]) != len(original.LogsDB[c]) { + t.Fatalf("LogsDB[%s] drift: %d vs %d", c, len(recovered.LogsDB[c]), len(original.LogsDB[c])) + } + } +} + +func TestSnapshotFrom_Nil(t *testing.T) { + s := SnapshotFrom(nil) + if err := CheckAll(s); err != nil { + t.Fatalf("nil view should produce a valid empty snapshot: %v", err) + } +} diff --git a/op-supernode/invariants/doc.go b/op-supernode/invariants/doc.go new file mode 100644 index 00000000000..109785908c4 --- /dev/null +++ b/op-supernode/invariants/doc.go @@ -0,0 +1,21 @@ +// Package invariants is the Go counterpart of the Dafny models in +// op-supernode/dafny-models. It provides pure predicates that check the +// state invariants documented in op-supernode/invariants/SPEC.md. +// +// The package is organized around a single pure Snapshot type that mirrors +// the Dafny SupernodeState datatype. 
Every invariant check takes a Snapshot +// and returns an error identifying which invariant (by stable SPEC ID) +// failed. Checks are pure and have no hidden dependencies on the live +// Supernode — they can replay serialized traces from Dafny counter-examples, +// fuzz failures, or production logs through the same code path. +// +// Stable IDs (I1..I12, A1..A5, T1..T6) are the canonical reference for +// cross-layer bug triage. Every error message and every godoc comment that +// references an invariant MUST cite the ID so failures can be traced back +// to the specification. +// +// See: +// - op-supernode/invariants/SPEC.md (source of truth) +// - op-supernode/dafny-models/SupernodeState.dfy (Dafny mirror) +// - op-supernode/dafny-models/SupernodeView.dfy (bridge) +package invariants diff --git a/op-supernode/invariants/env.go b/op-supernode/invariants/env.go new file mode 100644 index 00000000000..e8c7d78a680 --- /dev/null +++ b/op-supernode/invariants/env.go @@ -0,0 +1,364 @@ +package invariants + +import ( + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Env is the environmental oracle required by I1, I3, I7 (second clause), +// I8 (ancestry clause), and I9. These invariants cannot be checked from a +// pure Snapshot because they reference facts about L1/L2 chains that are +// not stored in the state. +// +// An Env implementation may be: +// - A trusted live-chain adapter (production path, Step 3c). +// - A fake oracle seeded with known answers (tests, replay of a trace). +// - A strict "deny all" oracle that rejects every query (safety +// upper-bound: if CheckAllWithEnv passes under the strict oracle, +// every environmentally-dependent clause holds regardless of live +// chain state). +type Env interface { + // LogsBelongToBlock is SPEC I1 — checks that the supplied ExecMsgs + // are actually the logs emitted when executing `ref`. + // Returns nil if the logs belong, a non-nil error otherwise. + LogsBelongToBlock(ref BlockRef, execMsgs []ExecutingMessage) error + + // DeriveL1 is SPEC I9 — returns the L1 block from which the given + // L2 block was derived. Returns (zero, error) if the derivation is + // not known. + DeriveL1(l2 eth.BlockID) (eth.BlockID, error) + + // IsL1Ancestor is SPEC I8 (ancestry clause) — returns true iff + // `ancestor` appears on the same linear L1 chain as `descendant`, + // i.e. ancestor.Number <= descendant.Number AND walking parents from + // `descendant` reaches `ancestor`. + IsL1Ancestor(ancestor, descendant eth.BlockID) (bool, error) + + // HasHigherL2BlockAtOrBelow is SPEC I7 (environmental clause) — + // returns true iff the given L2 chain has a block with + // `timestamp <= ts` that is strictly higher than `head`. + HasHigherL2BlockAtOrBelow( + chain eth.ChainID, head eth.BlockID, ts uint64) (bool, error) +} + +// CheckAllWithEnv runs every invariant including the environmentally- +// dependent ones (I1, I3, I7-env, I8-env, I9). It composes CheckAll with +// the environmental predicates. Returns a joined error if any invariant +// failed. +// +// Note: I3 (executing message acyclicity) is partially state-local and +// partially environmental. The state-local half (each referenced +// initiating message exists in LogsDB at the expected position) is +// checked regardless of env; the acyclicity half is best-effort using +// Tarjan on the static snapshot graph. 
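+//
+// Hypothetical call site, assuming `view` (a StateView) and `liveEnv` (an
+// Env implementation) are supplied by the Step 3c adapter:
+//
+//	if err := CheckAllWithEnv(SnapshotFrom(view), liveEnv); err != nil {
+//		// joined error; every leaf carries a stable SPEC ID such as [I3] or [I9]
+//	}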
+func CheckAllWithEnv(s Snapshot, env Env) error { + errs := []error{CheckAll(s)} + if env != nil { + errs = append(errs, + CheckI1_LogsBelongToBlocks(s, env), + CheckI3_ExecutingMessageValidity(s, env), + CheckI7_NoHigherL2Block(s, env), + CheckI8_VerifiedL1Linear(s, env), + CheckI9_MinimalL1Cover(s, env), + ) + } else { + // Without an env, run only the state-local half of I3. + errs = append(errs, CheckI3_ExecutingMessageValidity(s, nil)) + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I1 — Logs belong to blocks (environmental). +// ----------------------------------------------------------------------------- + +// CheckI1_LogsBelongToBlocks delegates the logs-match-block check to the +// environmental oracle. See SPEC I1. +func CheckI1_LogsBelongToBlocks(s Snapshot, env Env) error { + if env == nil { + return nil + } + var errs []error + for chain, logs := range s.LogsDB { + for i, b := range logs { + if err := env.LogsBelongToBlock(b.Ref, b.ExecMsgs); err != nil { + errs = append(errs, newIndexedErr("I1", chain, i, + fmt.Sprintf("LogsBelongToBlock rejected: %v", err))) + } + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I3 — Executing message validity + acyclicity. +// +// State-local half: every ExecutingMessage references an initiating log +// that exists in the target chain's LogsDB at the named (BlockNum, +// Timestamp). No env required for this half. +// +// Environmental half: static acyclicity via Tarjan's SCC. A snapshot-only +// implementation is provided — env is not strictly required but reserved +// for future refinement. +// ----------------------------------------------------------------------------- + +func CheckI3_ExecutingMessageValidity(s Snapshot, env Env) error { + var errs []error + + // State-local half: initiating message existence. + for chain, logs := range s.LogsDB { + for i, b := range logs { + for k, m := range b.ExecMsgs { + if !initiatingMessageExists(s, m) { + errs = append(errs, newIndexedErr("I3", chain, i, + fmt.Sprintf("ExecMsg[%d] references %s:%d @ t=%d, not present in target LogsDB", + k, m.ChainID, m.BlockNum, m.Timestamp))) + } + } + } + } + + // Environmental / acyclicity half: Tarjan on the exec-msg graph. + // Nodes are (chain, blockNum). Edges go from an executing message to + // the initiating (chain, blockNum) it points at. + if cycleChain, cycleBlock, ok := findExecMsgCycle(s); ok { + errs = append(errs, newChainErr("I3", cycleChain, fmt.Sprintf( + "executing-message cycle involves chain=%s blockNum=%d", + cycleChain, cycleBlock))) + } + + _ = env // reserved for future env-enriched checks + return errors.Join(errs...) +} + +func initiatingMessageExists(s Snapshot, m ExecutingMessage) bool { + logs, ok := s.LogsDB[m.ChainID] + if !ok { + return false + } + for _, b := range logs { + if b.Ref.ID.Number == m.BlockNum && b.Ref.Time == m.Timestamp { + return true + } + } + return false +} + +// findExecMsgCycle runs an iterative DFS cycle detector over the static +// executing-message dependency graph. Iterative (rather than recursive) +// so a deep graph cannot blow the goroutine stack and panic the +// supernode under runtime assertions. Returns the first node observed on +// a back-edge if any cycle is found. Nodes are identified by +// (chain, blockNum). 
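+//
+// With the adjacency list built up front, the walk is O(total blocks +
+// total executing messages): each node is pushed at most once and each
+// edge is examined exactly once.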
+func findExecMsgCycle(s Snapshot) (eth.ChainID, uint64, bool) { + type node struct { + chain eth.ChainID + num uint64 + } + const ( + white = 0 + gray = 1 + black = 2 + ) + color := make(map[node]int) + + // Build the adjacency list once. The lookup-by-number scan inside the + // per-call neighbors closure was O(n) per visit; this materializes it + // as O(total blocks) once. + adj := make(map[node][]node) + for chain, logs := range s.LogsDB { + for _, b := range logs { + n := node{chain: chain, num: b.Ref.ID.Number} + out := make([]node, 0, len(b.ExecMsgs)) + for _, m := range b.ExecMsgs { + out = append(out, node{chain: m.ChainID, num: m.BlockNum}) + } + adj[n] = out + } + } + + // frame is one entry on the explicit DFS stack. childIdx is the index + // of the next child to visit; when childIdx == len(adj[node]) the + // node is finished and gets colored black on pop. + type frame struct { + n node + childIdx int + } + + for chain, logs := range s.LogsDB { + for _, b := range logs { + start := node{chain: chain, num: b.Ref.ID.Number} + if color[start] != white { + continue + } + stack := []frame{{n: start}} + color[start] = gray + for len(stack) > 0 { + top := &stack[len(stack)-1] + children := adj[top.n] + if top.childIdx >= len(children) { + color[top.n] = black + stack = stack[:len(stack)-1] + continue + } + nx := children[top.childIdx] + top.childIdx++ + switch color[nx] { + case gray: + return nx.chain, nx.num, true + case white: + color[nx] = gray + stack = append(stack, frame{n: nx}) + } + } + } + } + return eth.ChainID{}, 0, false +} + +// ----------------------------------------------------------------------------- +// I7 — Environmental clause: no higher L2 block exists with timestamp <= ts. +// +// SPEC.md I7 second sentence: "All children of C^j_i have timestamp > i." +// The LogsDB-internal half is checked structurally by CheckI7_HighestBlockLeqTS +// in invariants.go. This env predicate handles the "live L2 may have a +// higher block we have not yet imported" case by asking the oracle. +// ----------------------------------------------------------------------------- + +// CheckI7_NoHigherL2Block delegates the "no descendant on the live L2 +// with timestamp <= verified timestamp other than C^j_i" clause to the +// environmental oracle. See SPEC I7 (HYBRID class) and SPEC §4. +func CheckI7_NoHigherL2Block(s Snapshot, env Env) error { + if env == nil { + return nil + } + var errs []error + for vi, v := range s.Verified { + for _, chain := range s.Chains { + head, ok := v.L2Heads[chain] + if !ok { + continue // structural CheckI7 already reports this + } + higher, err := env.HasHigherL2BlockAtOrBelow(chain, head, v.Timestamp) + if err != nil { + errs = append(errs, newIndexedErr("I7", chain, vi, fmt.Sprintf( + "env.HasHigherL2BlockAtOrBelow(%s, %s, %d) failed: %v", + chain, head, v.Timestamp, err))) + continue + } + if higher { + errs = append(errs, newIndexedErr("I7", chain, vi, fmt.Sprintf( + "live L2 has a block strictly higher than C^j_%d=%s with Time<=%d", + vi, head, v.Timestamp))) + } + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I8 — L1 linear-chain ancestry (environmental). +// ----------------------------------------------------------------------------- + +// CheckI8_VerifiedL1Linear walks consecutive Verified entries and asks the +// oracle whether the earlier L1 inclusion is an ancestor of the later one. 
+// This is the ancestry half of I8; the number-monotonicity half is covered +// by CheckI8_VerifiedL1Monotone in invariants.go. +func CheckI8_VerifiedL1Linear(s Snapshot, env Env) error { + if env == nil { + return nil + } + var errs []error + for i := 0; i+1 < len(s.Verified); i++ { + prev := s.Verified[i].L1Inclusion + nxt := s.Verified[i+1].L1Inclusion + if prev == nxt { + continue + } + ok, err := env.IsL1Ancestor(prev, nxt) + if err != nil { + errs = append(errs, newErr("I8", fmt.Sprintf( + "env.IsL1Ancestor(%s, %s) failed: %v", prev, nxt, err))) + continue + } + if !ok { + errs = append(errs, newErr("I8", fmt.Sprintf( + "L1 head %s is not an ancestor of %s", prev, nxt))) + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I9 — Minimal L1 cover. +// ----------------------------------------------------------------------------- + +// CheckI9_MinimalL1Cover uses env.DeriveL1 to verify that the recorded +// L1 inclusion for every Verified entry is the maximum of the DeriveL1 +// results for its L2 heads. +// +// "Maximum" is checked by full BlockID equality (not just .Number) so +// that two distinct L1 blocks at the same height after a reorg do not +// silently pass. Additionally, every per-chain DeriveL1(C^j) must be an +// ancestor of v.L1Inclusion (or equal), validating the "minimal L1 +// covering all heads" half via env.IsL1Ancestor. +func CheckI9_MinimalL1Cover(s Snapshot, env Env) error { + if env == nil { + return nil + } + var errs []error + for i, v := range s.Verified { + var maxL1 eth.BlockID + var maxSeen bool + // Pass 1: find the max-by-number derive among all L2 heads, and + // verify each derive is an ancestor of v.L1Inclusion. + for _, chain := range s.Chains { + head, ok := v.L2Heads[chain] + if !ok { + errs = append(errs, newErr("I9", fmt.Sprintf( + "Verified[%d] missing L2Head for chain %s", i, chain))) + continue + } + l1, err := env.DeriveL1(head) + if err != nil { + errs = append(errs, newErr("I9", fmt.Sprintf( + "Verified[%d] env.DeriveL1(%s) failed: %v", i, head, err))) + continue + } + if !maxSeen || l1.Number > maxL1.Number { + maxL1 = l1 + maxSeen = true + } + // Ancestry check: deriveL1(C^j) must lie on the same linear + // chain as v.L1Inclusion (either equal, or an ancestor). + if l1 != v.L1Inclusion { + ok, ancErr := env.IsL1Ancestor(l1, v.L1Inclusion) + if ancErr != nil { + errs = append(errs, newErr("I9", fmt.Sprintf( + "Verified[%d] env.IsL1Ancestor(%s, %s) failed: %v", + i, l1, v.L1Inclusion, ancErr))) + continue + } + if !ok { + errs = append(errs, newErr("I9", fmt.Sprintf( + "Verified[%d] DeriveL1(C^j on %s)=%s is not an ancestor of L1Inclusion=%s", + i, chain, l1, v.L1Inclusion))) + } + } + } + if !maxSeen { + continue + } + // Pass 2: the recorded L1Inclusion must equal the max DeriveL1 + // (by full BlockID, not just Number) so that two distinct L1 + // blocks at the same height after a reorg are not conflated. + if v.L1Inclusion != maxL1 { + errs = append(errs, newErr("I9", fmt.Sprintf( + "Verified[%d].L1Inclusion=%s != max_j(DeriveL1(C^j))=%s", + i, v.L1Inclusion, maxL1))) + } + } + return errors.Join(errs...) 
+} diff --git a/op-supernode/invariants/env_test.go b/op-supernode/invariants/env_test.go new file mode 100644 index 00000000000..ea7cf9cbbe4 --- /dev/null +++ b/op-supernode/invariants/env_test.go @@ -0,0 +1,369 @@ +package invariants + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// fakeEnv is a stub Env implementation that answers queries from two +// preloaded maps. It's the default fixture for I1/I8/I9 tests. +type fakeEnv struct { + logsAccepted map[eth.BlockID]bool + derive map[eth.BlockID]eth.BlockID + ancestor map[[2]eth.BlockID]bool + higherL2 map[eth.ChainID]map[uint64]bool +} + +func newFakeEnv() *fakeEnv { + return &fakeEnv{ + logsAccepted: map[eth.BlockID]bool{}, + derive: map[eth.BlockID]eth.BlockID{}, + ancestor: map[[2]eth.BlockID]bool{}, + higherL2: map[eth.ChainID]map[uint64]bool{}, + } +} + +func (f *fakeEnv) LogsBelongToBlock(ref BlockRef, _ []ExecutingMessage) error { + if f.logsAccepted[ref.ID] { + return nil + } + return fmt.Errorf("logs for %s not approved by fake env", ref.ID) +} + +func (f *fakeEnv) DeriveL1(l2 eth.BlockID) (eth.BlockID, error) { + l1, ok := f.derive[l2] + if !ok { + return eth.BlockID{}, fmt.Errorf("no derive mapping for %s", l2) + } + return l1, nil +} + +func (f *fakeEnv) IsL1Ancestor(a, d eth.BlockID) (bool, error) { + return f.ancestor[[2]eth.BlockID{a, d}], nil +} + +func (f *fakeEnv) HasHigherL2BlockAtOrBelow(c eth.ChainID, _ eth.BlockID, ts uint64) (bool, error) { + if m, ok := f.higherL2[c]; ok { + return m[ts], nil + } + return false, nil +} + +// ----------------------------------------------------------------------------- +// I1 — logs-match-block env check. +// ----------------------------------------------------------------------------- + +func TestCheckI1_OK(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + } + env := newFakeEnv() + env.logsAccepted[blockID(1, 1)] = true + if err := CheckI1_LogsBelongToBlocks(s, env); err != nil { + t.Fatalf("I1 should pass: %v", err) + } +} + +func TestCheckI1_Reject(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + } + env := newFakeEnv() // empty — rejects every block + err := CheckI1_LogsBelongToBlocks(s, env) + if err == nil || !strings.Contains(err.Error(), "[I1]") { + t.Fatalf("expected I1 failure, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I3 — executing-message validity (state-local + cycle detection). 
+// ----------------------------------------------------------------------------- + +func TestCheckI3_NoMessages_OK(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + } + if err := CheckI3_ExecutingMessageValidity(s, nil); err != nil { + t.Fatalf("I3 should pass with no messages: %v", err) + } +} + +func TestCheckI3_MissingInitiator(t *testing.T) { + a := BlockWithLogs{ + Ref: BlockRef{ID: blockID(1, 1), Time: 100}, + ExecMsgs: []ExecutingMessage{{ + ChainID: chainB(), + BlockNum: 999, // not in chainB's LogsDB + LogIdx: 0, + Timestamp: 100, + }}, + } + s := Snapshot{ + Chains: []eth.ChainID{chainA(), chainB()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {a}, + chainB(): {}, + }, + } + err := CheckI3_ExecutingMessageValidity(s, nil) + if err == nil || !strings.Contains(err.Error(), "[I3]") { + t.Fatalf("expected I3 failure, got: %v", err) + } +} + +func TestCheckI3_Cycle(t *testing.T) { + // Two chains, each with one block, and each references the other's + // block at the same timestamp — a 2-cycle. + a := BlockWithLogs{ + Ref: BlockRef{ID: blockID(1, 1), Time: 100}, + ExecMsgs: []ExecutingMessage{{ + ChainID: chainB(), BlockNum: 1, LogIdx: 0, Timestamp: 100, + }}, + } + b := BlockWithLogs{ + Ref: BlockRef{ID: blockID(2, 1), Time: 100}, + ExecMsgs: []ExecutingMessage{{ + ChainID: chainA(), BlockNum: 1, LogIdx: 0, Timestamp: 100, + }}, + } + s := Snapshot{ + Chains: []eth.ChainID{chainA(), chainB()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {a}, + chainB(): {b}, + }, + } + err := CheckI3_ExecutingMessageValidity(s, nil) + if err == nil || !strings.Contains(err.Error(), "cycle") { + t.Fatalf("expected cycle detection, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I8 — L1 ancestry. +// ----------------------------------------------------------------------------- + +func TestCheckI8Linear_OK(t *testing.T) { + s := Snapshot{ + Verified: []VerifiedEntry{ + {L1Inclusion: blockID(10, 1000)}, + {L1Inclusion: blockID(11, 1001)}, + }, + } + env := newFakeEnv() + env.ancestor[[2]eth.BlockID{blockID(10, 1000), blockID(11, 1001)}] = true + if err := CheckI8_VerifiedL1Linear(s, env); err != nil { + t.Fatalf("I8 should pass: %v", err) + } +} + +func TestCheckI8Linear_NotAncestor(t *testing.T) { + s := Snapshot{ + Verified: []VerifiedEntry{ + {L1Inclusion: blockID(10, 1000)}, + {L1Inclusion: blockID(11, 1001)}, + }, + } + env := newFakeEnv() // ancestor map is empty => returns false + err := CheckI8_VerifiedL1Linear(s, env) + if err == nil || !strings.Contains(err.Error(), "[I8]") { + t.Fatalf("expected I8 ancestor failure, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I9 — minimal L1 cover. +// ----------------------------------------------------------------------------- + +func TestCheckI9_Cover_OK(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA(), chainB()}, + Verified: []VerifiedEntry{{ + Timestamp: 100, + L1Inclusion: blockID(11, 1001), + L2Heads: map[eth.ChainID]eth.BlockID{ + chainA(): blockID(1, 1), + chainB(): blockID(2, 2), + }, + }}, + } + env := newFakeEnv() + env.derive[blockID(1, 1)] = blockID(10, 1000) + env.derive[blockID(2, 2)] = blockID(11, 1001) // higher number; becomes the max + // I9 now requires every per-chain DeriveL1 to be an ancestor (or + // equal to) the recorded L1Inclusion. 
blockID(10,1000) must be an + // ancestor of blockID(11,1001). + env.ancestor[[2]eth.BlockID{blockID(10, 1000), blockID(11, 1001)}] = true + if err := CheckI9_MinimalL1Cover(s, env); err != nil { + t.Fatalf("I9 should pass: %v", err) + } +} + +func TestCheckI9_Cover_WrongMax(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA(), chainB()}, + Verified: []VerifiedEntry{{ + L1Inclusion: blockID(10, 1000), // claims 1000 + L2Heads: map[eth.ChainID]eth.BlockID{ + chainA(): blockID(1, 1), + chainB(): blockID(2, 2), + }, + }}, + } + env := newFakeEnv() + env.derive[blockID(1, 1)] = blockID(10, 1000) + env.derive[blockID(2, 2)] = blockID(11, 1001) // but chain B derives from 1001 + err := CheckI9_MinimalL1Cover(s, env) + if err == nil || !strings.Contains(err.Error(), "[I9]") { + t.Fatalf("expected I9 max-mismatch, got: %v", err) + } +} + +// TestCheckI9_DifferentBlockSameNumber regression-tests the strengthened +// I9 check: a recorded L1Inclusion that has the same .Number as the max +// DeriveL1 but a different hash (post-reorg) must NOT pass I9. +func TestCheckI9_DifferentBlockSameNumber(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + Verified: []VerifiedEntry{{ + Timestamp: 100, + L1Inclusion: blockID(0xAA, 1001), // hash-byte 0xAA at height 1001 + L2Heads: map[eth.ChainID]eth.BlockID{ + chainA(): blockID(1, 1), + }, + }}, + } + env := newFakeEnv() + // DeriveL1 returns a sibling at the same height with a different hash. + env.derive[blockID(1, 1)] = blockID(0xBB, 1001) + err := CheckI9_MinimalL1Cover(s, env) + if err == nil || !strings.Contains(err.Error(), "[I9]") { + t.Fatalf("expected I9 hash-mismatch failure, got: %v", err) + } +} + +// TestCheckI9_DeriveNotAncestor exercises the new ancestry clause: a +// per-chain DeriveL1 that is not an ancestor of L1Inclusion must fail. +func TestCheckI9_DeriveNotAncestor(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA(), chainB()}, + Verified: []VerifiedEntry{{ + L1Inclusion: blockID(11, 1001), + L2Heads: map[eth.ChainID]eth.BlockID{ + chainA(): blockID(1, 1), + chainB(): blockID(2, 2), + }, + }}, + } + env := newFakeEnv() + env.derive[blockID(1, 1)] = blockID(10, 1000) + env.derive[blockID(2, 2)] = blockID(11, 1001) + // blockID(10,1000) is NOT marked as an ancestor of blockID(11,1001). + err := CheckI9_MinimalL1Cover(s, env) + if err == nil || !strings.Contains(err.Error(), "ancestor") { + t.Fatalf("expected ancestry failure, got: %v", err) + } +} + +// TestCheckI7_NoHigherL2Block_OK exercises the new env clause for I7 +// when the oracle says no higher block exists. +func TestCheckI7_NoHigherL2Block_OK(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + Verified: []VerifiedEntry{{ + Timestamp: 100, + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}, + }}, + } + env := newFakeEnv() // higherL2 map empty -> false + if err := CheckI7_NoHigherL2Block(s, env); err != nil { + t.Fatalf("I7 env clause should pass: %v", err) + } +} + +// TestCheckI7_NoHigherL2Block_Fail exercises the failure path: oracle +// reports a higher live L2 block that should have been imported. 
+func TestCheckI7_NoHigherL2Block_Fail(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + Verified: []VerifiedEntry{{ + Timestamp: 100, + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}, + }}, + } + env := newFakeEnv() + env.higherL2[chainA()] = map[uint64]bool{100: true} + err := CheckI7_NoHigherL2Block(s, env) + if err == nil || !strings.Contains(err.Error(), "[I7]") { + t.Fatalf("expected I7 env failure, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// CheckAllWithEnv composition. +// ----------------------------------------------------------------------------- + +func TestCheckAllWithEnv_NilEnv(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + if err := CheckAllWithEnv(s, nil); err != nil { + t.Fatalf("initial state with nil env should pass: %v", err) + } +} + +func TestCheckAllWithEnv_CompositeFailure(t *testing.T) { + // Build a state that violates I2 AND I1 simultaneously. + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 99, 2, 101), // I2 parent-hash violation + }, + }, + } + env := newFakeEnv() // rejects everything => I1 violation + err := CheckAllWithEnv(s, env) + if err == nil { + t.Fatal("expected compound failure") + } + var ie *Error + foundI1, foundI2 := false, false + type multi interface{ Unwrap() []error } + var walk func(error) + walk = func(e error) { + if e == nil { + return + } + if errors.As(e, &ie) { + switch ie.ID { + case "I1": + foundI1 = true + case "I2": + foundI2 = true + } + ie = nil + } + if m, ok := e.(multi); ok { + for _, c := range m.Unwrap() { + walk(c) + } + } + } + walk(err) + if !foundI1 || !foundI2 { + t.Fatalf("expected both I1 and I2 in joined error, got: %v", err) + } +} diff --git a/op-supernode/invariants/fuzz_test.go b/op-supernode/invariants/fuzz_test.go new file mode 100644 index 00000000000..15c88e085c3 --- /dev/null +++ b/op-supernode/invariants/fuzz_test.go @@ -0,0 +1,376 @@ +package invariants + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// FuzzReferenceModel drives the reference model (T3/T4/T5) with a random +// sequence of operations decoded from fuzz bytes. After every successful +// transition the snapshot must satisfy CheckAll. A failing seed indicates +// either (a) a reference-model bug, (b) a predicate bug, or (c) a SPEC +// inconsistency — any of which is a real finding. +// +// Run with: +// go test -run=^$ -fuzz=FuzzReferenceModel -fuzztime=30s ./invariants/ +// +// The byte stream is decoded as a sequence of ops, each framed as: +// opByte: 0 -> Advance(new hash byte, has-move) +// 1 -> Invalidate(hash byte) +// 2 -> Rewind +// 3..255 -> no-op (skip) +// Any out-of-bound consumption ends the sequence. +func FuzzReferenceModel(f *testing.F) { + // Seeds chosen to exercise T5, T4, T3, and mixed sequences. + f.Add([]byte{0x00, 0x01}) // single advance + f.Add([]byte{0x00, 0x01, 0x00, 0x02, 0x00, 0x03}) // three advances + f.Add([]byte{0x00, 0x01, 0x00, 0x02, 0x02}) // advance x2, rewind + f.Add([]byte{0x00, 0x01, 0x01, 0x99, 0x02}) // advance, invalidate, rewind + f.Add([]byte{0x00, 0x01, 0x00, 0x02, 0x01, 0x99, 0x00, 0x03}) + + f.Fuzz(func(t *testing.T, ops []byte) { + // Cap the number of operations processed to keep per-seed memory + // and runtime bounded. 
Without this, a pathological input of 10k+ + // Advance bytes can OOM the fuzz worker before any assertion is + // evaluated. + const maxOps = 512 + if len(ops) > maxOps*2 { + ops = ops[:maxOps*2] + } + + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + + // Track a monotonic block-number counter so Advance always produces + // a properly-chained successor. The "new hash byte" from the fuzz + // input is the identity of the new block; the parent is whatever + // the current LogsDB tail is. + var nextNum uint64 = 1 + var currentTime uint64 = 100 + prevHashByte := byte(0) // genesis parent hash + + i := 0 + applied := 0 + for i < len(ops) && applied < maxOps { + applied++ + op := ops[i] + i++ + switch op { + case 0: // Advance + if i >= len(ops) { + return + } + newHashByte := ops[i] + i++ + + newL1 := blockID(byte(nextNum%255), 1000+nextNum) + newBlock := block(newHashByte, prevHashByte, nextNum, currentTime) + + next, err := ApplyAdvance(s, newL1, + map[eth.ChainID]BlockWithLogs{chainA(): newBlock}) + if err != nil { + // Overflow or degenerate input; stop exploration. + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("invariants violated after advance hash=%d: %v", newHashByte, inv) + } + s = next + nextNum++ + currentTime++ + prevHashByte = newHashByte + + case 1: // Invalidate + if i >= len(ops) { + return + } + invalidHashByte := ops[i] + i++ + if len(s.Verified) == 0 { + continue // invalid; would error + } + next, err := ApplyInvalidate(s, map[eth.ChainID]eth.BlockID{ + chainA(): blockID(invalidHashByte, nextNum), + }) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("invariants violated after invalidate hash=%d: %v", invalidHashByte, inv) + } + s = next + + case 2: // Rewind + if len(s.Verified) == 0 { + continue + } + next, err := ApplyRewind(s) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("invariants violated after rewind: %v", inv) + } + // After rewind, the LogsDB tail may have been popped. + // Re-sync the fuzzer's local counters. + logs := next.LogsDB[chainA()] + if len(logs) == 0 { + nextNum = 1 + currentTime = 100 + prevHashByte = 0 + } else { + tail := logs[len(logs)-1] + nextNum = tail.Ref.ID.Number + 1 + currentTime = tail.Ref.Time + 1 + // Recover prevHashByte from the tail ID (last byte). + prevHashByte = tail.Ref.ID.Hash[31] + } + s = next + + default: + // skip / no-op + } + } + }) +} + +// FuzzReferenceModelMultiChain drives the reference model with TWO L2 +// chains advancing in lockstep, with the second chain's blocks carrying +// cross-chain executing messages that reference the first chain's most +// recent block. Exercises: +// +// - I3 state-local (initiating-message presence on a different chain) +// - I3 acyclicity (the iterative DFS over the cross-chain exec graph) +// - I9 (LogsDB shape across multiple chains; max-derive plumbing) +// - I6 (per-chain monotone heads while another chain is also moving) +// +// The single-chain fuzzer above caps these paths at zero; this one +// closes that gap. Operations are framed identically (op byte + arg +// byte) so the corpora are independent. 
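+//
+// Run with:
+// go test -run=^$ -fuzz=FuzzReferenceModelMultiChain -fuzztime=30s ./invariants/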
+func FuzzReferenceModelMultiChain(f *testing.F) { + f.Add([]byte{0x00, 0x01}) + f.Add([]byte{0x00, 0x01, 0x00, 0x02, 0x00, 0x03}) + f.Add([]byte{0x00, 0x01, 0x00, 0x02, 0x02}) + f.Add([]byte{0x00, 0x01, 0x01, 0x99, 0x02}) + f.Add([]byte{0x00, 0x01, 0x00, 0x02, 0x01, 0x99, 0x00, 0x03}) + + f.Fuzz(func(t *testing.T, ops []byte) { + const maxOps = 256 // halved vs single-chain: 2x state per op + if len(ops) > maxOps*2 { + ops = ops[:maxOps*2] + } + + chains := []eth.ChainID{chainA(), chainB()} + s := NewInitialSnapshot(100, chains) + + // Per-chain counters. Both chains advance together at each + // Advance op so timestamps stay aligned. + var nextNum uint64 = 1 + var currentTime uint64 = 100 + // Use distinct hash-byte spaces per chain so blocks never alias. + // Chain A uses (newHashByte | 0x00); chain B uses (0x80). + const chainBHashBit = 0x80 + prevHashByteA := byte(0) + prevHashByteB := byte(0) + + i := 0 + applied := 0 + for i < len(ops) && applied < maxOps { + applied++ + op := ops[i] + i++ + switch op { + case 0: // Advance both chains in lockstep + if i >= len(ops) { + return + } + newHashByte := ops[i] + i++ + + newL1 := blockID(byte(nextNum%127)+1, 1000+nextNum) + + blockA := block(newHashByte&0x7f, prevHashByteA, nextNum, currentTime) + blockB := block(newHashByte|chainBHashBit, prevHashByteB, nextNum, currentTime) + + // Cross-chain executing message: chain B's new block + // references chain A's PREVIOUS block (strictly older + // timestamp). Skipped on the very first advance because + // chain A has no prior block to point at. + if nextNum > 1 { + blockB.ExecMsgs = []ExecutingMessage{{ + ChainID: chainA(), + BlockNum: nextNum - 1, + LogIdx: 0, + Timestamp: currentTime - 1, + }} + } + + next, err := ApplyAdvance(s, newL1, + map[eth.ChainID]BlockWithLogs{ + chainA(): blockA, + chainB(): blockB, + }) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("invariants violated after multichain advance hash=%d: %v", newHashByte, inv) + } + // State-local I3 must hold given the cross-chain exec + // message just inserted. + if i3 := CheckI3_ExecutingMessageValidity(next, nil); i3 != nil { + t.Fatalf("I3 violated after multichain advance: %v", i3) + } + s = next + nextNum++ + currentTime++ + prevHashByteA = newHashByte & 0x7f + prevHashByteB = newHashByte | chainBHashBit + + case 1: // Invalidate chain B's current block + if i >= len(ops) { + return + } + invalidHashByte := ops[i] + i++ + if len(s.Verified) == 0 { + continue + } + next, err := ApplyInvalidate(s, map[eth.ChainID]eth.BlockID{ + chainB(): blockID(invalidHashByte|chainBHashBit, nextNum), + }) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("invariants violated after multichain invalidate hash=%d: %v", invalidHashByte, inv) + } + s = next + + case 2: // Rewind + if len(s.Verified) == 0 { + continue + } + next, err := ApplyRewind(s) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("invariants violated after multichain rewind: %v", inv) + } + // Re-sync local counters from chain A's tail (both + // chains advance in lockstep so their lengths match). 
+ logs := next.LogsDB[chainA()] + if len(logs) == 0 { + nextNum = 1 + currentTime = 100 + prevHashByteA = 0 + prevHashByteB = 0 + } else { + tailA := logs[len(logs)-1] + nextNum = tailA.Ref.ID.Number + 1 + currentTime = tailA.Ref.Time + 1 + prevHashByteA = tailA.Ref.ID.Hash[31] + if logsB := next.LogsDB[chainB()]; len(logsB) > 0 { + prevHashByteB = logsB[len(logsB)-1].Ref.ID.Hash[31] + } else { + prevHashByteB = 0 + } + } + s = next + + default: + // skip + } + } + }) +} + +// Property test: seeded RNG equivalent that runs a small deterministic +// sample without relying on Go's fuzz infrastructure. Useful in CI that +// doesn't run -fuzz. +func TestReferenceModel_PropertySample(t *testing.T) { + seeds := [][]byte{ + {0x00, 0x01}, + {0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04}, + {0x00, 0x01, 0x00, 0x02, 0x02}, + {0x00, 0x01, 0x01, 0x99, 0x02}, + {0x00, 0x01, 0x00, 0x02, 0x01, 0x99, 0x00, 0x03}, + {0x00, 0x01, 0x00, 0x02, 0x02, 0x00, 0x03, 0x02, 0x00, 0x04}, + } + for si, seed := range seeds { + si := si + seed := seed + t.Run("", func(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + var nextNum uint64 = 1 + var currentTime uint64 = 100 + prevHashByte := byte(0) + for i := 0; i < len(seed); { + op := seed[i] + i++ + switch op { + case 0: + if i >= len(seed) { + break + } + newHashByte := seed[i] + i++ + newL1 := blockID(byte(nextNum%255), 1000+nextNum) + newBlock := block(newHashByte, prevHashByte, nextNum, currentTime) + next, err := ApplyAdvance(s, newL1, + map[eth.ChainID]BlockWithLogs{chainA(): newBlock}) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("seed=%d: %v", si, inv) + } + s = next + nextNum++ + currentTime++ + prevHashByte = newHashByte + case 1: + if i >= len(seed) || len(s.Verified) == 0 { + break + } + invalidHashByte := seed[i] + i++ + next, err := ApplyInvalidate(s, map[eth.ChainID]eth.BlockID{ + chainA(): blockID(invalidHashByte, nextNum), + }) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("seed=%d: %v", si, inv) + } + s = next + case 2: + if len(s.Verified) == 0 { + break + } + next, err := ApplyRewind(s) + if err != nil { + return + } + if inv := CheckAll(next); inv != nil { + t.Fatalf("seed=%d: %v", si, inv) + } + logs := next.LogsDB[chainA()] + if len(logs) == 0 { + nextNum = 1 + currentTime = 100 + prevHashByte = 0 + } else { + tail := logs[len(logs)-1] + nextNum = tail.Ref.ID.Number + 1 + currentTime = tail.Ref.Time + 1 + prevHashByte = tail.Ref.ID.Hash[31] + } + s = next + } + } + }) + } +} diff --git a/op-supernode/invariants/invariants.go b/op-supernode/invariants/invariants.go new file mode 100644 index 00000000000..0d5a8671da5 --- /dev/null +++ b/op-supernode/invariants/invariants.go @@ -0,0 +1,339 @@ +package invariants + +import ( + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Error is the structured failure type returned by every CheckI* predicate. +// The SPEC ID (e.g. "I2") is the stable identifier that cross-references +// op-supernode/invariants/SPEC.md and dafny-models/SupernodeState.dfy. +type Error struct { + ID string // SPEC ID, e.g. 
"I2", "I10", "T3" + Chain *eth.ChainID + Index *int + Message string +} + +func (e *Error) Error() string { + prefix := "[" + e.ID + "] " + if e.Chain != nil { + prefix += fmt.Sprintf("chain=%s ", e.Chain.String()) + } + if e.Index != nil { + prefix += fmt.Sprintf("idx=%d ", *e.Index) + } + return prefix + e.Message +} + +func newErr(id, msg string) *Error { + return &Error{ID: id, Message: msg} +} + +func newChainErr(id string, chain eth.ChainID, msg string) *Error { + return &Error{ID: id, Chain: &chain, Message: msg} +} + +func newIndexedErr(id string, chain eth.ChainID, idx int, msg string) *Error { + return &Error{ID: id, Chain: &chain, Index: &idx, Message: msg} +} + +// ----------------------------------------------------------------------------- +// CheckAll — the top-level predicate. Mirrors Dafny's AllInvariants. +// ----------------------------------------------------------------------------- + +// CheckAll runs every applicable invariant on the snapshot and returns a +// joined error if any fail. A nil return means every structural invariant +// held. Environmental invariants (A1..A5) and hybrid-clause halves that +// require live L1/L2 queries are NOT checked here — see CheckAllWithEnv. +func CheckAll(s Snapshot) error { + checks := []func(Snapshot) error{ + CheckI2_LogsDBLinear, + CheckI4_VerifiedAnchor, + CheckI5_HeadMatchesTail, + CheckI6_MonotoneL2Heads, + CheckI7_HighestBlockLeqTS, + CheckI8_VerifiedL1Monotone, + CheckI10_LogsDBDisjointFromDenyList, + CheckI11_DenyListBounded, + } + var errs []error + for _, c := range checks { + if err := c(s); err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I2 — LogsDB linearity. +// SPEC.md I2 / overview.md line 65: B^j_i is the parent of B^j_{i+1}. +// ----------------------------------------------------------------------------- +func CheckI2_LogsDBLinear(s Snapshot) error { + var errs []error + for chain, logs := range s.LogsDB { + for i := 0; i+1 < len(logs); i++ { + cur := logs[i] + nxt := logs[i+1] + if nxt.Ref.ParentHash != cur.Ref.ID.Hash { + errs = append(errs, newIndexedErr("I2", chain, i, + fmt.Sprintf("LogsDB[%s][%d+1].ParentHash=%s != LogsDB[%s][%d].Hash=%s", + chain, i, nxt.Ref.ParentHash, chain, i, cur.Ref.ID.Hash))) + continue + } + if nxt.Ref.ID.Number != cur.Ref.ID.Number+1 { + errs = append(errs, newIndexedErr("I2", chain, i, + fmt.Sprintf("LogsDB[%s][%d+1].Number=%d != LogsDB[%s][%d].Number+1=%d", + chain, i, nxt.Ref.ID.Number, chain, i, cur.Ref.ID.Number+1))) + } + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I4 — VerifiedDB anchor. +// SPEC.md I4 / overview.md line 67: C^j_{t_0} = B^j_0 for all j. 
+// ----------------------------------------------------------------------------- +func CheckI4_VerifiedAnchor(s Snapshot) error { + if len(s.Verified) == 0 { + return nil // vacuous + } + first := s.Verified[0] + if first.Timestamp != s.ActivationTS { + return newErr("I4", fmt.Sprintf( + "Verified[0].Timestamp=%d != ActivationTS=%d", + first.Timestamp, s.ActivationTS)) + } + var errs []error + for _, chain := range s.Chains { + logs, ok := s.LogsDB[chain] + if !ok || len(logs) == 0 { + errs = append(errs, newChainErr("I4", chain, + "LogsDB missing or empty while Verified is non-empty")) + continue + } + head, ok := first.L2Heads[chain] + if !ok { + errs = append(errs, newChainErr("I4", chain, + "Verified[0] missing L2Head for this chain")) + continue + } + if head != logs[0].Ref.ID { + errs = append(errs, newChainErr("I4", chain, fmt.Sprintf( + "C^j_{t_0}=%s != B^j_0=%s", head, logs[0].Ref.ID))) + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I5 — VerifiedDB head = LogsDB tail. +// SPEC.md I5 / overview.md line 68: C^j_t = B^j_{n_j} for all j. +// ----------------------------------------------------------------------------- +func CheckI5_HeadMatchesTail(s Snapshot) error { + if len(s.Verified) == 0 { + return nil // vacuous + } + last := s.Verified[len(s.Verified)-1] + var errs []error + for _, chain := range s.Chains { + logs, ok := s.LogsDB[chain] + if !ok || len(logs) == 0 { + errs = append(errs, newChainErr("I5", chain, + "LogsDB missing or empty while Verified is non-empty")) + continue + } + head, ok := last.L2Heads[chain] + if !ok { + errs = append(errs, newChainErr("I5", chain, + "Verified[last] missing L2Head for this chain")) + continue + } + tail := logs[len(logs)-1].Ref.ID + if head != tail { + errs = append(errs, newChainErr("I5", chain, fmt.Sprintf( + "C^j_t=%s != B^j_{n_j}=%s", head, tail))) + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I6 — Monotone L2 heads across consecutive verified entries. +// SPEC.md I6 / overview.md line 69: C^j_i is C^j_{i+1} or its parent. +// ----------------------------------------------------------------------------- +func CheckI6_MonotoneL2Heads(s Snapshot) error { + var errs []error + for i := 0; i+1 < len(s.Verified); i++ { + prev := s.Verified[i] + nxt := s.Verified[i+1] + for _, chain := range s.Chains { + prevHead, ok1 := prev.L2Heads[chain] + nxtHead, ok2 := nxt.L2Heads[chain] + if !ok1 || !ok2 { + errs = append(errs, newIndexedErr("I6", chain, i, + "missing L2Head in one of the adjacent Verified entries")) + continue + } + if prevHead == nxtHead { + continue // "stay" case + } + logs := s.LogsDB[chain] + nxtBlock, found := LogsDBLookup(logs, nxtHead) + if !found { + errs = append(errs, newIndexedErr("I6", chain, i, fmt.Sprintf( + "C^j_{%d+1}=%s not found in LogsDB", i, nxtHead))) + continue + } + if nxtBlock.Ref.ParentHash != prevHead.Hash { + errs = append(errs, newIndexedErr("I6", chain, i, fmt.Sprintf( + "C^j_{%d+1}.ParentHash=%s != C^j_%d.Hash=%s", + i, nxtBlock.Ref.ParentHash, i, prevHead.Hash))) + continue + } + if nxtBlock.Ref.ID.Number != prevHead.Number+1 { + errs = append(errs, newIndexedErr("I6", chain, i, fmt.Sprintf( + "C^j_{%d+1}.Number=%d != C^j_%d.Number+1=%d", + i, nxtBlock.Ref.ID.Number, i, prevHead.Number+1))) + } + } + } + return errors.Join(errs...) 
+} + +// ----------------------------------------------------------------------------- +// I7 — LogsDB-internal half: C^j_i is the highest block with timestamp <= i. +// +// The "all children have timestamp > i" clause requires live L2 inspection +// and is ENVIRONMENTAL (SPEC.md §4). This check verifies only the LogsDB +// half: C^j_i is in LogsDB[j], has Time <= i, and either is the last entry +// or the next entry has Time > i. +// ----------------------------------------------------------------------------- +func CheckI7_HighestBlockLeqTS(s Snapshot) error { + var errs []error + for vi, v := range s.Verified { + for _, chain := range s.Chains { + head, ok := v.L2Heads[chain] + if !ok { + errs = append(errs, newIndexedErr("I7", chain, vi, + "Verified entry missing L2Head for this chain")) + continue + } + logs := s.LogsDB[chain] + var headIdx = -1 + for i, b := range logs { + if b.Ref.ID == head { + headIdx = i + break + } + } + if headIdx == -1 { + errs = append(errs, newIndexedErr("I7", chain, vi, fmt.Sprintf( + "C^j_%d=%s not found in LogsDB", vi, head))) + continue + } + if logs[headIdx].Ref.Time > v.Timestamp { + errs = append(errs, newIndexedErr("I7", chain, vi, fmt.Sprintf( + "C^j_%d.Time=%d > VerifiedTimestamp=%d", + vi, logs[headIdx].Ref.Time, v.Timestamp))) + continue + } + // If there's a next block in LogsDB, its Time must strictly + // exceed the verified timestamp (otherwise a higher LogsDB block + // with Time <= v.Timestamp exists). + if headIdx+1 < len(logs) && logs[headIdx+1].Ref.Time <= v.Timestamp { + errs = append(errs, newIndexedErr("I7", chain, vi, fmt.Sprintf( + "LogsDB entry after C^j_%d has Time=%d <= VerifiedTimestamp=%d", + vi, logs[headIdx+1].Ref.Time, v.Timestamp))) + } + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I8 — L1 inclusion number non-decreasing across consecutive verified entries. +// +// SPEC.md I8 / overview.md line 71: C_{t_0}, ..., C_t are on the same +// linear L1 chain. Ancestry cannot be checked from the snapshot — this is +// the necessary-condition half (number monotonicity). The ancestry half is +// environmental (A2 / SPEC.md §4) and verified by CheckAllWithEnv. +// ----------------------------------------------------------------------------- +func CheckI8_VerifiedL1Monotone(s Snapshot) error { + var errs []error + for i := 0; i+1 < len(s.Verified); i++ { + if s.Verified[i].L1Inclusion.Number > s.Verified[i+1].L1Inclusion.Number { + errs = append(errs, newErr("I8", fmt.Sprintf( + "Verified[%d].L1Inclusion.Number=%d > Verified[%d].L1Inclusion.Number=%d", + i, s.Verified[i].L1Inclusion.Number, + i+1, s.Verified[i+1].L1Inclusion.Number))) + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I10 — LogsDB disjoint from DenyList. +// SPEC.md I10 / overview.md line 73: B^j_i not in D_j for any j and i. +// ----------------------------------------------------------------------------- +func CheckI10_LogsDBDisjointFromDenyList(s Snapshot) error { + var errs []error + for chain, logs := range s.LogsDB { + denied := s.DenyList[chain] + if len(denied) == 0 { + continue + } + // Build a set for O(1) lookup. 
+ deniedSet := make(map[eth.BlockID]struct{}, len(denied)) + for _, d := range denied { + deniedSet[d.Block] = struct{}{} + } + for i, b := range logs { + if _, bad := deniedSet[b.Ref.ID]; bad { + errs = append(errs, newIndexedErr("I10", chain, i, fmt.Sprintf( + "LogsDB[%s][%d].ID=%s appears in DenyList", chain, i, b.Ref.ID))) + } + } + } + return errors.Join(errs...) +} + +// ----------------------------------------------------------------------------- +// I11 — DenyList entries bounded by t + 1. +// SPEC.md I11 / overview.md line 74: DecisionTimestamp <= t + 1 (the next +// timestamp being cross-validated is allowed to have speculative entries). +// ----------------------------------------------------------------------------- +func CheckI11_DenyListBounded(s Snapshot) error { + last, hasLast := s.LastVerifiedTimestamp() + var bound uint64 + if hasLast { + if last == ^uint64(0) { + return newErr("I11", + "VerifiedDB head is MAX_UINT64, t+1 would overflow") + } + bound = last + 1 + } else { + // No cross-validation yet. DenyList entries for the activation + // timestamp are allowed from speculative rounds. + if s.ActivationTS == ^uint64(0) { + return newErr("I11", + "ActivationTS is MAX_UINT64, t_0+1 would overflow") + } + bound = s.ActivationTS + 1 + } + var errs []error + for chain, denied := range s.DenyList { + for i, d := range denied { + if d.DecisionTimestamp > bound { + errs = append(errs, newIndexedErr("I11", chain, i, fmt.Sprintf( + "DecisionTimestamp=%d > bound=%d (last verified t=%d, hasLast=%v)", + d.DecisionTimestamp, bound, last, hasLast))) + } + } + } + return errors.Join(errs...) +} diff --git a/op-supernode/invariants/invariants_test.go b/op-supernode/invariants/invariants_test.go new file mode 100644 index 00000000000..05ac6cbdc61 --- /dev/null +++ b/op-supernode/invariants/invariants_test.go @@ -0,0 +1,414 @@ +package invariants + +import ( + "errors" + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" +) + +// chain helpers -------------------------------------------------------------- + +func chainA() eth.ChainID { return eth.ChainIDFromUInt64(1) } +func chainB() eth.ChainID { return eth.ChainIDFromUInt64(2) } + +func hash(b byte) common.Hash { + var h common.Hash + h[31] = b + return h +} + +func blockID(b byte, num uint64) eth.BlockID { + return eth.BlockID{Hash: hash(b), Number: num} +} + +func block(b, parent byte, num, time uint64) BlockWithLogs { + return BlockWithLogs{ + Ref: BlockRef{ + ID: blockID(b, num), + ParentHash: hash(parent), + Time: time, + }, + } +} + +// ----------------------------------------------------------------------------- +// I12 — initial (empty) state satisfies CheckAll vacuously. +// ----------------------------------------------------------------------------- +func TestI12_EmptyStateVacuous(t *testing.T) { + s := Snapshot{ + ActivationTS: 100, + Chains: []eth.ChainID{chainA(), chainB()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): nil, + chainB(): nil, + }, + DenyList: map[eth.ChainID][]DenyListEntry{ + chainA(): nil, + chainB(): nil, + }, + } + if err := CheckAll(s); err != nil { + t.Fatalf("expected empty state to satisfy all invariants, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I2 — LogsDB linearity. 
+// ----------------------------------------------------------------------------- +func TestI2_Linear_OK(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 10), + block(2, 1, 2, 11), + block(3, 2, 3, 12), + }, + }, + DenyList: map[eth.ChainID][]DenyListEntry{chainA(): nil}, + } + if err := CheckI2_LogsDBLinear(s); err != nil { + t.Fatalf("I2 should pass: %v", err) + } +} + +func TestI2_Linear_BrokenParentHash(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 10), + block(2, 99, 2, 11), // parent hash is 99, not 1 + }, + }, + } + err := CheckI2_LogsDBLinear(s) + if err == nil || !strings.Contains(err.Error(), "[I2]") { + t.Fatalf("expected I2 failure, got: %v", err) + } +} + +func TestI2_Linear_BrokenNumber(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 10), + block(2, 1, 5, 11), // number jumps from 1 to 5 + }, + }, + } + err := CheckI2_LogsDBLinear(s) + if err == nil || !strings.Contains(err.Error(), "[I2]") { + t.Fatalf("expected I2 failure on number mismatch, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I4 — VerifiedDB anchor. +// ----------------------------------------------------------------------------- +func TestI4_Anchor_OK(t *testing.T) { + s := Snapshot{ + ActivationTS: 100, + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + Verified: []VerifiedEntry{{ + Timestamp: 100, + L1Inclusion: blockID(10, 1000), + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}, + }}, + } + if err := CheckI4_VerifiedAnchor(s); err != nil { + t.Fatalf("I4 should pass: %v", err) + } +} + +func TestI4_Anchor_TimestampMismatch(t *testing.T) { + s := Snapshot{ + ActivationTS: 100, + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 99)}, + }, + Verified: []VerifiedEntry{{ + Timestamp: 99, // should be 100 + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}, + }}, + } + if err := CheckI4_VerifiedAnchor(s); err == nil || + !strings.Contains(err.Error(), "[I4]") { + t.Fatalf("expected I4 timestamp mismatch, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I5 — VerifiedDB head = LogsDB tail. 
+// ----------------------------------------------------------------------------- +func TestI5_HeadMatchesTail_OK(t *testing.T) { + s := Snapshot{ + ActivationTS: 100, + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 1, 2, 101), + }, + }, + Verified: []VerifiedEntry{ + { + Timestamp: 100, + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}, + }, + { + Timestamp: 101, + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(2, 2)}, + }, + }, + } + if err := CheckI5_HeadMatchesTail(s); err != nil { + t.Fatalf("I5 should pass: %v", err) + } +} + +func TestI5_HeadMatchesTail_Divergent(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 1, 2, 101), + }, + }, + Verified: []VerifiedEntry{{ + Timestamp: 101, + L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}, // not tail + }}, + } + if err := CheckI5_HeadMatchesTail(s); err == nil || + !strings.Contains(err.Error(), "[I5]") { + t.Fatalf("expected I5 divergence, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I6 — monotone L2 heads. +// ----------------------------------------------------------------------------- +func TestI6_Monotone_StayCase(t *testing.T) { + // L2 produces no block at timestamp 101; C^j stays the same. + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + Verified: []VerifiedEntry{ + {Timestamp: 100, L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}}, + {Timestamp: 101, L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}}, + }, + } + if err := CheckI6_MonotoneL2Heads(s); err != nil { + t.Fatalf("I6 stay-case should pass: %v", err) + } +} + +func TestI6_Monotone_AdvanceCase(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 1, 2, 101), + }, + }, + Verified: []VerifiedEntry{ + {Timestamp: 100, L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}}, + {Timestamp: 101, L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(2, 2)}}, + }, + } + if err := CheckI6_MonotoneL2Heads(s); err != nil { + t.Fatalf("I6 advance-case should pass: %v", err) + } +} + +func TestI6_Monotone_Skip(t *testing.T) { + // L2 head advances by 2 blocks between consecutive verified entries. + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 1, 2, 101), + block(3, 2, 3, 102), + }, + }, + Verified: []VerifiedEntry{ + {Timestamp: 100, L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(1, 1)}}, + {Timestamp: 102, L2Heads: map[eth.ChainID]eth.BlockID{chainA(): blockID(3, 3)}}, + }, + } + if err := CheckI6_MonotoneL2Heads(s); err == nil || + !strings.Contains(err.Error(), "[I6]") { + t.Fatalf("expected I6 failure on 2-block jump, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I8 — L1 inclusion monotonicity. 
+// ----------------------------------------------------------------------------- +func TestI8_L1Monotone_OK(t *testing.T) { + s := Snapshot{ + Verified: []VerifiedEntry{ + {Timestamp: 100, L1Inclusion: blockID(10, 1000)}, + {Timestamp: 101, L1Inclusion: blockID(11, 1000)}, // same number OK + {Timestamp: 102, L1Inclusion: blockID(12, 1001)}, + }, + } + if err := CheckI8_VerifiedL1Monotone(s); err != nil { + t.Fatalf("I8 should pass: %v", err) + } +} + +func TestI8_L1Monotone_Regress(t *testing.T) { + s := Snapshot{ + Verified: []VerifiedEntry{ + {L1Inclusion: blockID(10, 1001)}, + {L1Inclusion: blockID(11, 1000)}, // regression + }, + } + if err := CheckI8_VerifiedL1Monotone(s); err == nil || + !strings.Contains(err.Error(), "[I8]") { + t.Fatalf("expected I8 failure, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I10 — LogsDB disjoint from DenyList. +// ----------------------------------------------------------------------------- +func TestI10_Disjoint_OK(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + DenyList: map[eth.ChainID][]DenyListEntry{ + chainA(): {{Block: blockID(99, 1), DecisionTimestamp: 100}}, + }, + } + if err := CheckI10_LogsDBDisjointFromDenyList(s); err != nil { + t.Fatalf("I10 should pass: %v", err) + } +} + +func TestI10_Disjoint_Violation(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): {block(1, 0, 1, 100)}, + }, + DenyList: map[eth.ChainID][]DenyListEntry{ + chainA(): {{Block: blockID(1, 1), DecisionTimestamp: 100}}, // collides + }, + } + if err := CheckI10_LogsDBDisjointFromDenyList(s); err == nil || + !strings.Contains(err.Error(), "[I10]") { + t.Fatalf("expected I10 failure, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// I11 — DenyList bounded by t + 1. +// ----------------------------------------------------------------------------- +func TestI11_Bounded_OK(t *testing.T) { + s := Snapshot{ + ActivationTS: 100, + Verified: []VerifiedEntry{ + {Timestamp: 100}, + {Timestamp: 101}, + }, + DenyList: map[eth.ChainID][]DenyListEntry{ + chainA(): { + {Block: blockID(1, 1), DecisionTimestamp: 101}, + {Block: blockID(2, 1), DecisionTimestamp: 102}, // t + 1 allowed + }, + }, + } + if err := CheckI11_DenyListBounded(s); err != nil { + t.Fatalf("I11 should pass: %v", err) + } +} + +func TestI11_Bounded_Exceeds(t *testing.T) { + s := Snapshot{ + Verified: []VerifiedEntry{{Timestamp: 101}}, + DenyList: map[eth.ChainID][]DenyListEntry{ + chainA(): {{Block: blockID(1, 1), DecisionTimestamp: 103}}, + }, + } + if err := CheckI11_DenyListBounded(s); err == nil || + !strings.Contains(err.Error(), "[I11]") { + t.Fatalf("expected I11 failure, got: %v", err) + } +} + +// ----------------------------------------------------------------------------- +// CheckAll composition: multiple independent failures are joined. 
+// ----------------------------------------------------------------------------- +func TestCheckAll_JoinsFailures(t *testing.T) { + s := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 99, 2, 101), // I2 failure (parent hash wrong) + }, + }, + DenyList: map[eth.ChainID][]DenyListEntry{ + chainA(): {{Block: blockID(1, 1), DecisionTimestamp: 100}}, // I10 + }, + } + err := CheckAll(s) + if err == nil { + t.Fatal("expected compound failure") + } + msg := err.Error() + if !strings.Contains(msg, "[I2]") { + t.Errorf("expected I2 in joined error: %v", err) + } + if !strings.Contains(msg, "[I10]") { + t.Errorf("expected I10 in joined error: %v", err) + } + // Ensure the returned error is the joined form (errors.Join produces + // an error that wraps each sub-error; errors.Is should find each). + for _, sub := range []string{"I2", "I10"} { + if !containsID(err, sub) { + t.Errorf("errors.Unwrap chain missing ID %q", sub) + } + } +} + +func containsID(err error, id string) bool { + if err == nil { + return false + } + if e, ok := err.(*Error); ok { + return e.ID == id + } + // errors.Join returns an error whose Unwrap() []error enumerates the children. + type multi interface{ Unwrap() []error } + if m, ok := err.(multi); ok { + for _, c := range m.Unwrap() { + if containsID(c, id) { + return true + } + } + } + var e *Error + if errors.As(err, &e) && e.ID == id { + return true + } + return false +} diff --git a/op-supernode/invariants/reference_model.go b/op-supernode/invariants/reference_model.go new file mode 100644 index 00000000000..3804839bffc --- /dev/null +++ b/op-supernode/invariants/reference_model.go @@ -0,0 +1,306 @@ +package invariants + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Reference model — pure Go implementation of the SPEC.md §3 transitions +// T1..T5. This is the "trusted" state machine that fuzz tests and traces +// diff against. Every method is a pure function of its inputs and must +// preserve AllInvariants (CheckAll) given valid inputs. +// +// Scope: T3 (Rollback), T4 (Invalidate), T5 (Advance). T1 and T2 are +// "no state update" — they are represented by the absence of a call, not +// by a method here. + +// CloneSnapshot deep-copies a Snapshot so Apply* functions do not mutate +// their input. Exported so tests and fuzzers can make defensive copies. +// +// "Deep" here means: every map, slice, and slice-of-slice referenced by +// the returned Snapshot is freshly allocated. After CloneSnapshot, the +// caller can mutate any field of the result without aliasing the source +// — including BlockWithLogs.ExecMsgs and VerifiedEntry.L2Heads. +func CloneSnapshot(s Snapshot) Snapshot { + out := Snapshot{ + ActivationTS: s.ActivationTS, + Chains: append([]eth.ChainID{}, s.Chains...), + LogsDB: make(map[eth.ChainID][]BlockWithLogs, len(s.LogsDB)), + Verified: make([]VerifiedEntry, len(s.Verified)), + DenyList: make(map[eth.ChainID][]DenyListEntry, len(s.DenyList)), + } + for k, v := range s.LogsDB { + out.LogsDB[k] = cloneBlockWithLogsSlice(v) + } + for k, v := range s.DenyList { + out.DenyList[k] = append([]DenyListEntry{}, v...) 
+ } + for i, v := range s.Verified { + heads := make(map[eth.ChainID]eth.BlockID, len(v.L2Heads)) + for kk, vv := range v.L2Heads { + heads[kk] = vv + } + out.Verified[i] = VerifiedEntry{ + Timestamp: v.Timestamp, + L1Inclusion: v.L1Inclusion, + L2Heads: heads, + } + } + return out +} + +// cloneBlockWithLogsSlice copies a per-chain LogsDB slice, including the +// per-block ExecMsgs slice. Used by CloneSnapshot and StaticStateView. +func cloneBlockWithLogsSlice(in []BlockWithLogs) []BlockWithLogs { + if in == nil { + return nil + } + out := make([]BlockWithLogs, len(in)) + for i, b := range in { + out[i] = BlockWithLogs{ + Ref: b.Ref, + ExecMsgs: append([]ExecutingMessage{}, b.ExecMsgs...), + } + } + return out +} + +// cloneVerifiedSlice copies a Verified slice including each entry's +// L2Heads map. Used by StaticStateView. +func cloneVerifiedSlice(in []VerifiedEntry) []VerifiedEntry { + if in == nil { + return nil + } + out := make([]VerifiedEntry, len(in)) + for i, v := range in { + heads := make(map[eth.ChainID]eth.BlockID, len(v.L2Heads)) + for k, vv := range v.L2Heads { + heads[k] = vv + } + out[i] = VerifiedEntry{ + Timestamp: v.Timestamp, + L1Inclusion: v.L1Inclusion, + L2Heads: heads, + } + } + return out +} + +// ----------------------------------------------------------------------------- +// T5 — Advance +// +// SPEC.md T5 / overview.md lines 102-104: +// - Verified extended with (t+1, C_{t+1}, C^j_{t+1}) where C^j_{t+1} = B_j +// and C_{t+1} = max_j(B'_j). +// - For every j such that B_j != B^j_{n_j}, L_j is extended with +// (B_j, ℓ(B_j)); otherwise L_j is unchanged. +// ----------------------------------------------------------------------------- + +// ApplyAdvance advances the snapshot by one timestamp by committing a new +// VerifiedEntry and, for each chain whose head changed, extending its +// LogsDB with the new block. +// +// `newHeads[j]` is the full BlockWithLogs for chain j's new head. If a +// chain is NOT present in newHeads, its head stays the same (L2 block time +// exceeded the timestamp being verified). +func ApplyAdvance( + s Snapshot, + newL1 eth.BlockID, + newHeads map[eth.ChainID]BlockWithLogs, +) (Snapshot, error) { + var t uint64 + if len(s.Verified) == 0 { + t = s.ActivationTS + } else { + last := s.Verified[len(s.Verified)-1].Timestamp + if last == ^uint64(0) { + return s, fmt.Errorf("advance: t+1 would overflow uint64") + } + t = last + 1 + } + + out := CloneSnapshot(s) + entry := VerifiedEntry{ + Timestamp: t, + L1Inclusion: newL1, + L2Heads: make(map[eth.ChainID]eth.BlockID, len(s.Chains)), + } + + for _, chain := range s.Chains { + newBlock, moved := newHeads[chain] + if moved { + // SPEC A4: the VirtualNode never hands a denied block to the + // Supernode. The reference model enforces this at the + // precondition level rather than silently accepting an I10 + // violation. + for _, d := range out.DenyList[chain] { + if d.Block == newBlock.Ref.ID { + return s, fmt.Errorf( + "advance: chain %s block %s is in DenyList (A4 violation)", + chain, newBlock.Ref.ID) + } + } + entry.L2Heads[chain] = newBlock.Ref.ID + logs := out.LogsDB[chain] + // Only append if B_j != B^j_{n_j} (SPEC T5 second bullet). + tailMatches := len(logs) > 0 && + logs[len(logs)-1].Ref.ID == newBlock.Ref.ID + if !tailMatches { + out.LogsDB[chain] = append(logs, newBlock) + } + } else { + // Stay case: head is the previous verified head. Requires a + // previous verified entry to copy from. 
+ if len(s.Verified) == 0 { + return s, fmt.Errorf( + "advance at t_0: chain %s has no newHead and no prior Verified", + chain) + } + prev, ok := s.Verified[len(s.Verified)-1].L2Heads[chain] + if !ok { + return s, fmt.Errorf( + "advance: chain %s missing from prior Verified.L2Heads", chain) + } + entry.L2Heads[chain] = prev + } + } + + out.Verified = append(out.Verified, entry) + return out, nil +} + +// ----------------------------------------------------------------------------- +// T4 — Invalidate +// +// SPEC.md T4 / overview.md lines 98-101: +// - Each invalid B_j is added to D_j with DecisionTimestamp = t + 1. +// - Verified is unchanged. +// - LogsDB is unchanged (any speculative additions are rolled back at +// this atomic boundary). +// ----------------------------------------------------------------------------- + +// ApplyInvalidate adds one or more blocks to per-chain DenyLists with the +// current t+1 as the decision timestamp. The input `invalidHeads` names +// the block IDs being invalidated, keyed by chain. +func ApplyInvalidate( + s Snapshot, + invalidHeads map[eth.ChainID]eth.BlockID, +) (Snapshot, error) { + if len(s.Verified) == 0 { + return s, fmt.Errorf("invalidate: no prior verification (t is undefined)") + } + t := s.Verified[len(s.Verified)-1].Timestamp + if t == ^uint64(0) { + return s, fmt.Errorf("invalidate: t+1 would overflow uint64") + } + + out := CloneSnapshot(s) + for chain, blockID := range invalidHeads { + entry := DenyListEntry{ + Block: blockID, + DecisionTimestamp: t + 1, + } + // T4 should be idempotent: invalidating the same block twice in + // the same round must not double-record it. Skip if an entry with + // the same (Block, DecisionTimestamp) is already present. + dup := false + for _, existing := range out.DenyList[chain] { + if existing == entry { + dup = true + break + } + } + if dup { + continue + } + out.DenyList[chain] = append(out.DenyList[chain], entry) + } + return out, nil +} + +// ----------------------------------------------------------------------------- +// T3 — Rollback +// +// SPEC.md T3 / overview.md lines 94-97: +// - Prune Verified by removing the last entry (t, C_t, {C^j_t}). +// - Prune D_j by removing all entries with DecisionTimestamp >= t. +// - For every chain j such that C^j_{t-1} != B^j_{n_j}, prune L_j by +// removing its last entry. +// ----------------------------------------------------------------------------- + +// ApplyRewind rolls back the most recently committed VerifiedEntry and +// performs the associated DenyList + LogsDB pruning per SPEC T3. +func ApplyRewind(s Snapshot) (Snapshot, error) { + if len(s.Verified) == 0 { + return s, fmt.Errorf("rewind: Verified is empty") + } + out := CloneSnapshot(s) + removed := out.Verified[len(out.Verified)-1] + t := removed.Timestamp + out.Verified = out.Verified[:len(out.Verified)-1] + + // Prune DenyList: remove any entry whose DecisionTimestamp >= t. + for chain, denied := range out.DenyList { + kept := denied[:0:0] + for _, d := range denied { + if d.DecisionTimestamp < t { + kept = append(kept, d) + } + } + out.DenyList[chain] = kept + } + + // Prune LogsDB tails: only pop if the (now-new) last verified head does + // not equal the LogsDB tail. If Verified became empty, pop the tail + // unconditionally (no "previous head" to compare against). 
+ for _, chain := range s.Chains { + logs := out.LogsDB[chain] + if len(logs) == 0 { + continue + } + tail := logs[len(logs)-1].Ref.ID + + var popTail bool + if len(out.Verified) == 0 { + // No prior verified entry: the tail was added in the round we + // just rolled back. + popTail = true + } else { + prev, ok := out.Verified[len(out.Verified)-1].L2Heads[chain] + if !ok || prev != tail { + popTail = true + } + } + if popTail { + out.LogsDB[chain] = logs[:len(logs)-1] + } + } + + return out, nil +} + +// ----------------------------------------------------------------------------- +// Initial state constructor +// +// For tests and fuzzers. Constructs a Snapshot satisfying I12 (initial +// state trivially satisfies all structural invariants). No VerifiedDB +// anchor yet — the first ApplyAdvance creates it. +// ----------------------------------------------------------------------------- + +// NewInitialSnapshot constructs an empty snapshot for the given chains and +// activation timestamp. LogsDB and DenyList start empty for every chain. +// No Verified entries. Mirrors the Dafny IsInitialState predicate. +func NewInitialSnapshot(activationTS uint64, chains []eth.ChainID) Snapshot { + s := Snapshot{ + ActivationTS: activationTS, + Chains: append([]eth.ChainID{}, chains...), + LogsDB: make(map[eth.ChainID][]BlockWithLogs, len(chains)), + DenyList: make(map[eth.ChainID][]DenyListEntry, len(chains)), + } + for _, c := range chains { + s.LogsDB[c] = nil + s.DenyList[c] = nil + } + return s +} diff --git a/op-supernode/invariants/reference_model_test.go b/op-supernode/invariants/reference_model_test.go new file mode 100644 index 00000000000..16fa0b84c86 --- /dev/null +++ b/op-supernode/invariants/reference_model_test.go @@ -0,0 +1,203 @@ +package invariants + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// ----------------------------------------------------------------------------- +// End-to-end scenario: start empty, perform a genuine sequence of T5 / T4 / +// T3 transitions, and assert CheckAll holds at every step. This exercises +// the reference model and the invariant predicates jointly. +// ----------------------------------------------------------------------------- + +func TestReferenceModel_AdvanceFromInitial(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + if err := CheckAll(s); err != nil { + t.Fatalf("initial state must satisfy invariants: %v", err) + } + + // Advance at t_0 = 100. Supply genesis block. + s, err := ApplyAdvance(s, + blockID(10, 1000), + map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + if err != nil { + t.Fatalf("advance failed: %v", err) + } + if err := CheckAll(s); err != nil { + t.Fatalf("post-advance invariants failed: %v", err) + } + if len(s.Verified) != 1 || s.Verified[0].Timestamp != 100 { + t.Fatalf("expected one verified entry at t=100, got %+v", s.Verified) + } + if len(s.LogsDB[chainA()]) != 1 { + t.Fatalf("expected LogsDB length 1, got %d", len(s.LogsDB[chainA()])) + } +} + +func TestReferenceModel_AdvanceManyTimestamps(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s, err := ApplyAdvance(s, blockID(10, 1000), map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + mustOK(t, err, s) + + // Advance five more timestamps, one new block each. 
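+ // Everything is derived from the timestamp i: the new block's hash byte
+ // and number are i-99 (extending the previous block i-100), its time is
+ // i, and the L1 inclusion block is numbered 1000+(i-100) so I8 stays
+ // monotone.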
+ for i := uint64(101); i <= 105; i++ { + prevBlock := byte(i - 100) + newBlock := byte(i - 99) + s, err = ApplyAdvance(s, + blockID(byte(i-90), 1000+(i-100)), + map[eth.ChainID]BlockWithLogs{ + chainA(): block(newBlock, prevBlock, i-99, i), + }) + mustOK(t, err, s) + } + + if len(s.LogsDB[chainA()]) != 6 { + t.Fatalf("expected LogsDB length 6, got %d", len(s.LogsDB[chainA()])) + } + if len(s.Verified) != 6 { + t.Fatalf("expected 6 verified entries, got %d", len(s.Verified)) + } +} + +func TestReferenceModel_AdvanceStayCase(t *testing.T) { + // Chain produces no new block at the next timestamp; head should stay. + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s, err := ApplyAdvance(s, blockID(10, 1000), map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + mustOK(t, err, s) + + // Advance with empty newHeads => stay case. + s, err = ApplyAdvance(s, blockID(11, 1001), map[eth.ChainID]BlockWithLogs{}) + mustOK(t, err, s) + + if len(s.LogsDB[chainA()]) != 1 { + t.Fatalf("stay-case should not extend LogsDB, got len %d", len(s.LogsDB[chainA()])) + } + if s.Verified[1].L2Heads[chainA()] != blockID(1, 1) { + t.Fatalf("stay-case should keep head, got %v", s.Verified[1].L2Heads[chainA()]) + } +} + +func TestReferenceModel_Invalidate(t *testing.T) { + // After one successful advance, invalidate a block at the next timestamp. + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s, err := ApplyAdvance(s, blockID(10, 1000), map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + mustOK(t, err, s) + + s, err = ApplyInvalidate(s, map[eth.ChainID]eth.BlockID{ + chainA(): blockID(2, 2), + }) + mustOK(t, err, s) + + if len(s.DenyList[chainA()]) != 1 { + t.Fatalf("expected one deny entry, got %d", len(s.DenyList[chainA()])) + } + if s.DenyList[chainA()][0].DecisionTimestamp != 101 { + t.Fatalf("expected DecisionTS=101, got %d", s.DenyList[chainA()][0].DecisionTimestamp) + } + // Verified should not have grown. + if len(s.Verified) != 1 { + t.Fatalf("invalidate should not extend Verified, got len %d", len(s.Verified)) + } +} + +func TestReferenceModel_Rewind(t *testing.T) { + // Build: advance twice, then rewind once. After rewind we should be + // back to the one-entry state. + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s, err := ApplyAdvance(s, blockID(10, 1000), map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + mustOK(t, err, s) + s, err = ApplyAdvance(s, blockID(11, 1001), map[eth.ChainID]BlockWithLogs{ + chainA(): block(2, 1, 2, 101), + }) + mustOK(t, err, s) + + if len(s.LogsDB[chainA()]) != 2 { + t.Fatalf("expected LogsDB len 2 pre-rewind, got %d", len(s.LogsDB[chainA()])) + } + + s, err = ApplyRewind(s) + mustOK(t, err, s) + + if len(s.Verified) != 1 { + t.Fatalf("expected 1 verified entry post-rewind, got %d", len(s.Verified)) + } + if len(s.LogsDB[chainA()]) != 1 { + t.Fatalf("expected LogsDB len 1 post-rewind, got %d", len(s.LogsDB[chainA()])) + } +} + +func TestReferenceModel_RewindAfterDeny(t *testing.T) { + // Advance, invalidate (deny at t+1), rewind: the deny entry should be + // pruned because its DecisionTimestamp >= t being rewound. 
+ s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s, err := ApplyAdvance(s, blockID(10, 1000), map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + mustOK(t, err, s) + s, err = ApplyAdvance(s, blockID(11, 1001), map[eth.ChainID]BlockWithLogs{ + chainA(): block(2, 1, 2, 101), + }) + mustOK(t, err, s) + // At t = 101, invalidate a speculative block for t+1 = 102. + s, err = ApplyInvalidate(s, map[eth.ChainID]eth.BlockID{ + chainA(): blockID(99, 3), + }) + mustOK(t, err, s) + if len(s.DenyList[chainA()]) != 1 { + t.Fatalf("pre-rewind deny list len %d, want 1", len(s.DenyList[chainA()])) + } + + // Now rewind t = 101. DecisionTS = 102 >= 101 => pruned. + s, err = ApplyRewind(s) + mustOK(t, err, s) + + if len(s.DenyList[chainA()]) != 0 { + t.Fatalf("post-rewind deny list len %d, want 0", len(s.DenyList[chainA()])) + } +} + +// ----------------------------------------------------------------------------- +// Error conditions +// ----------------------------------------------------------------------------- + +func TestReferenceModel_RewindEmpty(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + if _, err := ApplyRewind(s); err == nil { + t.Fatal("expected rewind of empty Verified to error") + } +} + +func TestReferenceModel_InvalidateNoVerified(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + if _, err := ApplyInvalidate(s, map[eth.ChainID]eth.BlockID{ + chainA(): blockID(1, 1), + }); err == nil { + t.Fatal("expected invalidate on empty Verified to error") + } +} + +// ----------------------------------------------------------------------------- +// Helper +// ----------------------------------------------------------------------------- + +func mustOK(t *testing.T, err error, s Snapshot) { + t.Helper() + if err != nil { + t.Fatalf("apply failed: %v", err) + } + if inv := CheckAll(s); inv != nil { + t.Fatalf("invariants violated after apply: %v", inv) + } +} diff --git a/op-supernode/invariants/runtime_off.go b/op-supernode/invariants/runtime_off.go new file mode 100644 index 00000000000..2c183f68855 --- /dev/null +++ b/op-supernode/invariants/runtime_off.go @@ -0,0 +1,28 @@ +//go:build !supernode_invariants + +package invariants + +// Runtime assertions are DISABLED in this build. +// +// All Assert* entry points compile to no-ops with zero allocations so +// that production binaries carry no runtime overhead from the invariant +// machinery. Enable by passing -tags=supernode_invariants to go build or +// go test. + +const AssertionsEnabled = false + +// Assert is a no-op when the supernode_invariants build tag is not set. +func Assert(s Snapshot) {} + +// AssertWith is a no-op when the supernode_invariants build tag is not set. +// The builder is NOT invoked, so any side effects inside it are skipped. +func AssertWith(build func() Snapshot) {} + +// SetPanicOnFailure is a no-op in release builds. +func SetPanicOnFailure(panic bool) {} + +// SetFailureSink is a no-op in release builds. +func SetFailureSink(sink func(err error)) {} + +// AssertionFailureCount always returns zero in release builds. +func AssertionFailureCount() uint64 { return 0 } diff --git a/op-supernode/invariants/runtime_on.go b/op-supernode/invariants/runtime_on.go new file mode 100644 index 00000000000..fc5b6ff2052 --- /dev/null +++ b/op-supernode/invariants/runtime_on.go @@ -0,0 +1,104 @@ +//go:build supernode_invariants + +package invariants + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// Runtime assertions are ENABLED in this build. 
+// +// Build with: +// go build -tags=supernode_invariants ./... +// go test -tags=supernode_invariants ./... +// +// Every state-mutating entry point in op-supernode that wants invariant +// coverage should call: +// +// invariants.Assert(sn.SnapshotForInvariants()) +// +// immediately after the mutation is committed. When the build tag is NOT +// set, Assert is a no-op with zero allocation cost (see runtime_off.go). + +const AssertionsEnabled = true + +var ( + // failCount records the total number of invariant violations observed + // since process start. Exposed via AssertionFailureCount so metrics + // and tests can observe it without importing sync/atomic in callers. + failCount atomic.Uint64 + + // panicOnFailure controls whether Assert panics on the first failure + // (for tests / dev loops) or just logs-and-continues (for long-running + // integration runs). Default: panic. + panicOnFailure atomic.Bool + + // failureSink is an optional callback invoked on every failure. Used + // by tests to capture failures without panicking. Protected by + // failureSinkMu because atomic.Value doesn't work for naked funcs and + // atomic.Pointer[func(error)] adds an indirection layer for no win. + failureSinkMu sync.RWMutex + failureSink func(err error) +) + +func init() { + panicOnFailure.Store(true) +} + +// SetPanicOnFailure configures whether Assert panics (true) or logs +// (false) on invariant failure. Default: true. +func SetPanicOnFailure(panic bool) { + panicOnFailure.Store(panic) +} + +// SetFailureSink installs a callback for invariant failures. Calling with +// nil removes the sink. Thread-safe. +func SetFailureSink(sink func(err error)) { + failureSinkMu.Lock() + failureSink = sink + failureSinkMu.Unlock() +} + +// loadFailureSink returns the current failure sink under a read lock. +func loadFailureSink() func(error) { + failureSinkMu.RLock() + defer failureSinkMu.RUnlock() + return failureSink +} + +// AssertionFailureCount returns the total number of invariant violations +// observed by Assert since process start. +func AssertionFailureCount() uint64 { + return failCount.Load() +} + +// Assert runs CheckAll on the given snapshot and reports any failure. +// CheckAll on an empty snapshot is cheap (nanoseconds), so there is no +// fast-path skip — using ActivationTS == 0 as a sentinel would mask I12 +// for any supernode whose activation timestamp is genuinely 0. +func Assert(s Snapshot) { + err := CheckAll(s) + if err == nil { + return + } + failCount.Add(1) + if sink := loadFailureSink(); sink != nil { + sink(err) + } + if panicOnFailure.Load() { + panic(fmt.Sprintf("op-supernode invariant violation: %v", err)) + } +} + +// AssertWith runs CheckAll on a snapshot built lazily by the provided +// builder. Prefer this over Assert when constructing the snapshot is +// expensive, so that the runtime cost is paid only when assertions are +// enabled. 
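+//
+// Illustrative call site (sn.SnapshotForInvariants is the hypothetical
+// accessor named in the package comment above, assumed to return a
+// Snapshot):
+//
+//	invariants.AssertWith(sn.SnapshotForInvariants)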
+func AssertWith(build func() Snapshot) { + if build == nil { + return + } + Assert(build()) +} diff --git a/op-supernode/invariants/runtime_test.go b/op-supernode/invariants/runtime_test.go new file mode 100644 index 00000000000..bd451fb1958 --- /dev/null +++ b/op-supernode/invariants/runtime_test.go @@ -0,0 +1,77 @@ +//go:build supernode_invariants + +package invariants + +import ( + "errors" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// TestAssert_Violation exercises the enabled-build code path: a failing +// snapshot routes through the failureSink and failCount without panicking +// (panicOnFailure is disabled for the duration of the test). +func TestAssert_Violation(t *testing.T) { + SetPanicOnFailure(false) + defer SetPanicOnFailure(true) + + var captured error + SetFailureSink(func(err error) { captured = err }) + defer SetFailureSink(nil) + + before := AssertionFailureCount() + + bad := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 99, 2, 101), // I2 violation + }, + }, + } + Assert(bad) + + if captured == nil { + t.Fatal("failure sink should have been called") + } + var ie *Error + if !errors.As(captured, &ie) { + t.Fatalf("expected *invariants.Error, got %T: %v", captured, captured) + } + if AssertionFailureCount() != before+1 { + t.Fatalf("failure count should have incremented") + } +} + +func TestAssert_OK(t *testing.T) { + SetPanicOnFailure(false) + defer SetPanicOnFailure(true) + before := AssertionFailureCount() + good := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + Assert(good) + if AssertionFailureCount() != before { + t.Fatalf("valid snapshot should not increment failure count") + } +} + +func TestAssert_ZeroSnapshot(t *testing.T) { + // An empty snapshot is a valid initial state and must not be skipped. + // CheckAll runs every predicate; with no chains/logs/verified/deny + // nothing has anything to violate, so the call must not increment + // failCount. + SetPanicOnFailure(false) + defer SetPanicOnFailure(true) + before := AssertionFailureCount() + Assert(Snapshot{}) + if AssertionFailureCount() != before { + t.Fatalf("empty snapshot should satisfy CheckAll") + } +} + +func TestAssertionsEnabledConst(t *testing.T) { + if !AssertionsEnabled { + t.Fatal("AssertionsEnabled should be true under -tags=supernode_invariants") + } +} diff --git a/op-supernode/invariants/snapshot.go b/op-supernode/invariants/snapshot.go new file mode 100644 index 00000000000..4e5338e50d2 --- /dev/null +++ b/op-supernode/invariants/snapshot.go @@ -0,0 +1,114 @@ +package invariants + +import ( + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" +) + +// Snapshot is the pure abstract state consumed by every CheckI* predicate. +// It mirrors the Dafny SupernodeState datatype 1:1; the two MUST stay in +// lockstep. See dafny-models/SupernodeState.dfy and SPEC.md §0. +// +// Snapshot is JSON-serializable so that counter-examples and traces can be +// shared between the Go fuzzer, the reference model, and any external +// replay tool. +type Snapshot struct { + // ActivationTS is t_0 from SPEC.md §0. + ActivationTS uint64 `json:"activationTs"` + + // Chains is the set of L2 chain IDs the Supernode is tracking. It + // corresponds to Dafny's `Chains : set`. + Chains []eth.ChainID `json:"chains"` + + // LogsDB[j] is L_j from SPEC.md §0: the per-chain sequence + // [(B^j_0, l^j_0), ..., (B^j_{n_j}, l^j_{n_j})]. 
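+ //
+ // Entries are ordered oldest-first: I2 requires each consecutive pair to
+ // be parent and child, with the block number increasing by exactly one.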
+ // + // By SPEC invariant I10 and the well-formedness constraint used in the + // Dafny IsInitialState predicate, LogsDB.keys should equal Chains. + LogsDB map[eth.ChainID][]BlockWithLogs `json:"logsDb"` + + // Verified is the VerifiedDB as an ordered slice: + // [(t_0, C_{t_0}, C^1_{t_0}, ..., C^k_{t_0}), + // ..., (t, C_t, C^1_t, ..., C^k_t)]. + // Ascending by Timestamp. The sort order matches + // SupernodeView.dfy :: VerifiedMapToSortedSeq. + Verified []VerifiedEntry `json:"verified"` + + // DenyList[j] is D_j from SPEC.md §0, tracked with explicit decision + // timestamps so I11 is checkable from a pure snapshot (SPEC.md §5 #7). + DenyList map[eth.ChainID][]DenyListEntry `json:"denyList"` +} + +// BlockWithLogs mirrors Dafny's `BlockWithLogs(Ref, ExecMsgs)` datatype. It +// is one LogsDB row. See SPEC.md §0, I1, I2, I3. +type BlockWithLogs struct { + Ref BlockRef `json:"ref"` + ExecMsgs []ExecutingMessage `json:"execMsgs"` +} + +// BlockRef is the minimal block reference needed for invariant checking: +// identity (hash + number), parent hash (for the linear-chain check in I2), +// and timestamp (for I7). It mirrors Dafny's BlockRef. +type BlockRef struct { + ID eth.BlockID `json:"id"` + ParentHash common.Hash `json:"parentHash"` + Time uint64 `json:"time"` +} + +// ExecutingMessage mirrors the Dafny ExecutingMessage datatype. An executing +// message names an initiating log on a different chain by +// (ChainID, BlockNum, LogIdx, Timestamp). See SPEC.md I3. +type ExecutingMessage struct { + ChainID eth.ChainID `json:"chainId"` + BlockNum uint64 `json:"blockNum"` + LogIdx uint32 `json:"logIdx"` + Timestamp uint64 `json:"timestamp"` +} + +// VerifiedEntry mirrors the Dafny VerifiedResult datatype. It is one +// VerifiedDB row at a single timestamp across all tracked chains. +type VerifiedEntry struct { + Timestamp uint64 `json:"timestamp"` + L1Inclusion eth.BlockID `json:"l1Inclusion"` + L2Heads map[eth.ChainID]eth.BlockID `json:"l2Heads"` +} + +// DenyListEntry carries the decision timestamp alongside the block ID so +// I11 can be checked from state alone. See SPEC.md §5 item 7 and Dafny's +// DenyListEntry datatype. +type DenyListEntry struct { + Block eth.BlockID `json:"block"` + DecisionTimestamp uint64 `json:"decisionTimestamp"` +} + +// LastVerifiedTimestamp returns the t from SPEC.md §0 along with a boolean +// indicating whether any cross-validation has occurred. It mirrors the +// Dafny LastVerifiedTimestamp helper. +func (s Snapshot) LastVerifiedTimestamp() (uint64, bool) { + if len(s.Verified) == 0 { + return 0, false + } + return s.Verified[len(s.Verified)-1].Timestamp, true +} + +// LogsDBLookup finds a block in a LogsDB slice by ID. It mirrors the Dafny +// LogsDBLookup function and is used by I6 / I7. +func LogsDBLookup(logs []BlockWithLogs, id eth.BlockID) (BlockWithLogs, bool) { + for _, b := range logs { + if b.Ref.ID == id { + return b, true + } + } + return BlockWithLogs{}, false +} + +// chainsSet returns s.Chains as a set for O(1) membership checks. Used by +// predicates that need to filter LogsDB / DenyList entries by the tracked +// chain set. 
+func (s Snapshot) chainsSet() map[eth.ChainID]struct{} { + m := make(map[eth.ChainID]struct{}, len(s.Chains)) + for _, c := range s.Chains { + m[c] = struct{}{} + } + return m +} diff --git a/op-supernode/invariants/testdata/traces/happy_invalidate_then_continue.json b/op-supernode/invariants/testdata/traces/happy_invalidate_then_continue.json new file mode 100644 index 00000000000..628b30dc81c --- /dev/null +++ b/op-supernode/invariants/testdata/traces/happy_invalidate_then_continue.json @@ -0,0 +1,176 @@ +{ + "description": "advance, invalidate speculative block at t+1, advance with replacement", + "origin": "hand-crafted", + "steps": [ + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": null + }, + "verified": null, + "denyList": { + "1": null + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + } + ], + "denyList": { + "1": [] + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + } + ], + "denyList": { + "1": [ + { + "block": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000063", + "number": 2 + }, + "decisionTimestamp": 101 + } + ] + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + }, + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "time": 101 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + }, + { + "timestamp": 101, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000b", + "number": 1001 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + } + } + } + ], + "denyList": { + "1": [ + { + "block": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000063", 
+ "number": 2 + }, + "decisionTimestamp": 101 + } + ] + } + } + ] +} \ No newline at end of file diff --git a/op-supernode/invariants/testdata/traces/happy_rewind_after_two_advances.json b/op-supernode/invariants/testdata/traces/happy_rewind_after_two_advances.json new file mode 100644 index 00000000000..a2e0e6d17f1 --- /dev/null +++ b/op-supernode/invariants/testdata/traces/happy_rewind_after_two_advances.json @@ -0,0 +1,160 @@ +{ + "description": "two advances followed by T3 rewind back to t=100", + "origin": "hand-crafted", + "steps": [ + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": null + }, + "verified": null, + "denyList": { + "1": null + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + } + ], + "denyList": { + "1": [] + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + }, + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "time": 101 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + }, + { + "timestamp": 101, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000b", + "number": 1001 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + } + } + } + ], + "denyList": { + "1": [] + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + } + ], + "denyList": { + "1": [] + } + } + ] +} \ No newline at end of file diff --git a/op-supernode/invariants/testdata/traces/happy_single_chain_3_advances.json b/op-supernode/invariants/testdata/traces/happy_single_chain_3_advances.json new file mode 100644 index 00000000000..69eae8d0f07 --- /dev/null +++ 
b/op-supernode/invariants/testdata/traces/happy_single_chain_3_advances.json @@ -0,0 +1,208 @@ +{ + "description": "single chain, three T5 advances from t_0=100 through t=102", + "origin": "hand-crafted", + "steps": [ + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": null + }, + "verified": null, + "denyList": { + "1": null + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + } + ], + "denyList": { + "1": [] + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + }, + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "time": 101 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + }, + { + "timestamp": 101, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000b", + "number": 1001 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + } + } + } + ], + "denyList": { + "1": [] + } + }, + { + "activationTs": 100, + "chains": [ + "1" + ], + "logsDb": { + "1": [ + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "time": 100 + }, + "execMsgs": null + }, + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "time": 101 + }, + "execMsgs": null + }, + { + "ref": { + "id": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000003", + "number": 3 + }, + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000002", + "time": 102 + }, + "execMsgs": null + } + ] + }, + "verified": [ + { + "timestamp": 100, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000a", + "number": 1000 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "number": 1 + } + } + }, + { + "timestamp": 101, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000b", + "number": 1001 + }, + "l2Heads": { + "1": { + "hash": 
"0x0000000000000000000000000000000000000000000000000000000000000002", + "number": 2 + } + } + }, + { + "timestamp": 102, + "l1Inclusion": { + "hash": "0x000000000000000000000000000000000000000000000000000000000000000c", + "number": 1002 + }, + "l2Heads": { + "1": { + "hash": "0x0000000000000000000000000000000000000000000000000000000000000003", + "number": 3 + } + } + } + ], + "denyList": { + "1": [] + } + } + ] +} \ No newline at end of file diff --git a/op-supernode/invariants/trace.go b/op-supernode/invariants/trace.go new file mode 100644 index 00000000000..9d53167a872 --- /dev/null +++ b/op-supernode/invariants/trace.go @@ -0,0 +1,166 @@ +package invariants + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" +) + +// Trace is the on-disk format for an invariant-verification test case. It +// records a sequence of snapshots (one per step) plus a human-readable +// description. The Dafny counter-example printer, the Go fuzzer, and the +// live-execution snapshotter all serialize into this format so a single +// replay tool (`TestTraceCorpus`) can verify every source. +// +// On-disk location: invariants/testdata/traces/.json. +type Trace struct { + // Description is a short human note about the origin of the trace: + // "fuzz-FuzzReferenceModel-7419c...", "dafny-counterexample-I6-2026-03-...", + // "prod-op-sepolia-2026-03-15T12:00:00Z", etc. + Description string `json:"description"` + + // Origin names the layer that produced the trace. One of: + // "dafny" | "fuzz" | "reference" | "production" | "hand-crafted". + Origin string `json:"origin"` + + // Steps is the sequence of snapshots, one per transition. The first + // step is the initial state; every subsequent step MUST be the result + // of a single T3/T4/T5 transition from the previous step. + Steps []Snapshot `json:"steps"` + + // ExpectedFailures, if non-empty, names the SPEC invariant IDs that + // SHOULD fail on at least one step of this trace. Used for regression + // traces where the point of the trace is that the failure is caught. + // An empty list means every step must satisfy CheckAll. + ExpectedFailures []string `json:"expectedFailures,omitempty"` +} + +// LoadTrace reads a trace from disk. +func LoadTrace(path string) (*Trace, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read trace %s: %w", path, err) + } + var tr Trace + if err := json.Unmarshal(data, &tr); err != nil { + return nil, fmt.Errorf("parse trace %s: %w", path, err) + } + return &tr, nil +} + +// SaveTrace writes a trace to disk, creating parent directories as needed. +func SaveTrace(path string, tr *Trace) error { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + data, err := json.MarshalIndent(tr, "", " ") + if err != nil { + return err + } + return os.WriteFile(path, data, 0o644) +} + +// VerifyTrace runs CheckAll on every step. Returns (violations found in +// order, error). If ExpectedFailures is non-empty, VerifyTrace instead +// checks that at least one step failed with one of those IDs. +func VerifyTrace(tr *Trace) ([]error, error) { + if tr == nil { + return nil, fmt.Errorf("nil trace") + } + if len(tr.Steps) == 0 { + return nil, fmt.Errorf("trace has no steps") + } + + stepErrs := make([]error, len(tr.Steps)) + for i, step := range tr.Steps { + stepErrs[i] = CheckAll(step) + } + + if len(tr.ExpectedFailures) == 0 { + // Every step must pass. 
+ var violations []error + for i, e := range stepErrs { + if e != nil { + violations = append(violations, + fmt.Errorf("step %d: %w", i, e)) + } + } + if len(violations) > 0 { + return violations, fmt.Errorf("%d step(s) violated invariants", + len(violations)) + } + return nil, nil + } + + // Regression trace: at least one expected failure ID must appear. + expected := make(map[string]struct{}, len(tr.ExpectedFailures)) + for _, id := range tr.ExpectedFailures { + expected[id] = struct{}{} + } + matched := make(map[string]struct{}) + for _, e := range stepErrs { + collectIDs(e, func(id string) { + if _, want := expected[id]; want { + matched[id] = struct{}{} + } + }) + } + if len(matched) == 0 { + return nil, fmt.Errorf("regression trace expected one of %v but none matched", + tr.ExpectedFailures) + } + return nil, nil +} + +// collectIDs walks a joined error tree invoking visit on every *Error's +// ID. Uses errors.As so wrapped errors (e.g. fmt.Errorf("%w", e)) are +// still discovered. +func collectIDs(err error, visit func(string)) { + if err == nil { + return + } + var ie *Error + if errors.As(err, &ie) { + visit(ie.ID) + } + // Walk multi-error trees too: errors.As only finds the first match, so + // for joined errors we need to descend manually. + type multi interface{ Unwrap() []error } + if m, ok := err.(multi); ok { + for _, c := range m.Unwrap() { + collectIDs(c, visit) + } + return + } + // Single-wrap chain: descend via the standard Unwrap to find any + // nested *Error past the first match. + type single interface{ Unwrap() error } + if s, ok := err.(single); ok { + collectIDs(s.Unwrap(), visit) + } +} + +// LoadTraceCorpus loads every *.json file under dir as a Trace, keyed by +// filename (without extension). Non-JSON files are skipped. +func LoadTraceCorpus(dir string) (map[string]*Trace, error) { + entries, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + out := make(map[string]*Trace) + for _, e := range entries { + if e.IsDir() || filepath.Ext(e.Name()) != ".json" { + continue + } + p := filepath.Join(dir, e.Name()) + tr, err := LoadTrace(p) + if err != nil { + return nil, err + } + name := e.Name()[:len(e.Name())-len(".json")] + out[name] = tr + } + return out, nil +} diff --git a/op-supernode/invariants/trace_gen_test.go b/op-supernode/invariants/trace_gen_test.go new file mode 100644 index 00000000000..8d0d2c47c3f --- /dev/null +++ b/op-supernode/invariants/trace_gen_test.go @@ -0,0 +1,150 @@ +package invariants + +import ( + "flag" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// regenTraces, when set, rewrites the canonical traces under +// testdata/traces/. Run with: +// +// go test ./op-supernode/invariants/ -run=TestRegenCanonicalTraces -regen-traces +// +// The regenerator is the canonical writer — hand-edited trace JSON is +// discouraged because the (chain ID, block ID) serialization rules are +// finicky. Use Go constructors and Save. 
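+// The `regen-traces` recipe in op-supernode/justfile wraps the invocation above (it passes -regen-traces to the test binary via -args).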
+var regenTraces = flag.Bool("regen-traces", false, + "rewrite canonical traces under testdata/traces/") + +func TestRegenCanonicalTraces(t *testing.T) { + if !*regenTraces { + t.Skip("pass -regen-traces to rewrite traces") + } + dir := filepath.Join("testdata", "traces") + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatal(err) + } + + for _, c := range canonicalTraces(t) { + p := filepath.Join(dir, c.Name+".json") + if err := SaveTrace(p, c.Trace); err != nil { + t.Fatalf("save %s: %v", p, err) + } + t.Logf("wrote %s", p) + } +} + +// TestCanonicalTracesInline exercises the trace generators themselves so +// they stay correct even if the on-disk files drift. +func TestCanonicalTracesInline(t *testing.T) { + for _, c := range canonicalTraces(t) { + c := c + t.Run(c.Name, func(t *testing.T) { + violations, err := VerifyTrace(c.Trace) + if err != nil { + for _, v := range violations { + t.Log(v) + } + t.Fatalf("trace %q failed: %v", c.Name, err) + } + }) + } +} + +type namedTrace struct { + Name string + Trace *Trace +} + +func canonicalTraces(t *testing.T) []namedTrace { + return []namedTrace{ + {Name: "happy_single_chain_3_advances", Trace: traceHappySingleChain3(t)}, + {Name: "happy_invalidate_then_continue", Trace: traceInvalidateThenContinue(t)}, + {Name: "happy_rewind_after_two_advances", Trace: traceRewindAfterTwoAdvances(t)}, + } +} + +// traceHappySingleChain3: activation at t_0=100, then three advances. +func traceHappySingleChain3(t *testing.T) *Trace { + t.Helper() + s0 := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s1, err := ApplyAdvance(s0, blockID(10, 1000), + map[eth.ChainID]BlockWithLogs{chainA(): block(1, 0, 1, 100)}) + if err != nil { + t.Fatal(err) + } + s2, err := ApplyAdvance(s1, blockID(11, 1001), + map[eth.ChainID]BlockWithLogs{chainA(): block(2, 1, 2, 101)}) + if err != nil { + t.Fatal(err) + } + s3, err := ApplyAdvance(s2, blockID(12, 1002), + map[eth.ChainID]BlockWithLogs{chainA(): block(3, 2, 3, 102)}) + if err != nil { + t.Fatal(err) + } + return &Trace{ + Description: "single chain, three T5 advances from t_0=100 through t=102", + Origin: "hand-crafted", + Steps: []Snapshot{s0, s1, s2, s3}, + } +} + +// traceInvalidateThenContinue: advance, invalidate a speculative block at +// t+1, then advance with a different block at the same timestamp. +func traceInvalidateThenContinue(t *testing.T) *Trace { + t.Helper() + s0 := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s1, err := ApplyAdvance(s0, blockID(10, 1000), + map[eth.ChainID]BlockWithLogs{chainA(): block(1, 0, 1, 100)}) + if err != nil { + t.Fatal(err) + } + // At t=100, invalidate a block that would have been considered at t+1. + s2, err := ApplyInvalidate(s1, map[eth.ChainID]eth.BlockID{ + chainA(): blockID(99, 2), + }) + if err != nil { + t.Fatal(err) + } + // Now advance with a DIFFERENT block at t+1. + s3, err := ApplyAdvance(s2, blockID(11, 1001), + map[eth.ChainID]BlockWithLogs{chainA(): block(2, 1, 2, 101)}) + if err != nil { + t.Fatal(err) + } + return &Trace{ + Description: "advance, invalidate speculative block at t+1, advance with replacement", + Origin: "hand-crafted", + Steps: []Snapshot{s0, s1, s2, s3}, + } +} + +// traceRewindAfterTwoAdvances: build up two advances then rewind one. 
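+// The rewind corresponds to the T3 transition named in the trace description; the resulting step must still satisfy CheckAll like every other step.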
+func traceRewindAfterTwoAdvances(t *testing.T) *Trace { + t.Helper() + s0 := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s1, err := ApplyAdvance(s0, blockID(10, 1000), + map[eth.ChainID]BlockWithLogs{chainA(): block(1, 0, 1, 100)}) + if err != nil { + t.Fatal(err) + } + s2, err := ApplyAdvance(s1, blockID(11, 1001), + map[eth.ChainID]BlockWithLogs{chainA(): block(2, 1, 2, 101)}) + if err != nil { + t.Fatal(err) + } + s3, err := ApplyRewind(s2) + if err != nil { + t.Fatal(err) + } + return &Trace{ + Description: "two advances followed by T3 rewind back to t=100", + Origin: "hand-crafted", + Steps: []Snapshot{s0, s1, s2, s3}, + } +} diff --git a/op-supernode/invariants/trace_test.go b/op-supernode/invariants/trace_test.go new file mode 100644 index 00000000000..b1f69e3c84d --- /dev/null +++ b/op-supernode/invariants/trace_test.go @@ -0,0 +1,117 @@ +package invariants + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// TestTraceCorpus replays every JSON trace under testdata/traces/ through +// VerifyTrace. This is the single regression gate for all invariant +// failures — hand-crafted, fuzz-minimized, Dafny counter-example, and +// production-captured traces all live in that directory and are checked +// here. +func TestTraceCorpus(t *testing.T) { + dir := filepath.Join("testdata", "traces") + if _, err := os.Stat(dir); os.IsNotExist(err) { + t.Skipf("%s does not exist yet", dir) + } + corpus, err := LoadTraceCorpus(dir) + if err != nil { + t.Fatalf("load corpus: %v", err) + } + if len(corpus) == 0 { + t.Skip("no traces in corpus") + } + for name, tr := range corpus { + name := name + tr := tr + t.Run(name, func(t *testing.T) { + violations, err := VerifyTrace(tr) + if err != nil { + for _, v := range violations { + t.Log(v) + } + t.Fatalf("trace %q (%s) failed: %v", name, tr.Origin, err) + } + }) + } +} + +// TestTrace_RoundTrip verifies that a trace serialized to JSON and parsed +// back reproduces the original snapshot bytes-for-bytes. +func TestTrace_RoundTrip(t *testing.T) { + s := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + s, err := ApplyAdvance(s, blockID(10, 1000), map[eth.ChainID]BlockWithLogs{ + chainA(): block(1, 0, 1, 100), + }) + if err != nil { + t.Fatal(err) + } + + tr := &Trace{ + Description: "round-trip test", + Origin: "hand-crafted", + Steps: []Snapshot{NewInitialSnapshot(100, []eth.ChainID{chainA()}), s}, + } + + dir := t.TempDir() + p := filepath.Join(dir, "rt.json") + if err := SaveTrace(p, tr); err != nil { + t.Fatal(err) + } + back, err := LoadTrace(p) + if err != nil { + t.Fatal(err) + } + if back.Description != tr.Description || back.Origin != tr.Origin { + t.Fatalf("metadata drift: %+v", back) + } + if len(back.Steps) != 2 { + t.Fatalf("step count drift: got %d want 2", len(back.Steps)) + } + violations, err := VerifyTrace(back) + if err != nil { + t.Fatalf("round-tripped trace failed: %v %v", violations, err) + } +} + +// TestTrace_ExpectedFailure verifies that a regression trace correctly +// marks its expected invariant ID as satisfied. +func TestTrace_ExpectedFailure(t *testing.T) { + // A snapshot that deliberately violates I2. 
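+	// I2 requires consecutive LogsDB entries to be parent-linked; the second block below is built with parent 99 instead of 1, so CheckAll must flag I2.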
+	bad := Snapshot{ + Chains: []eth.ChainID{chainA()}, + LogsDB: map[eth.ChainID][]BlockWithLogs{ + chainA(): { + block(1, 0, 1, 100), + block(2, 99, 2, 101), + }, + }, + } + tr := &Trace{ + Description: "regression: I2 parent-hash mismatch", + Origin: "hand-crafted", + Steps: []Snapshot{bad}, + ExpectedFailures: []string{"I2"}, + } + if _, err := VerifyTrace(tr); err != nil { + t.Fatalf("regression trace should be accepted: %v", err) + } +} + +func TestTrace_ExpectedFailureMissing(t *testing.T) { + // A snapshot that satisfies every invariant, while the trace claims an + // expected I2 failure. VerifyTrace should report that no expected failure matched. + good := NewInitialSnapshot(100, []eth.ChainID{chainA()}) + tr := &Trace{ + Origin: "hand-crafted", + Steps: []Snapshot{good}, + ExpectedFailures: []string{"I2"}, + } + if _, err := VerifyTrace(tr); err == nil { + t.Fatal("expected regression trace to fail when no expected ID matched") + } +} diff --git a/op-supernode/justfile b/op-supernode/justfile index 97834da91c3..83a3e891726 100644 --- a/op-supernode/justfile +++ b/op-supernode/justfile @@ -25,4 +25,36 @@ coverage: @go tool cover -func=coverage.out | grep '^total:' @go tool cover -func=coverage.out | grep -v '^total:' +# Verify Dafny models. See dafny-models/ and invariants/SPEC.md. +# Requires a local Dafny install (brew install dafny). +verify-dafny: + @command -v dafny >/dev/null 2>&1 || { echo "dafny not installed — see https://dafny.org/"; exit 1; } + dafny verify --verification-time-limit 300 dafny-models/Types.dfy dafny-models/SupernodeState.dfy dafny-models/VerifiedDB.dfy dafny-models/Supernode.dfy dafny-models/SupernodeView.dfy + +# Run the Go invariants package tests. Mirrors the Dafny models; see SPEC.md. +verify-invariants-go: + go test ./invariants/... + +# Run the Go invariants tests with runtime assertions enabled (supernode_invariants tag). +verify-invariants-go-tagged: + go test -tags=supernode_invariants ./invariants/... + +# Run the state-machine fuzzers against the reference model. go test +# only accepts one -fuzz target per invocation, so the single-chain and +# multi-chain fuzzers run sequentially. Increase per-target duration +# with FUZZTIME=5m just fuzz-invariants. +FUZZTIME := env_var_or_default("FUZZTIME", "30s") +fuzz-invariants: + go test -run=^$ -fuzz=^FuzzReferenceModel$ -fuzztime={{FUZZTIME}} ./invariants/ + go test -run=^$ -fuzz=^FuzzReferenceModelMultiChain$ -fuzztime={{FUZZTIME}} ./invariants/ + +# Run every layer: Dafny proofs + Go invariant tests (untagged + tagged) + fuzz. +# Single gate for CI. +verify-invariants: verify-dafny verify-invariants-go verify-invariants-go-tagged fuzz-invariants + +# Regenerate the canonical trace corpus under invariants/testdata/traces/. +# Run after changing the reference model transitions. +regen-traces: + go test -run=TestRegenCanonicalTraces ./invariants/... 
-args -regen-traces + diff --git a/op-supernode/overview.md b/op-supernode/overview.md new file mode 100644 index 00000000000..1ca8983b8da --- /dev/null +++ b/op-supernode/overview.md @@ -0,0 +1,105 @@ +## Architecture + +```mermaid +flowchart LR + subgraph sn[Supernode] + + vdb[(VerifiedDB)] + ldb1[(LogsDB)] + ldb2[(LogsDB)] + ldb3[(LogsDB)] + + subgraph cc1[ChainContainer] + vn1[VirtualNode] + ec1[EngineController] + dl1[(DenyList)] + end + + subgraph cc2[ChainContainer] + vn2[VirtualNode] + ec2[EngineController] + dl2[(DenyList)] + end + + subgraph cc3[ChainContainer] + vn3[VirtualNode] + ec3[EngineController] + dl3[(DenyList)] + end + + cc1---ldb1 + cc2---ldb2 + cc3---ldb3 + end + ``` + +A `Supernode` includes the following components: + +- A `VerifiedDB` storing the cross-validity history. The `VerifiedDB` contains an entry for every timestamp for which cross-validation has been performed. This entry is a `VerifiedResult` consisting of: + - The verified timestamp $t$. + - The L2 heads for each L2 chain at that timestamp. Since an L2 might not have a block at every timestamp, this is the highest block with timestamp $\leq t$. + - The L1 head at which cross-validation at that timestamp can be performed. Note that each L2 head might be derived from a different L1 block, so the L1 head is the highest among those. In other words, it's the earliest L1 block at which all L2 heads are available. +- A `ChainContainer` for each L2 chain, itself including the following components: + - An `EngineController`, which serves as an interface to the L2's execution layer. When cross-validation fails for a given timestamp, the `EngineController` for all affected chains rewinds the execution layer to the preceding timestamp by performing a fork choice update. This allows the execution layer to continue building the L2 chain on top of the most recent valid block. + - A `VirtualNode`, an in-memory `OpNode` instance used to derive the Local Safe chain from L1 batch data. When checking cross-validity for a given timestamp, the `Supernode` queries each L2's `VirtualNode` to retrieve the logs up to the target timestamp. `VirtualNode`s are intended to be fungible: when cross-validation detects an invalid Executing Message, invalidating the Local Safe chain, the `VirtualNode` is destroyed and recreated after the `EngineController` has been reset to the previous timestamp and the invalid blocks have been added to the `DenyList`. This forces the `VirtualNode` to rederive the Local Safe chain, this time avoiding the newly-invalidated blocks. + - A `DenyList` recording the blocks that have been invalidated during cross-validation. This `DenyList` is consulted by the `VirtualNode` when deriving the Local Safe chain to ensure that a block that has already been deemed invalid is not revisited. If the `VirtualNode` would derive a block that is present in the `DenyList`, it replaces it with a deposit-only block instead. +- A `LogsDB` for each L2 chain, storing the logs retrieved from the `VirtualNode` up to the latest verified timestamp. + +## Cross-Validation Logic + +The `Supernode` performs cross-validation one timestamp at a time, always advancing the timestamp by exactly 1. If all L2 chains are caught up to the target timestamp, it downloads the logs for this timestamp and checks the Executing Messages for cross-validity. If they pass validation, the blocks are recorded in the `VerifiedDB` (the sketch below shows the shape of a single entry).
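+ +This sketch is illustrative only: the field names mirror the trace JSON under `invariants/testdata/traces/` (`timestamp`, `l1Inclusion`, `l2Heads`), the package name is made up, and the concrete Go type in the supernode code may differ. + +```go +package overviewsketch // hypothetical package, for illustration only + +import "github.com/ethereum-optimism/optimism/op-service/eth" + +// VerifiedResult sketches a single VerifiedDB entry as described above. +type VerifiedResult struct { +	Timestamp   uint64                      // verified timestamp t +	L1Inclusion eth.BlockID                 // highest L1 block among the L2 heads' derivation origins +	L2Heads     map[eth.ChainID]eth.BlockID // per chain: highest L2 block with timestamp <= t +} +```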
Otherwise, the invalid blocks are recorded in the `DenyList` for their respective chains and the invalidated chains are rewound to the previous timestamp. + +### Supernode State and Invariants + +After performing cross-validation for timestamp $t$, and before performing cross-validation for $t + 1$, the internal state of the `Supernode` for a set of $k$ L2 chains consists of the following: + +- A `LogsDB` $L_j = (B^j_0, \ell^j_0), \ldots, (B^j_{n_j}, \ell^j_{n_j})$ for each L2 chain $j$, where $B^j_i$ is a block from chain $j$ and $\ell^j_i$ is the list of logs of $B^j_i$. +- A `VerifiedDB` containing the cross-validity history, given by $(t_0, C_{t_0}, C^1_{t_0}, \ldots, C^k_{t_0}), \ldots, (t, C_t, C^1_t, \ldots, C^k_t)$, where $t_0$ is the activation timestamp, $t$ is the last verified timestamp, and for each timestamp $t_0 \leq i \leq t$: + - $C^1_i, \ldots, C^k_i$ are the verified L2 heads for timestamp $i$ for each of the $k$ L2 chains. + - $C_i$ is the verified L1 head for timestamp $i$. +- A `DenyList` $D_j$ for each L2 chain $j$, containing blocks from chain $j$ that were invalidated at timestamps $\leq t + 1$. + +The following invariants are expected to be true at this state: + +- $\ell^j_i$ is the list of logs for block $B^j_i$, for all $i$ and $j$. +- $B^j_i$ is the parent block of $B^j_{i+1}$, for all $i$ and $j$. +- Every $B^j_i$ is cross-valid given the cross-validity history for timestamps $\leq i$. In other words, all Executing Messages in $\ell^j_i$ are valid and are not part of a cycle. +- $C^j_{t_0} = B^j_0$, for all $j$. +- $C^j_t = B^j_{n_j}$, for all $j$. +- $C^j_i$ is either equal to $C^j_{i+1}$ or its parent, for all $i$ and $j$ (consequently, $C^j_{t_0}, \ldots, C^j_t$ is equal to $B^j_0, \ldots, B^j_{n_j}$ with possible repetitions in the middle). +- $C^j_i$ is the highest block on its chain with timestamp $\leq i$. In other words, all children of $C^j_i$ have timestamp $> i$. +- $C_{t_0}, \ldots, C_t$ are all part of the same linear chain (but there might be missing blocks in the middle). +- $C_i$ is the earliest L1 block on its linear chain where all L2 blocks $C^1_i, \ldots, C^k_i$ are available. In other words, $C_i$ is the highest among the L1 blocks from which $C^1_i, \ldots, C^k_i$ were derived. +- $B^j_i \not \in D_j$ for any $j$ and $i$. In other words, a block in the `LogsDB` (and, consequently, in the `VerifiedDB`) does not appear in the `DenyList`. +- For every block $B \in D_j$, the timestamp of $B$ is $\leq t + 1$ (note that the `DenyList` _can_ contain invalidated blocks for the next timestamp $t + 1$, added from previous unsuccessful attempts to cross-validate $t + 1$). + +At the initial state of the `Supernode`, the `LogsDB`s, `DenyList`s and `VerifiedDB` are all empty. As a consequence, all of the above invariants hold by default in this state. + +### Assumptions on the L1 and L2 Chain + +During a round of cross-validation, the Supernode queries the state of the L1 chain (via an `L1Client`) and L2 chains (via the chain's respective `VirtualNode`). When making these queries, we make the following assumptions about the state and behavior of the L1 and L2 chains: + +- Timestamps don't repeat, i.e., there is at most one block per chain per timestamp. +- Outside of cross-validation, an L2 chain will only reorg if the L1 chain reorgs. This also means that by the time we are able to observe the results of a reorg in L2, we must necessarily be able to observe it in L1 as well. 
Note, however, that the reverse is not true: we might be able to observe that the L1 has had a reorg before an L2 chain has been able to sync up. +- Assuming sufficient time between L1 reorgs, each L2 chain will _eventually_ sync up to the L1. +- If an L2 chain $j$ tries to derive a block that's in its `DenyList` $D_j$, it will instead insert a deposit-only block in its place. Since the `VirtualNode` for an L2 chain is always destroyed and recreated after a block is added to the `DenyList`, forcing it to rederive the safe chain, we can guarantee that at the start of a round of cross-validation the safe chain for an L2 will never have a block that's currently in its `DenyList`. +- Technically speaking, L1 and L2 chains can reorg at any point between two rounds of cross-validation, or even in the middle of a round. Because of this, any chain can at any point be inconsistent with the other chains, with the internal state of the Supernode, or even with previous queries made to the same chain. Therefore, we should assume that any queries to the state of the L1/L2s during a round can return an arbitrary result, except for what the assumptions above guarantee. + +### State Changes from Cross-Validation + +The following state changes happen after a round of cross-validation (with last verified timestamp $t$ and next timestamp $t + 1$): + +1. For each L2 chain $j$, let $B_j$ be the highest block on safe chain $j$ with timestamp $\leq t + 1$. If there should be a higher block with timestamp $\leq t + 1$ (note that this can be predicted based on the L2 chain's block time) but it hasn't been derived yet, no state update happens (need to wait for chain $j$ to catch up to the next timestamp $t + 1$). +2. Otherwise, let $B'_j$ be the L1 block from which $B_j$ was derived. If the $B'_j$ are not all in the same linear chain, this means that a reorg has happened and some of the L2s haven't synced. In that case, no state update happens (we wait until the next round to give the L2s time to sync up). +3. Otherwise, if $C_t$ is not in the same linear chain as the $B'_j$, this means that $C_t$ has been reorged out and is no longer a valid L1 block. Therefore, we need to roll back to the previous timestamp to cross-validate again: + - The cross-validity history in the `VerifiedDB` is pruned to $(t_0, C_{t_0}, C^1_{t_0}, \ldots, C^k_{t_0}), \ldots, (t-1, C_{t-1}, C^1_{t-1}, \ldots, C^k_{t-1})$ by removing the last entry $(t, C_t, C^1_t, \ldots, C^k_t)$. + - Every `DenyList` $D_j$ is pruned by removing all entries with timestamp $\geq t$ (i.e. $t$ or $t + 1$). + - For every chain $j$ such that $C^j_{t-1} \neq B^j_{n_j}$, the `LogsDB` $L_j$ is pruned to $(B^j_0, \ell^j_0), \ldots, (B^j_{n_j-1}, \ell^j_{n_j-1})$ by removing the last entry $(B^j_{n_j}, \ell^j_{n_j})$. +4. Otherwise, since every $B_j$ is consistent with the latest verified L1 block $C_t$, it must be on the same linear chain as $C^j_t = B^j_{n_j}$. Then, since there is at most one block per timestamp and every child of $B^j_{n_j} = C^j_t$ has timestamp $> t$, either $B_j = B^j_{n_j}$ or $B^j_{n_j}$ is $B_j$'s parent (in the latter case $B_j$ has timestamp $t + 1$). $B_j$ is invalid if it has an invalid Executing Message or an Executing Message that is part of a cycle. If any $B_j$ is invalid, then + - Each invalid $B_j$ is added to its respective `DenyList` $D_j$. + - For each chain $j$ such that $B_j$ is invalid, the `ChainContainer` is reset by (a) rewinding the `EngineController` to the highest block with timestamp $\leq t$, and (b) destroying and recreating the `VirtualNode`.
Since this will force the `VirtualNode` to rederive the safe chain, and now $B^j$ is in the `DenyList`, it will replace the invalid block with a deposit-only block. + - Note that no `LogsDB` should be updated in this case, so any logs that have been added in the cross-validation process must be removed again. +5. Otherwise (if all $B_j$ are valid), then + - The cross-validity history is extended to $(t_0, C_{t_0}, C^1_{t_0}, \ldots, C^k_{t_0}), \ldots, (t, C_t, C^1_t, \ldots, C^k_t), (t+1, C_{t+1}, C^1_{t+1}, \ldots, C^k_{t+1})$, where $C^j_{t+1} = B_j$ and $C_{t+1}$ is the highest L1 block among $B'_1, \ldots, B'_k$ (the blocks from which $C^1_{t+1}, \ldots, C^k_{t+1}$ were derived). + - Every logs database $L_j$ such that $B_j \neq B^j_{n_j}$ is extended to $(B^j_0, \ell^j_0), \ldots, (B^j_{n_j}, \ell^j_{n_j}), (B^j_{n_j+1}, \ell^j_{n_j+1})$, where $B^j_{n_j+1} = B_j$ and $\ell^j_{n_j+1}$ is the list of logs for $B^j_{n_j+1}$. + diff --git a/op-supernode/supernode/activity/interop/chain_fuzz_utils.go b/op-supernode/supernode/activity/interop/chain_fuzz_utils.go new file mode 100644 index 00000000000..0dd490b679c --- /dev/null +++ b/op-supernode/supernode/activity/interop/chain_fuzz_utils.go @@ -0,0 +1,556 @@ +package interop + +import ( + "context" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" + cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + types2 "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + params2 "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func (rc RandomChain) ExecMsgForLog(chain eth.ChainID, block eth.L2BlockRef, log *types2.Log) *types2.Log { + payloadHash := crypto.Keccak256Hash(types.LogToMessagePayload(log)) + + msg := types.Message{ + Identifier: types.Identifier{ + Origin: log.Address, + BlockNumber: block.Number, + LogIndex: uint32(log.Index), + Timestamp: block.Time, + ChainID: chain, + }, + PayloadHash: payloadHash, + } + topics, data := msg.EncodeEvent() + return &types2.Log{ + Address: params2.InteropCrossL2InboxAddress, + Data: data, + Topics: topics, + Index: log.Index, + } +} + +type ChainBlock struct { + chain eth.ChainID + block *eth.L2BlockRef +} + +type RandomChainParams struct { + chainCount int + + minLength int + maxLength int + + maxBlockTimeExclusive int + + invalidateChance int // Percentage [0-100] + dependencyChance int // Percentage [0-100] +} + +type L1Assignments struct { + L1Block eth.BlockRef + L2Blocks []*ChainBlock +} + +type RandomChain struct { + t *testing.T + randomGenerator *rand.Rand + chainIDs []eth.ChainID + allBlocks []ChainBlock + cbIndices map[*eth.L2BlockRef]int // Lookup for a ChainBlock's index in allBlocks + generatedLogs map[ChainBlock][]*types2.Log + dependencies map[ChainBlock][]ChainBlock + chainBlocks map[eth.ChainID][]*eth.L2BlockRef + l1SourceMap map[ChainBlock]eth.BlockRef + l1Source map[uint64]eth.BlockRef + receipts map[eth.ChainID]map[eth.BlockID]types2.Receipts + blockTimes map[eth.ChainID]int + isInvalid bool +} + +var _ cc.ChainContainer = RandomChainContainer{} + +type 
RandomChainContainer struct { + chainID eth.ChainID + randomChain *RandomChain +} + +func (c RandomChainContainer) ID() eth.ChainID { return c.chainID } +func (c RandomChainContainer) Start(ctx context.Context) error { return nil } +func (c RandomChainContainer) Stop(ctx context.Context) error { return nil } +func (c RandomChainContainer) Pause(ctx context.Context) error { return nil } +func (c RandomChainContainer) Resume(ctx context.Context) error { return nil } +func (c RandomChainContainer) RegisterVerifier(v activity.VerificationActivity) {} + +func (c RandomChainContainer) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { + var theblock *eth.L2BlockRef = nil + for _, block := range c.randomChain.chainBlocks[c.chainID] { + if block.Time <= ts { + theblock = block + } else { + break + } + } + if theblock == nil { + return eth.L2BlockRef{}, ethereum.NotFound + } + return *theblock, nil +} + +func (c RandomChainContainer) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { + blocks := c.randomChain.chainBlocks[c.chainID] + block := blocks[len(blocks)-1] + cb := ChainBlock{chain: c.chainID, block: block} + l1Origin := c.randomChain.l1SourceMap[cb] + return &eth.SyncStatus{CurrentL1: l1Origin}, nil +} + +func (c RandomChainContainer) VerifiedAt(ctx context.Context, ts uint64) (l2, l1 eth.BlockID, err error) { + //TODO + return eth.BlockID{}, eth.BlockID{}, nil +} + +func (c RandomChainContainer) OptimisticAt(ctx context.Context, ts uint64) (l2, l1 eth.BlockID, err error) { + //TODO + block, err := c.LocalSafeBlockAtTimestamp(ctx, ts) + if err != nil { + return eth.BlockID{}, eth.BlockID{}, err + } + // l1SourceMap is keyed by the stored block pointer, so resolve the stored + // entry by ID instead of taking the address of the local copy. + for _, b := range c.randomChain.chainBlocks[c.chainID] { + if b.ID() == block.ID() { + l1 = c.randomChain.l1SourceMap[ChainBlock{c.chainID, b}].ID() + break + } + } + return block.ID(), l1, nil +} + +func (c RandomChainContainer) OutputRootAtL2BlockNumber(ctx context.Context, l2BlockNum uint64) (eth.Bytes32, error) { + //TODO + return eth.Bytes32{}, nil +} + +func (c RandomChainContainer) OptimisticOutputAtTimestamp(ctx context.Context, ts uint64) (*eth.OutputResponse, error) { + //TODO + return nil, nil +} + +func (c RandomChainContainer) RewindEngine(ctx context.Context, timestamp uint64, invalidatedBlock eth.BlockRef) error { + //TODO?
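+	// In the real ChainContainer this would rewind the execution layer via a fork choice update through the EngineController, back to the highest block with timestamp <= the given one (see overview.md). The fuzz mock keeps its generated chainBlocks fixed per seed, so rewinding is a no-op here.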
+ return nil +} + +func (c RandomChainContainer) FetchReceipts(ctx context.Context, blockHash eth.BlockID) (eth.BlockInfo, types2.Receipts, error) { + chainReceipts := c.randomChain.receipts[c.chainID] + receipt := chainReceipts[blockHash] + + for _, block := range c.randomChain.chainBlocks[c.chainID] { + if block.ID() == blockHash { + header := &types2.Header{ + ParentHash: block.ParentHash, + Number: new(big.Int).SetUint64(block.Number), + Time: block.Time, + } + return eth.HeaderBlockInfoTrusted(block.Hash, header), receipt, nil + } + } + return nil, nil, ethereum.NotFound +} + +func (c RandomChainContainer) BlockTime() uint64 { + return uint64(c.randomChain.blockTimes[c.chainID]) +} + +func (c RandomChainContainer) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + //TODO + return true, nil +} + +func (c RandomChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + //TODO + return false, nil +} + +func (c RandomChainContainer) SetResetCallback(cb cc.ResetCallback) { + //TODO +} + +func (rc RandomChain) GetContainers() map[eth.ChainID]cc.ChainContainer { + chains := make(map[eth.ChainID]cc.ChainContainer) + for _, chain := range rc.chainIDs { + container := RandomChainContainer{ + chainID: chain, + randomChain: &rc, + } + chains[chain] = container + } + return chains +} + +// Merge all of the chains' blocks from separate arrays into one, ordered by timestamp +func MergeBlocks(chainBlocks map[eth.ChainID][]*eth.L2BlockRef) []ChainBlock { + totalLength := 0 + for _, blocks := range chainBlocks { + totalLength += len(blocks) + } + + allBlocks := make([]ChainBlock, 0, totalLength) + chainIndices := make(map[eth.ChainID]int) + for range totalLength { + var finalChain eth.ChainID + var finalBlock *eth.L2BlockRef + + for chain := range chainBlocks { + idx := chainIndices[chain] + if idx < len(chainBlocks[chain]) { + block := chainBlocks[chain][idx] + if finalBlock == nil || block.Time < finalBlock.Time { + finalChain = chain + finalBlock = block + } + } + } + + chainIndices[finalChain]++ + + chainBlock := ChainBlock{ + chain: finalChain, + block: finalBlock, + } + allBlocks = append(allBlocks, chainBlock) + } + + return allBlocks +} + +// Given the chains' blocks and blockTimes, find the chain for which its next block wont put +// the other chains' current blocks behind on the new timestamp +func NextValidChain(chainBlocks map[eth.ChainID][]*eth.L2BlockRef, blockTimes map[eth.ChainID]int) eth.ChainID { + lastTimeStamp := make(map[eth.ChainID]uint64) + for chain := range chainBlocks { + blocks := chainBlocks[chain] + lastTimeStamp[chain] = blocks[len(blocks)-1].Time + } + first := true + var nextChain eth.ChainID + v := uint64(0) + for chain := range chainBlocks { + nextTimeStamp := lastTimeStamp[chain] + uint64(blockTimes[chain]) + if first || nextTimeStamp < v { + nextChain = chain + v = nextTimeStamp + first = false + } + } + return nextChain +} + +func SameTimeStampSets(blocks []ChainBlock) [][]ChainBlock { + res := make([][]ChainBlock, 0) + i := 0 + nextSet := make([]ChainBlock, 0) + for i < len(blocks)-1 { + if blocks[i].block.Time != blocks[i+1].block.Time { + if len(nextSet) != 0 { + nextSet = append(nextSet, blocks[i]) + res = append(res, nextSet) + nextSet = make([]ChainBlock, 0) + } + } else { + nextSet = append(nextSet, blocks[i]) + } + i++ + } + if len(nextSet) != 0 { + nextSet = append(nextSet, blocks[i]) + res = append(res, nextSet) + } + return res +} + +func (p *RandomChainParams) MakeRandomChain(t 
*testing.T, seed int64) (res RandomChain) { + r := rand.New(rand.NewSource(seed)) + + totalLength := randomInRange(r, p.minLength, p.maxLength) + 2 + + res = RandomChain{ + t: t, + randomGenerator: r, + chainIDs: make([]eth.ChainID, 0, p.chainCount), + allBlocks: make([]ChainBlock, 0, totalLength), + cbIndices: make(map[*eth.L2BlockRef]int), + generatedLogs: make(map[ChainBlock][]*types2.Log), + dependencies: make(map[ChainBlock][]ChainBlock), + chainBlocks: make(map[eth.ChainID][]*eth.L2BlockRef), + l1SourceMap: make(map[ChainBlock]eth.BlockRef), + l1Source: make(map[uint64]eth.BlockRef), + receipts: make(map[eth.ChainID]map[eth.BlockID]types2.Receipts), + blockTimes: make(map[eth.ChainID]int), + isInvalid: false, + } + + for i := range p.chainCount { + chain := eth.ChainIDFromUInt64(uint64(i)) + res.chainBlocks[chain] = make([]*eth.L2BlockRef, 0) + res.blockTimes[chain] = randomInRange(r, 1, p.maxBlockTimeExclusive) + res.chainIDs = append(res.chainIDs, chain) + res.receipts[chain] = make(map[eth.BlockID]types2.Receipts) + } + + // + // Create array of all blocks + // + + // First, guarantee that each chain contains at least one block + for _, chain := range res.chainIDs { + block := eth.L2BlockRef{} + block.Hash = testutils.RandomHash(r) + res.chainBlocks[chain] = append(res.chainBlocks[chain], &block) + } + + // Then, generate the rest of the blocks. + for range totalLength - p.chainCount { + // Select the chain with the next valid timestamp + nextChain := NextValidChain(res.chainBlocks, res.blockTimes) + + // Add a random block to it + lastBlock := res.chainBlocks[nextChain][len(res.chainBlocks[nextChain])-1] + block := testutils.NextRandomL2Ref(r, uint64(res.blockTimes[nextChain]), *lastBlock, eth.BlockID{}) + res.chainBlocks[nextChain] = append(res.chainBlocks[nextChain], &block) + res.addRandomLog(ChainBlock{nextChain, &block}) + } + + // Populate res.allBlocks + res.allBlocks = MergeBlocks(res.chainBlocks) + for i, cb := range res.allBlocks { + res.cbIndices[cb.block] = i + } + + // + // Create random dependencies between all blocks + // + for initIndex, initcb := range res.allBlocks { + block := initcb.block + if block.Number == 0 { + continue + } + + for r.Intn(100) < p.dependencyChance { + execIndex := randomInRange(r, initIndex, totalLength) + execcb := res.allBlocks[execIndex] + initiatingLog := res.addRandomLog(initcb) + res.addExecutingMessageWithDependency(execcb, initcb, initiatingLog) + } + } + + if r.Intn(100) < p.invalidateChance { + res.isInvalid = true + initIndex := len(res.chainIDs) + index := randomInRange(res.randomGenerator, initIndex, len(res.allBlocks)-1) + blockToInvalidate := res.allBlocks[index] + cbIndex := res.cbIndices[blockToInvalidate.block] + t.Logf("Randomly selected block index: %d", index) + t.Logf("cbIndex: %d", cbIndex) + + res.InvalidateBlock(blockToInvalidate) + } + + // + // Make L1 derivation info + // + taken := 0 + nextL1 := testutils.RandomBlockRef(r) + for taken < totalLength { + nextL1 = testutils.NextRandomRef(r, nextL1) + take := randomInRange(r, 1, 5) // Take 1-4 L2 blocks + take = min(totalLength-taken, take) + for _, l2Block := range res.allBlocks[taken : taken+take] { + res.l1SourceMap[l2Block] = nextL1 + } + res.l1Source[nextL1.Number] = nextL1 + taken += take + } + + res.GenerateReceiptsFromLogs() + + return res +} + +func TestMakeRandomChain(t *testing.T) { + params := RandomChainParams{ + chainCount: 3, + minLength: 5, + maxLength: 20, + maxBlockTimeExclusive: 15, // block times are drawn from [1, maxBlockTimeExclusive); leaving this zero makes MakeRandomChain panic + invalidateChance: 70, + dependencyChance: 8, + } + + chain := params.MakeRandomChain(t, 0)
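+	// chain now contains chainCount chains; each has at least one block and all blocks are merged into allBlocks ordered by timestamp.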
+ + t.Run("Correct number of chains", func(t *testing.T) { + require.Equal(t, params.chainCount, len(chain.chainIDs)) + }) +} + +func (rc RandomChain) addRandomLog(initcb ChainBlock) *types2.Log { + initiatingLog := testutils.RandomLog(rc.randomGenerator) + initiatingLog.Index = uint(len(rc.generatedLogs[initcb])) + rc.generatedLogs[initcb] = append(rc.generatedLogs[initcb], initiatingLog) + return initiatingLog +} + +func (rc RandomChain) addExecutingMessage(execcb ChainBlock, initcb ChainBlock, initiatingLog *types2.Log) { + execLog := rc.ExecMsgForLog(initcb.chain, *initcb.block, initiatingLog) + execLog.Index = uint(len(rc.generatedLogs[execcb])) + rc.generatedLogs[execcb] = append(rc.generatedLogs[execcb], execLog) +} + +func (rc RandomChain) addExecutingMessageWithDependency(execcb ChainBlock, initcb ChainBlock, initiatingLog *types2.Log) { + rc.addExecutingMessage(execcb, initcb, initiatingLog) + rc.dependencies[execcb] = append(rc.dependencies[execcb], initcb) +} + +func (rc RandomChain) addInvalidExecutingMessage(execcb ChainBlock, initcb ChainBlock, initiatingLog *types2.Log) { + execLog := rc.InvalidExecMsgForLog(initcb.chain, *initcb.block, initiatingLog) + execLog.Index = uint(len(rc.generatedLogs[execcb])) + rc.generatedLogs[execcb] = append(rc.generatedLogs[execcb], execLog) +} + +func (rc RandomChain) GenerateReceiptsFromLogs() { + for _, cb := range rc.allBlocks { + chainid, block := cb.chain, cb.block + logs := rc.generatedLogs[cb] + rcpt := types2.Receipt{ + Logs: logs, + } + rc.receipts[chainid][block.ID()] = types2.Receipts{&rcpt} + } +} + +// Returns a random integer in the interval [lowerIncluding, upperExcluding) +func randomInRange(r *rand.Rand, lowerIncluding int, upperExcluding int) int { + return r.Intn(upperExcluding-lowerIncluding) + lowerIncluding +} + +func (rc RandomChain) InvalidExecMsgForLog(chain eth.ChainID, block eth.L2BlockRef, log *types2.Log) *types2.Log { + r := rc.randomGenerator + msg := types.Message{ + Identifier: types.Identifier{ + Origin: log.Address, + BlockNumber: block.Number, + LogIndex: uint32(log.Index), + Timestamp: block.Time, + ChainID: chain, + }, + PayloadHash: processors.LogToLogHash(log), + } + + switch r.Intn(5) { + case 0: + // Invalid origin + msg.Identifier.Origin = common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff") + case 1: + // Invalid block number + msg.Identifier.BlockNumber += uint64(randomInRange(r, 1, 10)) + case 2: + // Invalid log index + msg.Identifier.LogIndex += uint32(randomInRange(r, 1, 5)) + case 3: + // Invalid timestamp + msg.Identifier.Timestamp -= uint64(randomInRange(r, 1, 100)) + case 4: + // Invalid chain ID + impossibleChainID := len(rc.chainIDs) + msg.Identifier.ChainID = eth.ChainIDFromUInt64(uint64(impossibleChainID)) + } + + topics, data := msg.EncodeEvent() + return &types2.Log{ + Address: params2.InteropCrossL2InboxAddress, + Data: data, + Topics: topics, + Index: log.Index, + } +} + +func (rc RandomChain) InsertMessageWithInvalidIdentifier(candidateIndex int) { + r := rc.randomGenerator + candidateBlock := rc.allBlocks[candidateIndex] + randomIndex := r.Intn(candidateIndex + 1) + randomBlock := rc.allBlocks[randomIndex] + randomLogIndex := r.Intn(len(rc.generatedLogs[randomBlock])) + randomLog := rc.generatedLogs[randomBlock][randomLogIndex] + + rc.addInvalidExecutingMessage(candidateBlock, randomBlock, randomLog) +} + +func (rc RandomChain) InvalidateBlock(candidate ChainBlock) { + r := rc.randomGenerator + switch r.Intn(2) { + case 0: + rc.CreateCycle() + case 1: + 
rc.InsertSelfDependency(candidate) + case 2: + rc.InsertMessageWithInvalidIdentifier(rc.cbIndices[candidate.block]) + case 3: + rc.InsertFutureDependency(rc.cbIndices[candidate.block]) + default: + } +} + +func (rc RandomChain) InsertFutureDependency(candidateIndex int) { + t := rc.t + r := rc.randomGenerator + candidateBlock := rc.allBlocks[candidateIndex] + t.Logf("Inserting a future dependency in candidate (%s, %2d)'s hazard set", candidateBlock.chain, candidateBlock.block.Number) + + // Find the next block with a timestamp in the future (guaranteed to exist since we added a special block at the end) + i := candidateIndex + 1 + for rc.allBlocks[i].block.Time <= candidateBlock.block.Time { + i++ + } + + // Randomly pick a future block and create an executing message to it + futureIndex := randomInRange(r, i, len(rc.allBlocks)) + futureBlock := rc.allBlocks[futureIndex] + initiatingLog := rc.addRandomLog(futureBlock) + rc.addExecutingMessageWithDependency(candidateBlock, futureBlock, initiatingLog) +} + +func (rc RandomChain) InsertSelfDependency(candidate ChainBlock) { + r := rc.randomGenerator + + // Create a random initiating message to be inserted at index N+1 + initiatingLog := testutils.RandomLog(r) + initiatingLog.Index = uint(len(rc.generatedLogs[candidate]) + 1) + + // Insert executing message at index N + rc.addExecutingMessageWithDependency(candidate, candidate, initiatingLog) + + // Insert initiating message at index N+1 + rc.generatedLogs[candidate] = append(rc.generatedLogs[candidate], initiatingLog) +} + +func (rc RandomChain) CreateCycle() { + sameTimeStampSets := SameTimeStampSets(rc.allBlocks[len(rc.chainIDs):]) + if len(sameTimeStampSets) == 0 { + rc.t.Logf("CreateCycle: No set of blocks with the same timestamp exists. No cycle created") + return + } + i := rc.randomGenerator.Intn(len(sameTimeStampSets)) + set := sameTimeStampSets[i] + for i, cb := range set { + initiatingLog := rc.addRandomLog(cb) + execcb := set[(i+1)%len(set)] + rc.addExecutingMessageWithDependency(execcb, cb, initiatingLog) + } +} diff --git a/op-supernode/supernode/activity/interop/interop_fuzz_test.go b/op-supernode/supernode/activity/interop/interop_fuzz_test.go new file mode 100644 index 00000000000..03cbafb3097 --- /dev/null +++ b/op-supernode/supernode/activity/interop/interop_fuzz_test.go @@ -0,0 +1,168 @@ +package interop + +import ( + "context" + "testing" + + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-service/eth" + cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + "github.com/stretchr/testify/require" + "maps" +) + +func FuzzVerifyInteropMessages(f *testing.F) { + f.Add(int64(69), byte('\x00')) // Failing test (gap > blockTime, because the chain randomizer isn't respecting blockTime yet) + + f.Fuzz(func(t *testing.T, seed int64, numChainsRaw uint8) { + params := RandomChainParams { + chainCount: max(2, int(numChainsRaw>>6)), + minLength: 30, + maxLength: 60, + invalidateChance: 80, + dependencyChance: 20, + maxBlockTimeExclusive: 15, + } + + fuzzInterop := newInteropFuzzHarness(t).WithParams(params).WithSeed(seed) + + fuzzInterop.Build() + + interop := fuzzInterop.interop + + // Update the LogDBs for the chains + for { + advanced, err := interop.progressAndRecord() + require.NoError(t, err) + if !advanced { + break + } + } + + randomChain := fuzzInterop.randomChain + + safeBlock := randomChain.allBlocks[len(randomChain.allBlocks)-1] + safeTimestamp := safeBlock.block.Time + + blocksAtTimestamp, err := 
interop.checkChainsReady(safeTimestamp) + require.NoError(t, err) + + result, err := interop.verifyInteropMessages(safeTimestamp, blocksAtTimestamp) + if !randomChain.isInvalid { + require.NoError(t, err) + for chain, block := range result.L2Heads { + rcBlocks := randomChain.chainBlocks[chain] + lastBlock := rcBlocks[len(rcBlocks)-1] + require.Equal(t, block.Hash, lastBlock.Hash) + } + } + + // P1: Valid messages never produce InvalidHeads + require.True(t, result.IsValid(), "P1: valid messages should produce valid result, got InvalidHeads: %v", result.InvalidHeads) + + // P3: IsValid() ↔ len(InvalidHeads) == 0 + require.Empty(t, result.InvalidHeads, "P3: InvalidHeads should be empty for valid result") + }) +} + +// ============================================================================= +// Test Harness +// ============================================================================= + +type interopFuzzHarness struct { + t *testing.T + interop *Interop + params RandomChainParams + seed int64 + randomChain RandomChain + mocks map[eth.ChainID]cc.ChainContainer + activationTime uint64 + dataDir string + skipBuild bool // for tests that need custom construction +} + +// newInteropFuzzHarness creates a new test harness with sensible defaults. +func newInteropFuzzHarness(t *testing.T) *interopFuzzHarness { + t.Helper() + t.Parallel() + return &interopFuzzHarness{ + t: t, + mocks: make(map[eth.ChainID]cc.ChainContainer), + dataDir: t.TempDir(), + } +} + +// WithParams sets the parameters for random L2 chain generation. +func (h *interopFuzzHarness) WithParams(params RandomChainParams) *interopFuzzHarness { + h.params = params + return h +} + +// WithSeed sets the seed for random generation and then generates the random +// L2 chains with it. +func (h *interopFuzzHarness) WithSeed(seed int64) *interopFuzzHarness { + h.seed = seed + return h +} + +// WithActivation sets the interop activation timestamp. +func (h *interopFuzzHarness) WithActivation(ts uint64) *interopFuzzHarness { + h.activationTime = ts + return h +} + +// WithDataDir sets a custom data directory (useful for error testing). +func (h *interopFuzzHarness) WithDataDir(dir string) *interopFuzzHarness { + h.dataDir = dir + return h +} + +// SkipBuild marks that Build() should not create an Interop instance. +// Useful for tests that need to test New() directly. +func (h *interopFuzzHarness) SkipBuild() *interopFuzzHarness { + h.skipBuild = true + return h +} + +type testWriter struct{ t *testing.T } + +func (tw testWriter) Write(p []byte) (n int, err error) { + tw.t.Logf("%s", string(p)) + return len(p), nil +} + +// Build creates the Interop instance from configured mocks. +// Sets up context and registers cleanup. +func (h *interopFuzzHarness) Build() *interopFuzzHarness { + if h.skipBuild { + return h + } + h.randomChain = h.params.MakeRandomChain(h.t, h.seed) + + // Find an activationTime that all chains can satisfy + for _, blocks := range h.randomChain.chainBlocks { + h.activationTime = max(h.activationTime, blocks[0].Time) + } + + h.mocks = h.randomChain.GetContainers() + logger := gethlog.NewLogger(gethlog.NewTerminalHandler(testWriter{h.t}, true)) + h.interop = New(logger, h.activationTime, h.mocks, h.dataDir) + if h.interop != nil { + h.interop.ctx = context.Background() + h.t.Cleanup(func() { _ = h.interop.Stop(context.Background()) }) + } + return h +} + +// Chains returns the map of chain containers for use with New(). 
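+// The returned map is a fresh copy, so callers may add or remove entries without affecting the harness; the container values themselves are shared.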
+func (h *interopFuzzHarness) Chains() map[eth.ChainID]cc.ChainContainer { + chains := make(map[eth.ChainID]cc.ChainContainer) + maps.Copy(chains, h.mocks) + return chains +} + +// Mock returns the mock for a given chain ID. +func (h *interopFuzzHarness) Mock(id uint64) cc.ChainContainer { + return h.mocks[eth.ChainIDFromUInt64(id)] +} + diff --git a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go index 339f0a33418..d5b7ecfa898 100644 --- a/op-supervisor/supervisor/types/types.go +++ b/op-supervisor/supervisor/types/types.go @@ -114,6 +114,31 @@ func (m *Message) Access() Access { return m.ToCheckSumArgs().Access() } +func (m *Message) EncodeEvent() (topics []common.Hash, data []byte) { + topics = make([]common.Hash, 2) + topics[0] = ExecutingMessageEventTopic + topics[1] = m.PayloadHash + + putZeroes := func(length uint) { + zeroes := make([]byte, length) + data = append(data, zeroes...) + } + + data = make([]byte, 0, 32*5) + putZeroes(12) + data = append(data, m.Identifier.Origin.Bytes()...) + putZeroes(32 - 8) + data = binary.BigEndian.AppendUint64(data, m.Identifier.BlockNumber) + putZeroes(32 - 4) + data = binary.BigEndian.AppendUint32(data, m.Identifier.LogIndex) + putZeroes(32 - 8) + data = binary.BigEndian.AppendUint64(data, m.Identifier.Timestamp) + chainid := m.Identifier.ChainID.Bytes32() + data = append(data, chainid[:]...) + + return topics, data +} + func (m *Message) DecodeEvent(topics []common.Hash, data []byte) error { if len(topics) != 2 { // event hash, indexed payloadHash return fmt.Errorf("unexpected number of event topics: %d", len(topics)) diff --git a/op-supervisor/supervisor/types/types_test.go b/op-supervisor/supervisor/types/types_test.go index e30d20af404..ad9ab982fa1 100644 --- a/op-supervisor/supervisor/types/types_test.go +++ b/op-supervisor/supervisor/types/types_test.go @@ -279,6 +279,23 @@ func TestMessage(t *testing.T) { }) } +func TestMessageRoundTrip(t *testing.T) { + msg := Message{ + Identifier: Identifier{ + Origin: testOrigin, + BlockNumber: testBlockNumber, + LogIndex: testLogIndex, + Timestamp: testTimestamp, + ChainID: testChainID, + }, + PayloadHash: testMsgHash, + } + msg_topics, msg_data := msg.EncodeEvent() + var msg_again Message + msg_again.DecodeEvent(msg_topics, msg_data) + require.Equal(t, msg, msg_again) +} + func TestChecksumArgs(t *testing.T) { args := ChecksumArgs{ BlockNumber: testBlockNumber,